repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
archf/ansible | lib/ansible/modules/network/f5/bigip_monitor_tcp_echo.py | 16 | 16338 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_monitor_tcp_echo
short_description: Manages F5 BIG-IP LTM TCP Echo monitors.
description: Manages F5 BIG-IP LTM TCP Echo monitors via the iControl REST API.
version_added: "2.4"
options:
name:
description:
- Monitor name.
required: True
aliases:
- monitor
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(tcp)
parent on the C(Common) partition.
default: "/Common/tcp"
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
- If this value is an IP address, and the C(type) is C(tcp) (the default),
then a C(port) number must be specified.
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run. If this parameter is not provided when creating
a new monitor, then the default value will be 5. This value B(must)
be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. If this parameter is not
provided when creating a new monitor, then the default value will be 16.
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. If this parameter is not provided when creating
a new monitor, then the default value will be 0.
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires BIG-IP software version >= 12
requirements:
- f5-sdk >= 2.2.3
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Create TCP Echo Monitor
bigip_monitor_tcp_echo:
state: "present"
server: "lb.mydomain.com"
user: "admin"
ip: 10.10.10.10
password: "secret"
name: "my_tcp_monitor"
delegate_to: localhost
- name: Remove TCP Echo Monitor
bigip_monitor_tcp_echo:
state: "absent"
server: "lb.mydomain.com"
user: "admin"
password: "secret"
name: "my_tcp_monitor"
delegate_to: localhost
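# A hypothetical additional example (not part of the original documentation)
# showing explicit interval and timeout values; interval must stay below timeout.
- name: Create TCP Echo Monitor with explicit interval and timeout
  bigip_monitor_tcp_echo:
    state: "present"
    server: "lb.mydomain.com"
    user: "admin"
    password: "secret"
    name: "my_tcp_monitor"
    interval: 5
    timeout: 16
  delegate_to: localhost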
'''
RETURN = '''
parent:
description: New parent template of the monitor.
returned: changed
type: string
sample: "tcp"
ip:
description: The new IP of IP/port definition.
returned: changed
type: string
sample: "10.12.13.14"
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
time_until_up:
description: The new time in which to mark a system as up after first successful response.
returned: changed
type: int
sample: 2
'''
import os
try:
import netaddr
HAS_NETADDR = True
except ImportError:
HAS_NETADDR = False
from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.f5_utils import AnsibleF5Parameters
from ansible.module_utils.f5_utils import HAS_F5SDK
from ansible.module_utils.f5_utils import F5ModuleError
from ansible.module_utils.f5_utils import iteritems
from ansible.module_utils.f5_utils import defaultdict
try:
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {
'timeUntilUp': 'time_until_up',
'defaultsFrom': 'parent'
}
api_attributes = [
'timeUntilUp', 'defaultsFrom', 'interval', 'timeout', 'destination'
]
returnables = [
'parent', 'ip', 'interval', 'timeout', 'time_until_up'
]
updatables = [
'ip', 'interval', 'timeout', 'time_until_up'
]
def __init__(self, params=None):
self._values = defaultdict(lambda: None)
self._values['__warnings'] = []
if params:
self.update(params=params)
def update(self, params=None):
if params:
for k, v in iteritems(params):
if self.api_map is not None and k in self.api_map:
map_key = self.api_map[k]
else:
map_key = k
# Handle weird API parameters like `dns.proxy.__iter__` by
# using a map provided by the module developer
class_attr = getattr(type(self), map_key, None)
if isinstance(class_attr, property):
# There is a mapped value for the api_map key
if class_attr.fset is None:
# If the mapped value does not have
# an associated setter
self._values[map_key] = v
else:
# The mapped value has a setter
setattr(self, map_key, v)
else:
# If the mapped value is not a @property
self._values[map_key] = v
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
except Exception:
return result
def api_params(self):
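        # Build the REST payload keyed by API attribute names; attributes that
        # appear in api_map are read back through their mapped properties.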
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
@property
def interval(self):
if self._values['interval'] is None:
return None
        if int(self._values['interval']) < 1 or int(self._values['interval']) > 86400:
            raise F5ModuleError(
                "Interval value must be between 1 and 86400"
            )
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def ip(self):
if self._values['ip'] is None:
return None
try:
if self._values['ip'] in ['*', '0.0.0.0']:
return '*'
result = str(netaddr.IPAddress(self._values['ip']))
return result
except netaddr.core.AddrFormatError:
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def destination(self):
return self.ip
@destination.setter
def destination(self, value):
self._values['ip'] = value
@property
def time_until_up(self):
if self._values['time_until_up'] is None:
return None
return int(self._values['time_until_up'])
@property
def parent(self):
if self._values['parent'] is None:
return None
if self._values['parent'].startswith('/'):
parent = os.path.basename(self._values['parent'])
result = '/{0}/{1}'.format(self.partition, parent)
else:
result = '/{0}/{1}'.format(self.partition, self._values['parent'])
return result
@property
def type(self):
return 'tcp_echo'
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
result = self.__default(param)
return result
@property
def parent(self):
        if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.ip is None:
return None
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
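        # Validate that the effective interval stays below the effective
        # timeout, using whichever of want/have supplies each value, then
        # report a change only when the desired interval differs.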
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
changed[k] = change
if changed:
self.changes = Parameters(changed)
return True
return False
def _announce_deprecations(self):
warnings = []
if self.want:
warnings += self.want._values.get('__warnings', [])
if self.have:
warnings += self.have._values.get('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations()
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def create(self):
self._set_changed_options()
if self.want.timeout is None:
self.want.update({'timeout': 16})
if self.want.interval is None:
self.want.update({'interval': 5})
if self.want.time_until_up is None:
self.want.update({'time_until_up': 0})
if self.want.ip is None:
self.want.update({'ip': '*'})
if self.client.check_mode:
return True
self.create_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def absent(self):
if self.exists():
return self.remove()
return False
def remove(self):
if self.client.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the monitor.")
return True
def read_current_from_device(self):
resource = self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.load(
name=self.want.name,
partition=self.want.partition
)
result = resource.attrs
return Parameters(result)
def exists(self):
result = self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.exists(
name=self.want.name,
partition=self.want.partition
)
return result
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.load(
name=self.want.name,
partition=self.want.partition
)
result.modify(**params)
def create_on_device(self):
params = self.want.api_params()
self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.create(
name=self.want.name,
partition=self.want.partition,
**params
)
def remove_from_device(self):
result = self.client.api.tm.ltm.monitor.tcp_echos.tcp_echo.load(
name=self.want.name,
partition=self.want.partition
)
if result:
result.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
name=dict(required=True),
parent=dict(),
ip=dict(),
interval=dict(type='int'),
timeout=dict(type='int'),
time_until_up=dict(type='int')
)
self.f5_product_name = 'bigip'
def main():
try:
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
if not HAS_NETADDR:
raise F5ModuleError("The python netaddr module is required")
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
sebrandon1/nova | nova/db/sqlalchemy/migrate_repo/versions/229_add_extra_resources_in_compute_nodes.py | 25 | 1358 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
# Add a new column extra_resources to save extra_resources info for
# compute nodes
compute_nodes = Table('compute_nodes', meta, autoload=True)
shadow_compute_nodes = Table('shadow_compute_nodes', meta, autoload=True)
extra_resources = Column('extra_resources', Text, nullable=True)
shadow_extra_resources = Column('extra_resources', Text, nullable=True)
compute_nodes.create_column(extra_resources)
shadow_compute_nodes.create_column(shadow_extra_resources)
| apache-2.0 |
danmergens/mi-instrument | mi/dataset/driver/moas/gl/dosta/driver_common.py | 7 | 1132 | ##
# OOIPLACEHOLDER
#
# Copyright 2014 Raytheon Co.
##
__author__ = "mworden"
from mi.core.log import get_logger
from mi.dataset.dataset_driver import DataSetDriver
from mi.dataset.parser.glider import GliderParser
class DostaAbcdjmGliderDriver:
def __init__(self, source_file_path, particle_data_handler, parser_config):
self._source_file_path = source_file_path
self._particle_data_handler = particle_data_handler
self._parser_config = parser_config
def process(self):
log = get_logger()
with open(self._source_file_path,"rb") as file_handle:
def exception_callback(exception):
log.debug("Exception: %s", exception)
self._particle_data_handler.setParticleDataCaptureFailure()
parser = GliderParser(self._parser_config,
file_handle,
exception_callback)
driver = DataSetDriver(parser, self._particle_data_handler)
driver.processFileStream()
return self._particle_data_handler
| bsd-2-clause |
syphar/django | tests/model_regress/models.py | 281 | 2293 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
CHOICES = (
(1, 'first'),
(2, 'second'),
)
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=100, default='Default headline')
pub_date = models.DateTimeField()
status = models.IntegerField(blank=True, null=True, choices=CHOICES)
misc_data = models.CharField(max_length=100, blank=True)
article_text = models.TextField()
class Meta:
ordering = ('pub_date', 'headline')
# A utf-8 verbose name (Ångström's Articles) to test they are valid.
verbose_name = "\xc3\x85ngstr\xc3\xb6m's Articles"
def __str__(self):
return self.headline
class Movie(models.Model):
# Test models with non-default primary keys / AutoFields #5218
movie_id = models.AutoField(primary_key=True)
name = models.CharField(max_length=60)
class Party(models.Model):
when = models.DateField(null=True)
class Event(models.Model):
when = models.DateTimeField()
@python_2_unicode_compatible
class Department(models.Model):
id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=200)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Worker(models.Model):
department = models.ForeignKey(Department, models.CASCADE)
name = models.CharField(max_length=200)
def __str__(self):
return self.name
@python_2_unicode_compatible
class BrokenUnicodeMethod(models.Model):
name = models.CharField(max_length=7)
def __str__(self):
# Intentionally broken (invalid start byte in byte string).
return b'Name\xff: %s'.decode() % self.name
class NonAutoPK(models.Model):
name = models.CharField(max_length=10, primary_key=True)
# Chained foreign keys with to_field produce incorrect query #18432
class Model1(models.Model):
pkey = models.IntegerField(unique=True, db_index=True)
class Model2(models.Model):
model1 = models.ForeignKey(Model1, models.CASCADE, unique=True, to_field='pkey')
class Model3(models.Model):
model2 = models.ForeignKey(Model2, models.CASCADE, unique=True, to_field='model1')
| bsd-3-clause |
lukeiwanski/tensorflow | tensorflow/python/ops/losses/losses.py | 61 | 1102 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Loss operations for use in neural networks.
Note: All the losses are added to the `GraphKeys.LOSSES` collection by default.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import
from tensorflow.python.ops.losses.losses_impl import *
from tensorflow.python.ops.losses.util import *
# pylint: enable=wildcard-import
| apache-2.0 |
5GExchange/escape | mininet/examples/test/test_multipoll.py | 2 | 1105 | #!/usr/bin/env python
"""
Test for multipoll.py
"""
import unittest
import pexpect
class testMultiPoll( unittest.TestCase ):
def testMultiPoll( self ):
"Verify that we receive one ping per second per host"
p = pexpect.spawn( 'python -m mininet.examples.multipoll' )
opts = [ "\*\*\* (h\d) :" ,
"(h\d+): \d+ bytes from",
"Monitoring output for (\d+) seconds",
pexpect.EOF ]
pings = {}
while True:
index = p.expect( opts )
if index == 0:
name = p.match.group( 1 )
pings[ name ] = 0
elif index == 1:
name = p.match.group( 1 )
pings[ name ] += 1
elif index == 2:
seconds = int( p.match.group( 1 ) )
else:
break
self.assertTrue( len( pings ) > 0 )
# make sure we have received at least one ping per second
for count in pings.values():
self.assertTrue( count >= seconds )
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
albertomurillo/ansible | lib/ansible/modules/network/f5/bigip_config.py | 15 | 13150 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_config
short_description: Manage BIG-IP configuration sections
description:
- Manages a BIG-IP configuration by allowing TMSH commands that
modify running configuration, or merge SCF formatted files into
the running configuration. Additionally, this module is of
significant importance because it allows you to save your running
configuration to disk. Since the F5 module only manipulate running
configuration, it is important that you utilize this module to save
that running config.
version_added: 2.4
options:
save:
description:
- The C(save) argument instructs the module to save the
running-config to startup-config.
- This operation is performed after any changes are made to the
current running config. If no changes are made, the configuration
is still saved to the startup config.
- This option will always cause the module to return changed.
type: bool
default: yes
reset:
description:
- Loads the default configuration on the device.
- If this option is specified, the default configuration will be
loaded before any commands or other provided configuration is run.
type: bool
default: no
merge_content:
description:
- Loads the specified configuration that you want to merge into
the running configuration. This is equivalent to using the
C(tmsh) command C(load sys config from-terminal merge).
- If you need to read configuration from a file or template, use
Ansible's C(file) or C(template) lookup plugins respectively.
verify:
description:
- Validates the specified configuration to see whether they are
valid to replace the running configuration.
- The running configuration will not be changed.
- When this parameter is set to C(yes), no change will be reported
by the module.
type: bool
default: no
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Save the running configuration of the BIG-IP
bigip_config:
save: yes
provider:
server: lb.mydomain.com
password: secret
user: admin
delegate_to: localhost
- name: Reset the BIG-IP configuration, for example, to RMA the device
bigip_config:
reset: yes
save: yes
provider:
server: lb.mydomain.com
password: secret
user: admin
delegate_to: localhost
- name: Load an SCF configuration
bigip_config:
merge_content: "{{ lookup('file', '/path/to/config.scf') }}"
provider:
server: lb.mydomain.com
password: secret
user: admin
delegate_to: localhost
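# A hypothetical additional example (not part of the original documentation)
# using the verify option to validate an SCF file without changing the
# running configuration.
- name: Verify an SCF configuration without applying it
  bigip_config:
    merge_content: "{{ lookup('file', '/path/to/config.scf') }}"
    verify: yes
    provider:
      server: lb.mydomain.com
      password: secret
      user: admin
  delegate_to: localhost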
'''
RETURN = r'''
stdout:
description: The set of responses from the options
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
'''
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import os
import tempfile
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.icontrol import upload_file
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.icontrol import upload_file
class Parameters(AnsibleF5Parameters):
returnables = ['stdout', 'stdout_lines']
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = Parameters(params=self.module.params)
self.changes = Parameters()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Parameters(params=changed)
def _to_lines(self, stdout):
lines = list()
for item in stdout:
if isinstance(item, str):
item = str(item).split('\n')
lines.append(item)
return lines
def exec_module(self):
result = {}
changed = self.execute()
result.update(**self.changes.to_return())
result.update(dict(changed=changed))
return result
def execute(self):
responses = []
if self.want.reset:
response = self.reset()
responses.append(response)
if self.want.merge_content:
if self.want.verify:
response = self.merge(verify=True)
responses.append(response)
else:
response = self.merge(verify=False)
responses.append(response)
if self.want.save:
response = self.save()
responses.append(response)
self._detect_errors(responses)
changes = {
'stdout': responses,
'stdout_lines': self._to_lines(responses)
}
self.changes = Parameters(params=changes)
if self.want.verify:
return False
return True
def _detect_errors(self, stdout):
errors = [
'Unexpected Error:'
]
msg = [x for x in stdout for y in errors if y in x]
if msg:
# Error only contains the lines that include the error
raise F5ModuleError(' '.join(msg))
def reset(self):
if self.module.check_mode:
return True
return self.reset_device()
def reset_device(self):
command = 'tmsh load sys config default'
uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='-c "{0}"'.format(command)
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if 'commandResult' in response:
return str(response['commandResult'])
def merge(self, verify=True):
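        # Upload the merge content to the device's REST download directory,
        # move it into /tmp, load it with tmsh (optionally in verify-only
        # mode), and finally remove the temporary file.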
temp_name = next(tempfile._get_candidate_names())
remote_path = "/var/config/rest/downloads/{0}".format(temp_name)
temp_path = '/tmp/' + temp_name
if self.module.check_mode:
return True
self.upload_to_device(temp_name)
self.move_on_device(remote_path)
response = self.merge_on_device(
remote_path=temp_path, verify=verify
)
self.remove_temporary_file(remote_path=temp_path)
return response
def merge_on_device(self, remote_path, verify=True):
command = 'tmsh load sys config file {0} merge'.format(
remote_path
)
if verify:
command += ' verify'
uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='-c "{0}"'.format(command)
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if 'commandResult' in response:
return str(response['commandResult'])
def remove_temporary_file(self, remote_path):
uri = "https://{0}:{1}/mgmt/tm/util/unix-rm".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs=remote_path
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def move_on_device(self, remote_path):
uri = "https://{0}:{1}/mgmt/tm/util/unix-mv".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='{0} /tmp/{1}'.format(
remote_path, os.path.basename(remote_path)
)
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def upload_to_device(self, temp_name):
template = StringIO(self.want.merge_content)
url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
try:
upload_file(self.client, url, template, temp_name)
except F5ModuleError:
raise F5ModuleError(
"Failed to upload the file."
)
def save(self):
if self.module.check_mode:
return True
return self.save_on_device()
def save_on_device(self):
command = 'tmsh save sys config'
uri = "https://{0}:{1}/mgmt/tm/util/bash".format(
self.client.provider['server'],
self.client.provider['server_port'],
)
args = dict(
command='run',
utilCmdArgs='-c "{0}"'.format(command)
)
resp = self.client.api.post(uri, json=args)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if 'commandResult' in response:
return str(response['commandResult'])
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
reset=dict(
type='bool',
default=False
),
merge_content=dict(),
verify=dict(
type='bool',
default=False
),
save=dict(
type='bool',
default='yes'
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
Beyond-Imagination/BlubBlub | ChatbotServer/ChatbotEnv/Lib/site-packages/konlpy/tag/_mecab.py | 1 | 3616 | #! /usr/bin/python
# -*- coding: utf-8 -*-
import sys
try:
from MeCab import Tagger
except ImportError:
pass
from .. import utils
__all__ = ['Mecab']
attrs = ['tags', # 품사 태그
'semantic', # 의미 부류
'has_jongsung', # 종성 유무
'read', # 읽기
'type', # 타입
'first_pos', # 첫번째 품사
'last_pos', # 마지막 품사
'original', # 원형
'indexed'] # 인덱스 표현
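# parse() converts raw MeCab output (one "surface\tPOS,feature,..." entry per
# line, terminated by an "EOS" line) into a list of (surface, tag) pairs; the
# trailing EOS marker is dropped by slicing off the last line.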
def parse(result, allattrs=False):
def split(elem):
if not elem: return ('', 'SY')
s, t = elem.split('\t')
return (s, t.split(',', 1)[0])
return [split(elem) for elem in result.splitlines()[:-1]]
class Mecab():
"""Wrapper for MeCab-ko morphological analyzer.
`MeCab`_, originally a Japanese morphological analyzer and POS tagger
developed by the Graduate School of Informatics in Kyoto University,
was modified to MeCab-ko by the `Eunjeon Project`_
to adapt to the Korean language.
In order to use MeCab-ko within KoNLPy, follow the directions in
:ref:`optional-installations`.
.. code-block:: python
:emphasize-lines: 1
>>> # MeCab installation needed
>>> from konlpy.tag import Mecab
>>> mecab = Mecab()
>>> print(mecab.morphs(u'영등포구청역에 있는 맛집 좀 알려주세요.'))
['영등포구', '청역', '에', '있', '는', '맛집', '좀', '알려', '주', '세요', '.']
>>> print(mecab.nouns(u'우리나라에는 무릎 치료를 잘하는 정형외과가 없는가!'))
['우리', '나라', '무릎', '치료', '정형외과']
>>> print(mecab.pos(u'자연주의 쇼핑몰은 어떤 곳인가?'))
[('자연', 'NNG'), ('주', 'NNG'), ('의', 'JKG'), ('쇼핑몰', 'NNG'), ('은', 'JX'), ('어떤', 'MM'), ('곳', 'NNG'), ('인가', 'VCP+EF'), ('?', 'SF')]
:param dicpath: The path of the MeCab-ko dictionary.
.. _MeCab: https://code.google.com/p/mecab/
.. _Eunjeon Project: http://eunjeon.blogspot.kr/
"""
# TODO: check whether flattened results equal non-flattened
def pos(self, phrase, flatten=True):
"""POS tagger.
:param flatten: If False, preserves eojeols.
"""
if sys.version_info[0] < 3:
phrase = phrase.encode('utf-8')
if flatten:
result = self.tagger.parse(phrase).decode('utf-8')
return parse(result)
else:
return [parse(self.tagger.parse(eojeol).decode('utf-8'))
for eojeol in phrase.split()]
else:
if flatten:
result = self.tagger.parse(phrase)
return parse(result)
else:
                return [parse(self.tagger.parse(eojeol))
for eojeol in phrase.split()]
def morphs(self, phrase):
"""Parse phrase to morphemes."""
return [s for s, t in self.pos(phrase)]
def nouns(self, phrase):
"""Noun extractor."""
tagged = self.pos(phrase)
return [s for s, t in tagged if t.startswith('N')]
def __init__(self, dicpath='/usr/local/lib/mecab/dic/mecab-ko-dic'):
try:
self.tagger = Tagger('-d %s' % dicpath)
self.tagset = utils.read_json('%s/data/tagset/mecab.json' % utils.installpath)
except RuntimeError:
            raise Exception('Invalid MeCab dictionary path: "%s"\nInput the correct path when initializing the class: "Mecab(\'/some/dic/path\')"' % dicpath)
| gpl-3.0 |
CenturylinkTechnology/ansible-modules-extras | network/dnsimple.py | 16 | 11833 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: dnsimple
version_added: "1.6"
short_description: Interface with dnsimple.com (a DNS hosting service).
description:
- "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)"
options:
account_email:
description:
- "Account email. If omitted, the env variables DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN will be looked for. If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)"
required: false
default: null
account_api_token:
description:
- Account API token. See I(account_email) for info.
required: false
default: null
domain:
description:
- Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. If omitted, a list of domains will be returned.
- If domain is present but the domain doesn't exist, it will be created.
required: false
default: null
record:
description:
- Record to add, if blank a record for the domain will be created, supports the wildcard (*)
required: false
default: null
record_ids:
description:
- List of records to ensure they either exist or don't exist
required: false
default: null
type:
description:
- The type of DNS record to create
required: false
choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ]
default: null
ttl:
description:
- The TTL to give the new record
required: false
default: 3600 (one hour)
value:
description:
- Record value
- "Must be specified when trying to ensure a record exists"
required: false
default: null
priority:
description:
- Record priority
required: false
default: null
state:
description:
- whether the record should exist or not
required: false
choices: [ 'present', 'absent' ]
default: null
solo:
description:
- Whether the record should be the only one for that record type and record name. Only use with state=present on a record
required: false
default: null
requirements: [ dnsimple ]
author: "Alex Coomans (@drcapulet)"
'''
EXAMPLES = '''
# authenticate using email and API token
- local_action: dnsimple [email protected] account_api_token=dummyapitoken
# fetch all domains
- local_action: dnsimple
register: domains
# fetch my.com domain records
- local_action: dnsimple domain=my.com state=present
register: records
# delete a domain
- local_action: dnsimple domain=my.com state=absent
# create a test.my.com A record to point to 127.0.0.01
- local_action: dnsimple domain=my.com record=test type=A value=127.0.0.1
register: record
# and then delete it
- local_action: dnsimple domain=my.com record_ids={{ record['id'] }} state=absent
# create a my.com CNAME record to example.com
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=present
# change its ttl
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com ttl=600 state=present
# and delete the record
- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=absent
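# hypothetical extra example (not in the original docs): ensure this is the only A record for that name
- local_action: dnsimple domain=my.com record=www type=A value=127.0.0.1 solo=yes state=present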
'''
import os
try:
from dnsimple import DNSimple
from dnsimple.dnsimple import DNSimpleException
HAS_DNSIMPLE = True
except ImportError:
HAS_DNSIMPLE = False
def main():
module = AnsibleModule(
argument_spec = dict(
account_email = dict(required=False),
account_api_token = dict(required=False, no_log=True),
domain = dict(required=False),
record = dict(required=False),
record_ids = dict(required=False, type='list'),
type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']),
ttl = dict(required=False, default=3600, type='int'),
value = dict(required=False),
priority = dict(required=False, type='int'),
state = dict(required=False, choices=['present', 'absent']),
solo = dict(required=False, type='bool'),
),
required_together = (
['record', 'value']
),
supports_check_mode = True,
)
if not HAS_DNSIMPLE:
module.fail_json(msg="dnsimple required for this module")
account_email = module.params.get('account_email')
account_api_token = module.params.get('account_api_token')
domain = module.params.get('domain')
record = module.params.get('record')
record_ids = module.params.get('record_ids')
record_type = module.params.get('type')
ttl = module.params.get('ttl')
value = module.params.get('value')
priority = module.params.get('priority')
state = module.params.get('state')
is_solo = module.params.get('solo')
if account_email and account_api_token:
client = DNSimple(email=account_email, api_token=account_api_token)
elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'):
client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN'))
else:
client = DNSimple()
try:
# Let's figure out what operation we want to do
# No domain, return a list
if not domain:
domains = client.domains()
module.exit_json(changed=False, result=[d['domain'] for d in domains])
# Domain & No record
if domain and record is None and not record_ids:
domains = [d['domain'] for d in client.domains()]
if domain.isdigit():
dr = next((d for d in domains if d['id'] == int(domain)), None)
else:
dr = next((d for d in domains if d['name'] == domain), None)
if state == 'present':
if dr:
module.exit_json(changed=False, result=dr)
else:
if module.check_mode:
module.exit_json(changed=True)
else:
module.exit_json(changed=True, result=client.add_domain(domain)['domain'])
elif state == 'absent':
if dr:
if not module.check_mode:
client.delete(domain)
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
else:
module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
# need the not none check since record could be an empty string
if domain and record is not None:
records = [r['record'] for r in client.records(str(domain))]
if not record_type:
module.fail_json(msg="Missing the record type")
if not value:
module.fail_json(msg="Missing the record value")
rr = next((r for r in records if r['name'] == record and r['record_type'] == record_type and r['content'] == value), None)
if state == 'present':
changed = False
if is_solo:
# delete any records that have the same name and record type
same_type = [r['id'] for r in records if r['name'] == record and r['record_type'] == record_type]
if rr:
same_type = [rid for rid in same_type if rid != rr['id']]
if same_type:
if not module.check_mode:
for rid in same_type:
client.delete_record(str(domain), rid)
changed = True
if rr:
# check if we need to update
if rr['ttl'] != ttl or rr['prio'] != priority:
data = {}
if ttl: data['ttl'] = ttl
if priority: data['prio'] = priority
if module.check_mode:
module.exit_json(changed=True)
else:
module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record'])
else:
module.exit_json(changed=changed, result=rr)
else:
# create it
data = {
'name': record,
'record_type': record_type,
'content': value,
}
if ttl: data['ttl'] = ttl
if priority: data['prio'] = priority
if module.check_mode:
module.exit_json(changed=True)
else:
module.exit_json(changed=True, result=client.add_record(str(domain), data)['record'])
elif state == 'absent':
if rr:
if not module.check_mode:
client.delete_record(str(domain), rr['id'])
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
else:
module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
# Make sure these record_ids either all exist or none
if domain and record_ids:
current_records = [str(r['record']['id']) for r in client.records(str(domain))]
wanted_records = [str(r) for r in record_ids]
if state == 'present':
difference = list(set(wanted_records) - set(current_records))
if difference:
module.fail_json(msg="Missing the following records: %s" % difference)
else:
module.exit_json(changed=False)
elif state == 'absent':
difference = list(set(wanted_records) & set(current_records))
if difference:
if not module.check_mode:
for rid in difference:
client.delete_record(str(domain), rid)
module.exit_json(changed=True)
else:
module.exit_json(changed=False)
else:
module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
except DNSimpleException:
e = get_exception()
module.fail_json(msg="Unable to contact DNSimple: %s" % e.message)
module.fail_json(msg="Unknown what you wanted me to do")
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
main()
| gpl-3.0 |
cherylyli/stress-aid | env/lib/python3.5/site-packages/pymongo/server_selectors.py | 20 | 5307 | # Copyright 2014-2016 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Criteria to select some ServerDescriptions from a TopologyDescription."""
from pymongo.server_type import SERVER_TYPE
class Selection(object):
"""Input or output of a server selector function."""
@classmethod
def from_topology_description(cls, topology_description):
known_servers = topology_description.known_servers
primary = None
for sd in known_servers:
if sd.server_type == SERVER_TYPE.RSPrimary:
primary = sd
break
return Selection(topology_description,
topology_description.known_servers,
topology_description.common_wire_version,
primary)
def __init__(self,
topology_description,
server_descriptions,
common_wire_version,
primary):
self.topology_description = topology_description
self.server_descriptions = server_descriptions
self.primary = primary
self.common_wire_version = common_wire_version
def with_server_descriptions(self, server_descriptions):
return Selection(self.topology_description,
server_descriptions,
self.common_wire_version,
self.primary)
def secondary_with_max_last_write_date(self):
secondaries = secondary_server_selector(self)
if secondaries.server_descriptions:
return max(secondaries.server_descriptions,
key=lambda sd: sd.last_write_date)
@property
def primary_selection(self):
primaries = [self.primary] if self.primary else []
return self.with_server_descriptions(primaries)
@property
def heartbeat_frequency(self):
return self.topology_description.heartbeat_frequency
@property
def topology_type(self):
return self.topology_description.topology_type
def __bool__(self):
return bool(self.server_descriptions)
__nonzero__ = __bool__ # Python 2.
def __getitem__(self, item):
return self.server_descriptions[item]
def any_server_selector(selection):
return selection
def readable_server_selector(selection):
return selection.with_server_descriptions(
[s for s in selection.server_descriptions if s.is_readable])
def writable_server_selector(selection):
return selection.with_server_descriptions(
[s for s in selection.server_descriptions if s.is_writable])
def secondary_server_selector(selection):
return selection.with_server_descriptions(
[s for s in selection.server_descriptions
if s.server_type == SERVER_TYPE.RSSecondary])
def arbiter_server_selector(selection):
return selection.with_server_descriptions(
[s for s in selection.server_descriptions
if s.server_type == SERVER_TYPE.RSArbiter])
def writable_preferred_server_selector(selection):
"""Like PrimaryPreferred but doesn't use tags or latency."""
return (writable_server_selector(selection) or
secondary_server_selector(selection))
def apply_single_tag_set(tag_set, selection):
"""All servers matching one tag set.
A tag set is a dict. A server matches if its tags are a superset:
A server tagged {'a': '1', 'b': '2'} matches the tag set {'a': '1'}.
The empty tag set {} matches any server.
"""
def tags_match(server_tags):
for key, value in tag_set.items():
if key not in server_tags or server_tags[key] != value:
return False
return True
return selection.with_server_descriptions(
[s for s in selection.server_descriptions if tags_match(s.tags)])
def apply_tag_sets(tag_sets, selection):
"""All servers match a list of tag sets.
tag_sets is a list of dicts. The empty tag set {} matches any server,
and may be provided at the end of the list as a fallback. So
[{'a': 'value'}, {}] expresses a preference for servers tagged
{'a': 'value'}, but accepts any server if none matches the first
preference.
"""
for tag_set in tag_sets:
with_tag_set = apply_single_tag_set(tag_set, selection)
if with_tag_set:
return with_tag_set
return selection.with_server_descriptions([])
def secondary_with_tags_server_selector(tag_sets, selection):
"""All near-enough secondaries matching the tag sets."""
return apply_tag_sets(tag_sets, secondary_server_selector(selection))
def member_with_tags_server_selector(tag_sets, selection):
"""All near-enough members matching the tag sets."""
return apply_tag_sets(tag_sets, readable_server_selector(selection))
| mit |
mcanthony/rethinkdb | external/v8_3.30.33.16/build/gyp/test/mac/gyptest-installname.py | 244 | 2512 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that LD_DYLIB_INSTALL_NAME and DYLIB_INSTALL_NAME_BASE are handled
correctly.
"""
import TestGyp
import re
import subprocess
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
CHDIR = 'installname'
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', test.ALL, chdir=CHDIR)
def GetInstallname(p):
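    # Read the Mach-O load commands with 'otool -l' and extract the install
    # name recorded in the LC_ID_DYLIB command.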
p = test.built_file_path(p, chdir=CHDIR)
r = re.compile(r'cmd LC_ID_DYLIB.*?name (.*?) \(offset \d+\)', re.DOTALL)
proc = subprocess.Popen(['otool', '-l', p], stdout=subprocess.PIPE)
o = proc.communicate()[0]
assert not proc.returncode
m = r.search(o)
assert m
return m.group(1)
if (GetInstallname('libdefault_installname.dylib') !=
'/usr/local/lib/libdefault_installname.dylib'):
test.fail_test()
if (GetInstallname('My Framework.framework/My Framework') !=
'/Library/Frameworks/My Framework.framework/'
'Versions/A/My Framework'):
test.fail_test()
if (GetInstallname('libexplicit_installname.dylib') !=
'Trapped in a dynamiclib factory'):
test.fail_test()
if (GetInstallname('libexplicit_installname_base.dylib') !=
'@executable_path/../../../libexplicit_installname_base.dylib'):
test.fail_test()
if (GetInstallname('My Other Framework.framework/My Other Framework') !=
'@executable_path/../../../My Other Framework.framework/'
'Versions/A/My Other Framework'):
test.fail_test()
if (GetInstallname('libexplicit_installname_with_base.dylib') !=
'/usr/local/lib/libexplicit_installname_with_base.dylib'):
test.fail_test()
if (GetInstallname('libexplicit_installname_with_explicit_base.dylib') !=
'@executable_path/../libexplicit_installname_with_explicit_base.dylib'):
test.fail_test()
if (GetInstallname('libboth_base_and_installname.dylib') !=
'Still trapped in a dynamiclib factory'):
test.fail_test()
if (GetInstallname('install_name_with_info_plist.framework/'
'install_name_with_info_plist') !=
'/Library/Frameworks/install_name_with_info_plist.framework/'
'Versions/A/install_name_with_info_plist'):
test.fail_test()
if ('DYLIB_INSTALL_NAME_BASE:standardizepath: command not found' in
test.stdout()):
test.fail_test()
test.pass_test()
| agpl-3.0 |
jmptrader/dirigible-spreadsheet | dirigible/sheet/tests/parser/test_fl_cell_reference_parse_node.py | 2 | 6314 | # Copyright (c) 2005-2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
try:
import unittest2 as unittest
except ImportError:
import unittest
from sheet.parser.parse_node import ParseNode
from sheet.parser.fl_cell_reference_parse_node import FLCellReferenceParseNode
from sheet.parser.fl_reference_parse_node import FLReferenceParseNode
class FLCellReferenceParseNodeTest(unittest.TestCase):
def testConstructor(self):
flCellReference = FLCellReferenceParseNode(["A1"])
self.assertTrue(isinstance(flCellReference, FLReferenceParseNode), 'should be a parse node')
self.assertEquals(flCellReference.type, ParseNode.FL_CELL_REFERENCE, "Node was of the wrong type")
self.assertEquals(flCellReference.children, ["A1"], "Node had the wrong children")
def testStr(self):
node = FLCellReferenceParseNode(["a1"])
self.assertEquals(str(node), "<FLCellReferenceParseNode type=\"FL_CELL_REFERENCE\" children=['a1']>", "Wrong string representation")
def testColAbsolute(self):
self.assertFalse(FLCellReferenceParseNode(["A1"]).colAbsolute, "Incorrect colAbsolute for A1")
self.assertFalse(FLCellReferenceParseNode(["A$1"]).colAbsolute, "Incorrect colAbsolute for A$1")
self.assertTrue(FLCellReferenceParseNode(["$A1"]).colAbsolute, "Incorrect colAbsolute for $A1")
self.assertTrue(FLCellReferenceParseNode(["$A$1"]).colAbsolute, "Incorrect colAbsolute for $A$1")
self.assertFalse(FLCellReferenceParseNode(["SheetSomething", "! ", "A1"]).colAbsolute,
"Incorrect colAbsolute for A1 with worksheet")
self.assertTrue(FLCellReferenceParseNode(["SheetSomething", "! ", "$A$1"]).colAbsolute,
"Incorrect colAbsolute for $A$1 with worksheet")
def testRowAbsolute(self):
self.assertFalse(FLCellReferenceParseNode(["A1"]).rowAbsolute, "Incorrect rowAbsolute for A1")
self.assertTrue(FLCellReferenceParseNode(["A$1"]).rowAbsolute, "Incorrect rowAbsolute for A$1")
self.assertFalse(FLCellReferenceParseNode(["$A1"]).rowAbsolute, "Incorrect rowAbsolute for $A1")
self.assertTrue(FLCellReferenceParseNode(["$A$1"]).rowAbsolute, "Incorrect rowAbsolute for $A$1")
self.assertFalse(FLCellReferenceParseNode(["SheetSomething", "! ", "A1"]).rowAbsolute,
"Incorrect colAbsolute for A1 with worksheet")
self.assertTrue(FLCellReferenceParseNode(["SheetSomething", "! ", "$A$1"]).rowAbsolute,
"Incorrect colAbsolute for $A$1 with worksheet")
def testPlainCellName(self):
self.assertEquals(FLCellReferenceParseNode(["A1"]).plainCellName, "A1", "Incorrect plainCellName for A1")
self.assertEquals(FLCellReferenceParseNode(["A$1"]).plainCellName, "A1", "Incorrect plainCellName for A$1")
self.assertEquals(FLCellReferenceParseNode(["$A1"]).plainCellName, "A1", "Incorrect plainCellName for $A1")
self.assertEquals(FLCellReferenceParseNode(["$A$1"]).plainCellName, "A1", "Incorrect plainCellName for $A$1")
self.assertEquals(FLCellReferenceParseNode(["SheetSomething", "! ", "A1"]).plainCellName, "A1",
"Incorrect plainCellName for A1 with worksheet")
self.assertEquals(FLCellReferenceParseNode(["SheetSomething", "! ", "$A$1"]).plainCellName, "A1",
"Incorrect plainCellName for $A$1 with worksheet")
def testRegisteredWithParse(self):
"test registered with ParseNode"
self.assertEquals(type(ParseNode.construct_node(ParseNode.FL_CELL_REFERENCE, ['A1'])), FLCellReferenceParseNode,
"Class is not registered with ParseNode")
def testCellProperty(self):
node = FLCellReferenceParseNode(["G8 "])
self.assertEquals(node.localReference, "G8 ", "cellref wrong")
node = FLCellReferenceParseNode(["Sheet1", "!", "G8 "])
self.assertEquals(node.localReference, "G8 ", "cellref wrong")
node = FLCellReferenceParseNode(["G8 "])
node.localReference = "F5"
self.assertEquals(node.localReference, "F5", "should discard whitespace")
node = FLCellReferenceParseNode(["G8 "])
node.localReference = "F5 "
self.assertEquals(node.localReference, "F5 ", "should not pile whitespace")
def testCanonicalise(self):
node = FLCellReferenceParseNode(["bertie ", "!", "a1 "])
node.canonicalise(['Bertie'])
self.assertEquals(node.localReference, 'A1 ')
self.assertEquals(node.worksheetReference, 'Bertie')
def testOffset(self):
node = FLCellReferenceParseNode(["G8 "])
node.offset(1, 4)
self.assertEquals(node.localReference, "H12 ", "offset didnt work")
node = FLCellReferenceParseNode(["G8 "])
node.offset(-7, 1)
self.assertEquals(node.localReference, "#Invalid! ", "offset didnt work")
node = FLCellReferenceParseNode(["G8 "])
node.offset(1, -8)
self.assertEquals(node.localReference, "#Invalid! ", "offset didnt work")
node = FLCellReferenceParseNode(["G8 "])
node.offset(-6, -7)
self.assertEquals(node.localReference, "A1 ", "offset didnt work")
node = FLCellReferenceParseNode(["$G8 "])
node.offset(-6, -7)
self.assertEquals(node.localReference, "$G1 ", "offset didnt work")
node = FLCellReferenceParseNode(["G$8 "])
node.offset(-6, -7)
self.assertEquals(node.localReference, "A$8 ", "offset didnt work")
node = FLCellReferenceParseNode(["$G$8 "])
node.offset(-6, -7)
self.assertEquals(node.localReference, "$G$8 ", "offset didnt work")
node = FLCellReferenceParseNode(["$G$8 "])
node.offset(-6, -7, move_absolute=True)
self.assertEquals(node.localReference, "$A$1 ", "offset didnt work")
node = FLCellReferenceParseNode(["ZZZ9 "])
node.offset(1, -1)
self.assertEquals(node.localReference, "#Invalid! ", "offset didnt work")
def testCoords(self):
node = FLCellReferenceParseNode(["A2"])
self.assertEquals(node.coords, (1, 2))
node = FLCellReferenceParseNode(["B1"])
self.assertEquals(node.coords, (2, 1))
| mit |
cristiandima/highlights | highlights/extractive/erank.py | 1 | 3576 | """
This is in many ways identical to the textrank algorithms. The only difference
is that we expand the sentence graph to also include the title of the text,
the topics associated with the text, and the named entities present.
The output is still an importance score for each sentence in the original text,
but these new nodes offer extra information and increase the weights of those
sentences which are more closely related to the topics/title/named entities
associated with the text.
"""
import spacy
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from highlights.extractive.textrank import _textrank_scores
from highlights.internals.helpers import summary_length, NLP
_word_tokenize = TfidfVectorizer(stop_words='english').build_analyzer()
def _get_named_entities(nlp_doc):
""" Given a spacy document return the top ten most frequent name entities
present in the text. Name entities appearing only once are skipped.
Args:
nlp_doc (spacy document): document to extract named entities from
Returns:
a list of words, the most frequent named entities present in the document
"""
ignored_ents = {'DATE', 'TIME', 'PERCENT', 'MONEY', 'QUANTITY', 'ORDINAL', 'CARDINAL'}
ne = [n.text for n in nlp_doc.ents if n.label_ not in ignored_ents]
ne = [n.replace('the', '').strip() for n in ne]
ne = set(ne)
counter = CountVectorizer(ngram_range=(1,2))
counts = counter.fit_transform([nlp_doc.text])
ne_scores = []
for entity in ne:
entity = entity.lower()
if entity in counter.vocabulary_:
ne_scores.append((counts[0, counter.vocabulary_.get(entity)], entity))
ne_scores = sorted([n for n in ne_scores if n[0] != 1], reverse=True)[:10]
return [n[1] for n in ne_scores]
def _get_topics(nlp_doc, lda, word_dict, topic_terms):
""" Given a spacy document, as well as an lda model, this function returns
a list of lists where each list holds the string words associated with each
topic associated with the document
"""
doc_bow = word_dict.doc2bow(_word_tokenize(nlp_doc.text))
topics = lda.get_document_topics(doc_bow)
topics_as_words = []
for topic_tuple in topics:
topic_words = []
for word_tuple in topic_terms[topic_tuple[0]]:
topic_words.append(word_dict[word_tuple[0]])
topics_as_words.append(topic_words)
return topics_as_words
def _erank_scores(nlp_doc, topics, named_entities, title=None):
sentences = [sent.text for sent in nlp_doc.sents]
original_len = len(sentences)
for topic_words in topics:
sentences.append(' '.join(topic_words))
if len(named_entities) >= 1:
sentences.append(' '.join(named_entities))
if title is not None:
sentences.append(' '.join(_word_tokenize(title)))
scores = _textrank_scores(sentences)
scores = {i: scores.get(i, 0) for i in range(original_len)}
return scores
def erank(text, lda, word_dict, topic_terms, title=None, len_func=summary_length):
nlp_doc = NLP(text)
sentences = [sent.text for sent in nlp_doc.sents]
topics = _get_topics(nlp_doc, lda, word_dict, topic_terms)
named_entities = _get_named_entities(nlp_doc)
scores = _erank_scores(nlp_doc, topics, named_entities, title)
sum_len = len_func(len(scores))
sent_scores = [(scores[i], s) for i, s in enumerate(sentences)]
top_sentences = sorted(sent_scores, reverse=True)[:sum_len]
return [s[1] for s in top_sentences]
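# A hedged usage sketch, not part of the original module: the lda, word_dict and
# topic_terms arguments are assumed to come from gensim (a trained LdaModel, its
# Dictionary, and a topic-id -> get_topic_terms() mapping). The names used below
# (tokenized_corpus, article_text, lda_model, dictionary) are illustrative only.
#
#   from gensim.corpora import Dictionary
#   from gensim.models import LdaModel
#
#   dictionary = Dictionary(tokenized_corpus)
#   corpus = [dictionary.doc2bow(doc) for doc in tokenized_corpus]
#   lda_model = LdaModel(corpus, id2word=dictionary, num_topics=20)
#   topic_terms = {i: lda_model.get_topic_terms(i) for i in range(lda_model.num_topics)}
#
#   summary = erank(article_text, lda_model, dictionary, topic_terms, title="Some title")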
| mit |
SerialShadow/SickRage | autoProcessTV/lib/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py | 2360 | 3778 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
"""Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
"""
if not cert:
raise ValueError("empty or no certificate")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
| gpl-3.0 |
jbzdak/edx-platform | cms/djangoapps/contentstore/views/tests/test_course_index.py | 5 | 35844 | """
Unit tests for getting the list of courses and the course outline.
"""
import ddt
import json
import lxml
import datetime
import mock
import pytz
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url, reverse_library_url, add_instructor, reverse_usage_url
from contentstore.views.course import (
course_outline_initial_state, reindex_course_and_check_access, _deprecated_blocks_info
)
from contentstore.views.item import create_xblock_info, VisibilityState
from course_action_state.managers import CourseRerunUIStateManager
from course_action_state.models import CourseRerunState
from opaque_keys.edx.locator import CourseLocator
from search.api import perform_search
from student.auth import has_course_author_access
from student.tests.factories import UserFactory
from util.date_utils import get_default_time_display
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.exceptions import ItemNotFoundError
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory, LibraryFactory
class TestCourseIndex(CourseTestCase):
"""
Unit tests for getting the list of courses and the course outline.
"""
def setUp(self):
"""
Add a course with odd characters in the fields
"""
super(TestCourseIndex, self).setUp()
# had a problem where index showed course but has_access failed to retrieve it for non-staff
self.odd_course = CourseFactory.create(
org='test.org_1-2',
number='test-2.3_course',
display_name='dotted.course.name-2',
)
def check_index_and_outline(self, authed_client):
"""
Test getting the list of courses and then pulling up their outlines
"""
index_url = '/home/'
index_response = authed_client.get(index_url, {}, HTTP_ACCEPT='text/html')
parsed_html = lxml.html.fromstring(index_response.content)
course_link_eles = parsed_html.find_class('course-link')
self.assertGreaterEqual(len(course_link_eles), 2)
for link in course_link_eles:
self.assertRegexpMatches(
link.get("href"),
'course/{}'.format(settings.COURSE_KEY_PATTERN)
)
# now test that url
outline_response = authed_client.get(link.get("href"), {}, HTTP_ACCEPT='text/html')
# ensure it has the expected 2 self referential links
outline_parsed = lxml.html.fromstring(outline_response.content)
outline_link = outline_parsed.find_class('course-link')[0]
self.assertEqual(outline_link.get("href"), link.get("href"))
course_menu_link = outline_parsed.find_class('nav-course-courseware-outline')[0]
self.assertEqual(course_menu_link.find("a").get("href"), link.get("href"))
def test_libraries_on_course_index(self):
"""
Test getting the list of libraries from the course listing page
"""
# Add a library:
lib1 = LibraryFactory.create()
index_url = '/home/'
index_response = self.client.get(index_url, {}, HTTP_ACCEPT='text/html')
parsed_html = lxml.html.fromstring(index_response.content)
library_link_elements = parsed_html.find_class('library-link')
self.assertEqual(len(library_link_elements), 1)
link = library_link_elements[0]
self.assertEqual(
link.get("href"),
reverse_library_url('library_handler', lib1.location.library_key),
)
# now test that url
outline_response = self.client.get(link.get("href"), {}, HTTP_ACCEPT='text/html')
self.assertEqual(outline_response.status_code, 200)
def test_is_staff_access(self):
"""
Test that people with is_staff see the courses and can navigate into them
"""
self.check_index_and_outline(self.client)
def test_negative_conditions(self):
"""
Test the error conditions for the access
"""
outline_url = reverse_course_url('course_handler', self.course.id)
# register a non-staff member and try to delete the course branch
non_staff_client, _ = self.create_non_staff_authed_user_client()
response = non_staff_client.delete(outline_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 403)
def test_course_staff_access(self):
"""
Make and register course_staff and ensure they can access the courses
"""
course_staff_client, course_staff = self.create_non_staff_authed_user_client()
for course in [self.course, self.odd_course]:
permission_url = reverse_course_url('course_team_handler', course.id, kwargs={'email': course_staff.email})
self.client.post(
permission_url,
data=json.dumps({"role": "staff"}),
content_type="application/json",
HTTP_ACCEPT="application/json",
)
# test access
self.check_index_and_outline(course_staff_client)
def test_json_responses(self):
outline_url = reverse_course_url('course_handler', self.course.id)
chapter = ItemFactory.create(parent_location=self.course.location, category='chapter', display_name="Week 1")
lesson = ItemFactory.create(parent_location=chapter.location, category='sequential', display_name="Lesson 1")
subsection = ItemFactory.create(
parent_location=lesson.location,
category='vertical',
display_name='Subsection 1'
)
ItemFactory.create(parent_location=subsection.location, category="video", display_name="My Video")
resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
# First spot check some values in the root response
self.assertEqual(json_response['category'], 'course')
self.assertEqual(json_response['id'], unicode(self.course.location))
self.assertEqual(json_response['display_name'], self.course.display_name)
self.assertTrue(json_response['published'])
self.assertIsNone(json_response['visibility_state'])
# Now verify the first child
children = json_response['child_info']['children']
self.assertTrue(len(children) > 0)
first_child_response = children[0]
self.assertEqual(first_child_response['category'], 'chapter')
self.assertEqual(first_child_response['id'], unicode(chapter.location))
self.assertEqual(first_child_response['display_name'], 'Week 1')
self.assertTrue(json_response['published'])
self.assertEqual(first_child_response['visibility_state'], VisibilityState.unscheduled)
self.assertTrue(len(first_child_response['child_info']['children']) > 0)
# Finally, validate the entire response for consistency
self.assert_correct_json_response(json_response)
def test_notifications_handler_get(self):
state = CourseRerunUIStateManager.State.FAILED
action = CourseRerunUIStateManager.ACTION
should_display = True
# try when no notification exists
notification_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': 1,
})
resp = self.client.get(notification_url, HTTP_ACCEPT='application/json')
# verify that a 400 is returned when the notification does not exist
self.assertEquals(resp.status_code, 400)
# create a test notification
rerun_state = CourseRerunState.objects.update_state(
course_key=self.course.id,
new_state=state,
allow_not_found=True
)
CourseRerunState.objects.update_should_display(
entry_id=rerun_state.id,
user=UserFactory(),
should_display=should_display
)
# try to get information on this notification
notification_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': rerun_state.id,
})
resp = self.client.get(notification_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
self.assertEquals(json_response['state'], state)
self.assertEquals(json_response['action'], action)
self.assertEquals(json_response['should_display'], should_display)
def test_notifications_handler_dismiss(self):
state = CourseRerunUIStateManager.State.FAILED
should_display = True
rerun_course_key = CourseLocator(org='testx', course='test_course', run='test_run')
# add an instructor to this course
user2 = UserFactory()
add_instructor(rerun_course_key, self.user, user2)
# create a test notification
rerun_state = CourseRerunState.objects.update_state(
course_key=rerun_course_key,
new_state=state,
allow_not_found=True
)
CourseRerunState.objects.update_should_display(
entry_id=rerun_state.id,
user=user2,
should_display=should_display
)
# try to get information on this notification
notification_dismiss_url = reverse_course_url('course_notifications_handler', self.course.id, kwargs={
'action_state_id': rerun_state.id,
})
resp = self.client.delete(notification_dismiss_url)
self.assertEquals(resp.status_code, 200)
with self.assertRaises(CourseRerunState.DoesNotExist):
# delete notifications that are dismissed
CourseRerunState.objects.get(id=rerun_state.id)
self.assertFalse(has_course_author_access(user2, rerun_course_key))
def assert_correct_json_response(self, json_response):
"""
Asserts that the JSON response is syntactically consistent
"""
self.assertIsNotNone(json_response['display_name'])
self.assertIsNotNone(json_response['id'])
self.assertIsNotNone(json_response['category'])
self.assertTrue(json_response['published'])
if json_response.get('child_info', None):
for child_response in json_response['child_info']['children']:
self.assert_correct_json_response(child_response)
def test_course_updates_invalid_url(self):
"""
Tests the error conditions for the invalid course updates URL.
"""
# Testing the response code by passing slash separated course id whose format is valid but no course
# having this id exists.
invalid_course_key = '{}_blah_blah_blah'.format(self.course.id)
course_updates_url = reverse_course_url('course_info_handler', invalid_course_key)
response = self.client.get(course_updates_url)
self.assertEqual(response.status_code, 404)
# Testing the response code by passing split course id whose format is valid but no course
# having this id exists.
split_course_key = CourseLocator(org='orgASD', course='course_01213', run='Run_0_hhh_hhh_hhh')
course_updates_url_split = reverse_course_url('course_info_handler', split_course_key)
response = self.client.get(course_updates_url_split)
self.assertEqual(response.status_code, 404)
# Testing the response by passing split course id whose format is invalid.
invalid_course_id = 'invalid.course.key/{}'.format(split_course_key)
course_updates_url_split = reverse_course_url('course_info_handler', invalid_course_id)
response = self.client.get(course_updates_url_split)
self.assertEqual(response.status_code, 404)
def test_course_index_invalid_url(self):
"""
Tests the error conditions for the invalid course index URL.
"""
# Testing the response code by passing slash separated course key, no course
# having this key exists.
invalid_course_key = '{}_some_invalid_run'.format(self.course.id)
course_outline_url = reverse_course_url('course_handler', invalid_course_key)
response = self.client.get_html(course_outline_url)
self.assertEqual(response.status_code, 404)
# Testing the response code by passing split course key, no course
# having this key exists.
split_course_key = CourseLocator(org='invalid_org', course='course_01111', run='Run_0_invalid')
course_outline_url_split = reverse_course_url('course_handler', split_course_key)
response = self.client.get_html(course_outline_url_split)
self.assertEqual(response.status_code, 404)
@ddt.ddt
class TestCourseOutline(CourseTestCase):
"""
Unit tests for the course outline.
"""
def setUp(self):
"""
Set up for the course outline tests.
"""
super(TestCourseOutline, self).setUp()
self.chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Week 1"
)
self.sequential = ItemFactory.create(
parent_location=self.chapter.location, category='sequential', display_name="Lesson 1"
)
self.vertical = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Subsection 1'
)
self.video = ItemFactory.create(
parent_location=self.vertical.location, category="video", display_name="My Video"
)
def test_json_responses(self):
"""
Verify the JSON responses returned for the course.
"""
outline_url = reverse_course_url('course_handler', self.course.id)
resp = self.client.get(outline_url, HTTP_ACCEPT='application/json')
json_response = json.loads(resp.content)
# First spot check some values in the root response
self.assertEqual(json_response['category'], 'course')
self.assertEqual(json_response['id'], unicode(self.course.location))
self.assertEqual(json_response['display_name'], self.course.display_name)
self.assertTrue(json_response['published'])
self.assertIsNone(json_response['visibility_state'])
# Now verify the first child
children = json_response['child_info']['children']
self.assertTrue(len(children) > 0)
first_child_response = children[0]
self.assertEqual(first_child_response['category'], 'chapter')
self.assertEqual(first_child_response['id'], unicode(self.chapter.location))
self.assertEqual(first_child_response['display_name'], 'Week 1')
self.assertTrue(json_response['published'])
self.assertEqual(first_child_response['visibility_state'], VisibilityState.unscheduled)
self.assertTrue(len(first_child_response['child_info']['children']) > 0)
# Finally, validate the entire response for consistency
self.assert_correct_json_response(json_response)
def assert_correct_json_response(self, json_response):
"""
Asserts that the JSON response is syntactically consistent
"""
self.assertIsNotNone(json_response['display_name'])
self.assertIsNotNone(json_response['id'])
self.assertIsNotNone(json_response['category'])
self.assertTrue(json_response['published'])
if json_response.get('child_info', None):
for child_response in json_response['child_info']['children']:
self.assert_correct_json_response(child_response)
def test_course_outline_initial_state(self):
course_module = modulestore().get_item(self.course.location)
course_structure = create_xblock_info(
course_module,
include_child_info=True,
include_children_predicate=lambda xblock: not xblock.category == 'vertical'
)
# Verify that None is returned for a non-existent locator
self.assertIsNone(course_outline_initial_state('no-such-locator', course_structure))
# Verify that the correct initial state is returned for the test chapter
chapter_locator = unicode(self.chapter.location)
initial_state = course_outline_initial_state(chapter_locator, course_structure)
self.assertEqual(initial_state['locator_to_show'], chapter_locator)
expanded_locators = initial_state['expanded_locators']
self.assertIn(unicode(self.sequential.location), expanded_locators)
self.assertIn(unicode(self.vertical.location), expanded_locators)
def test_start_date_on_page(self):
"""
Verify that the course start date is included on the course outline page.
"""
def _get_release_date(response):
"""Return the release date from the course page"""
parsed_html = lxml.html.fromstring(response.content)
return parsed_html.find_class('course-status')[0].find_class('status-release-value')[0].text_content()
def _assert_settings_link_present(response):
"""
Asserts there's a course settings link on the course page by the course release date.
"""
parsed_html = lxml.html.fromstring(response.content)
settings_link = parsed_html.find_class('course-status')[0].find_class('action-edit')[0].find('a')
self.assertIsNotNone(settings_link)
self.assertEqual(settings_link.get('href'), reverse_course_url('settings_handler', self.course.id))
outline_url = reverse_course_url('course_handler', self.course.id)
response = self.client.get(outline_url, {}, HTTP_ACCEPT='text/html')
# A course with the default release date should display as "Unscheduled"
self.assertEqual(_get_release_date(response), 'Unscheduled')
_assert_settings_link_present(response)
self.course.start = datetime.datetime(2014, 1, 1, tzinfo=pytz.utc)
modulestore().update_item(self.course, ModuleStoreEnum.UserID.test)
response = self.client.get(outline_url, {}, HTTP_ACCEPT='text/html')
self.assertEqual(_get_release_date(response), get_default_time_display(self.course.start))
_assert_settings_link_present(response)
def _create_test_data(self, course_module, create_blocks=False, publish=True, block_types=None):
"""
Create data for test.
"""
if create_blocks:
for block_type in block_types:
ItemFactory.create(
parent_location=self.vertical.location,
category=block_type,
display_name='{} Problem'.format(block_type)
)
if not publish:
self.store.unpublish(self.vertical.location, self.user.id)
course_module.advanced_modules.extend(block_types)
def _verify_deprecated_info(self, course_id, advanced_modules, info, deprecated_block_types):
"""
Verify deprecated info.
"""
expected_blocks = []
for block_type in deprecated_block_types:
expected_blocks.append(
[
reverse_usage_url('container_handler', self.vertical.location),
'{} Problem'.format(block_type)
]
)
self.assertEqual(info['block_types'], deprecated_block_types)
self.assertEqual(
info['block_types_enabled'],
any(component in advanced_modules for component in deprecated_block_types)
)
self.assertItemsEqual(info['blocks'], expected_blocks)
self.assertEqual(
info['advance_settings_url'],
reverse_course_url('advanced_settings_handler', course_id)
)
@ddt.data(
{'publish': True},
{'publish': False},
)
@ddt.unpack
def test_verify_deprecated_warning_message_with_single_feature(self, publish):
"""
Verify deprecated warning info for single deprecated feature.
"""
block_types = settings.DEPRECATED_BLOCK_TYPES
course_module = modulestore().get_item(self.course.location)
self._create_test_data(course_module, create_blocks=True, block_types=block_types, publish=publish)
info = _deprecated_blocks_info(course_module, block_types)
self._verify_deprecated_info(
course_module.id,
course_module.advanced_modules,
info,
block_types
)
def test_verify_deprecated_warning_message_with_multiple_features(self):
"""
Verify deprecated warning info for multiple deprecated features.
"""
block_types = ['peergrading', 'combinedopenended', 'openassessment']
course_module = modulestore().get_item(self.course.location)
self._create_test_data(course_module, create_blocks=True, block_types=block_types)
info = _deprecated_blocks_info(course_module, block_types)
self._verify_deprecated_info(course_module.id, course_module.advanced_modules, info, block_types)
@ddt.data(
{'delete_vertical': True},
{'delete_vertical': False},
)
@ddt.unpack
def test_deprecated_blocks_list_updated_correctly(self, delete_vertical):
"""
Verify that the deprecated blocks list shown on the banner is updated correctly.
Here is the scenario:
The list of deprecated blocks shown on the banner contains published
and un-published blocks. That list should be updated when we delete
un-published block(s). This behavior should be the same whether we delete
an unpublished vertical or an unpublished problem.
"""
block_types = ['peergrading']
course_module = modulestore().get_item(self.course.location)
vertical1 = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Vert1 Subsection1'
)
problem1 = ItemFactory.create(
parent_location=vertical1.location,
category='peergrading',
display_name='peergrading problem in vert1',
publish_item=False
)
info = _deprecated_blocks_info(course_module, block_types)
# info['blocks'] should be empty here because there is nothing
# published or un-published present
self.assertEqual(info['blocks'], [])
vertical2 = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Vert2 Subsection1'
)
ItemFactory.create(
parent_location=vertical2.location,
category='peergrading',
display_name='peergrading problem in vert2',
publish_item=True
)
# At this point CourseStructure will contain both the above
# published and un-published verticals
info = _deprecated_blocks_info(course_module, block_types)
self.assertItemsEqual(
info['blocks'],
[
[reverse_usage_url('container_handler', vertical1.location), 'peergrading problem in vert1'],
[reverse_usage_url('container_handler', vertical2.location), 'peergrading problem in vert2']
]
)
# Delete the un-published vertical or problem so that CourseStructure updates its data
if delete_vertical:
self.store.delete_item(vertical1.location, self.user.id)
else:
self.store.delete_item(problem1.location, self.user.id)
info = _deprecated_blocks_info(course_module, block_types)
# info['blocks'] should only contain the info about vertical2 which is published.
# There shouldn't be any info present about un-published vertical1
self.assertEqual(
info['blocks'],
[[reverse_usage_url('container_handler', vertical2.location), 'peergrading problem in vert2']]
)
class TestCourseReIndex(CourseTestCase):
"""
Unit tests for the course outline.
"""
SUCCESSFUL_RESPONSE = _("Course has been successfully reindexed.")
def setUp(self):
"""
Set up for the course outline tests.
"""
super(TestCourseReIndex, self).setUp()
self.course.start = datetime.datetime(2014, 1, 1, tzinfo=pytz.utc)
modulestore().update_item(self.course, self.user.id)
self.chapter = ItemFactory.create(
parent_location=self.course.location, category='chapter', display_name="Week 1"
)
self.sequential = ItemFactory.create(
parent_location=self.chapter.location, category='sequential', display_name="Lesson 1"
)
self.vertical = ItemFactory.create(
parent_location=self.sequential.location, category='vertical', display_name='Subsection 1'
)
self.video = ItemFactory.create(
parent_location=self.vertical.location, category="video", display_name="My Video"
)
self.html = ItemFactory.create(
parent_location=self.vertical.location, category="html", display_name="My HTML",
data="<div>This is my unique HTML content</div>",
)
def test_reindex_course(self):
"""
Verify that course gets reindexed.
"""
index_url = reverse_course_url('course_search_index_handler', self.course.id)
response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json')
# A course with the default release date should display as "Unscheduled"
self.assertIn(self.SUCCESSFUL_RESPONSE, response.content)
self.assertEqual(response.status_code, 200)
response = self.client.post(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.content, '')
self.assertEqual(response.status_code, 405)
self.client.logout()
response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 302)
def test_negative_conditions(self):
"""
Test the error conditions for the access
"""
index_url = reverse_course_url('course_search_index_handler', self.course.id)
# register a non-staff member and try to delete the course branch
non_staff_client, _ = self.create_non_staff_authed_user_client()
response = non_staff_client.get(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 403)
def test_empty_content_type(self):
"""
Test json content type is set if '' is selected
"""
index_url = reverse_course_url('course_search_index_handler', self.course.id)
response = self.client.get(index_url, {}, CONTENT_TYPE='')
# A course with the default release date should display as "Unscheduled"
self.assertIn(self.SUCCESSFUL_RESPONSE, response.content)
self.assertEqual(response.status_code, 200)
@mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary')
def test_reindex_course_search_index_error(self, mock_index_dictionary):
"""
Test json response with mocked error data for html
"""
# set mocked exception response
err = SearchIndexingError
mock_index_dictionary.return_value = err
index_url = reverse_course_url('course_search_index_handler', self.course.id)
# Start manual reindex and check error in response
response = self.client.get(index_url, {}, HTTP_ACCEPT='application/json')
self.assertEqual(response.status_code, 500)
def test_reindex_json_responses(self):
"""
Test json response with real data
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# Start manual reindex
reindex_course_and_check_access(self.course.id, self.user)
# Check results remain the same
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
@mock.patch('xmodule.video_module.VideoDescriptor.index_dictionary')
def test_reindex_video_error_json_responses(self, mock_index_dictionary):
"""
Test json response with mocked error data for video
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = SearchIndexingError
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
@mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary')
def test_reindex_html_error_json_responses(self, mock_index_dictionary):
"""
Test json response with mocked error data for html
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = SearchIndexingError
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
@mock.patch('xmodule.seq_module.SequenceDescriptor.index_dictionary')
def test_reindex_seq_error_json_responses(self, mock_index_dictionary):
"""
Test json response with mocked error data for sequence
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
@mock.patch('xmodule.modulestore.mongo.base.MongoModuleStore.get_course')
def test_reindex_no_item(self, mock_get_course):
"""
Test system logs an error if no item found.
"""
# set mocked exception response
err = ItemNotFoundError
mock_get_course.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
reindex_course_and_check_access(self.course.id, self.user)
def test_reindex_no_permissions(self):
# register a non-staff member and try to delete the course branch
user2 = UserFactory()
with self.assertRaises(PermissionDenied):
reindex_course_and_check_access(self.course.id, user2)
def test_indexing_responses(self):
"""
Test do_course_reindex response with real data
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# Start manual reindex
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
# Check results are the same following reindex
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
@mock.patch('xmodule.video_module.VideoDescriptor.index_dictionary')
def test_indexing_video_error_responses(self, mock_index_dictionary):
"""
Test do_course_reindex response with mocked error data for video
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
@mock.patch('xmodule.html_module.HtmlDescriptor.index_dictionary')
def test_indexing_html_error_responses(self, mock_index_dictionary):
"""
Test do_course_reindex response with mocked error data for html
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
@mock.patch('xmodule.seq_module.SequenceDescriptor.index_dictionary')
def test_indexing_seq_error_responses(self, mock_index_dictionary):
"""
Test do_course_reindex response with mocked error data for sequence
"""
# results are indexed because they are published from ItemFactory
response = perform_search(
"unique",
user=self.user,
size=10,
from_=0,
course_id=unicode(self.course.id))
self.assertEqual(response['total'], 1)
# set mocked exception response
err = Exception
mock_index_dictionary.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
@mock.patch('xmodule.modulestore.mongo.base.MongoModuleStore.get_course')
def test_indexing_no_item(self, mock_get_course):
"""
Test system logs an error if no item found.
"""
# set mocked exception response
err = ItemNotFoundError
mock_get_course.return_value = err
# Start manual reindex and check error in response
with self.assertRaises(SearchIndexingError):
CoursewareSearchIndexer.do_course_reindex(modulestore(), self.course.id)
| agpl-3.0 |
ujenmr/ansible | lib/ansible/modules/database/misc/kibana_plugin.py | 52 | 7252 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Thierno IB. BARRY @barryib
# Sponsored by Polyconseil http://polyconseil.fr.
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: kibana_plugin
short_description: Manage Kibana plugins
description:
- This module can be used to manage Kibana plugins.
version_added: "2.2"
author: Thierno IB. BARRY (@barryib)
options:
name:
description:
- Name of the plugin to install.
required: True
state:
description:
- Desired state of a plugin.
choices: ["present", "absent"]
default: present
url:
description:
- Set exact URL to download the plugin from.
- For a local file, prefix its absolute path with file://
timeout:
description:
- "Timeout setting: 30s, 1m, 1h etc."
default: 1m
plugin_bin:
description:
- Location of the Kibana binary.
default: /opt/kibana/bin/kibana
plugin_dir:
description:
- Your configured plugin directory specified in Kibana.
default: /opt/kibana/installedPlugins/
version:
description:
- Version of the plugin to be installed.
- If the plugin exists with a previous version, it will NOT be updated unless C(force) is set to yes.
force:
description:
- Delete and re-install the plugin. Can be useful for plugins update.
type: bool
default: 'no'
'''
EXAMPLES = '''
- name: Install Elasticsearch head plugin
kibana_plugin:
state: present
name: elasticsearch/marvel
- name: Install specific version of a plugin
kibana_plugin:
state: present
name: elasticsearch/marvel
version: '2.3.3'
- name: Uninstall Elasticsearch head plugin
kibana_plugin:
state: absent
name: elasticsearch/marvel
'''
RETURN = '''
cmd:
description: the launched command during plugin management (install / remove)
returned: success
type: str
name:
description: the plugin name to install or remove
returned: success
type: str
url:
description: the url from where the plugin is installed from
returned: success
type: str
timeout:
description: the timeout for plugin download
returned: success
type: str
stdout:
description: the command stdout
returned: success
type: str
stderr:
description: the command stderr
returned: success
type: str
state:
description: the state for the managed plugin
returned: success
type: str
'''
import os
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule
PACKAGE_STATE_MAP = dict(
present="--install",
absent="--remove"
)
def parse_plugin_repo(string):
elements = string.split("/")
# We first consider the simplest form: pluginname
repo = elements[0]
# We consider the form: username/pluginname
if len(elements) > 1:
repo = elements[1]
# remove elasticsearch- prefix
# remove es- prefix
for string in ("elasticsearch-", "es-"):
if repo.startswith(string):
return repo[len(string):]
return repo
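# A hedged illustration, not part of the original module, of what parse_plugin_repo
# is expected to return for the common plugin name forms handled above:
#
#   parse_plugin_repo("elasticsearch/marvel")     # -> "marvel"
#   parse_plugin_repo("mobz/elasticsearch-head")  # -> "head"
#   parse_plugin_repo("es-sense")                 # -> "sense"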
def is_plugin_present(plugin_dir, working_dir):
return os.path.isdir(os.path.join(working_dir, plugin_dir))
def parse_error(string):
reason = "reason: "
try:
return string[string.index(reason) + len(reason):].strip()
except ValueError:
return string
def install_plugin(module, plugin_bin, plugin_name, url, timeout, kibana_version='4.6'):
if LooseVersion(kibana_version) > LooseVersion('4.6'):
kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
cmd_args = [kibana_plugin_bin, "install"]
if url:
cmd_args.append(url)
else:
cmd_args.append(plugin_name)
else:
cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["present"], plugin_name]
if url:
cmd_args.append("--url %s" % url)
if timeout:
cmd_args.append("--timeout %s" % timeout)
cmd = " ".join(cmd_args)
if module.check_mode:
return True, cmd, "check mode", ""
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
def remove_plugin(module, plugin_bin, plugin_name, kibana_version='4.6'):
if LooseVersion(kibana_version) > LooseVersion('4.6'):
kibana_plugin_bin = os.path.join(os.path.dirname(plugin_bin), 'kibana-plugin')
cmd_args = [kibana_plugin_bin, "remove", plugin_name]
else:
cmd_args = [plugin_bin, "plugin", PACKAGE_STATE_MAP["absent"], plugin_name]
cmd = " ".join(cmd_args)
if module.check_mode:
return True, cmd, "check mode", ""
rc, out, err = module.run_command(cmd)
if rc != 0:
reason = parse_error(out)
module.fail_json(msg=reason)
return True, cmd, out, err
def get_kibana_version(module, plugin_bin):
cmd_args = [plugin_bin, '--version']
cmd = " ".join(cmd_args)
rc, out, err = module.run_command(cmd)
if rc != 0:
module.fail_json(msg="Failed to get Kibana version : %s" % err)
return out.strip()
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
state=dict(default="present", choices=PACKAGE_STATE_MAP.keys()),
url=dict(default=None),
timeout=dict(default="1m"),
plugin_bin=dict(default="/opt/kibana/bin/kibana", type="path"),
plugin_dir=dict(default="/opt/kibana/installedPlugins/", type="path"),
version=dict(default=None),
force=dict(default="no", type="bool")
),
supports_check_mode=True,
)
name = module.params["name"]
state = module.params["state"]
url = module.params["url"]
timeout = module.params["timeout"]
plugin_bin = module.params["plugin_bin"]
plugin_dir = module.params["plugin_dir"]
version = module.params["version"]
force = module.params["force"]
changed, cmd, out, err = False, '', '', ''
kibana_version = get_kibana_version(module, plugin_bin)
present = is_plugin_present(parse_plugin_repo(name), plugin_dir)
# skip if the state is correct
if (present and state == "present" and not force) or (state == "absent" and not present and not force):
module.exit_json(changed=False, name=name, state=state)
if version:
name = name + '/' + version
if state == "present":
if force:
remove_plugin(module, plugin_bin, name)
changed, cmd, out, err = install_plugin(module, plugin_bin, name, url, timeout, kibana_version)
elif state == "absent":
changed, cmd, out, err = remove_plugin(module, plugin_bin, name, kibana_version)
module.exit_json(changed=changed, cmd=cmd, name=name, state=state, url=url, timeout=timeout, stdout=out, stderr=err)
if __name__ == '__main__':
main()
| gpl-3.0 |
nhejazi/scikit-learn | sklearn/neural_network/_base.py | 50 | 6856 | """Utilities for the neural network modules
"""
# Author: Issam H. Laradji <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy.special import expit as logistic_sigmoid
def identity(X):
"""Simply return the input array.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Same as the input data.
"""
return X
def logistic(X):
"""Compute the logistic function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
return logistic_sigmoid(X, out=X)
def tanh(X):
"""Compute the hyperbolic tan function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
return np.tanh(X, out=X)
def relu(X):
"""Compute the rectified linear unit function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
np.clip(X, 0, np.finfo(X.dtype).max, out=X)
return X
def softmax(X):
"""Compute the K-way softmax function inplace.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The input data.
Returns
-------
X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
The transformed data.
"""
tmp = X - X.max(axis=1)[:, np.newaxis]
np.exp(tmp, out=X)
X /= X.sum(axis=1)[:, np.newaxis]
return X
ACTIVATIONS = {'identity': identity, 'tanh': tanh, 'logistic': logistic,
'relu': relu, 'softmax': softmax}
def inplace_identity_derivative(Z, delta):
"""Apply the derivative of the identity function: do nothing.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the identity activation function during
the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
# Nothing to do
def inplace_logistic_derivative(Z, delta):
"""Apply the derivative of the logistic sigmoid function.
It exploits the fact that the derivative is a simple function of the output
value from logistic function.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the logistic activation function during
the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
delta *= Z
delta *= (1 - Z)
def inplace_tanh_derivative(Z, delta):
"""Apply the derivative of the hyperbolic tanh function.
It exploits the fact that the derivative is a simple function of the output
value from hyperbolic tangent.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the hyperbolic tangent activation
function during the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
delta *= (1 - Z ** 2)
def inplace_relu_derivative(Z, delta):
"""Apply the derivative of the relu function.
It exploits the fact that the derivative is a simple function of the output
value from rectified linear units activation function.
Parameters
----------
Z : {array-like, sparse matrix}, shape (n_samples, n_features)
The data which was output from the rectified linear units activation
function during the forward pass.
delta : {array-like}, shape (n_samples, n_features)
The backpropagated error signal to be modified inplace.
"""
delta[Z == 0] = 0
DERIVATIVES = {'identity': inplace_identity_derivative,
'tanh': inplace_tanh_derivative,
'logistic': inplace_logistic_derivative,
'relu': inplace_relu_derivative}
def squared_loss(y_true, y_pred):
"""Compute the squared loss for regression.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) values.
y_pred : array-like or label indicator matrix
Predicted values, as returned by a regression estimator.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
return ((y_true - y_pred) ** 2).mean() / 2
def log_loss(y_true, y_prob):
"""Compute Logistic loss for classification.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_prob : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
y_prob = np.clip(y_prob, 1e-10, 1 - 1e-10)
if y_prob.shape[1] == 1:
y_prob = np.append(1 - y_prob, y_prob, axis=1)
if y_true.shape[1] == 1:
y_true = np.append(1 - y_true, y_true, axis=1)
return -np.sum(y_true * np.log(y_prob)) / y_prob.shape[0]
def binary_log_loss(y_true, y_prob):
"""Compute binary logistic loss for classification.
This is identical to log_loss in binary classification case,
but is kept for its use in multilabel case.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels.
y_prob : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
Returns
-------
loss : float
The degree to which the samples are correctly predicted.
"""
y_prob = np.clip(y_prob, 1e-10, 1 - 1e-10)
return -np.sum(y_true * np.log(y_prob) +
(1 - y_true) * np.log(1 - y_prob)) / y_prob.shape[0]
LOSS_FUNCTIONS = {'squared_loss': squared_loss, 'log_loss': log_loss,
'binary_log_loss': binary_log_loss}
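# A hedged usage sketch, not part of the original module, showing how the
# ACTIVATIONS and DERIVATIVES tables are meant to be paired during backpropagation;
# the array values are illustrative only.
#
#   Z = np.array([[0.5, -1.0], [2.0, 0.0]])
#   delta = np.ones_like(Z)
#   ACTIVATIONS['relu'](Z)         # forward pass, clips negatives in place -> [[0.5, 0.], [2., 0.]]
#   DERIVATIVES['relu'](Z, delta)  # backward pass, zeroes delta where Z == 0 -> [[1., 0.], [1., 0.]]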
| bsd-3-clause |
isyippee/oslo.messaging | oslo_messaging/_drivers/protocols/amqp/drivertasks.py | 7 | 4058 | # Copyright 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
import time
from oslo_messaging._drivers.protocols.amqp import controller
from oslo_messaging import exceptions
from six import moves
LOG = logging.getLogger(__name__)
class SendTask(controller.Task):
"""A task that sends a message to a target, and optionally waits for a
reply message. The caller may block until the remote confirms receipt or
the reply message has arrived.
"""
def __init__(self, target, request, wait_for_reply, deadline):
super(SendTask, self).__init__()
self._target = target
self._request = request
self._deadline = deadline
self._wait_for_reply = wait_for_reply
self._results_queue = moves.queue.Queue()
def wait(self, timeout):
"""Wait for the send to complete, and, optionally, a reply message from
the remote. Will raise MessagingTimeout if the send does not complete
or no reply is received within timeout seconds. If the request has
failed for any other reason, a MessagingException is raised.
"""
try:
result = self._results_queue.get(timeout=timeout)
except moves.queue.Empty:
if self._wait_for_reply:
reason = "Timed out waiting for a reply."
else:
reason = "Timed out waiting for send to complete."
raise exceptions.MessagingTimeout(reason)
if result["status"] == "OK":
return result.get("response", None)
raise result["error"]
def execute(self, controller):
"""Runs on eventloop thread - sends request."""
if not self._deadline or self._deadline > time.time():
controller.request(self._target, self._request,
self._results_queue, self._wait_for_reply)
else:
LOG.warn("Send request to %s aborted: TTL expired.", self._target)
class ListenTask(controller.Task):
"""A task that creates a subscription to the given target. Messages
arriving from the target are given to the listener.
"""
def __init__(self, target, listener, notifications=False):
"""Create a subscription to the target."""
super(ListenTask, self).__init__()
self._target = target
self._listener = listener
self._notifications = notifications
def execute(self, controller):
"""Run on the eventloop thread - subscribes to target. Inbound messages
are queued to the listener's incoming queue.
"""
if self._notifications:
controller.subscribe_notifications(self._target,
self._listener.incoming)
else:
controller.subscribe(self._target, self._listener.incoming)
class ReplyTask(controller.Task):
"""A task that sends 'response' message to 'address'.
"""
def __init__(self, address, response, log_failure):
super(ReplyTask, self).__init__()
self._address = address
self._response = response
self._log_failure = log_failure
self._wakeup = threading.Event()
def wait(self):
"""Wait for the controller to send the message.
"""
self._wakeup.wait()
def execute(self, controller):
"""Run on the eventloop thread - send the response message."""
controller.response(self._address, self._response)
self._wakeup.set()
| apache-2.0 |
amenonsen/ansible | lib/ansible/modules/storage/netapp/netapp_e_facts.py | 4 | 27761 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: netapp_e_facts
short_description: NetApp E-Series retrieve facts about NetApp E-Series storage arrays
description:
- The netapp_e_facts module returns a collection of facts regarding NetApp E-Series storage arrays.
- When contacting a storage array directly the collection includes details about the array, controllers, management
interfaces, hostside interfaces, driveside interfaces, disks, storage pools, volumes, snapshots, and features.
- When contacting a web services proxy the collection will include basic information regarding the storage systems
that are under its management.
version_added: '2.2'
author:
- Kevin Hulquest (@hulquest)
- Nathan Swartz (@ndswartz)
extends_documentation_fragment:
- netapp.eseries
'''
EXAMPLES = """
---
- name: Get array facts
netapp_e_facts:
ssid: "{{ netapp_array_id }}"
api_url: "https://{{ netapp_e_api_host }}:8443/devmgr/v2"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
validate_certs: "{{ netapp_api_validate_certs }}"
- name: Get array facts
netapp_e_facts:
ssid: 1
api_url: https://192.168.1.100:8443/devmgr/v2
api_username: myApiUser
api_password: myApiPass
validate_certs: true
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample:
- Gathered facts for storage array. Array ID [1].
- Gathered facts for web services proxy.
storage_array_facts:
description: provides details about the array, controllers, management interfaces, hostside interfaces,
driveside interfaces, disks, storage pools, volumes, snapshots, and features.
returned: on successful inquiry from from embedded web services rest api
type: complex
contains:
netapp_controllers:
description: storage array controller list that contains basic controller identification and status
type: complex
sample:
- [{"name": "A", "serial": "021632007299", "status": "optimal"},
{"name": "B", "serial": "021632007300", "status": "failed"}]
netapp_disks:
description: drive list that contains identification, type, and status information for each drive
type: complex
sample:
- [{"available": false,
"firmware_version": "MS02",
"id": "01000000500003960C8B67880000000000000000",
"media_type": "ssd",
"product_id": "PX02SMU080 ",
"serial_number": "15R0A08LT2BA",
"status": "optimal",
"tray_ref": "0E00000000000000000000000000000000000000",
"usable_bytes": "799629205504" }]
netapp_driveside_interfaces:
description: drive side interface list that contains identification, type, and speed for each interface
type: complex
sample:
- [{ "controller": "A", "interface_speed": "12g", "interface_type": "sas" }]
- [{ "controller": "B", "interface_speed": "10g", "interface_type": "iscsi" }]
netapp_enabled_features:
description: specifies the enabled features on the storage array.
returned: on success
type: complex
sample:
- [ "flashReadCache", "performanceTier", "protectionInformation", "secureVolume" ]
netapp_host_groups:
description: specifies the host groups on the storage arrays.
returned: on success
type: complex
sample:
- [{ "id": "85000000600A098000A4B28D003610705C40B964", "name": "group1" }]
netapp_hosts:
description: specifies the hosts on the storage arrays.
returned: on success
type: complex
sample:
- [{ "id": "8203800000000000000000000000000000000000",
"name": "host1",
"group_id": "85000000600A098000A4B28D003610705C40B964",
"host_type_index": 28,
"ports": [{ "type": "fc", "address": "1000FF7CFFFFFF01", "label": "FC_1" },
{ "type": "fc", "address": "1000FF7CFFFFFF00", "label": "FC_2" }]}]
netapp_host_types:
description: lists the available host types on the storage array.
returned: on success
type: complex
sample:
- [{ "index": 0, "type": "FactoryDefault" },
{ "index": 1, "type": "W2KNETNCL"},
{ "index": 2, "type": "SOL" },
{ "index": 5, "type": "AVT_4M" },
{ "index": 6, "type": "LNX" },
{ "index": 7, "type": "LnxALUA" },
{ "index": 8, "type": "W2KNETCL" },
{ "index": 9, "type": "AIX MPIO" },
{ "index": 10, "type": "VmwTPGSALUA" },
{ "index": 15, "type": "HPXTPGS" },
{ "index": 17, "type": "SolTPGSALUA" },
{ "index": 18, "type": "SVC" },
{ "index": 22, "type": "MacTPGSALUA" },
{ "index": 23, "type": "WinTPGSALUA" },
{ "index": 24, "type": "LnxTPGSALUA" },
{ "index": 25, "type": "LnxTPGSALUA_PM" },
{ "index": 26, "type": "ONTAP_ALUA" },
{ "index": 27, "type": "LnxTPGSALUA_SF" },
{ "index": 28, "type": "LnxDHALUA" },
{ "index": 29, "type": "ATTOClusterAllOS" }]
netapp_hostside_interfaces:
description: host side interface list that contains identification, configuration, type, speed, and
status information for each interface
type: complex
sample:
- [{"iscsi":
[{ "controller": "A",
"current_interface_speed": "10g",
"ipv4_address": "10.10.10.1",
"ipv4_enabled": true,
"ipv4_gateway": "10.10.10.1",
"ipv4_subnet_mask": "255.255.255.0",
"ipv6_enabled": false,
"iqn": "iqn.1996-03.com.netapp:2806.600a098000a81b6d0000000059d60c76",
"link_status": "up",
"mtu": 9000,
"supported_interface_speeds": [ "10g" ] }]}]
netapp_management_interfaces:
description: management interface list that contains identification, configuration, and status for
each interface
type: complex
sample:
- [{"alias": "ict-2800-A",
"channel": 1,
"controller": "A",
"dns_config_method": "dhcp",
"dns_servers": [],
"ipv4_address": "10.1.1.1",
"ipv4_address_config_method": "static",
"ipv4_enabled": true,
"ipv4_gateway": "10.113.1.1",
"ipv4_subnet_mask": "255.255.255.0",
"ipv6_enabled": false,
"link_status": "up",
"mac_address": "00A098A81B5D",
"name": "wan0",
"ntp_config_method": "disabled",
"ntp_servers": [],
"remote_ssh_access": false }]
netapp_storage_array:
description: provides storage array identification, firmware version, and available capabilities
type: dict
sample:
- {"chassis_serial": "021540006043",
"firmware": "08.40.00.01",
"name": "ict-2800-11_40",
"wwn": "600A098000A81B5D0000000059D60C76",
"cacheBlockSizes": [4096,
8192,
16384,
32768],
"supportedSegSizes": [8192,
16384,
32768,
65536,
131072,
262144,
524288]}
netapp_storage_pools:
description: storage pool list that contains identification and capacity information for each pool
type: complex
sample:
- [{"available_capacity": "3490353782784",
"id": "04000000600A098000A81B5D000002B45A953A61",
"name": "Raid6",
"total_capacity": "5399466745856",
"used_capacity": "1909112963072" }]
netapp_volumes:
description: storage volume list that contains identification and capacity information for each volume
type: complex
sample:
- [{"capacity": "5368709120",
"id": "02000000600A098000AAC0C3000002C45A952BAA",
"is_thin_provisioned": false,
"name": "5G",
"parent_storage_pool_id": "04000000600A098000A81B5D000002B45A953A61" }]
netapp_workload_tags:
description: workload tag list
type: complex
sample:
- [{"id": "87e19568-43fb-4d8d-99ea-2811daaa2b38",
"name": "ftp_server",
"workloadAttributes": [{"key": "use",
"value": "general"}]}]
netapp_volumes_by_initiators:
description: list of available volumes keyed by the mapped initiators.
type: complex
sample:
- {"192_168_1_1": [{"id": "02000000600A098000A4B9D1000015FD5C8F7F9E",
"meta_data": {"filetype": "xfs", "public": true},
"name": "some_volume",
"workload_name": "test2_volumes",
"wwn": "600A098000A4B9D1000015FD5C8F7F9E"}]}
snapshot_images:
description: snapshot image list that contains identification, capacity, and status information for each
snapshot image
type: complex
sample:
- [{"active_cow": true,
"creation_method": "user",
"id": "34000000600A098000A81B5D00630A965B0535AC",
"pit_capacity": "5368709120",
"reposity_cap_utilization": "0",
"rollback_source": false,
"status": "optimal" }]
"""
from re import match
from pprint import pformat
from ansible.module_utils.netapp import NetAppESeriesModule
class Facts(NetAppESeriesModule):
def __init__(self):
web_services_version = "02.00.0000.0000"
super(Facts, self).__init__(ansible_options={},
web_services_version=web_services_version,
supports_check_mode=True)
def get_controllers(self):
"""Retrieve a mapping of controller references to their labels."""
controllers = list()
try:
rc, controllers = self.request('storage-systems/%s/graph/xpath-filter?query=/controller/id' % self.ssid)
except Exception as err:
self.module.fail_json(
msg="Failed to retrieve controller list! Array Id [%s]. Error [%s]."
% (self.ssid, str(err)))
controllers.sort()
controllers_dict = {}
i = ord('A')
for controller in controllers:
label = chr(i)
controllers_dict[controller] = label
i += 1
return controllers_dict
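# Illustrative sketch (not from the source): with two controllers reported by
# the graph query, the mapping built above would look like
# {"<controllerRef-1>": "A", "<controllerRef-2>": "B"}, with letters assigned
# in sorted order of the controller references.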
def get_array_facts(self):
"""Extract particular facts from the storage array graph"""
facts = dict(facts_from_proxy=False, ssid=self.ssid)
controller_reference_label = self.get_controllers()
array_facts = None
# Get the storage array graph
try:
rc, array_facts = self.request("storage-systems/%s/graph" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to obtain facts from storage array with id [%s]. Error [%s]"
% (self.ssid, str(error)))
facts['netapp_storage_array'] = dict(
name=array_facts['sa']['saData']['storageArrayLabel'],
chassis_serial=array_facts['sa']['saData']['chassisSerialNumber'],
firmware=array_facts['sa']['saData']['fwVersion'],
wwn=array_facts['sa']['saData']['saId']['worldWideName'],
segment_sizes=array_facts['sa']['featureParameters']['supportedSegSizes'],
cache_block_sizes=array_facts['sa']['featureParameters']['cacheBlockSizes'])
facts['netapp_controllers'] = [
dict(
name=controller_reference_label[controller['controllerRef']],
serial=controller['serialNumber'].strip(),
status=controller['status'],
) for controller in array_facts['controller']]
facts['netapp_host_groups'] = [
dict(
id=group['id'],
name=group['name']
) for group in array_facts['storagePoolBundle']['cluster']]
facts['netapp_hosts'] = [
dict(
group_id=host['clusterRef'],
hosts_reference=host['hostRef'],
id=host['id'],
name=host['name'],
host_type_index=host['hostTypeIndex'],
ports=host['hostSidePorts']
) for host in array_facts['storagePoolBundle']['host']]
facts['netapp_host_types'] = [
dict(
type=host_type['hostType'],
index=host_type['index']
) for host_type in array_facts['sa']['hostSpecificVals']
if 'hostType' in host_type.keys() and host_type['hostType']
# This conditional ignores zero-length strings which indicates that the associated host-specific NVSRAM region has been cleared.
]
facts['snapshot_images'] = [
dict(
id=snapshot['id'],
status=snapshot['status'],
pit_capacity=snapshot['pitCapacity'],
creation_method=snapshot['creationMethod'],
reposity_cap_utilization=snapshot['repositoryCapacityUtilization'],
active_cow=snapshot['activeCOW'],
rollback_source=snapshot['isRollbackSource']
) for snapshot in array_facts['highLevelVolBundle']['pit']]
facts['netapp_disks'] = [
dict(
id=disk['id'],
available=disk['available'],
media_type=disk['driveMediaType'],
status=disk['status'],
usable_bytes=disk['usableCapacity'],
tray_ref=disk['physicalLocation']['trayRef'],
product_id=disk['productID'],
firmware_version=disk['firmwareVersion'],
serial_number=disk['serialNumber'].lstrip()
) for disk in array_facts['drive']]
facts['netapp_management_interfaces'] = [
dict(controller=controller_reference_label[controller['controllerRef']],
name=iface['ethernet']['interfaceName'],
alias=iface['ethernet']['alias'],
channel=iface['ethernet']['channel'],
mac_address=iface['ethernet']['macAddr'],
remote_ssh_access=iface['ethernet']['rloginEnabled'],
link_status=iface['ethernet']['linkStatus'],
ipv4_enabled=iface['ethernet']['ipv4Enabled'],
ipv4_address_config_method=iface['ethernet']['ipv4AddressConfigMethod'].lower().replace("config", ""),
ipv4_address=iface['ethernet']['ipv4Address'],
ipv4_subnet_mask=iface['ethernet']['ipv4SubnetMask'],
ipv4_gateway=iface['ethernet']['ipv4GatewayAddress'],
ipv6_enabled=iface['ethernet']['ipv6Enabled'],
dns_config_method=iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsAcquisitionType'],
dns_servers=(iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers']
if iface['ethernet']['dnsProperties']['acquisitionProperties']['dnsServers'] else []),
ntp_config_method=iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpAcquisitionType'],
ntp_servers=(iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers']
if iface['ethernet']['ntpProperties']['acquisitionProperties']['ntpServers'] else [])
) for controller in array_facts['controller'] for iface in controller['netInterfaces']]
facts['netapp_hostside_interfaces'] = [
dict(
fc=[dict(controller=controller_reference_label[controller['controllerRef']],
channel=iface['fibre']['channel'],
link_status=iface['fibre']['linkStatus'],
current_interface_speed=strip_interface_speed(iface['fibre']['currentInterfaceSpeed']),
maximum_interface_speed=strip_interface_speed(iface['fibre']['maximumInterfaceSpeed']))
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'fc'],
ib=[dict(controller=controller_reference_label[controller['controllerRef']],
channel=iface['ib']['channel'],
link_status=iface['ib']['linkState'],
mtu=iface['ib']['maximumTransmissionUnit'],
current_interface_speed=strip_interface_speed(iface['ib']['currentSpeed']),
maximum_interface_speed=strip_interface_speed(iface['ib']['supportedSpeed']))
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'ib'],
iscsi=[dict(controller=controller_reference_label[controller['controllerRef']],
iqn=iface['iscsi']['iqn'],
link_status=iface['iscsi']['interfaceData']['ethernetData']['linkStatus'],
ipv4_enabled=iface['iscsi']['ipv4Enabled'],
ipv4_address=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4Address'],
ipv4_subnet_mask=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4SubnetMask'],
ipv4_gateway=iface['iscsi']['ipv4Data']['ipv4AddressData']['ipv4GatewayAddress'],
ipv6_enabled=iface['iscsi']['ipv6Enabled'],
mtu=iface['iscsi']['interfaceData']['ethernetData']['maximumFramePayloadSize'],
current_interface_speed=strip_interface_speed(iface['iscsi']['interfaceData']
['ethernetData']['currentInterfaceSpeed']),
supported_interface_speeds=strip_interface_speed(iface['iscsi']['interfaceData']
['ethernetData']
['supportedInterfaceSpeeds']))
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'iscsi'],
sas=[dict(controller=controller_reference_label[controller['controllerRef']],
channel=iface['sas']['channel'],
current_interface_speed=strip_interface_speed(iface['sas']['currentInterfaceSpeed']),
maximum_interface_speed=strip_interface_speed(iface['sas']['maximumInterfaceSpeed']),
link_status=iface['sas']['iocPort']['state'])
for controller in array_facts['controller']
for iface in controller['hostInterfaces']
if iface['interfaceType'] == 'sas'])]
facts['netapp_driveside_interfaces'] = [
dict(
controller=controller_reference_label[controller['controllerRef']],
interface_type=interface['interfaceType'],
interface_speed=strip_interface_speed(
interface[interface['interfaceType']]['maximumInterfaceSpeed']
if (interface['interfaceType'] == 'sata' or
interface['interfaceType'] == 'sas' or
interface['interfaceType'] == 'fibre')
else (
interface[interface['interfaceType']]['currentSpeed']
if interface['interfaceType'] == 'ib'
else (
interface[interface['interfaceType']]['interfaceData']['maximumInterfaceSpeed']
if interface['interfaceType'] == 'iscsi' else 'unknown'
))),
)
for controller in array_facts['controller']
for interface in controller['driveInterfaces']]
facts['netapp_storage_pools'] = [
dict(
id=storage_pool['id'],
name=storage_pool['name'],
available_capacity=storage_pool['freeSpace'],
total_capacity=storage_pool['totalRaidedSpace'],
used_capacity=storage_pool['usedSpace']
) for storage_pool in array_facts['volumeGroup']]
all_volumes = list(array_facts['volume'])
facts['netapp_volumes'] = [
dict(
id=v['id'],
name=v['name'],
parent_storage_pool_id=v['volumeGroupRef'],
capacity=v['capacity'],
is_thin_provisioned=v['thinProvisioned'],
workload=v['metadata'],
) for v in all_volumes]
workload_tags = None
try:
rc, workload_tags = self.request("storage-systems/%s/workloads" % self.ssid)
except Exception as error:
self.module.fail_json(msg="Failed to retrieve workload tags. Array [%s]." % self.ssid)
facts['netapp_workload_tags'] = [
dict(
id=workload_tag['id'],
name=workload_tag['name'],
attributes=workload_tag['workloadAttributes']
) for workload_tag in workload_tags]
# Create a dictionary of volume lists keyed by host names
facts['netapp_volumes_by_initiators'] = dict()
for mapping in array_facts['storagePoolBundle']['lunMapping']:
for host in facts['netapp_hosts']:
if mapping['mapRef'] == host['hosts_reference'] or mapping['mapRef'] == host['group_id']:
if host['name'] not in facts['netapp_volumes_by_initiators'].keys():
facts['netapp_volumes_by_initiators'].update({host['name']: []})
for volume in all_volumes:
if mapping['id'] in [volume_mapping['id'] for volume_mapping in volume['listOfMappings']]:
# Determine workload name if there is one
workload_name = ""
metadata = dict()
for volume_tag in volume['metadata']:
if volume_tag['key'] == 'workloadId':
for workload_tag in facts['netapp_workload_tags']:
if volume_tag['value'] == workload_tag['id']:
workload_name = workload_tag['name']
metadata = dict((entry['key'], entry['value'])
for entry in workload_tag['attributes']
if entry['key'] != 'profileId')
facts['netapp_volumes_by_initiators'][host['name']].append(
dict(name=volume['name'],
id=volume['id'],
wwn=volume['wwn'],
workload_name=workload_name,
meta_data=metadata))
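# Sketch of the resulting shape (values hypothetical; see the
# netapp_volumes_by_initiators sample in the RETURN block above):
#   facts['netapp_volumes_by_initiators'] == {
#       "host1": [{"name": "...", "id": "...", "wwn": "...",
#                  "workload_name": "...", "meta_data": {...}}]}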
features = [feature for feature in array_facts['sa']['capabilities']]
features.extend([feature['capability'] for feature in array_facts['sa']['premiumFeatures']
if feature['isEnabled']])
features = list(set(features)) # ensure unique
features.sort()
facts['netapp_enabled_features'] = features
return facts
def get_facts(self):
"""Get the embedded or web services proxy information."""
facts = self.get_array_facts()
self.module.log("isEmbedded: %s" % self.is_embedded())
self.module.log(pformat(facts))
self.module.exit_json(msg="Gathered facts for storage array. Array ID: [%s]." % self.ssid,
storage_array_facts=facts)
def strip_interface_speed(speed):
"""Converts symbol interface speeds to a more common notation. Example: 'speed10gig' -> '10g'"""
if isinstance(speed, list):
result = [match(r"speed[0-9]{1,3}[gm]", sp) for sp in speed]
result = [sp.group().replace("speed", "") if result else "unknown" for sp in result if sp]
result = ["auto" if match(r"auto", sp) else sp for sp in result]
else:
result = match(r"speed[0-9]{1,3}[gm]", speed)
result = result.group().replace("speed", "") if result else "unknown"
result = "auto" if match(r"auto", result.lower()) else result
return result
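# Illustrative expectations for strip_interface_speed (symbol values modeled on
# the API's "speedXgig" notation; not part of the module):
#   strip_interface_speed("speed10gig")                -> "10g"
#   strip_interface_speed(["speed1gig", "speed10gig"]) -> ["1g", "10g"]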
def main():
facts = Facts()
facts.get_facts()
if __name__ == "__main__":
main()
| gpl-3.0 |
Mactory/easy-thumbnails | easy_thumbnails/south_migrations/0009_auto__del_storage.py | 20 | 2337 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Storage'
db.delete_table('easy_thumbnails_storage')
def backwards(self, orm):
# Adding model 'Storage'
db.create_table('easy_thumbnails_storage', (
('pickle', self.gf('django.db.models.fields.TextField')()),
('hash', self.gf('django.db.models.fields.CharField')(max_length=40, primary_key=True, db_index=True)),
))
db.send_create_signal('easy_thumbnails', ['Storage'])
models = {
'easy_thumbnails.source': {
'Meta': {'object_name': 'Source'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 7, 21, 4, 34, 17, 1330)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'storage_new': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['easy_thumbnails.StorageNew']"})
},
'easy_thumbnails.storagenew': {
'Meta': {'object_name': 'StorageNew'},
'hash': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pickle': ('django.db.models.fields.TextField', [], {})
},
'easy_thumbnails.thumbnail': {
'Meta': {'object_name': 'Thumbnail'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2010, 7, 21, 4, 34, 17, 1330)'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'thumbnails'", 'to': "orm['easy_thumbnails.Source']"}),
'storage_new': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['easy_thumbnails.StorageNew']"})
}
}
complete_apps = ['easy_thumbnails']
| bsd-3-clause |
embray/numpy | numpy/core/tests/test_ufunc.py | 1 | 43039 | from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.testing import *
import numpy.core.umath_tests as umt
import numpy.core.operand_flag_tests as opflag_tests
from numpy.compat import asbytes
from numpy.core.test_rational import *
class TestUfunc(TestCase):
def test_pickle(self):
import pickle
assert pickle.loads(pickle.dumps(np.sin)) is np.sin
def test_pickle_withstring(self):
import pickle
astring = asbytes("cnumpy.core\n_ufunc_reconstruct\np0\n"
"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
assert pickle.loads(astring) is np.cos
def test_reduceat_shifting_sum(self) :
L = 6
x = np.arange(L)
idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel()
assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7])
def test_generic_loops(self) :
"""Test generic loops.
The loops to be tested are:
PyUFunc_ff_f_As_dd_d
PyUFunc_ff_f
PyUFunc_dd_d
PyUFunc_gg_g
PyUFunc_FF_F_As_DD_D
PyUFunc_DD_D
PyUFunc_FF_F
PyUFunc_GG_G
PyUFunc_OO_O
PyUFunc_OO_O_method
PyUFunc_f_f_As_d_d
PyUFunc_d_d
PyUFunc_f_f
PyUFunc_g_g
PyUFunc_F_F_As_D_D
PyUFunc_F_F
PyUFunc_D_D
PyUFunc_G_G
PyUFunc_O_O
PyUFunc_O_O_method
PyUFunc_On_Om
Where:
f -- float
d -- double
g -- long double
F -- complex float
D -- complex double
G -- complex long double
O -- python object
It is difficult to assure that each of these loops is entered from the
Python level as the special cased loops are a moving target and the
corresponding types are architecture dependent. We probably need to
define C level testing ufuncs to get at them. For the time being, I've
just looked at the signatures registered in the build directory to find
relevant functions.
Fixme, currently untested:
PyUFunc_ff_f_As_dd_d
PyUFunc_FF_F_As_DD_D
PyUFunc_f_f_As_d_d
PyUFunc_F_F_As_D_D
PyUFunc_On_Om
"""
fone = np.exp
ftwo = lambda x, y : x**y
fone_val = 1
ftwo_val = 1
# check unary PyUFunc_f_f.
msg = "PyUFunc_f_f"
x = np.zeros(10, dtype=np.single)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_d_d.
msg = "PyUFunc_d_d"
x = np.zeros(10, dtype=np.double)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_g_g.
msg = "PyUFunc_g_g"
x = np.zeros(10, dtype=np.longdouble)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_F_F.
msg = "PyUFunc_F_F"
x = np.zeros(10, dtype=np.csingle)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_D_D.
msg = "PyUFunc_D_D"
x = np.zeros(10, dtype=np.cdouble)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check unary PyUFunc_G_G.
msg = "PyUFunc_G_G"
x = np.zeros(10, dtype=np.clongdouble)[0::2]
assert_almost_equal(fone(x), fone_val, err_msg=msg)
# check binary PyUFunc_ff_f.
msg = "PyUFunc_ff_f"
x = np.ones(10, dtype=np.single)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_dd_d.
msg = "PyUFunc_dd_d"
x = np.ones(10, dtype=np.double)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_gg_g.
msg = "PyUFunc_gg_g"
x = np.ones(10, dtype=np.longdouble)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_FF_F.
msg = "PyUFunc_FF_F"
x = np.ones(10, dtype=np.csingle)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_DD_D.
msg = "PyUFunc_DD_D"
x = np.ones(10, dtype=np.cdouble)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# check binary PyUFunc_GG_G.
msg = "PyUFunc_GG_G"
x = np.ones(10, dtype=np.clongdouble)[0::2]
assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
# class to use in testing object method loops
class foo(object):
def conjugate(self) :
return np.bool_(1)
def logical_xor(self, obj) :
return np.bool_(1)
# check unary PyUFunc_O_O
msg = "PyUFunc_O_O"
x = np.ones(10, dtype=np.object)[0::2]
assert_(np.all(np.abs(x) == 1), msg)
# check unary PyUFunc_O_O_method
msg = "PyUFunc_O_O_method"
x = np.zeros(10, dtype=np.object)[0::2]
for i in range(len(x)) :
x[i] = foo()
assert_(np.all(np.conjugate(x) == True), msg)
# check binary PyUFunc_OO_O
msg = "PyUFunc_OO_O"
x = np.ones(10, dtype=np.object)[0::2]
assert_(np.all(np.add(x, x) == 2), msg)
# check binary PyUFunc_OO_O_method
msg = "PyUFunc_OO_O_method"
x = np.zeros(10, dtype=np.object)[0::2]
for i in range(len(x)) :
x[i] = foo()
assert_(np.all(np.logical_xor(x, x)), msg)
# check PyUFunc_On_Om
# fixme -- I don't know how to do this yet
def test_all_ufunc(self) :
"""Try to check presence and results of all ufuncs.
The list of ufuncs comes from generate_umath.py and is as follows:
===== ==== ============= =============== ========================
done args function types notes
===== ==== ============= =============== ========================
n 1 conjugate nums + O
n 1 absolute nums + O complex -> real
n 1 negative nums + O
n 1 sign nums + O -> int
n 1 invert bool + ints + O flts raise an error
n 1 degrees real + M cmplx raise an error
n 1 radians real + M cmplx raise an error
n 1 arccos flts + M
n 1 arccosh flts + M
n 1 arcsin flts + M
n 1 arcsinh flts + M
n 1 arctan flts + M
n 1 arctanh flts + M
n 1 cos flts + M
n 1 sin flts + M
n 1 tan flts + M
n 1 cosh flts + M
n 1 sinh flts + M
n 1 tanh flts + M
n 1 exp flts + M
n 1 expm1 flts + M
n 1 log flts + M
n 1 log10 flts + M
n 1 log1p flts + M
n 1 sqrt flts + M real x < 0 raises error
n 1 ceil real + M
n 1 trunc real + M
n 1 floor real + M
n 1 fabs real + M
n 1 rint flts + M
n 1 isnan flts -> bool
n 1 isinf flts -> bool
n 1 isfinite flts -> bool
n 1 signbit real -> bool
n 1 modf real -> (frac, int)
n 1 logical_not bool + nums + M -> bool
n 2 left_shift ints + O flts raise an error
n 2 right_shift ints + O flts raise an error
n 2 add bool + nums + O boolean + is ||
n 2 subtract bool + nums + O boolean - is ^
n 2 multiply bool + nums + O boolean * is &
n 2 divide nums + O
n 2 floor_divide nums + O
n 2 true_divide nums + O bBhH -> f, iIlLqQ -> d
n 2 fmod nums + M
n 2 power nums + O
n 2 greater bool + nums + O -> bool
n 2 greater_equal bool + nums + O -> bool
n 2 less bool + nums + O -> bool
n 2 less_equal bool + nums + O -> bool
n 2 equal bool + nums + O -> bool
n 2 not_equal bool + nums + O -> bool
n 2 logical_and bool + nums + M -> bool
n 2 logical_or bool + nums + M -> bool
n 2 logical_xor bool + nums + M -> bool
n 2 maximum bool + nums + O
n 2 minimum bool + nums + O
n 2 bitwise_and bool + ints + O flts raise an error
n 2 bitwise_or bool + ints + O flts raise an error
n 2 bitwise_xor bool + ints + O flts raise an error
n 2 arctan2 real + M
n 2 remainder ints + real + O
n 2 hypot real + M
===== ==== ============= =============== ========================
Types other than those listed will be accepted, but they are cast to
the smallest compatible type for which the function is defined. The
casting rules are:
bool -> int8 -> float32
ints -> double
"""
pass
def test_signature(self):
# the arguments to test_signature are: nin, nout, core_signature
# pass
assert_equal(umt.test_signature(2, 1, "(i),(i)->()"), 1)
# pass. empty core signature; treat as plain ufunc (with trivial core)
assert_equal(umt.test_signature(2, 1, "(),()->()"), 0)
# in the following calls, a ValueError should be raised because
# of error in core signature
# error: extra parenthesis
msg = "core_sig: extra parenthesis"
try:
ret = umt.test_signature(2, 1, "((i)),(i)->()")
assert_equal(ret, None, err_msg=msg)
except ValueError: None
# error: parenthesis matching
msg = "core_sig: parenthesis matching"
try:
ret = umt.test_signature(2, 1, "(i),)i(->()")
assert_equal(ret, None, err_msg=msg)
except ValueError: None
# error: incomplete signature. letters outside of parenthesis are ignored
msg = "core_sig: incomplete signature"
try:
ret = umt.test_signature(2, 1, "(i),->()")
assert_equal(ret, None, err_msg=msg)
except ValueError: None
# error: incomplete signature. 2 output arguments are specified
msg = "core_sig: incomplete signature"
try:
ret = umt.test_signature(2, 2, "(i),(i)->()")
assert_equal(ret, None, err_msg=msg)
except ValueError: None
# more complicated names for variables
assert_equal(umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)"), 1)
def test_get_signature(self):
assert_equal(umt.inner1d.signature, "(i),(i)->()")
def test_forced_sig(self):
a = 0.5*np.arange(3, dtype='f8')
assert_equal(np.add(a, 0.5), [0.5, 1, 1.5])
assert_equal(np.add(a, 0.5, sig='i', casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig='ii->i', casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig=('i4',), casting='unsafe'), [0, 0, 1])
assert_equal(np.add(a, 0.5, sig=('i4', 'i4', 'i4'),
casting='unsafe'), [0, 0, 1])
b = np.zeros((3,), dtype='f8')
np.add(a, 0.5, out=b)
assert_equal(b, [0.5, 1, 1.5])
b[:] = 0
np.add(a, 0.5, sig='i', out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig='ii->i', out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig=('i4',), out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
b[:] = 0
np.add(a, 0.5, sig=('i4', 'i4', 'i4'), out=b, casting='unsafe')
assert_equal(b, [0, 0, 1])
def test_sum_stability(self):
a = np.ones(500, dtype=np.float32)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 4)
a = np.ones(500, dtype=np.float64)
assert_almost_equal((a / 10.).sum() - a.size / 10., 0, 13)
def test_sum(self):
for dt in (np.int, np.float16, np.float32, np.float64, np.longdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2)
d = np.arange(1, v + 1, dtype=dt)
assert_almost_equal(np.sum(d), tgt)
assert_almost_equal(np.sum(d[::-1]), tgt)
d = np.ones(500, dtype=dt)
assert_almost_equal(np.sum(d[::2]), 250.)
assert_almost_equal(np.sum(d[1::2]), 250.)
assert_almost_equal(np.sum(d[::3]), 167.)
assert_almost_equal(np.sum(d[1::3]), 167.)
assert_almost_equal(np.sum(d[::-2]), 250.)
assert_almost_equal(np.sum(d[-1::-2]), 250.)
assert_almost_equal(np.sum(d[::-3]), 167.)
assert_almost_equal(np.sum(d[-1::-3]), 167.)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt)
d += d
assert_almost_equal(d, 2.)
def test_sum_complex(self):
for dt in (np.complex64, np.complex128, np.clongdouble):
for v in (0, 1, 2, 7, 8, 9, 15, 16, 19, 127,
128, 1024, 1235):
tgt = dt(v * (v + 1) / 2) - dt((v * (v + 1) / 2) *1j)
d = np.empty(v, dtype=dt)
d.real = np.arange(1, v + 1)
d.imag = -np.arange(1, v + 1)
assert_almost_equal(np.sum(d), tgt)
assert_almost_equal(np.sum(d[::-1]), tgt)
d = np.ones(500, dtype=dt) + 1j
assert_almost_equal(np.sum(d[::2]), 250. + 250j)
assert_almost_equal(np.sum(d[1::2]), 250. + 250j)
assert_almost_equal(np.sum(d[::3]), 167. + 167j)
assert_almost_equal(np.sum(d[1::3]), 167. + 167j)
assert_almost_equal(np.sum(d[::-2]), 250. + 250j)
assert_almost_equal(np.sum(d[-1::-2]), 250. + 250j)
assert_almost_equal(np.sum(d[::-3]), 167. + 167j)
assert_almost_equal(np.sum(d[-1::-3]), 167. + 167j)
# sum with first reduction entry != 0
d = np.ones((1,), dtype=dt) + 1j
d += d
assert_almost_equal(d, 2. + 2j)
def test_inner1d(self):
a = np.arange(6).reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
a = np.arange(6)
assert_array_equal(umt.inner1d(a, a), np.sum(a*a))
def test_broadcast(self):
msg = "broadcast"
a = np.arange(4).reshape((2, 1, 2))
b = np.arange(4).reshape((1, 2, 2))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "extend & broadcast loop dimensions"
b = np.arange(4).reshape((2, 2))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "broadcast in core dimensions"
a = np.arange(8).reshape((4, 2))
b = np.arange(4).reshape((4, 1))
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "extend & broadcast core and loop dimensions"
a = np.arange(8).reshape((4, 2))
b = np.array(7)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
msg = "broadcast should fail"
a = np.arange(2).reshape((2, 1, 1))
b = np.arange(3).reshape((3, 1, 1))
try:
ret = umt.inner1d(a, b)
assert_equal(ret, None, err_msg=msg)
except ValueError: None
def test_type_cast(self):
msg = "type cast"
a = np.arange(6, dtype='short').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg)
msg = "type cast on one argument"
a = np.arange(6).reshape((2, 3))
b = a+0.1
assert_array_almost_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1),
err_msg=msg)
def test_endian(self):
msg = "big endian"
a = np.arange(6, dtype='>i4').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg)
msg = "little endian"
a = np.arange(6, dtype='<i4').reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1), err_msg=msg)
# Output should always be native-endian
Ba = np.arange(1, dtype='>f8')
La = np.arange(1, dtype='<f8')
assert_equal((Ba+Ba).dtype, np.dtype('f8'))
assert_equal((Ba+La).dtype, np.dtype('f8'))
assert_equal((La+Ba).dtype, np.dtype('f8'))
assert_equal((La+La).dtype, np.dtype('f8'))
assert_equal(np.absolute(La).dtype, np.dtype('f8'))
assert_equal(np.absolute(Ba).dtype, np.dtype('f8'))
assert_equal(np.negative(La).dtype, np.dtype('f8'))
assert_equal(np.negative(Ba).dtype, np.dtype('f8'))
def test_incontiguous_array(self):
msg = "incontiguous memory layout of array"
x = np.arange(64).reshape((2, 2, 2, 2, 2, 2))
a = x[:, 0,:, 0,:, 0]
b = x[:, 1,:, 1,:, 1]
a[0, 0, 0] = -1
msg2 = "make sure it references to the original array"
assert_equal(x[0, 0, 0, 0, 0, 0], -1, err_msg=msg2)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
x = np.arange(24).reshape(2, 3, 4)
a = x.T
b = x.T
a[0, 0, 0] = -1
assert_equal(x[0, 0, 0], -1, err_msg=msg2)
assert_array_equal(umt.inner1d(a, b), np.sum(a*b, axis=-1), err_msg=msg)
def test_output_argument(self):
msg = "output argument"
a = np.arange(12).reshape((2, 3, 2))
b = np.arange(4).reshape((2, 1, 2)) + 1
c = np.zeros((2, 3), dtype='int')
umt.inner1d(a, b, c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
msg = "output argument with type cast"
c = np.zeros((2, 3), dtype='int16')
umt.inner1d(a, b, c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c)
assert_array_equal(c, np.sum(a*b, axis=-1), err_msg=msg)
msg = "output argument with incontiguous layout"
c = np.zeros((2, 3, 4), dtype='int16')
umt.inner1d(a, b, c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
c[:] = -1
umt.inner1d(a, b, out=c[..., 0])
assert_array_equal(c[..., 0], np.sum(a*b, axis=-1), err_msg=msg)
def test_innerwt(self):
a = np.arange(6).reshape((2, 3))
b = np.arange(10, 16).reshape((2, 3))
w = np.arange(20, 26).reshape((2, 3))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
a = np.arange(100, 124).reshape((2, 3, 4))
b = np.arange(200, 224).reshape((2, 3, 4))
w = np.arange(300, 324).reshape((2, 3, 4))
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_innerwt_empty(self):
"""Test generalized ufunc with zero-sized operands"""
a = np.array([], dtype='f8')
b = np.array([], dtype='f8')
w = np.array([], dtype='f8')
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
def test_matrix_multiply(self):
self.compare_matrix_multiply_results(np.long)
self.compare_matrix_multiply_results(np.double)
def compare_matrix_multiply_results(self, tp):
d1 = np.array(rand(2, 3, 4), dtype=tp)
d2 = np.array(rand(2, 3, 4), dtype=tp)
msg = "matrix multiply on type %s" % d1.dtype.name
def permute_n(n):
if n == 1:
return ([0],)
ret = ()
base = permute_n(n-1)
for perm in base:
for i in range(n):
new = perm + [n-1]
new[n-1] = new[i]
new[i] = n-1
ret += (new,)
return ret
def slice_n(n):
if n == 0:
return ((),)
ret = ()
base = slice_n(n-1)
for sl in base:
ret += (sl+(slice(None),),)
ret += (sl+(slice(0, 1),),)
return ret
def broadcastable(s1, s2):
return s1 == s2 or s1 == 1 or s2 == 1
permute_3 = permute_n(3)
slice_3 = slice_n(3) + ((slice(None, None, -1),)*3,)
ref = True
for p1 in permute_3:
for p2 in permute_3:
for s1 in slice_3:
for s2 in slice_3:
a1 = d1.transpose(p1)[s1]
a2 = d2.transpose(p2)[s2]
ref = ref and a1.base != None
ref = ref and a2.base != None
if broadcastable(a1.shape[-1], a2.shape[-2]) and \
broadcastable(a1.shape[0], a2.shape[0]):
assert_array_almost_equal(
umt.matrix_multiply(a1, a2),
np.sum(a2[..., np.newaxis].swapaxes(-3, -1) *
a1[..., np.newaxis,:], axis=-1),
err_msg = msg+' %s %s' % (str(a1.shape),
str(a2.shape)))
assert_equal(ref, True, err_msg="reference check")
def test_object_logical(self):
a = np.array([3, None, True, False, "test", ""], dtype=object)
assert_equal(np.logical_or(a, None),
np.array([x or None for x in a], dtype=object))
assert_equal(np.logical_or(a, True),
np.array([x or True for x in a], dtype=object))
assert_equal(np.logical_or(a, 12),
np.array([x or 12 for x in a], dtype=object))
assert_equal(np.logical_or(a, "blah"),
np.array([x or "blah" for x in a], dtype=object))
assert_equal(np.logical_and(a, None),
np.array([x and None for x in a], dtype=object))
assert_equal(np.logical_and(a, True),
np.array([x and True for x in a], dtype=object))
assert_equal(np.logical_and(a, 12),
np.array([x and 12 for x in a], dtype=object))
assert_equal(np.logical_and(a, "blah"),
np.array([x and "blah" for x in a], dtype=object))
assert_equal(np.logical_not(a),
np.array([not x for x in a], dtype=object))
assert_equal(np.logical_or.reduce(a), 3)
assert_equal(np.logical_and.reduce(a), None)
def test_object_array_reduction(self):
# Reductions on object arrays
a = np.array(['a', 'b', 'c'], dtype=object)
assert_equal(np.sum(a), 'abc')
assert_equal(np.max(a), 'c')
assert_equal(np.min(a), 'a')
a = np.array([True, False, True], dtype=object)
assert_equal(np.sum(a), 2)
assert_equal(np.prod(a), 0)
assert_equal(np.any(a), True)
assert_equal(np.all(a), False)
assert_equal(np.max(a), True)
assert_equal(np.min(a), False)
def test_zerosize_reduction(self):
# Test with default dtype and object dtype
for a in [[], np.array([], dtype=object)]:
assert_equal(np.sum(a), 0)
assert_equal(np.prod(a), 1)
assert_equal(np.any(a), False)
assert_equal(np.all(a), True)
assert_raises(ValueError, np.max, a)
assert_raises(ValueError, np.min, a)
def test_axis_out_of_bounds(self):
a = np.array([False, False])
assert_raises(ValueError, a.all, axis=1)
a = np.array([False, False])
assert_raises(ValueError, a.all, axis=-2)
a = np.array([False, False])
assert_raises(ValueError, a.any, axis=1)
a = np.array([False, False])
assert_raises(ValueError, a.any, axis=-2)
def test_scalar_reduction(self):
# The functions 'sum', 'prod', etc allow specifying axis=0
# even for scalars
assert_equal(np.sum(3, axis=0), 3)
assert_equal(np.prod(3.5, axis=0), 3.5)
assert_equal(np.any(True, axis=0), True)
assert_equal(np.all(False, axis=0), False)
assert_equal(np.max(3, axis=0), 3)
assert_equal(np.min(2.5, axis=0), 2.5)
# Check scalar behaviour for ufuncs without an identity
assert_equal(np.power.reduce(3), 3)
# Make sure that scalars are coming out from this operation
assert_(type(np.prod(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.sum(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.max(np.float32(2.5), axis=0)) is np.float32)
assert_(type(np.min(np.float32(2.5), axis=0)) is np.float32)
# check if scalars/0-d arrays get cast
assert_(type(np.any(0, axis=0)) is np.bool_)
# assert that 0-d arrays get wrapped
class MyArray(np.ndarray):
pass
a = np.array(1).view(MyArray)
assert_(type(np.any(a)) is MyArray)
def test_casting_out_param(self):
# Test that it's possible to do casts on output
a = np.ones((200, 100), np.int64)
b = np.ones((200, 100), np.int64)
c = np.ones((200, 100), np.float64)
np.add(a, b, out=c)
assert_equal(c, 2)
a = np.zeros(65536)
b = np.zeros(65536, dtype=np.float32)
np.subtract(a, 0, out=b)
assert_equal(b, 0)
def test_where_param(self):
# Test that the where= ufunc parameter works with regular arrays
a = np.arange(7)
b = np.ones(7)
c = np.zeros(7)
np.add(a, b, out=c, where=(a % 2 == 1))
assert_equal(c, [0, 2, 0, 4, 0, 6, 0])
a = np.arange(4).reshape(2, 2) + 2
np.power(a, [2, 3], out=a, where=[[0, 1], [1, 0]])
assert_equal(a, [[2, 27], [16, 5]])
# Broadcasting the where= parameter
np.subtract(a, 2, out=a, where=[True, False])
assert_equal(a, [[0, 27], [14, 5]])
def test_where_param_buffer_output(self):
# This test is temporarily skipped because it requires
# adding masking features to the nditer to work properly
# With casting on output
a = np.ones(10, np.int64)
b = np.ones(10, np.int64)
c = 1.5 * np.ones(10, np.float64)
np.add(a, b, out=c, where=[1, 0, 0, 1, 0, 0, 1, 1, 1, 0])
assert_equal(c, [2, 1.5, 1.5, 2, 1.5, 1.5, 2, 2, 2, 1.5])
def check_identityless_reduction(self, a):
# np.minimum.reduce is an identityless reduction
# Verify that it sees the zero at various positions
a[...] = 1
a[1, 0, 0] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [1, 0])
assert_equal(np.minimum.reduce(a, axis=0),
[[0, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[1, 1, 1, 1], [0, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[1, 1, 1], [0, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
a[...] = 1
a[0, 1, 0] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [0, 1, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [1, 0, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
assert_equal(np.minimum.reduce(a, axis=0),
[[1, 1, 1, 1], [0, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[0, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[1, 0, 1], [1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
a[...] = 1
a[0, 0, 1] = 0
assert_equal(np.minimum.reduce(a, axis=None), 0)
assert_equal(np.minimum.reduce(a, axis=(0, 1)), [1, 0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(0, 2)), [0, 1, 1])
assert_equal(np.minimum.reduce(a, axis=(1, 2)), [0, 1])
assert_equal(np.minimum.reduce(a, axis=0),
[[1, 0, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=1),
[[1, 0, 1, 1], [1, 1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=2),
[[0, 1, 1], [1, 1, 1]])
assert_equal(np.minimum.reduce(a, axis=()), a)
def test_identityless_reduction_corder(self):
a = np.empty((2, 3, 4), order='C')
self.check_identityless_reduction(a)
def test_identityless_reduction_forder(self):
a = np.empty((2, 3, 4), order='F')
self.check_identityless_reduction(a)
def test_identityless_reduction_otherorder(self):
a = np.empty((2, 4, 3), order='C').swapaxes(1, 2)
self.check_identityless_reduction(a)
def test_identityless_reduction_noncontig(self):
a = np.empty((3, 5, 4), order='C').swapaxes(1, 2)
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
def test_identityless_reduction_noncontig_unaligned(self):
a = np.empty((3*4*5*8 + 1,), dtype='i1')
a = a[1:].view(dtype='f8')
a.shape = (3, 4, 5)
a = a[1:, 1:, 1:]
self.check_identityless_reduction(a)
def test_identityless_reduction_nonreorderable(self):
a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
res = np.divide.reduce(a, axis=0)
assert_equal(res, [8.0, 4.0, 8.0])
res = np.divide.reduce(a, axis=1)
assert_equal(res, [2.0, 8.0])
res = np.divide.reduce(a, axis=())
assert_equal(res, a)
assert_raises(ValueError, np.divide.reduce, a, axis=(0, 1))
def test_reduce_zero_axis(self):
# If we have a n x m array and do a reduction with axis=1, then we are
# doing n reductions, and each reduction takes an m-element array. For
# a reduction operation without an identity, then:
# n > 0, m > 0: fine
# n = 0, m > 0: fine, doing 0 reductions of m-element arrays
# n > 0, m = 0: can't reduce a 0-element array, ValueError
# n = 0, m = 0: can't reduce a 0-element array, ValueError (for
# consistency with the above case)
# This test doesn't actually look at return values, it just checks to
# make sure that we get an error in exactly those cases where we
# expect one, and assumes the calculations themselves are done
# correctly.
def ok(f, *args, **kwargs):
f(*args, **kwargs)
def err(f, *args, **kwargs):
assert_raises(ValueError, f, *args, **kwargs)
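# t(expect, func, n, m): build a batch of arrays whose shapes mix the sizes n
# and m (including zero-sized axes) and apply 'func' to each, using 'expect'
# (ok or err above) to assert whether the reduction should succeed or raise.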
def t(expect, func, n, m):
expect(func, np.zeros((n, m)), axis=1)
expect(func, np.zeros((m, n)), axis=0)
expect(func, np.zeros((n // 2, n // 2, m)), axis=2)
expect(func, np.zeros((n // 2, m, n // 2)), axis=1)
expect(func, np.zeros((n, m // 2, m // 2)), axis=(1, 2))
expect(func, np.zeros((m // 2, n, m // 2)), axis=(0, 2))
expect(func, np.zeros((m // 3, m // 3, m // 3,
n // 2, n //2)),
axis=(0, 1, 2))
# Check what happens if the inner (resp. outer) dimensions are a
# mix of zero and non-zero:
expect(func, np.zeros((10, m, n)), axis=(0, 1))
expect(func, np.zeros((10, n, m)), axis=(0, 2))
expect(func, np.zeros((m, 10, n)), axis=0)
expect(func, np.zeros((10, m, n)), axis=1)
expect(func, np.zeros((10, n, m)), axis=2)
# np.maximum is just an arbitrary ufunc with no reduction identity
assert_equal(np.maximum.identity, None)
t(ok, np.maximum.reduce, 30, 30)
t(ok, np.maximum.reduce, 0, 30)
t(err, np.maximum.reduce, 30, 0)
t(err, np.maximum.reduce, 0, 0)
err(np.maximum.reduce, [])
np.maximum.reduce(np.zeros((0, 0)), axis=())
# all of the combinations are fine for a reduction that has an
# identity
t(ok, np.add.reduce, 30, 30)
t(ok, np.add.reduce, 0, 30)
t(ok, np.add.reduce, 30, 0)
t(ok, np.add.reduce, 0, 0)
np.add.reduce([])
np.add.reduce(np.zeros((0, 0)), axis=())
# OTOH, accumulate always makes sense for any combination of n and m,
# because it maps an m-element array to an m-element array. These
# tests are simpler because accumulate doesn't accept multiple axes.
for uf in (np.maximum, np.add):
uf.accumulate(np.zeros((30, 0)), axis=0)
uf.accumulate(np.zeros((0, 30)), axis=0)
uf.accumulate(np.zeros((30, 30)), axis=0)
uf.accumulate(np.zeros((0, 0)), axis=0)
def test_safe_casting(self):
# In old versions of numpy, in-place operations used the 'unsafe'
# casting rules. In some future version, 'same_kind' will become the
# default.
a = np.array([1, 2, 3], dtype=int)
# Non-in-place addition is fine
assert_array_equal(assert_no_warnings(np.add, a, 1.1),
[2.1, 3.1, 4.1])
assert_warns(DeprecationWarning, np.add, a, 1.1, out=a)
assert_array_equal(a, [2, 3, 4])
def add_inplace(a, b):
a += b
assert_warns(DeprecationWarning, add_inplace, a, 1.1)
assert_array_equal(a, [3, 4, 5])
# Make sure that explicitly overriding the warning is allowed:
assert_no_warnings(np.add, a, 1.1, out=a, casting="unsafe")
assert_array_equal(a, [4, 5, 6])
# There's no way to propagate exceptions from the place where we issue
# this deprecation warning, so we must throw the exception away
# entirely rather than cause it to be raised at some other point, or
# trigger some other unsuspecting if (PyErr_Occurred()) { ...} at some
# other location entirely.
import warnings
import sys
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
with warnings.catch_warnings():
warnings.simplefilter("error")
old_stderr = sys.stderr
try:
sys.stderr = StringIO()
# No error, but dumps to stderr
a += 1.1
# No error on the next bit of code executed either
1 + 1
assert_("Implicitly casting" in sys.stderr.getvalue())
finally:
sys.stderr = old_stderr
def test_ufunc_custom_out(self):
# Test ufunc with built in input types and custom output type
a = np.array([0, 1, 2], dtype='i8')
b = np.array([0, 1, 2], dtype='i8')
c = np.empty(3, dtype=rational)
# Output must be specified so numpy knows what
# ufunc signature to look for
result = test_add(a, b, c)
assert_equal(result, np.array([0, 2, 4], dtype=rational))
# no output type should raise TypeError
assert_raises(TypeError, test_add, a, b)
def test_operand_flags(self):
a = np.arange(16, dtype='l').reshape(4, 4)
b = np.arange(9, dtype='l').reshape(3, 3)
opflag_tests.inplace_add(a[:-1, :-1], b)
assert_equal(a, np.array([[0, 2, 4, 3], [7, 9, 11, 7],
[14, 16, 18, 11], [12, 13, 14, 15]], dtype='l'))
a = np.array(0)
opflag_tests.inplace_add(a, 3)
assert_equal(a, 3)
opflag_tests.inplace_add(a, [3, 4])
assert_equal(a, 10)
def test_struct_ufunc(self):
import numpy.core.struct_ufunc_test as struct_ufunc
a = np.array([(1, 2, 3)], dtype='u8,u8,u8')
b = np.array([(1, 2, 3)], dtype='u8,u8,u8')
result = struct_ufunc.add_triplet(a, b)
assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
def test_custom_ufunc(self):
a = np.array([rational(1, 2), rational(1, 3), rational(1, 4)],
dtype=rational);
b = np.array([rational(1, 2), rational(1, 3), rational(1, 4)],
dtype=rational);
result = test_add_rationals(a, b)
expected = np.array([rational(1), rational(2, 3), rational(1, 2)],
dtype=rational);
assert_equal(result, expected);
def test_custom_array_like(self):
class MyThing(object):
__array_priority__ = 1000
rmul_count = 0
getitem_count = 0
def __init__(self, shape):
self.shape = shape
def __len__(self):
return self.shape[0]
def __getitem__(self, i):
MyThing.getitem_count += 1
if not isinstance(i, tuple):
i = (i,)
if len(i) > len(self.shape):
raise IndexError("boo")
return MyThing(self.shape[len(i):])
def __rmul__(self, other):
MyThing.rmul_count += 1
return self
np.float64(5)*MyThing((3, 3))
assert_(MyThing.rmul_count == 1, MyThing.rmul_count)
assert_(MyThing.getitem_count <= 2, MyThing.getitem_count)
def test_inplace_fancy_indexing(self):
a = np.arange(10)
np.add.at(a, [2, 5, 2], 1)
assert_equal(a, [0, 1, 4, 3, 4, 6, 6, 7, 8, 9])
a = np.arange(10)
b = np.array([100, 100, 100])
np.add.at(a, [2, 5, 2], b)
assert_equal(a, [0, 1, 202, 3, 4, 105, 6, 7, 8, 9])
a = np.arange(9).reshape(3, 3)
b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
np.add.at(a, (slice(None), [1, 2, 1]), b)
assert_equal(a, [[0, 201, 102], [3, 404, 205], [6, 607, 308]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), slice(None), [1, 2, 1]), b)
assert_equal(a,
[[[0, 401, 202],
[3, 404, 205],
[6, 407, 208]],
[[9, 410, 211],
[12, 413, 214],
[15, 416, 217]],
[[18, 419, 220],
[21, 422, 223],
[24, 425, 226]]])
a = np.arange(9).reshape(3, 3)
b = np.array([[100, 100, 100], [200, 200, 200], [300, 300, 300]])
np.add.at(a, ([1, 2, 1], slice(None)), b)
assert_equal(a, [[0, 1, 2], [403, 404, 405], [206, 207, 208]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), [1, 2, 1], slice(None)), b)
assert_equal(a,
[[[0, 1, 2 ],
[203, 404, 605],
[106, 207, 308]],
[[9, 10, 11 ],
[212, 413, 614],
[115, 216, 317]],
[[18, 19, 20 ],
[221, 422, 623],
[124, 225, 326]]])
a = np.arange(9).reshape(3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (0, [1, 2, 1]), b)
assert_equal(a, [[0, 401, 202], [3, 4, 5], [6, 7, 8]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, ([1, 2, 1], 0, slice(None)), b)
assert_equal(a,
[[[0, 1, 2],
[3, 4, 5],
[6, 7, 8]],
[[209, 410, 611],
[12, 13, 14],
[15, 16, 17]],
[[118, 219, 320],
[21, 22, 23],
[24, 25, 26]]])
a = np.arange(27).reshape(3, 3, 3)
b = np.array([100, 200, 300])
np.add.at(a, (slice(None), slice(None), slice(None)), b)
assert_equal(a,
[[[100, 201, 302],
[103, 204, 305],
[106, 207, 308]],
[[109, 210, 311],
[112, 213, 314],
[115, 216, 317]],
[[118, 219, 320],
[121, 222, 323],
[124, 225, 326]]])
a = np.arange(10)
np.negative.at(a, [2, 5, 2])
assert_equal(a, [0, 1, 2, 3, 4, -5, 6, 7, 8, 9])
# Test 0-dim array
a = np.array(0)
np.add.at(a, (), 1)
assert_equal(a, 1)
assert_raises(IndexError, np.add.at, a, 0, 1)
assert_raises(IndexError, np.add.at, a, [], 1)
# Test mixed dtypes
a = np.arange(10)
np.power.at(a, [1, 2, 3, 2], 3.5)
assert_equal(a, np.array([0, 1, 4414, 46, 4, 5, 6, 7, 8, 9]))
# Test boolean indexing and boolean ufuncs
a = np.arange(10)
index = a % 2 == 0
np.equal.at(a, index, [0, 2, 4, 6, 8])
assert_equal(a, [1, 1, 1, 3, 1, 5, 1, 7, 1, 9])
# Test unary operator
a = np.arange(10, dtype='u4')
np.invert.at(a, [2, 5, 2])
assert_equal(a, [0, 1, 2, 3, 4, 5 ^ 0xffffffff, 6, 7, 8, 9])
# Test empty subspace
orig = np.arange(4)
a = orig[:, None][:, 0:0]
np.add.at(a, [0, 1], 3)
assert_array_equal(orig, np.arange(4))
# Test with swapped byte order
index = np.array([1, 2, 1], np.dtype('i').newbyteorder())
values = np.array([1, 2, 3, 4], np.dtype('f').newbyteorder())
np.add.at(values, index, 3)
assert_array_equal(values, [1, 8, 6, 4])
# Test exception thrown
values = np.array(['a', 1], dtype=np.object)
self.assertRaises(TypeError, np.add.at, values, [0, 1], 1)
assert_array_equal(values, np.array(['a', 1], dtype=np.object))
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
diorcety/intellij-community | plugins/hg4idea/testData/bin/mercurial/help.py | 91 | 18018 | # help.py - help data for mercurial
#
# Copyright 2006 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from i18n import gettext, _
import itertools, sys, os, error
import extensions, revset, fileset, templatekw, templatefilters, filemerge
import encoding, util, minirst
import cmdutil
def listexts(header, exts, indent=1):
'''return a text listing of the given extensions'''
rst = []
if exts:
rst.append('\n%s\n\n' % header)
for name, desc in sorted(exts.iteritems()):
rst.append('%s:%s: %s\n' % (' ' * indent, name, desc))
return rst
def extshelp():
rst = loaddoc('extensions')().splitlines(True)
rst.extend(listexts(_('enabled extensions:'), extensions.enabled()))
rst.extend(listexts(_('disabled extensions:'), extensions.disabled()))
doc = ''.join(rst)
return doc
def optrst(options, verbose):
data = []
multioccur = False
for option in options:
if len(option) == 5:
shortopt, longopt, default, desc, optlabel = option
else:
shortopt, longopt, default, desc = option
optlabel = _("VALUE") # default label
if _("DEPRECATED") in desc and not verbose:
continue
so = ''
if shortopt:
so = '-' + shortopt
lo = '--' + longopt
if default:
desc += _(" (default: %s)") % default
if isinstance(default, list):
lo += " %s [+]" % optlabel
multioccur = True
elif (default is not None) and not isinstance(default, bool):
lo += " %s" % optlabel
data.append((so, lo, desc))
rst = minirst.maketable(data, 1)
if multioccur:
rst.append(_("\n[+] marked option can be specified multiple times\n"))
return ''.join(rst)
def indicateomitted(rst, omitted, notomitted=None):
rst.append('\n\n.. container:: omitted\n\n %s\n\n' % omitted)
if notomitted:
rst.append('\n\n.. container:: notomitted\n\n %s\n\n' % notomitted)
def topicmatch(kw):
"""Return help topics matching kw.
Returns {'section': [(name, summary), ...], ...} where section is
one of topics, commands, extensions, or extensioncommands.
"""
kw = encoding.lower(kw)
def lowercontains(container):
return kw in encoding.lower(container) # translated in helptable
results = {'topics': [],
'commands': [],
'extensions': [],
'extensioncommands': [],
}
for names, header, doc in helptable:
if (sum(map(lowercontains, names))
or lowercontains(header)
or lowercontains(doc())):
results['topics'].append((names[0], header))
import commands # avoid cycle
for cmd, entry in commands.table.iteritems():
if cmd.startswith('debug'):
continue
if len(entry) == 3:
summary = entry[2]
else:
summary = ''
# translate docs *before* searching there
docs = _(getattr(entry[0], '__doc__', None)) or ''
if kw in cmd or lowercontains(summary) or lowercontains(docs):
doclines = docs.splitlines()
if doclines:
summary = doclines[0]
cmdname = cmd.split('|')[0].lstrip('^')
results['commands'].append((cmdname, summary))
for name, docs in itertools.chain(
extensions.enabled().iteritems(),
extensions.disabled().iteritems()):
# extensions.load ignores the UI argument
mod = extensions.load(None, name, '')
if lowercontains(name) or lowercontains(docs):
# extension docs are already translated
results['extensions'].append((name, docs.splitlines()[0]))
for cmd, entry in getattr(mod, 'cmdtable', {}).iteritems():
if kw in cmd or (len(entry) > 2 and lowercontains(entry[2])):
cmdname = cmd.split('|')[0].lstrip('^')
if entry[0].__doc__:
cmddoc = gettext(entry[0].__doc__).splitlines()[0]
else:
cmddoc = _('(no help text available)')
results['extensioncommands'].append((cmdname, cmddoc))
return results
def loaddoc(topic):
"""Return a delayed loader for help/topic.txt."""
def loader():
if util.mainfrozen():
module = sys.executable
else:
module = __file__
base = os.path.dirname(module)
for dir in ('.', '..'):
docdir = os.path.join(base, dir, 'help')
if os.path.isdir(docdir):
break
path = os.path.join(docdir, topic + ".txt")
doc = gettext(util.readfile(path))
for rewriter in helphooks.get(topic, []):
doc = rewriter(topic, doc)
return doc
return loader
helptable = sorted([
(["config", "hgrc"], _("Configuration Files"), loaddoc('config')),
(["dates"], _("Date Formats"), loaddoc('dates')),
(["patterns"], _("File Name Patterns"), loaddoc('patterns')),
(['environment', 'env'], _('Environment Variables'),
loaddoc('environment')),
(['revisions', 'revs'], _('Specifying Single Revisions'),
loaddoc('revisions')),
(['multirevs', 'mrevs'], _('Specifying Multiple Revisions'),
loaddoc('multirevs')),
(['revsets', 'revset'], _("Specifying Revision Sets"), loaddoc('revsets')),
(['filesets', 'fileset'], _("Specifying File Sets"), loaddoc('filesets')),
(['diffs'], _('Diff Formats'), loaddoc('diffs')),
(['merge-tools', 'mergetools'], _('Merge Tools'), loaddoc('merge-tools')),
(['templating', 'templates', 'template', 'style'], _('Template Usage'),
loaddoc('templates')),
(['urls'], _('URL Paths'), loaddoc('urls')),
(["extensions"], _("Using Additional Features"), extshelp),
(["subrepos", "subrepo"], _("Subrepositories"), loaddoc('subrepos')),
(["hgweb"], _("Configuring hgweb"), loaddoc('hgweb')),
(["glossary"], _("Glossary"), loaddoc('glossary')),
(["hgignore", "ignore"], _("Syntax for Mercurial Ignore Files"),
loaddoc('hgignore')),
(["phases"], _("Working with Phases"), loaddoc('phases')),
])
# Map topics to lists of callable taking the current topic help and
# returning the updated version
helphooks = {}
def addtopichook(topic, rewriter):
helphooks.setdefault(topic, []).append(rewriter)
def makeitemsdoc(topic, doc, marker, items):
"""Extract docstring from the items key to function mapping, build a
.single documentation block and use it to overwrite the marker in doc
"""
entries = []
for name in sorted(items):
text = (items[name].__doc__ or '').rstrip()
if not text:
continue
text = gettext(text)
lines = text.splitlines()
doclines = [(lines[0])]
for l in lines[1:]:
# Stop once we find some Python doctest
if l.strip().startswith('>>>'):
break
doclines.append(' ' + l.strip())
entries.append('\n'.join(doclines))
entries = '\n\n'.join(entries)
return doc.replace(marker, entries)
def addtopicsymbols(topic, marker, symbols):
def add(topic, doc):
return makeitemsdoc(topic, doc, marker, symbols)
addtopichook(topic, add)
addtopicsymbols('filesets', '.. predicatesmarker', fileset.symbols)
addtopicsymbols('merge-tools', '.. internaltoolsmarker', filemerge.internals)
addtopicsymbols('revsets', '.. predicatesmarker', revset.symbols)
addtopicsymbols('templates', '.. keywordsmarker', templatekw.dockeywords)
addtopicsymbols('templates', '.. filtersmarker', templatefilters.filters)
def help_(ui, name, unknowncmd=False, full=True, **opts):
'''
Generate the help for 'name' as unformatted restructured text. If
'name' is None, describe the commands available.
'''
import commands # avoid cycle
def helpcmd(name):
try:
aliases, entry = cmdutil.findcmd(name, commands.table,
strict=unknowncmd)
except error.AmbiguousCommand, inst:
# py3k fix: except vars can't be used outside the scope of the
# except block, nor can be used inside a lambda. python issue4617
prefix = inst.args[0]
select = lambda c: c.lstrip('^').startswith(prefix)
rst = helplist(select)
return rst
rst = []
# check if it's an invalid alias and display its error if it is
if getattr(entry[0], 'badalias', False):
if not unknowncmd:
ui.pushbuffer()
entry[0](ui)
rst.append(ui.popbuffer())
return rst
# synopsis
if len(entry) > 2:
if entry[2].startswith('hg'):
rst.append("%s\n" % entry[2])
else:
rst.append('hg %s %s\n' % (aliases[0], entry[2]))
else:
rst.append('hg %s\n' % aliases[0])
# aliases
if full and not ui.quiet and len(aliases) > 1:
rst.append(_("\naliases: %s\n") % ', '.join(aliases[1:]))
rst.append('\n')
# description
doc = gettext(entry[0].__doc__)
if not doc:
doc = _("(no help text available)")
if util.safehasattr(entry[0], 'definition'): # aliased command
if entry[0].definition.startswith('!'): # shell alias
doc = _('shell alias for::\n\n %s') % entry[0].definition[1:]
else:
doc = _('alias for: hg %s\n\n%s') % (entry[0].definition, doc)
doc = doc.splitlines(True)
if ui.quiet or not full:
rst.append(doc[0])
else:
rst.extend(doc)
rst.append('\n')
# check if this command shadows a non-trivial (multi-line)
# extension help text
try:
mod = extensions.find(name)
doc = gettext(mod.__doc__) or ''
if '\n' in doc.strip():
msg = _('use "hg help -e %s" to show help for '
'the %s extension') % (name, name)
rst.append('\n%s\n' % msg)
except KeyError:
pass
# options
if not ui.quiet and entry[1]:
rst.append('\n%s\n\n' % _("options:"))
rst.append(optrst(entry[1], ui.verbose))
if ui.verbose:
rst.append('\n%s\n\n' % _("global options:"))
rst.append(optrst(commands.globalopts, ui.verbose))
if not ui.verbose:
if not full:
rst.append(_('\nuse "hg help %s" to show the full help text\n')
% name)
elif not ui.quiet:
omitted = _('use "hg -v help %s" to show more complete'
' help and the global options') % name
notomitted = _('use "hg -v help %s" to show'
' the global options') % name
indicateomitted(rst, omitted, notomitted)
return rst
def helplist(select=None):
# list of commands
if name == "shortlist":
header = _('basic commands:\n\n')
else:
header = _('list of commands:\n\n')
h = {}
cmds = {}
for c, e in commands.table.iteritems():
f = c.split("|", 1)[0]
if select and not select(f):
continue
if (not select and name != 'shortlist' and
e[0].__module__ != commands.__name__):
continue
if name == "shortlist" and not f.startswith("^"):
continue
f = f.lstrip("^")
if not ui.debugflag and f.startswith("debug"):
continue
doc = e[0].__doc__
if doc and 'DEPRECATED' in doc and not ui.verbose:
continue
doc = gettext(doc)
if not doc:
doc = _("(no help text available)")
h[f] = doc.splitlines()[0].rstrip()
cmds[f] = c.lstrip("^")
rst = []
if not h:
if not ui.quiet:
rst.append(_('no commands defined\n'))
return rst
if not ui.quiet:
rst.append(header)
fns = sorted(h)
for f in fns:
if ui.verbose:
commacmds = cmds[f].replace("|",", ")
rst.append(" :%s: %s\n" % (commacmds, h[f]))
else:
rst.append(' :%s: %s\n' % (f, h[f]))
if not name:
exts = listexts(_('enabled extensions:'), extensions.enabled())
if exts:
rst.append('\n')
rst.extend(exts)
rst.append(_("\nadditional help topics:\n\n"))
topics = []
for names, header, doc in helptable:
topics.append((names[0], header))
for t, desc in topics:
rst.append(" :%s: %s\n" % (t, desc))
optlist = []
if not ui.quiet:
if ui.verbose:
optlist.append((_("global options:"), commands.globalopts))
if name == 'shortlist':
optlist.append((_('use "hg help" for the full list '
'of commands'), ()))
else:
if name == 'shortlist':
msg = _('use "hg help" for the full list of commands '
'or "hg -v" for details')
elif name and not full:
msg = _('use "hg help %s" to show the full help '
'text') % name
else:
msg = _('use "hg -v help%s" to show builtin aliases and '
'global options') % (name and " " + name or "")
optlist.append((msg, ()))
if optlist:
for title, options in optlist:
rst.append('\n%s\n' % title)
if options:
rst.append('\n%s\n' % optrst(options, ui.verbose))
return rst
def helptopic(name):
for names, header, doc in helptable:
if name in names:
break
else:
raise error.UnknownCommand(name)
rst = [minirst.section(header)]
# description
if not doc:
rst.append(" %s\n" % _("(no help text available)"))
if util.safehasattr(doc, '__call__'):
rst += [" %s\n" % l for l in doc().splitlines()]
if not ui.verbose:
omitted = (_('use "hg help -v %s" to show more complete help') %
name)
indicateomitted(rst, omitted)
try:
cmdutil.findcmd(name, commands.table)
rst.append(_('\nuse "hg help -c %s" to see help for '
'the %s command\n') % (name, name))
except error.UnknownCommand:
pass
return rst
def helpext(name):
try:
mod = extensions.find(name)
doc = gettext(mod.__doc__) or _('no help text available')
except KeyError:
mod = None
doc = extensions.disabledext(name)
if not doc:
raise error.UnknownCommand(name)
if '\n' not in doc:
head, tail = doc, ""
else:
head, tail = doc.split('\n', 1)
rst = [_('%s extension - %s\n\n') % (name.split('.')[-1], head)]
if tail:
rst.extend(tail.splitlines(True))
rst.append('\n')
if not ui.verbose:
omitted = (_('use "hg help -v %s" to show more complete help') %
name)
indicateomitted(rst, omitted)
if mod:
try:
ct = mod.cmdtable
except AttributeError:
ct = {}
modcmds = set([c.split('|', 1)[0] for c in ct])
rst.extend(helplist(modcmds.__contains__))
else:
rst.append(_('use "hg help extensions" for information on enabling '
'extensions\n'))
return rst
def helpextcmd(name):
cmd, ext, mod = extensions.disabledcmd(ui, name,
ui.configbool('ui', 'strict'))
doc = gettext(mod.__doc__).splitlines()[0]
rst = listexts(_("'%s' is provided by the following "
"extension:") % cmd, {ext: doc}, indent=4)
rst.append('\n')
rst.append(_('use "hg help extensions" for information on enabling '
'extensions\n'))
return rst
rst = []
kw = opts.get('keyword')
if kw:
matches = topicmatch(kw)
for t, title in (('topics', _('Topics')),
('commands', _('Commands')),
('extensions', _('Extensions')),
('extensioncommands', _('Extension Commands'))):
if matches[t]:
rst.append('%s:\n\n' % title)
rst.extend(minirst.maketable(sorted(matches[t]), 1))
rst.append('\n')
elif name and name != 'shortlist':
i = None
if unknowncmd:
queries = (helpextcmd,)
elif opts.get('extension'):
queries = (helpext,)
elif opts.get('command'):
queries = (helpcmd,)
else:
queries = (helptopic, helpcmd, helpext, helpextcmd)
for f in queries:
try:
rst = f(name)
i = None
break
except error.UnknownCommand, inst:
i = inst
if i:
raise i
else:
# program name
if not ui.quiet:
rst = [_("Mercurial Distributed SCM\n"), '\n']
rst.extend(helplist())
return ''.join(rst)
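# Illustrative usage sketch (not part of the original module); the ui object and
# command name below are assumptions about the caller's context.
#   text = help_(ui, 'commit')      # unformatted reST help for one command
#   text = help_(ui, None)          # overview listing commands and topics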
| apache-2.0 |
toslunar/chainerrl | chainerrl/links/empirical_normalization.py | 1 | 3685 | from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import * # NOQA
from future import standard_library
standard_library.install_aliases() # NOQA
import chainer
import numpy as np
class EmpiricalNormalization(chainer.Link):
"""Normalize mean and variance of values based on emprical values.
Args:
shape (int or tuple of int): Shape of input values except batch axis.
batch_axis (int): Batch axis.
eps (float): Small value for stability.
dtype (dtype): Dtype of input values.
until (int or None): If this arg is specified, the link learns input
values until the sum of batch sizes exceeds it.
"""
def __init__(self, shape, batch_axis=0, eps=1e-2, dtype=np.float32,
until=None, clip_threshold=None):
super(EmpiricalNormalization, self).__init__()
dtype = np.dtype(dtype)
self.batch_axis = batch_axis
self.eps = dtype.type(eps)
self.until = until
self.clip_threshold = clip_threshold
self._mean = np.expand_dims(np.zeros(shape, dtype=dtype), batch_axis)
self._var = np.expand_dims(np.ones(shape, dtype=dtype), batch_axis)
self.count = 0
self.register_persistent('_mean')
self.register_persistent('_var')
self.register_persistent('count')
# cache
self._cached_std_inverse = None
@property
def mean(self):
return self.xp.squeeze(self._mean, self.batch_axis).copy()
@property
def std(self):
xp = self.xp
return xp.sqrt(xp.squeeze(self._var, self.batch_axis))
@property
def _std_inverse(self):
if self._cached_std_inverse is None:
self._cached_std_inverse = (self._var + self.eps) ** -0.5
return self._cached_std_inverse
def experience(self, x):
"""Learn input values without computing the output values of them"""
if self.until is not None and self.count >= self.until:
return
if isinstance(x, chainer.Variable):
x = x.array
count_x = x.shape[self.batch_axis]
if count_x == 0:
return
xp = self.xp
self.count += count_x
rate = x.dtype.type(count_x / self.count)
mean_x = xp.mean(x, axis=self.batch_axis, keepdims=True)
var_x = xp.var(x, axis=self.batch_axis, keepdims=True)
delta_mean = mean_x - self._mean
self._mean += rate * delta_mean
self._var += rate * (
var_x - self._var
+ delta_mean * (mean_x - self._mean)
)
# clear cache
self._cached_std_inverse = None
def __call__(self, x, update=True):
"""Normalize mean and variance of values based on emprical values.
Args:
x (ndarray or Variable): Input values
update (bool): Flag to learn the input values
Returns:
ndarray or Variable: Normalized output values
"""
xp = self.xp
mean = xp.broadcast_to(self._mean, x.shape)
std_inv = xp.broadcast_to(self._std_inverse, x.shape)
if update:
self.experience(x)
normalized = (x - mean) * std_inv
if self.clip_threshold is not None:
normalized = xp.clip(
normalized, -self.clip_threshold, self.clip_threshold)
return normalized
def inverse(self, y):
xp = self.xp
mean = xp.broadcast_to(self._mean, y.shape)
std = xp.broadcast_to(xp.sqrt(self._var + self.eps), y.shape)
return y * std + mean
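# Minimal usage sketch (illustrative only, not part of the original module);
# the shapes and values below are assumptions.
#   norm = EmpiricalNormalization(shape=3)
#   batch = np.random.randn(10, 3).astype(np.float32)
#   y = norm(batch)          # updates the running statistics and normalizes
#   x = norm.inverse(y)      # approximately recovers the original batch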
| mit |
jgiannuzzi/pypi-server | pypi_server/handlers/pypi/proxy/client.py | 2 | 4592 | # encoding: utf-8
import hashlib
import logging
from copy import copy
from slimurl import URL
from tornado.gen import coroutine, Return
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop
from tornado.locks import Lock
from tornado.options import options
from tornado_xmlrpc.client import ServerProxy
from pypi_server.cache import Cache, HOUR, MONTH
from pypi_server.hash_version import HashVersion
log = logging.getLogger(__name__)
def normalize_package_name(name):
return name.lower().replace("_", "-").replace(".", "-")
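# Illustrative sketch (not in the original source):
#   normalize_package_name("Foo.Bar_baz")   # -> "foo-bar-baz"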
class PYPIClient(object):
CLIENT = None
BACKEND = None
THREAD_POOL = None
INDEX = None
XMLRPC = None
LOCK = None
@classmethod
def configure(cls, backend, thread_pool):
cls.CLIENT = AsyncHTTPClient(io_loop=IOLoop.current())
cls.BACKEND = backend
cls.THREAD_POOL = thread_pool
cls.XMLRPC = ServerProxy(
str(copy(backend)(path="/pypi")),
)
cls.LOCK = Lock()
@classmethod
@coroutine
@Cache(HOUR, files_cache=True, ignore_self=True)
def packages(cls):
with (yield cls.LOCK.acquire()):
index = dict(
map(
lambda x: (normalize_package_name(x), x),
(yield cls.XMLRPC.list_packages())
)
)
log.info("Remote PYPI index updated: %d packages", len(index))
raise Return(index)
@classmethod
@coroutine
@Cache(4 * HOUR, files_cache=True, ignore_self=True)
def search(cls, names, descriptions, operator="or"):
assert operator in ('or', 'and')
result = yield cls.XMLRPC.search({'name': names, 'description': descriptions}, operator)
raise Return(result)
@classmethod
@coroutine
def exists(cls, name):
try:
real_name = yield cls.find_real_name(name)
except LookupError:
raise Return(False)
releases = yield cls.releases(real_name)
if not releases:
raise Return(False)
raise Return(True)
@classmethod
@coroutine
def find_real_name(cls, name):
if not options.pypi_proxy:
raise LookupError("Proxying to PyPI disabled")
name = normalize_package_name(name).lower()
packages = yield cls.packages()
real_name = packages.get(name)
if real_name is None:
raise LookupError("Package not found")
raise Return(real_name)
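    # Illustrative sketch (caller context assumed): inside a tornado coroutine
    # one might resolve the canonical PyPI name like this.
    #   real_name = yield PYPIClient.find_real_name("django")   # e.g. "Django"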
@classmethod
@coroutine
@Cache(4 * HOUR, files_cache=True, ignore_self=True)
def releases(cls, name):
process_versions = lambda x: set(HashVersion(i) for i in x)
all_releases, current_releases = yield [
cls.XMLRPC.package_releases(name, True),
cls.XMLRPC.package_releases(name)
]
all_releases = process_versions(all_releases)
current_releases = process_versions(current_releases)
hidden_releases = all_releases - current_releases
res = []
for x in current_releases:
x.hidden = False
res.append(x)
for x in hidden_releases:
x.hidden = True
res.append(x)
raise Return(set(res))
@classmethod
@coroutine
@Cache(MONTH, files_cache=True, ignore_self=True)
def release_data(cls, name, version):
info, files = yield [
cls.XMLRPC.release_data(str(name), str(version)),
cls.XMLRPC.release_urls(str(name), str(version))
]
download_url = info.get('download_url')
if download_url and not files:
try:
url = URL(download_url)
filename = url.path.split('/')[-1]
if "#" in filename:
filename = filename.split("#")[0]
response = yield cls.CLIENT.fetch(download_url)
files = [{
'filename': filename,
'md5_digest': hashlib.md5(response.body).hexdigest(),
'downloads': -1,
'url': download_url,
'size': len(response.body),
'comment_text': None,
}]
except Exception as e:
files = []
log.error("Error when trying to download version %s of package %s", version, name)
log.exception(e)
else:
files = sorted(
files,
key=lambda x: x['filename']
)
raise Return((info, files))
| mit |
talha81/TACTIC-DEV | src/client/tactic_client_lib/tactic_server_stub.py | 4 | 146424 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
# This is a stub for accessing the TACTIC server. It simplifies the access for
# scripts using the client api. Thin wrapper to the client API.
# These are meant to be copied to client directories.
import datetime
import re
import xmlrpclib, os, getpass, shutil, httplib, sys, urllib, types, hashlib
class TacticApiException(Exception):
pass
''' Class: TacticServerStub
It allows a client to send commands to and receive information from the
TACTIC server.'''
class TacticServerStub(object):
'''
Constructor: TacticServerStub
'''
def __init__(my, login=None, setup=True, protocol=None, server=None,
project=None, ticket=None, user=None, password=""):
'''Function: __init__(login=None, setup=True, protocol=None, server=None, project=None, ticket=None, user=None, password="")
Initialize the TacticServerStub
@keyparam:
login - login_code
setup - if set to True, it runs the protocol set-up
protocol - xmlrpc or local. it defaults to xmlrpc
server - tactic server
project - targeted project
ticket - login ticket key
user - tactic login_code that overrides the login
password - password for login'''
# initialize some variables
if user:
login = user
my.login = login
my.project_code = None
my.server = None
my.has_server = False
my.server_name = None
my.ticket = None # the ticket sent to the server
my.login_ticket = None
my.transaction_ticket = None
# autodetect protocol
if not protocol:
protocol = 'xmlrpc'
try:
import tactic
from pyasm.web import WebContainer
web = WebContainer.get_web()
if web:
server_name = web.get_http_host()
if server_name:
protocol = 'local'
except ImportError:
pass
my.protocol = protocol
# if all of the necessary parameters are set, then
if server and (ticket or login) and project:
my.set_server(server)
my.set_project(project)
if ticket:
my.set_ticket(ticket)
elif login:
# else try with no password (api_require_password)
ticket = my.get_ticket(login, password)
my.set_ticket(ticket)
elif setup:
my._setup(protocol)
# cached handoff dir
my.handoff_dir = None
    '''If the named function does not exist on this stub, fall back to this
    method and attempt the call on the server.
    '''
def _call_missing_method(my, *args):
# convert from tuple to sequence
args = [x for x in args]
args.insert(0, my.ticket)
return my.server.missing_method(my.missing_method_name, args)
''' DISABLING for now
def __getattr__(my, attr):
my.missing_method_name = attr
return my._call_missing_method
'''
def test_error(my):
return my.server.test_error(my.ticket)
def get_protocol(my):
'''Function: get_protocol()
@return:
string - local or xmlrpc'''
return my.protocol
def set_protocol(my, protocol):
        '''Function: set_protocol(protocol)
        @param:
        protocol - local or xmlrpc'''
my.protocol = protocol
def set_ticket(my, ticket):
'''set the login ticket'''
my.set_login_ticket(ticket)
# reset the handoff_dir
my.handoff_dir = None
def set_login_ticket(my, ticket):
'''Function: set_login_ticket(ticket)
Set the login ticket with the ticket key'''
my.login_ticket = ticket
my.set_transaction_ticket(ticket)
def set_transaction_ticket(my, ticket):
if not my.project_code:
my.project_code = ''
my.ticket = {
'ticket': ticket,
'project': my.project_code,
'language': 'python'
}
"""
if my.project_code:
my.ticket = {
'ticket': ticket,
'project': my.project_code,
'language': 'python'
}
else:
raise TacticApiException("No project has been set. Please set a project using method TacticServerStub.set_project()")
"""
my.transaction_ticket = ticket
def get_transaction_ticket(my):
return my.transaction_ticket
def get_login_ticket(my):
return my.login_ticket
def get_login(my):
return my.login
def set_server(my, server_name):
'''Function: set_server(server_name)
Set the server name for this XML-RPC server'''
my.server_name = server_name
if my.protocol == "local":
from pyasm.prod.service import ApiXMLRPC
my.server = ApiXMLRPC()
my.server.set_protocol('local')
my.has_server = True
return
if (my.server_name.startswith("http://") or
my.server_name.startswith("https://")):
url = "%s/tactic/default/Api/" % my.server_name
else:
url = "http://%s/tactic/default/Api/" % my.server_name
#url = "http://localhost:8081/"
        # TODO: Not implemented: This is needed for isolation of transactions
#if my.transaction_ticket:
# url = '%s%s' % (url, my.transaction_ticket)
my.server = xmlrpclib.Server(url, allow_none=True)
try:
pass
#print my.server.test(my.ticket)
except httplib.InvalidURL:
raise TacticApiException("You have supplied an invalid server name [%s]"
% my.server_name)
my.has_server = True
# WARNING: this is changing code in the xmlrpclib library. This
# library is not sending a proper user agent. Hacking it in
# so that at least the OS is sent
if os.name == "nt":
user_agent = 'xmlrpclib.py (Windows)'
else:
user_agent = 'xmlrpclib.py (Linux)'
xmlrpclib.Transport.user_agent = user_agent
def get_server_name(my):
return my.server_name
def get_server(my):
return my.server
def set_project(my, project_code):
'''Function: set_project(project_code)
Set the project code'''
my.project_code = project_code
if my.protocol == 'local':
from pyasm.biz import Project
Project.set_project(project_code)
#my.set_project_state(project_code)
# switch the project code on the ticket
my.set_transaction_ticket(my.transaction_ticket)
def get_project(my):
return my.project_code
def set_palette(my, palette):
my.server.set_palette(palette)
#-----------------------------------
# API FUNCTIONS
#
#
#
# Building earch type functions
#
def build_search_type(my, search_type, project_code=None):
'''API Function: build_search_type(search_type, project_code=None)
Convenience method to build a search type from its components. It is
a simple method that build the proper format for project scoped search
types. A full search type has the form:
prod/asset?project=bar.
It uniquely defines a type of sobject in a project.
@param:
search_type - the unique identifier of a search type: ie prod/asset
project_code (optional) - an optional project code. If this is not
included, the project from get_ticket() is added.
@return:
search type string
@example
[code]
search_type = "prod/asset"
full_search_type = server.build_search_type(search_type)
[/code]
'''
# do not append project for sthpw/* search_type
if search_type.startswith('sthpw/'):
return search_type
if not project_code:
project_code = my.project_code
assert project_code
return "%s?project=%s" % (search_type, project_code)
def build_search_key(my, search_type, code, project_code=None,
column='code'):
'''API Function: build_search_key(search_type, code, project_code=None, column='code')
Convenience method to build a search key from its components. A
search_key uniquely indentifies a specific sobject. This string
that is returned is heavily used as an argument in the API to
identify an sobject to operate one
A search key has the form: "prod/shot?project=bar&code=XG001"
where search_type = "prod/shot", project_code = "bar" and code = "XG001"
@param:
search_type - the unique identifier of a search type: ie prod/asset
code - the unique code of the sobject
@keyparam:
project_code - an optional project code. If this is not
included, the project from get_ticket() is added.
@return:
string - search key
@example:
[code]
search_type = "prod/asset"
code = "chr001"
search_key = server.build_search_key(search_type, code)
        e.g. search_key = prod/asset?project=<project_code>&code=chr001
[/code]
[code]
search_type = "sthpw/login"
code = "admin"
search_key = server.build_search_key(search_type, code, column='code')
e.g. search_key = sthpw/login?code=admin
[/code]
'''
if not project_code:
if not search_type.startswith("sthpw/"):
project_code = my.project_code
assert project_code
if search_type.find('?') == -1:
if search_type.startswith('sthpw/'):
search_key = "%s?%s=%s" %(search_type, column, code)
else:
search_key = "%s?project=%s&%s=%s" % (search_type, project_code,
column, code)
else:
search_key = "%s&%s=%s" %(search_type, column, code)
return search_key
def split_search_key(my, search_key):
'''API Function: split_search_key(search_key)
Convenience method to split a search_key in into its search_type and search_code/id components. Note: only accepts the new form prod/asset?project=sample3d&code=chr001
@param:
search_key - the unique identifier of a sobject
@return:
tuple - search type, search code/id
'''
if search_key.find('&') != -1:
search_type, code = search_key.split('&')
else:
# non project-based search_key
search_type, code = search_key.split('?')
codes = code.split('=')
        assert len(codes) == 2
return search_type, codes[1]
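    # Illustrative sketch (values assumed):
    #   server.split_search_key("prod/asset?project=sample3d&code=chr001")
    #   # -> ("prod/asset?project=sample3d", "chr001")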
def get_home_dir(my):
'''API Function: get_home_dir()
OS independent method to Get the home directory of the current user.
@return:
string - home directory
'''
if os.name == "nt":
dir = "%s%s" % (os.environ.get('HOMEDRIVE'),
os.environ.get('HOMEPATH'))
if os.path.exists(dir):
return dir
return os.path.expanduser('~')
def create_resource_path(my, login=None):
'''DEPRECATED: use create_resource_paths() or get_resource_path()
Create the resource path'''
# get the current user
if not login:
login = getpass.getuser()
filename = "%s.tacticrc" % login
# first check home directory
dir = my.get_home_dir()
is_dir_writeable = os.access(dir, os.W_OK) and os.path.isdir(dir)
# if the home directory is not existent or writable,
# use the temp directory
if not os.path.exists(dir) or not is_dir_writeable:
if os.name == "nt":
dir = "C:/sthpw/etc"
else:
dir = "/tmp/sthpw/etc"
if not os.path.exists(dir):
os.makedirs(dir)
else:
dir = "%s/.tactic/etc" % dir
if not os.path.exists(dir):
os.makedirs(dir)
# if an old resource path does exist, then remove it
if os.name == "nt":
old_dir = "C:/sthpw/etc"
else:
old_dir = "/tmp/sthpw/etc"
old_path = "%s/%s" % (old_dir, filename)
if os.path.exists(old_path):
os.unlink(old_path)
print "Removing deprectated resource file [%s]" % old_path
path = "%s/%s" % (dir,filename)
return path
def create_resource_paths(my, login=None):
        '''Get the 1 or possibly 2 resource paths for creation'''
# get the current user
os_login = getpass.getuser()
if not login:
login = os_login
filename = "%s.tacticrc" % login
filename2 = "%s.tacticrc" % os_login
# first check home directory
dir = my.get_home_dir()
is_dir_writeable = os.access(dir, os.W_OK) and os.path.isdir(dir)
# if the home directory is not existent or writable,
# use the temp directory
if not os.path.exists(dir) or not is_dir_writeable:
if os.name == "nt":
dir = "C:/sthpw/etc"
else:
dir = "/tmp/sthpw/etc"
if not os.path.exists(dir):
os.makedirs(dir)
else:
dir = "%s/.tactic/etc" % dir
if not os.path.exists(dir):
os.makedirs(dir)
# if an old resource path does exist, then remove it
if os.name == "nt":
old_dir = "C:/sthpw/etc"
else:
old_dir = "/tmp/sthpw/etc"
old_path = "%s/%s" % (old_dir, filename)
if os.path.exists(old_path):
os.unlink(old_path)
print "Removing deprectated resource file [%s]" % old_path
path = "%s/%s" % (dir,filename)
path2 = "%s/%s" % (dir,filename2)
paths = [path]
if path2 != path:
paths.append(path2)
return paths
def get_resource_path(my, login=None):
'''API Function: get_resource_path(login=None)
Get the resource path of the current user. It differs from
        create_resource_paths(), which actually creates the directories. The resource path
        identifies the location of the file which is used to cache connection information.
        An example of the contents is shown below:
[code]
login=admin
server=localhost
ticket=30818057bf561429f97af59243e6ef21
project=unittest
[/code]
The contents in the resource file represent the defaults to use
        when connecting to the TACTIC server, but may be overridden by the
API methods: set_ticket(), set_server(), set_project() or the
environment variables: TACTIC_TICKET, TACTIC_SERVER, and TACTIC_PROJECT
Typically this method is not explicitly called by API developers and
is used automatically by the API server stub. It attempts to get from
home dir first and then from temp_dir is it fails.
@param:
login (optional) - login code. If not provided, it gets the current system user
@return:
string - resource file path
'''
# get the current user
if not login:
login = getpass.getuser()
filename = "%s.tacticrc" % login
# first check home directory
dir = my.get_home_dir()
is_dir_writeable = os.access(dir, os.W_OK) and os.path.isdir(dir)
path = "%s/.tactic/etc/%s" % (dir,filename)
# if the home directory path does not exist, check the temp directory
if not is_dir_writeable or not os.path.exists(path):
if os.name == "nt":
dir = "C:/sthpw/etc"
else:
dir = "/tmp/sthpw/etc"
else:
dir = "%s/.tactic/etc" % dir
path = "%s/%s" % (dir,filename)
return path
def get_ticket(my, login, password):
'''API Function: get_ticket(login, password)
Get an authentication ticket based on a login and password.
This function first authenticates the user and the issues a ticket.
The returned ticket is used on subsequent calls to the client api
@param:
login - the user that is used for authentications
password - the password of that user
@return:
string - ticket key
'''
return my.server.get_ticket(login, password)
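    # Illustrative bootstrap sketch (server name, user, password and project
    # below are assumptions):
    #   server = TacticServerStub(setup=False)
    #   server.set_server("localhost")
    #   server.set_project("sample3d")
    #   ticket = server.get_ticket("admin", "secret")
    #   server.set_ticket(ticket)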
def get_info_from_user(my, force=False):
'''API Function: get_info_from_user(force=False)
Get input from the user about the users environment. Questions
asked pertain to the location of the tactic server, the project worked
on and the user's login and password. This information is stored in
an .<login>.tacticrc file.
@keyparam:
        force - if set to True, it will always ask for new information from the
command prompt again
'''
if my.protocol == "local":
return
old_server_name = my.server_name
old_project_code = my.project_code
old_ticket = my.login_ticket
old_login = my.login
default_login = getpass.getuser()
if not force and old_server_name and old_project_code:
return
print
print "TACTIC requires the following connection information:"
print
server_name = raw_input("Enter name of TACTIC server (%s): "
% old_server_name)
if not server_name:
server_name = old_server_name
print
login = raw_input("Enter user name (%s): " % default_login)
if not login:
login = default_login
print
if login == old_login and old_ticket:
password = getpass.getpass(
"Enter password (or use previous ticket): ")
else:
password = getpass.getpass("Enter password: ")
print
project_code = raw_input("Project (%s): " % old_project_code)
if not project_code:
project_code = old_project_code
my.set_server(server_name)
# do the actual work
if login != old_login or password:
ticket = my.get_ticket(login, password)
print "Got ticket [%s] for [%s]" % (ticket, login)
else:
ticket = old_ticket
# commit info to a file
paths = my.create_resource_paths(login)
# this is needed when running get_ticket.py
my.login = login
for path in paths:
file = open(path, 'w')
file.write("login=%s\n" % login)
file.write("server=%s\n" % server_name)
file.write("ticket=%s\n" % ticket)
if project_code:
file.write("project=%s\n" % project_code)
file.close()
print "Saved to [%s]" % path
# set up the server with the new information
my._setup(my.protocol)
#
# Simple Ping Test
#
def ping(my):
return my.server.ping(my.ticket)
def fast_ping(my):
return my.server.fast_ping(my.ticket)
def fast_query(my, search_type, filters=[], limit=None):
results = my.server.fast_query(my.ticket, search_type, filters, limit)
return eval(results)
def test_speed(my):
return my.server.test_speed(my.ticket)
def get_connection_info(my):
'''simple test to get connection info'''
return my.server.get_connection_info(my.ticket)
#
# Logging facilities
#
def log(my, level, message, category="default"):
'''API Function: log(level, message, category="default")
Log a message in the logging queue. It is often difficult to see output
of a trigger unless you are running the server in debug mode.
In production mode, the server sends the output to log files.
        The log files are generally buffered.
It cannot be predicted exactly when buffered output will be dumped to a file.
This log() method will make a request to the server.
The message will be immediately stored in the database in the debug log table.
@param:
level - critical|error|warning|info|debug - arbitrary debug level category
message - freeform string describing the entry
@keyparam:
category - a label for the type of message being logged.
It defaults to "default"
'''
return my.server.log(my.ticket, level,message, category)
def log_message(my, key, message, status="", category="default"):
'''API Function: log_message(key, message, status=None, category="default")
Log a message which will be seen by all who are subscribed to
the message "key". Messages are often JSON strings of data.
@params
key - unique key for this message
message - the message to be sent
@keyparam
status - arbitrary status for this message
category - value to categorize this message
@return
string - "OK"
'''
return my.server.log_message(my.ticket, key, message, status, category)
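    # Illustrative sketch (the key and payload are assumptions):
    #   server.log_message("render|shot_XG001", '{"progress": 50}', status="in_progress")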
def subscribe(my, key, category="default"):
'''API Function: subscribe(key, category="default")
Allow a user to subscribe to this message key. All messages
belonging to the corresponding key will be available to users
subscribed to it.
@params
key - unique key for this message
@keyparam
category - value to categorize this message
@return
subscription sobject
'''
return my.server.subscribe(my.ticket, key, category)
#
# Transaction methods
#
def set_state(my, name, value):
'''Set a state for this transaction
@params
name: name of state variable
value: value of state variable
'''
return my.server.set_state(my.ticket, name, value)
def set_project_state(my, project):
        '''Convenience function to set the project state
@params
project: code of the project to set the state to
'''
return my.set_state("project", project)
def generate_ticket(my):
'''API Function: generate_ticket()
        Ask the server to generate a ticket explicitly used for your own commands
@return - a string representing the transaction ticket
'''
return my.server.generate_ticket(my.ticket)
def start(my, title='', description='', transaction_ticket=''):
'''API Function: start(title, description='')
Start a transaction. All commands using the client API are bound
in a transaction. The combination of start(), finish() and abort()
makes it possible to group a series of API commands in a single
transaction. The start/finish commands are not necessary for
query operations (like query(...), get_snapshot(...), etc).
@keyparam:
title - the title of the command to be executed. This will show up on
transaction log
description - the description of the command. This is more detailed.
transaction_ticket - optionally, one can provide the transaction ticket sequence
@example:
A full transaction inserting 10 shots. If an error occurs, all 10
inserts will be aborted.
[code]
server.start('Start adding shots')
try:
for i in range(0,10):
server.insert("prod/shot", { 'code': 'XG%0.3d'%i } )
except:
server.abort()
else:
server.finish("10 shots added")
[/code]
'''
my.get_info_from_user()
if not my.has_server:
raise TacticApiException("No server connected. If running a command line script, please execute get_ticket.py")
ticket = my.server.start(my.login_ticket, my.project_code, \
title, description, transaction_ticket)
my.set_transaction_ticket(ticket)
#client_version = my.get_client_version()
#server_version = my.get_server_version()
# Switch to using api versions
client_api_version = my.get_client_api_version()
server_api_version = my.get_server_api_version()
if client_api_version != server_api_version:
raise TacticApiException("Server version [%s] does not match client api version [%s]"
% (server_api_version, client_api_version))
my.set_server(my.server_name)
# clear the handoff dir
my.handoff_dir = None
return ticket
def finish(my, description=''):
'''API Function: finish()
        End the current transaction and clean it up
@params:
description: this will be recorded in the transaction log as the
        description of the transaction
@example:
A full transaction inserting 10 shots. If an error occurs, all 10
inserts will be aborted.
[code]
server.start('Start adding shots')
try:
for i in range(0,10):
server.insert("prod/shot", { 'code': 'XG%0.3d'%i } )
except:
server.abort()
else:
server.finish("10 shots added")
[/code]
'''
if my.protocol == "local":
return
result = my.server.finish(my.ticket, description)
my.set_login_ticket(my.login_ticket)
#my.ticket = None
#my.transaction_ticket = None
return result
def abort(my, ignore_files=False):
'''API Function: abort(ignore_files=False)
        Abort the transaction. This undoes all commands that occurred
        from the beginning of the transaction
@keyparam:
ignore_files: (boolean) - determines if any files moved into the
repository are left as is. This is useful for very long processes
        where it is desirable to keep the files in the repository
even on abort.
@example:
A full transaction inserting 10 shots. If an error occurs, all 10
inserts will be aborted.
[code]
server.start('Start adding shots')
try:
for i in range(0,10):
server.insert("prod/shot", { 'code': 'XG%0.3d'%i } )
except:
server.abort()
else:
server.finish("10 shots added")
[/code]
'''
if my.protocol == "local":
return
result = my.server.abort(my.ticket, ignore_files)
my.ticket = None
my.transaction_ticket = None
return result
# FIXME: have to fix these because these are post transaction!!
def undo(my, transaction_ticket=None, transaction_id=None,
ignore_files=False):
'''API Function: undo(transaction_ticket=None, transaction_id=None, ignore_files=False)
undo an operation. If no transaction id is given, then the last
operation of this user on this project is undone
@keyparam:
transaction_ticket - explicitly undo a specific transaction
transaction_id - explicitly undo a specific transaction by id
ignore_files - flag which determines whether the files should
        also be undone. Useful for large preallocated checkins.
'''
if my.protocol == "local":
return
return my.server.undo(my.ticket, transaction_ticket, transaction_id,
ignore_files)
def redo(my, transaction_ticket=None, transaction_id=None):
'''API Function: redo(transaction_ticket=None, transaction_id=None)
Redo an operation. If no transaction id is given, then the last
undone operation of this user on this project is redone
@keyparam:
transaction_ticket - explicitly redo a specific transaction
transaction_id - explicitly redo a specific transaction by id
'''
if my.protocol == "local":
return
return my.server.redo(my.ticket, transaction_ticket, transaction_id)
#
# Low Level Database methods
#
def get_column_info(my, search_type):
'''API Function: get_column_info(search_type)
Get column information of the table given a search type
@param:
search_type - the key identifying a type of sobject as registered in
the search_type table.
@return - a dictionary of info for each column
'''
results = my.server.get_column_info(my.ticket, search_type)
return results
def get_table_info(my, search_type):
'''API Function: get_table_info(search_type)
Get column information of the table given a search type
@param:
search_type - the key identifying a type of sobject as registered in
the search_type table.
@return - a dictionary of info for each column
'''
results = my.server.get_table_info(my.ticket, search_type)
return results
def get_related_types(my, search_type):
'''API Function: get_related_types(search_type)
Get related search types given a search type
@param:
search_type - the key identifying a type of sobject as registered in
the search_type table.
@return - list of search_types
'''
results = my.server.get_related_types(my.ticket, search_type)
return results
def query(my, search_type, filters=[], columns=[], order_bys=[],
show_retired=False, limit=None, offset=None, single=False,
distinct=None, return_sobjects=False):
'''API Function: query(search_type, filters=[], columns=[], order_bys=[], show_retired=False, limit=None, offset=None, single=False, distinct=None, return_sobjects=False)
General query for sobject information
@param:
search_type - the key identifying a type of sobject as registered in
the search_type table.
@keyparam:
filters - an array of filters to alter the search
columns - an array of columns whose values should be
retrieved
order_bys - an array of order_by to alter the search
show_retired - sets whether retired sobjects are also
returned
limit - sets the maximum number of results returned
single - returns only a single object
distinct - specify a distinct column
return_sobjects - return sobjects instead of dictionary. This
works only when using the API on the server.
@return:
list of dictionary/sobjects - Each array item represents an sobject
and is a dictionary of name/value pairs
@example:
[code]
filters = []
filters.append( ("code", "XG002") )
order_bys = ['timestamp desc']
columns = ['code']
server.query(ticket, "prod/shot", filters, columns, order_bys)
[/code]
The arguments "filters", "columns", and "order_bys" are optional
The "filters" argument is a list. Each list item represents an
individual filter. The forms are as follows:
[code]
(column, value) -> where column = value
(column, (value1,value2)) -> where column in (value1, value2)
(column, op, value) -> where column op value
        where op is ('like', '<=', '>=', '>', '<', 'is', '~', '!~', '~*', '!~*')
(value) -> where value
[/code]
'''
#return my.server.query(my.ticket, search_type, filters, columns, order_bys, show_retired, limit, offset, single, return_sobjects)
results = my.server.query(my.ticket, search_type, filters, columns,
order_bys, show_retired, limit, offset,
single, distinct, return_sobjects)
if not return_sobjects and isinstance(results, basestring):
results = eval(results)
return results
def insert(my, search_type, data, metadata={}, parent_key=None, info={},
use_id=False, triggers=True):
'''API Function: insert(search_type, data, metadata={}, parent_key=None, info={}, use_id=False, triggers=True)
General insert for creating a new sobject
@param:
search_type - the search_type attribute of the sType
        data - a dictionary of name/value pairs which will be used to populate
            the new sobject.
parent_key - set the parent key for this sobject
@keyparam:
metadata - a dictionary of values that will be stored in the metadata attribute
if available
info - a dictionary of info to pass to the ApiClientCmd
use_id - use id in the returned search key
triggers - boolean to fire trigger on insert
@return:
        dictionary - represents the sobject with its current data
@example:
insert a new asset
[code]
search_type = "prod/asset"
data = {
'code': chr001,
'description': 'Main Character'
}
server.insert( search_type, data )
[/code]
insert a new note with a shot parent
[code]
# get shot key
shot_key = server.build_search_key(search_type='prod/shot',code='XG001')
data = {
'context': 'model',
'note': 'This is a modelling note',
'login': server.get_login()
}
server.insert( search_type, data, parent_key=shot_key)
[/code]
insert a note without firing triggers
[code]
search_type = "sthpw/note"
data = {
'process': 'roto',
'context': 'roto',
'note': 'The keys look good.',
'project_code': 'art'
}
server.insert( search_type, data, triggers=False )
[/code]
'''
return my.server.insert(my.ticket, search_type, data, metadata,
parent_key, info, use_id, triggers)
def update(my, search_key, data={}, metadata={}, parent_key=None, info={},
use_id=False, triggers=True):
'''API Function: update(search_key, data={}, metadata={}, parent_key=None, info={}, use_id=False, triggers=True)
General update for updating sobject
@param:
search_key - a unique identifier key representing an sobject.
Note: this can also be an array, in which case, the data will
be updated to each sobject represented by this search key
@keyparam:
data - a dictionary of name/value pairs which will be used to update
the sobject defined by the search_key
Note: this can also be an array. Each data dictionary element in
the array will be applied to the corresponding search key
parent_key - set the parent key for this sobject
info - a dictionary of info to pass to the ApiClientCmd
metadata - a dictionary of values that will be stored in the metadata attribute if available
use_id - use id in the returned search key
triggers - boolean to fire trigger on update
@return:
        dictionary - represents the sobject with its current data.
        If search_key is an array, this will be an array of dictionaries
'''
return my.server.update(my.ticket, search_key, data, metadata,
parent_key, info, use_id, triggers)
def update_multiple(my, data, triggers=True):
'''API Function: update_multiple(data, triggers=True)
Update for several sobjects with different data in one function call. The
data structure contains all the information needed to update and is
        formatted as follows:
data = {
search_key1: { column1: value1, column2: value2 }
search_key2: { column1: value1, column2: value2 }
}
@params:
data - data structure containing update information for all
sobjects
        @keyparam:
        triggers - boolean to fire triggers on update
@return:
None
'''
return my.server.update_multiple(my.ticket, data, triggers)
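    # Illustrative sketch (the search keys are assumptions): two updates in one call.
    #   server.update_multiple({
    #       "prod/shot?project=bar&code=XG001": {"status": "approved"},
    #       "prod/shot?project=bar&code=XG002": {"status": "waiting"},
    #   })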
def insert_multiple(my, search_type, data, metadata=[], parent_key=None,
use_id=False, triggers=True):
        '''API Function: insert_multiple(search_type, data, metadata=[], parent_key=None, use_id=False, triggers=True)
Insert for several sobjects in one function call. The
        data structure contains all the info needed to insert and is
        formatted as follows:
data = [
{ column1: value1, column2: value2, column3: value3 },
{ column1: value1, column2: value2, column3: value3 }
        ]
metadata = [
{ color: blue, height: 180 },
{ color: orange, height: 170 }
]
@params:
search_type - the search_type attribute of the sType
        data - a list of dictionaries of name/value pairs; each dictionary
            will be used to create one new sobject
@keyparam:
parent_key - set the parent key for this sobject
use_id - boolean to control if id is used in the search_key in returning sobject dict
triggers - boolean to fire trigger on insert
@return:
a list of all the inserted sobjects
'''
return my.server.insert_multiple(my.ticket, search_type, data, metadata,
parent_key, use_id, triggers)
def insert_update(my, search_key, data, metadata={}, parent_key=None,
info={}, use_id=False, triggers=True):
'''API Function: insert_update(search_key, data, metadata={}, parent_key=None, info={}, use_id=False, triggers=True)
Insert if the entry does not exist, update otherwise
@param:
search_key - a unique identifier key representing an sobject.
data - a dictionary of name/value pairs which will be used to update
the sobject defined by the search_key
@keyparam:
metadata - a dictionary of values that will be stored in the metadata attribute if available
parent_key - set the parent key for this sobject
info - a dictionary of info to pass to the ApiClientCmd
use_id - use id in the returned search key
triggers - boolean to fire trigger on insert
@return:
        dictionary - represents the sobject with its current data.
'''
return my.server.insert_update(my.ticket, search_key, data, metadata,
parent_key, info, use_id, triggers)
def get_unique_sobject(my, search_type, data={}):
'''API Function: get_unique_sobject(search_type, data={})
This is a special convenience function which will query for an
sobject and if it doesn't exist, create it. It assumes that this
object should exist and spares the developer the logic of having to
query for the sobject, test if it doesn't exist and then create it.
@param:
search_type - the type of the sobject
data - a dictionary of name/value pairs that uniquely identify this
sobject
@return:
        sobject - unique sobject matching the criteria in data
'''
results = my.server.get_unique_sobject(my.ticket, search_type, data)
return results
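    # Illustrative sketch (values assumed): fetch-or-create an asset by code.
    #   asset = server.get_unique_sobject("prod/asset", {"code": "chr001"})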
def get_column_names(my, search_type):
'''API Function: get_column_names(search_type)
This method will get all of the column names associated with a search
type
@param:
search_type - the search type used to query the columns for
@return
        list of column names
'''
return my.server.get_column_names(my.ticket, search_type)
#
# Expression methods
#
def eval(my, expression, search_keys=[], mode=None, single=False, vars={},
show_retired=False):
'''API Function: eval(expression, search_keys=[], mode=None, single=False, vars={}, show_retired=False)
Evaluate the expression. This expression uses the TACTIC expression
language to retrieve results. For more information, refer to the
expression language documentation.
@param:
expression - string expression
@keyparam:
search_keys - the starting point for the expression.
mode - string|expression - determines the starting mode of the expression
single - True|False - True value forces a single return value
vars - user defined variable
show_retired - defaults to False to not return retired items
@return:
results of the expression. The results depend on the exact nature
of the expression.
@example:
#1. Search for snapshots with context beginning with 'model' for the asset with the search key 'prod/asset?project=sample3d&id=96'
[code]
server = TacticServerStub.get()
exp = "@SOBJECT(sthpw/snapshot['context','EQ','^model'])"
result = server.eval(exp, search_keys=['prod/asset?project=sample3d&id=96'])
[/code]
Please refer to the expression language documentation for numerous
examples on how to use the expression language.
'''
#return my.server.eval(my.ticket, expression, search_keys, mode, single, vars)
results = my.server.eval(my.ticket, expression, search_keys, mode,
single, vars, show_retired)
try:
return eval(results)
except:
return results
#
# Higher Level Object methods
#
def create_search_type(my, search_type, title, description="",
has_pipeline=False):
'''API Function: create_search_type(search_type, title, description="", has_pipeline=False)
Create a new search type
@param:
search_type - Newly defined search_type
title - readable title to display this search type as
@keyparam:
description - a brief description of this search type
has_pipeline - determines whether this search type goes through a
pipeline. Simply puts a pipeline_code column in the table.
@return
string - the newly created search type
'''
return my.server.create_search_type(my.ticket, search_type, title,
description, has_pipeline)
def add_column_to_search_type(my, search_type, column_name, column_type):
'''Adds a new column to the search type
@params
search_type - the search type that the new column will be added to
column_name - the name of the column to add to the database
column_type - the type of the column to add to the database
@return
True if column was created, False if column exists
'''
return my.server.add_column_to_search_type(my.ticket, search_type,
column_name, column_type)
def get_by_search_key(my, search_key):
'''API Function: get_by_search_key(search_key)
Get the info on an sobject based on search key
@param:
        search_key - a unique identifier key representing an sobject
@return:
list of dictionary - sobjects that represent values of the sobject in the
form of name:value pairs
'''
return my.server.get_by_search_key(my.ticket, search_key)
def get_by_code(my, search_type, code):
'''API Function: get_by_code(search_type, search_code)
Get the info on an sobject based on search code
@param:
search_type - the search_type of the sobject to search for
code - the code of the sobject to search for
@return:
sobject - a dictionary that represents values of the sobject in the
form name/value pairs
'''
return my.server.get_by_code(my.ticket, search_type, code)
def delete_sobject(my, search_key, include_dependencies=False):
'''API Function: delete_sobject(search_key, include_dependencies=False)
Invoke the delete method. Note: this function may fail due
to dependencies. Tactic will not cascade delete. This function
should be used with extreme caution because, if successful, it will
permanently remove the existence of an sobject
@param:
search_key - a unique identifier key representing an sobject.
Note: this can also be an array.
@keyparam:
include_dependencies - True/False
@return:
dictionary - a sobject that represents values of the sobject in the
form name:value pairs
'''
return my.server.delete_sobject(my.ticket, search_key,
include_dependencies)
def retire_sobject(my, search_key):
'''API Function: retire_sobject(search_key)
Invoke the retire method. This is preferred over delete_sobject if
        you are not sure whether other sobjects have dependencies on this one.
@param:
        search_key - the unique key identifying the sobject.
@return:
dictionary - sobject that represents values of the sobject in the
form name:value pairs
'''
return my.server.retire_sobject(my.ticket, search_key)
def reactivate_sobject(my, search_key):
'''API Function: reactivate_sobject(search_key)
Invoke the reactivate method.
@param:
        search_key - the unique key identifying the sobject.
@return:
dictionary - sobject that represents values of the sobject in the
form name:value pairs
'''
return my.server.reactivate_sobject(my.ticket, search_key)
def set_widget_setting(my, key, value):
        '''API Function: set_widget_setting(key, value)
Set widget setting for current user and project
@param
key - unique key to identify this setting
value - value the setting should be set to
@return
None
'''
        my.server.set_widget_setting(my.ticket, key, value)
def get_widget_setting(my, key):
        '''API Function: get_widget_setting(key)
Get widget setting for current user and project
@param
key - unique key to identify this setting
@return
value of setting
'''
        return my.server.get_widget_setting(my.ticket, key)
#
# sType Hierarchy methods
#
def get_parent(my, search_key, columns=[], show_retired=False):
        '''API Function: get_parent(search_key, columns=[], show_retired=False)
Get the parent of an sobject.
@param:
search_key - a unique identifier key representing an sobject
@keyparam:
columns - the columns that will be returned in the sobject
        show_retired - defaults to False so a retired parent is not returned
@return:
dictionary - the parent sobject
'''
return my.server.get_parent(my.ticket, search_key, columns,
show_retired)
def get_all_children(my, search_key, child_type, filters=[], columns=[]):
'''API Function: get_all_children(search_key, child_type, filters=[], columns=[])
Get all children of a particular child type of an sobject
@param:
search_key - a unique identifier key representing an sobject
child_type - the search_type of the children to search for
@keyparam:
filters - extra filters on the query : see query method for examples
columns - list of column names to be included in the returned dictionary
@return:
list of dictionary - a list of sobjects dictionaries
'''
#filters = []
return my.server.get_all_children(my.ticket, search_key, child_type,
filters, columns)
def get_parent_type(my, search_key):
'''API Function: get_parent_type(search_key)
        Get the parent search type
@param:
search_key - a unique identifier key representing an sobject
@return:
        string - the parent search type
'''
return my.server.get_parent_type(my.ticket, search_key)
def get_child_types(my, search_key):
'''API Function: get_child_types(search_key)
Get all the child search types
@param:
search_key - a unique identifier key representing an sobject
@return:
list - the child search types
'''
return my.server.get_child_types(my.ticket, search_key)
def get_types_from_instance(my, instance_type):
'''API Function: get_types_from_instance(instance_type)
Get the connector types from an instance type
@param:
instance_type - the search type of the instance
@return:
tuple - (from_type, parent_type)
a tuple with the from_type and the parent_type. The from_type is
the connector type and the parent type is the search type of the
parent of the instance
'''
return my.server.get_types_from_instance(my.ticket, instance_type)
def connect_sobjects(my, src_sobject, dst_sobject, context='default'):
'''API Function: connect_sobjects(src_sobject, dst_sobject, context='default')
Connect two sobjects together
@param:
src_sobject - the original sobject from which the connection starts
dst_sobject - the sobject to which the connection connects to
@keyparam:
        context - an arbitrary parameter which defines the type of connection
@return:
dictionary - the last connection sobject created
'''
return my.server.connect_sobjects(my.ticket, src_sobject, dst_sobject,
context)
def get_connected_sobjects(my, src_sobject, context='default'):
'''API Function: get_connected_sobjects(src_sobject, context='default')
Get all of the connected sobjects
@param:
src_sobject - the original sobject from which the connection starts
@keyparam:
context - an arbitrary parameter which defines type of connection
@return:
list - a list of connected sobjects
'''
return my.server.get_connected_sobjects(my.ticket, src_sobject, context)
def get_connected_sobject(my, src_sobject, context='default'):
'''API Function: get_connected_sobject(src_sobject, context='default')
Get the connected sobject
@params
src_sobject - the original sobject from which the connection starts
@keyparam:
        context - an arbitrary parameter which defines the type of connection
@return:
dict - a single connected sobject
'''
return my.server.get_connected_sobject(my.ticket, src_sobject, context)
#
# upload/download methods
#
def download(my, url, to_dir=".", filename='', md5_checksum=""):
        '''API Function: download(url, to_dir=".", filename='', md5_checksum="")
Download a file from a given url
@param:
url - the url source location of the file
@keyparam:
to_dir - the directory to download to
filename - the filename to download to, defaults to original filename
md5_checksum - an md5 checksum to match the file against
@return:
        string - path of the downloaded file
'''
# use url filename by default
if not filename:
filename = os.path.basename(url)
# download to temp_dir
#if not to_dir:
# to_dir = my.get_tmpdir()
# make sure the directory exists
if not os.path.exists(to_dir):
os.makedirs(to_dir)
to_path = "%s/%s" % (to_dir, filename)
# check if this file is already downloaded. if so, skip
if os.path.exists(to_path):
# if it exists, check the MD5 checksum
if md5_checksum:
if my._md5_check(to_path, md5_checksum):
print "skipping '%s', already exists" % to_path
return to_path
else:
# always download if no md5_checksum available
pass
f = urllib.urlopen(url)
file = open(to_path, "wb")
file.write( f.read() )
file.close()
f.close()
# check for downloaded file
# COMMENTED OUT for now since it does not work well with icons
#if md5_checksum and not my._md5_check(to_path, md5_checksum):
# raise TacticException('Downloaded file [%s] in local repo failed md5 check. This file may be missing on the server or corrupted.'%to_path)
"""
print "starting download"
try:
import urllib2
file = open(to_path, "wb")
req = urllib2.urlopen(url)
try:
while True:
buffer = req.read(1024*100)
print "read: ", len(buffer)
if not buffer:
break
file.write( buffer )
finally:
print "closing ...."
req.close()
file.close()
except urllib2.URLError, e:
raise Exception('%s - %s' % (e,url))
print "... done download"
"""
return to_path
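    # Illustrative sketch (the URL is an assumption):
    #   path = server.download("http://my-server/assets/chr001_model.ma", to_dir="/tmp/assets")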
def upload_file(my, path, base_dir=None):
'''API Function: upload_file(path)
        Use the HTTP protocol to upload a file to the server
@param:
path - the name of the file that will be uploaded
'''
from common import UploadMultipart
upload = UploadMultipart()
upload.set_ticket(my.transaction_ticket)
if my.server_name.startswith("http://") or my.server_name.startswith("https://"):
upload_server_url = "%s/tactic/default/UploadServer/" % my.server_name
else:
upload_server_url = "http://%s/tactic/default/UploadServer/" % my.server_name
if base_dir:
basename = os.path.basename(path)
dirname = os.path.dirname(path)
            if not path.startswith(base_dir):
raise TacticApiException("Path [%s] does not start with base_dir [%s]" % (path, base_dir))
base_dir = base_dir.rstrip("/")
sub_dir = dirname.replace("%s/" % base_dir, "")
if sub_dir:
upload.set_subdir(sub_dir)
upload.set_upload_server(upload_server_url)
#upload.set_subdir("blah")
upload.execute(path)
# upload a file
#filename = os.path.basename(path)
#file = open(path, 'rb')
#data = xmlrpclib.Binary( file.read() )
#file.close()
#return my.server.upload_file(my.transaction_ticket, filename, data)
def upload_group(my, path, file_range):
        '''Uses the HTTP protocol to upload a sequence of files
@params
path - the name of the file that will be uploaded
file_range - string describing range of frames in the form '1-5/1'
'''
start, end = file_range.split("-")
start = int(start)
end = int(end)
if path.find("####") != -1:
path = path.replace("####", "%0.4d")
# TODO: add full range functionality here
for frame in range(start, end+1):
full_path = path % frame
my.upload_file(full_path)
# file group functions
def _get_file_range(my, file_range):
'''get the file_range'''
frame_by = 1
if file_range.find("/") != -1:
file_range, frame_by = file_range.split("/")
frame_by = int(frame_by)
frame_start, frame_end = file_range.split("-")
frame_start = int(frame_start)
frame_end = int(frame_end)
return frame_start, frame_end, frame_by
def _expand_paths(my, file_path, file_range):
        '''Expands the file paths, replacing # as specified in the file_range
        @param:
        file_path - file path with #### or %0.4d notation
        file_range - a string such as '1-5/1'
        '''
file_paths = []
frame_start, frame_end, frame_by = my._get_file_range(file_range)
# support %0.4d notation
if file_path.find('#') == -1:
for i in range(frame_start, frame_end+1, frame_by):
expanded = file_path % i
file_paths.append( expanded )
else:
# find out the number of #'s in the path
padding = len( file_path[file_path.index('#'):file_path.rindex('#')
])+1
for i in range(frame_start, frame_end+1, frame_by):
expanded = file_path.replace( '#'*padding, str(i).zfill(padding) )
file_paths.append(expanded)
return file_paths
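    # Illustrative sketch of the expansion above (values assumed):
    #   _expand_paths("shot.####.exr", "1-3/1")
    #   # -> ["shot.0001.exr", "shot.0002.exr", "shot.0003.exr"]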
#
# Checkin/out methods
#
def create_snapshot(my, search_key, context, snapshot_type="file",
description="No description", is_current=True, level_key=None,
is_revision=False, triggers=True):
'''API Function: create_snapshot(search_key, context, snapshot_type="file", description="No description", is_current=True, level_key=None, is_revision=False, triggers=True )
Create an empty snapshot
@param:
search_key - a unique identifier key representing an sobject
context - the context of the checkin
@keyparam:
snapshot_type - [optional] describes what kind of a snapshot this is.
More information about a snapshot type can be found in the
prod/snapshot_type sobject
description - [optional] optional description for this checkin
is_current - flag to determine if this checkin is to be set as current
is_revision - flag to set this as a revision instead of a version
level_key - the unique identifier of the level that this
is to be checked into
triggers - boolean to fire triggers on insert
@return:
dictionary - representation of the snapshot created for this checkin
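@example:
A minimal sketch that creates an empty snapshot and then appends a file
to it with add_file() (the search key and file path below are hypothetical):
[code]
search_key = 'prod/asset?project=sample3d&code=chr001'
snapshot = server.create_snapshot(search_key, context='model')
server.add_file(snapshot.get('code'), './my_model.ma', mode='upload')
[/code]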
'''
return my.server.create_snapshot(my.ticket, search_key, context,
snapshot_type, description, is_current,
level_key, is_revision, triggers)
def simple_checkin(my, search_key, context, file_path,
snapshot_type="file", description="No description",
use_handoff_dir=False, file_type="main", is_current=True,
level_key=None, breadcrumb=False, metadata={}, mode='upload',
is_revision=False, info={} ,
keep_file_name=False, create_icon=True,
checkin_cls='pyasm.checkin.FileCheckin',
context_index_padding=None,
checkin_type="", source_path=None,
version=None
):
'''API Function: simple_checkin( search_key, context, file_path, snapshot_type="file", description="No description", use_handoff_dir=False, file_type="main", is_current=True, level_key=None, breadcrumb=False, metadata={}, mode='upload', is_revision=False, info={}, keep_file_name=False, create_icon=True, checkin_cls='pyasm.checkin.FileCheckin', context_index_padding=None, checkin_type="", source_path=None, version=None )
Simple method that checks in a file.
@param:
search_key - a unique identifier key representing an sobject
context - the context of the checkin
file_path - path of the file that was previously uploaded
@keyparam:
snapshot_type - [optional] describes what kind of a snapshot this is.
More information about a snapshot type can be found in the
prod/snapshot_type sobject
description - [optional] optional description for this checkin
file_type - [optional] the type of the file being checked in
is_current - flag to determine if this checkin is to be set as current
level_key - the unique identifier of the level that this
is to be checked into
breadcrumb - flag to leave a .snapshot breadcrumb file containing
information about what happened to a checked in file
metadata - a dictionary of values that will be stored as metadata
on the snapshot
mode - upload, uploaded, copy, move, local, inplace
is_revision - flag to set this as a revision instead of a version
create_icon - flag to create an icon on checkin
info - dict of info to pass to the ApiClientCmd
keep_file_name - keep the original file name
checkin_cls - checkin class
context_index_padding - determines the padding used for context
indexing: ie: design/0001
checkin_type - auto or strict which controls whether to auto create versionless
source_path - explicitly give the source path
version - force a version for this check-in
@return:
dictionary - representation of the snapshot created for this checkin
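@example:
A minimal sketch that uploads and checks in a single file (the search
key and file path below are hypothetical):
[code]
search_key = 'prod/asset?project=sample3d&code=chr001'
snapshot = server.simple_checkin(search_key, 'model', 'C:/temp/my_model.ma', description='First pass model', mode='upload')
print snapshot.get('code')
[/code]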
'''
mode_options = ['upload', 'uploaded', 'copy', 'move', 'local','inplace']
if mode:
if mode not in mode_options:
raise TacticApiException('Mode must be in %s' % mode_options)
if mode == 'upload':
my.upload_file(file_path)
elif mode == 'uploaded':
# remap file path: this mode is only used locally.
from pyasm.common import Environment
upload_dir = Environment.get_upload_dir()
file_path = "%s/%s" % (upload_dir, file_path)
elif mode in ['copy', 'move']:
handoff_dir = my.get_handoff_dir()
use_handoff_dir = True
# make sure that handoff dir is empty
try:
shutil.rmtree(handoff_dir)
os.makedirs(handoff_dir)
os.chmod(handoff_dir, 0777)
except OSError, e:
sys.stderr.write("WARNING: could not cleanup handoff directory [%s]: %s"
% (handoff_dir, e.__str__()))
# copy or move the tree
basename = os.path.basename(file_path)
if mode == 'move':
shutil.move(file_path, "%s/%s" % (handoff_dir, basename))
mode = 'create'
elif mode == 'copy':
shutil.copy(file_path, "%s/%s" % (handoff_dir, basename))
# it moves to repo from handoff dir later
mode = 'create'
elif mode in ['local']:
# do nothing
pass
# check in the file to the server
snapshot = my.server.simple_checkin(my.ticket, search_key, context,
file_path, snapshot_type,
description, use_handoff_dir,
file_type, is_current, level_key,
metadata, mode, is_revision, info,
keep_file_name, create_icon,
checkin_cls, context_index_padding,
checkin_type, source_path, version)
if mode == 'local':
# get the naming conventions and move the file to the local repo
files = my.server.eval(my.ticket, "@SOBJECT(sthpw/file)", snapshot)
# FIXME: this only works on the python implementation .. should
# use JSON
files = eval(files)
# TODO: maybe cache this??
base_dirs = my.server.get_base_dirs(my.ticket)
if os.name == 'nt':
client_repo_dir = base_dirs.get("win32_local_repo_dir")
else:
client_repo_dir = base_dirs.get("linux_local_repo_dir")
if not client_repo_dir:
raise TacticApiException('No local_repo_dir defined in server config file')
for file in files:
rel_path = "%s/%s" %( file.get('relative_dir'),
file.get('file_name'))
repo_path = "%s/%s" % (client_repo_dir, rel_path)
repo_dir = os.path.dirname(repo_path)
if not os.path.exists(repo_dir):
os.makedirs(repo_dir)
basename = os.path.basename(repo_path)
dirname = os.path.dirname(repo_path)
temp_repo_path = "%s/.%s.temp" % (dirname, basename)
shutil.copy(file_path, temp_repo_path)
shutil.move(temp_repo_path, repo_path)
# leave a breadcrumb
if breadcrumb:
snapshot_code = snapshot.get('code')
full_snapshot_xml = my.get_full_snapshot_xml(snapshot_code)
snapshot_path = "%s.snapshot" % file_path
file = open(snapshot_path, 'wb')
file.write(full_snapshot_xml)
file.close()
return snapshot
def group_checkin(my, search_key, context, file_path, file_range,
snapshot_type="sequence", description="",
file_type='main', metadata={}, mode=None,
is_revision=False , info={} ):
'''API Function: group_checkin(search_key, context, file_path, file_range, snapshot_type="sequence", description="", file_type='main', metadata={}, mode=None, is_revision=False, info={} )
Check in a range of files. A range of files is defined as any group
of files that have some sequence of numbers grouping them together.
An example of this is a range of frames that have been rendered.
Although it is possible to add each frame in a range using add_file,
adding them as a sequence is lightweight, often significantly reducing
the number of database entries required. Also, it is understood that
these files form a range of related files, so that other optimizations
and manipulations can be operated on these files accordingly.
@param:
search_key - a unique identifier key representing an sobject
file_path - expression for file range: ./blah.####.jpg
file_type - the type of file this is checked in as. Default = 'main'
file_range - string describing range of frames in the form '1-5/1'
@keyparam:
snapshot_type - type of snapshot this checkin will have
description - description related to this checkin
file_type - the type of file that will be associated with this group
metadata - add metadata to snapshot
mode - determines whether the files passed in should be copied, moved
or uploaded. By default, this is a manual process (for backwards
compatibility)
is_revision - flag to set this as a revision instead of a version
info - dict of info to pass to the ApiClientCmd
@return:
dictionary - snapshot
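@example:
A minimal sketch checking in a hypothetical rendered frame sequence by
copying the frames to the handoff directory:
[code]
search_key = 'prod/shot?project=sample3d&code=shot001'
snapshot = server.group_checkin(search_key, 'render', 'C:/renders/shot001.####.jpg', '1-24/1', mode='copy')
[/code]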
'''
mode_options = ['upload', 'copy', 'move', 'inplace']
if mode:
if mode not in mode_options:
raise TacticApiException('Mode must be in %s' % mode_options)
# brute force method
if mode == 'move':
handoff_dir = my.get_handoff_dir()
expanded_paths = my._expand_paths(file_path, file_range)
for path in expanded_paths:
basename = os.path.basename(path)
shutil.move(path, '%s/%s' %(handoff_dir, basename))
use_handoff_dir = True
mode = 'create'
elif mode == 'copy':
handoff_dir = my.get_handoff_dir()
expanded_paths = my._expand_paths(file_path, file_range)
for path in expanded_paths:
basename = os.path.basename(path)
shutil.copy(path, '%s/%s' %(handoff_dir, basename))
use_handoff_dir = True
# it moves to repo from handoff dir later
mode = 'create'
elif mode == 'upload':
expanded_paths = my._expand_paths(file_path, file_range)
for path in expanded_paths:
my.upload_file(path)
use_handoff_dir = False
elif mode == 'inplace':
use_handoff_dir = False
# get the absolute path
file_path = os.path.abspath(file_path)
return my.server.group_checkin(my.ticket, search_key, context,
file_path, file_range,
snapshot_type, description, file_type,
metadata, mode, is_revision, info )
def directory_checkin(my, search_key, context, dir,
snapshot_type="directory",
description="No description", file_type='main',
is_current=True, level_key=None, metadata={},
mode="copy", is_revision=False,
checkin_type='strict'):
'''API Function: directory_checkin(search_key, context, dir, snapshot_type="directory", description="No description", file_type='main', is_current=True, level_key=None, metadata={}, mode="copy", is_revision=False, checkin_type="strict")
Check in a directory of files. This informs TACTIC to treat the
entire directory as a single entity without regard to the structure
of its contents. TACTIC will not know about the individual files
or the directory hierarchy within the base directory; it is left
up to an external program to interpret and understand this.
This is often used when logic on the exact file structure exists in
some external source outside of TACTIC and it is deemed too complicated
to map this into TACTIC's snapshot definition.
@param:
search_key - a unique identifier key representing an sobject
dir - the directory that needs to be checked in
@keyparam:
snapshot_type - type of snapshot this checkin will have
description - description related to this checkin
file_type - the type of file that will be associated with this group
is_current - makes this snapshot current
level_key - the search key of the level if used
metadata - add metadata to snapshot
mode - determines whether the files passed in should be copied, moved
or uploaded. By default, this is 'copy'
is_revision - flag to set this as a revision instead of a version
checkin_type - auto or strict which controls whether to auto create versionless
@return:
dictionary - snapshot
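@example:
A minimal sketch checking in an entire directory as a single entity
(the search key and directory below are hypothetical):
[code]
search_key = 'prod/asset?project=sample3d&code=chr001'
snapshot = server.directory_checkin(search_key, 'textures', 'C:/temp/chr001_textures', mode='copy')
[/code]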
'''
if mode not in ['copy', 'move', 'inplace', 'local']:
raise TacticApiException('Mode must be one of [copy, move, inplace, local]')
handoff_dir = my.get_handoff_dir()
# make sure that handoff dir is empty
try:
shutil.rmtree(handoff_dir)
os.makedirs(handoff_dir)
os.chmod(handoff_dir, 0777)
except OSError, e:
sys.stderr.write("WARNING: could not cleanup handoff directory [%s]: %s"
% (handoff_dir, e.__str__()))
# strip the trailing / or \ if any
m = re.match(r'(.*)([/|\\]$)', dir)
if m:
dir = m.groups()[0]
# copy or move the tree to the handoff directory
basename = os.path.basename(dir)
if mode == 'move':
shutil.move(dir, "%s/%s" % (handoff_dir, basename))
mode = 'create'
elif mode == 'copy':
shutil.copytree(dir, "%s/%s" % (handoff_dir, basename))
# it moves to repo from handoff dir later
mode = 'create'
use_handoff_dir = True
# some default data
info = {}
keep_file_name = False
create_icon = False
checkin_cls = "pyasm.checkin.FileCheckin"
context_index_padding = None
source_path = None
version = None
snapshot = my.server.simple_checkin(my.ticket, search_key, context, dir,
snapshot_type, description,
use_handoff_dir, file_type,
is_current, level_key, metadata,
mode, is_revision, info,
keep_file_name, create_icon,
checkin_cls, context_index_padding,
checkin_type, source_path, version)
if mode == 'local':
# get the naming conventions and move the file to the local repo
files = my.server.eval(my.ticket, "@SOBJECT(sthpw/file)", snapshot)
# FIXME: this only works on the python implementation
files = eval(files)
for file in files:
rel_path = "%s/%s" %( file.get('relative_dir'),
file.get('file_name'))
base_dirs = my.server.get_base_dirs(my.ticket)
if os.name == 'nt':
client_repo_dir = base_dirs.get("win32_local_base_dir")
else:
client_repo_dir = base_dirs.get("linux_local_base_dir")
repo_path = "%s/%s" % (client_repo_dir, rel_path)
repo_dir = os.path.dirname(repo_path)
if not os.path.exists(repo_dir):
os.makedirs(repo_dir)
shutil.copytree(dir,repo_path)
return snapshot
def add_dependency(my, snapshot_code, file_path, type='ref', tag='main'):
'''API Function: add_dependency(snapshot_code, file_path, type='ref')
Append a dependency reference to an existing check-in.
All files are uniquely contained by a particular snapshot. Presently,
this method does a reverse lookup by file name. This assumes that
the file name is unique within the system, so it is not recommended
unless it is known that the naming conventions will produce a unique
file name for this particular file. If this is not the
case, it is recommended that add_dependency_by_code() is used.
@param:
snapshot_code - the unique code identifier of a snapshot
file_path - the path of the dependent file. This function is able
reverse map the file_path to the appropriate snapshot
@keyparam:
type - type of dependency. Values include 'ref' and 'input_ref'
ref = hierarchical reference: ie A contains B
input_ref = input reference: ie: A was used to create B
tag - a tagged keyword can be added to a dependency to categorize
the different dependencies that exist in a snapshot
@return:
dictionary - the resulting snapshot
'''
return my.server.add_dependency(my.ticket, snapshot_code, file_path,
type, tag)
def add_dependency_by_code(my, to_snapshot_code, from_snapshot_code,
type='ref', tag='main'):
'''API Function: add_dependency_by_code(to_snapshot_code, from_snapshot_code, type='ref')
Append a dependency reference to an existing checkin. This dependency
is used to connect various checkins together creating a separate
dependency tree for each checkin.
@param:
to_snapshot_code: the snapshot code which the dependency will be
connected to
from_snapshot_code: the snapshot code which the dependency will be
connected from
type - type of dependency. Values include 'ref' and 'input_ref'
ref = hierarchical reference: ie A contains B
input_ref - input reference: ie: A was used to create B
tag - a tagged keyword can be added to a dependency to categorize
the different dependencies that exist in a snapshot
@return:
dictionary - the resulting snapshot
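@example:
A minimal sketch connecting two hypothetical snapshot codes so that the
first snapshot records the second as an input:
[code]
server.add_dependency_by_code('SNAPSHOT00000123', 'SNAPSHOT00000077', type='input_ref')
[/code]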
'''
return my.server.add_dependency_by_code(my.ticket, to_snapshot_code,
from_snapshot_code, type, tag)
def add_file(my, snapshot_code, file_path, file_type='main',
use_handoff_dir=False, mode=None, create_icon=False,
dir_naming='', file_naming='', checkin_type='strict'):
'''API Function: add_file(snapshot_code, file_path, file_type='main', use_handoff_dir=False, mode=None, create_icon=False)
Add a file to an already existing snapshot. This method is used in
piecewise checkins. A blank snapshot can be created using
create_snapshot(). This method can then be used to successively
add files to the snapshot.
In order to check in the file, the server will need to have access
to these files. There are a number of ways of getting the files
to the server. When using copy or move mode, the files are either
copied or moved to the "handoff_dir". This directory
is an agreed upon directory in which to handoff the files to the
server. This mode is generally used for checking in user files.
For heavy bandwidth checkins, it is recommended to use preallocated
checkins.
@param:
snapshot_code - the unique code identifier of a snapshot
file_path - path of the file to add to the snapshot.
Optional: this can also be an array to add multiple files at once.
This has much faster performance than adding one file at a time.
Also, note that in this case, file_types must be an array
of equal size.
@keyparam:
file_type - type of the file to be added.
Optional: this can also be an array. See file_path argument
for more information.
use_handoff_dir - DEPRECATED: (use mode arg) use handoff dir to checkin
file. The handoff dir is an agreed upon directory between the
client and server to transfer files.
mode - upload|copy|move|manual|inplace - determines the protocol which delivers
the file to the server.
create_icon - (True|False) determine whether to create an icon for
this appended file. Only 1 icon should be created for each
snapshot.
dir_naming - explicitly set a dir_naming expression to use
file_naming - explicitly set a file_naming expression to use
checkin_type - auto or strict which controls whether to auto create versionless and adopt some default dir/file naming
@return:
dictionary - the resulting snapshot
@example:
This will create a blank model snapshot for character chr001 and
add a file
[code]
search_type = 'prod/asset'
code = 'chr001'
search_key = server.build_search_type(search_type, code)
context = 'model'
path = "./my_model.ma"
snapshot = server.create_snapshot(search_key, context)
server.add_file( snapshot.get('code'), path )
[/code]
Different files should be separated by file type. For example,
to check in both a maya and houdin file in the same snapshot:
[code]
maya_path = "./my_model.ma"
houdini_path = "./my_model.hip"
server.add_file( snapshot_code, maya_path, file_type='maya' )
server.add_file( snapshot_code, houdini_path, file_type='houdini' )
[/code]
To transfer files by uploading (using http protocol):
[code]
server.add_file( snapshot_code, maya_path, mode='upload' )
[/code]
To create an icon for this file
[code]
path = 'image.jpg'
server.add_file( snapshot_code, path, mode='upload', create_icon=True )
[/code]
To add multiple files at once
[code]
file_paths = [maya_path, houdini_path]
file_types = ['maya', 'houdini']
server.add_file( snapshot_code, file_paths, file_types=file_types, mode='upload')
[/code]
'''
if type(file_path) != types.ListType:
file_paths = [file_path]
else:
file_paths = file_path
if type(file_type) != types.ListType:
file_types = [file_type]
else:
file_types = file_type
for path in file_paths:
if os.path.isdir(path):
raise TacticApiException('[%s] is a directory. Use add_directory() instead' %path)
mode_options = ['upload', 'copy', 'move', 'preallocate','inplace']
if mode:
if mode in ['copy', 'move']:
handoff_dir = my.get_handoff_dir()
use_handoff_dir = True
# make sure that handoff dir is empty
try:
shutil.rmtree(handoff_dir)
os.makedirs(handoff_dir)
except OSError, e:
sys.stderr.write("WARNING: could not cleanup handoff directory [%s]: %s"
% (handoff_dir, e.__str__()))
for i, file_path in enumerate(file_paths):
file_type = file_types[i]
if mode not in mode_options:
raise TacticApiException('Mode must be in %s' % mode_options)
if mode == 'upload':
my.upload_file(file_path)
use_handoff_dir = False
elif mode in ['copy', 'move']:
# copy or move the tree
basename = os.path.basename(file_path)
if mode == 'move':
shutil.move(file_path, "%s/%s"
% (handoff_dir, basename))
elif mode == 'copy':
shutil.copy(file_path, "%s/%s"
% (handoff_dir, basename))
mode = 'create'
return my.server.add_file(my.ticket, snapshot_code, file_paths,
file_types, use_handoff_dir, mode,
create_icon, dir_naming, file_naming,
checkin_type)
def remove_file(my, snapshot_code, file_type):
return my.server.remove_file(my.ticket, snapshot_code, file_type)
def add_group(my, snapshot_code, file_path, file_type, file_range,
use_handoff_dir=False, mode=None):
'''API Function: add_group(snapshot_code, file_path, file_type, file_range, use_handoff_dir=False, mode=None)
Add a file range to an already existing snapshot
@param:
snapshot_code - the unique code identifier of a snapshot
file_path - path of the file to add to the snapshot
file_type - type of the file to be added.
file_range - range with format s-e/b
@keyparam:
use_handoff_dir - use handoff dir to checkin file
mode - one of 'copy','move','preallocate'
@return:
dictionary - the resulting snapshot
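@example:
A minimal sketch adding a hypothetical frame sequence to an existing
snapshot by copying the frames to the handoff directory (search_key is
assumed to be defined already):
[code]
snapshot = server.create_snapshot(search_key, context='render')
server.add_group(snapshot.get('code'), 'C:/renders/shot001.####.jpg', 'main', '1-24/1', mode='copy')
[/code]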
'''
mode_options = ['upload', 'copy', 'move', 'preallocate']
if mode:
if mode not in mode_options:
raise TacticApiException('Mode must be in %s' % mode_options)
#dir = os.path.dirname(file_path)
handoff_dir = my.get_handoff_dir()
if mode == 'move':
expanded_paths = my._expand_paths(file_path, file_range)
for path in expanded_paths:
basename = os.path.basename(path)
shutil.move(path, '%s/%s' %(handoff_dir, basename))
use_handoff_dir = True
mode = 'create'
elif mode == 'copy':
expanded_paths = my._expand_paths(file_path, file_range)
for path in expanded_paths:
basename = os.path.basename(path)
shutil.copy(path, '%s/%s' %(handoff_dir, basename))
use_handoff_dir = True
mode = 'create'
elif mode == 'upload':
my.upload_group(file_path, file_range)
use_handoff_dir = False
elif mode == 'preallocate':
use_handoff_dir = True
return my.server.add_group(my.ticket, snapshot_code, file_path,
file_type, file_range, use_handoff_dir, mode)
def add_directory(my, snapshot_code, dir, file_type='main', mode="copy",
dir_naming='', file_naming=''):
'''API Function: add_directory(snapshot_code, dir, file_type='main', mode="copy", dir_naming='', file_naming='')
Add a full directory to an already existing checkin.
This informs TACTIC to treat the entire directory as single entity
without regard to the structure of the contents. TACTIC will not
know about the individual files and the directory hierarchy within
the base directory and it it left up to the and external program
to intepret and understand this.
This is often used when logic on the exact file structure exists in
some external source outside of TACTIC and it is deemed to complictaed
to map this into TACTIC's snapshot definition.
@param:
snapshot_code - a unique identifier key representing an sobject
dir - the directory that needs to be checked in
@keyparam:
file_type - file type is used more as snapshot type here
mode - copy, move, preallocate, manual, inplace
dir_naming - explicitly set a dir_naming expression to use
file_naming - explicitly set a file_naming expression to use
@return:
dictionary - snapshot
@example:
This will create a new snapshot for a search_key and add a directory using manual mode
[code]
dir = 'C:/images'
handoff_dir = my.server.get_handoff_dir()
shutil.copytree('%s/subfolder' %dir, '%s/images/subfolder' %handoff_dir)
snapshot_dict = my.server.create_snapshot(search_key, context='render')
snapshot_code = snapshot_dict.get('code')
my.server.add_directory(snapshot_code, dir, file_type='dir', mode='manual')
[/code]
'''
if mode not in ['copy', 'move', 'preallocate', 'manual', 'inplace']:
raise TacticApiException('Mode must be one of [copy, move, preallocate, manual, inplace]')
if mode in ['copy', 'move']:
handoff_dir = my.get_handoff_dir()
# make sure that handoff dir is empty
try:
shutil.rmtree(handoff_dir)
os.makedirs(handoff_dir)
except OSError, e:
sys.stderr.write("WARNING: could not cleanup handoff directory [%s]: %s"
% (handoff_dir, e.__str__()))
# copy or move the tree
basename = os.path.basename(dir)
if mode == 'move':
shutil.move(dir, "%s/%s" % (handoff_dir, basename))
elif mode == 'copy':
shutil.copytree(dir, "%s/%s" % (handoff_dir, basename))
mode = 'create'
use_handoff_dir = True
create_icon = False
return my.server.add_file(my.ticket, snapshot_code, dir, file_type,
use_handoff_dir, mode, create_icon,
dir_naming, file_naming )
def checkout(my, search_key, context="publish", version=-1,
file_type='main', to_dir=".", level_key=None,
to_sandbox_dir=False, mode='copy'):
'''API Function: checkout(search_key, context, version=-1, file_type='main', dir='', level_key=None, to_sandbox_dir=False, mode='copy')
Check out files defined in a snapshot from the repository. This
will copy files to a particular directory so that a user can work
on them.
@param:
search_key - a unique identifier key representing an sobject
context - context of the snapshot
@keyparam:
version - version of the snapshot
file_type - file type defaults to 'main'. If set to '*', all paths are checked out
level_key - the unique identifier of the level in the form of a search key
to_dir - destination directory defaults to '.'
to_sandbox_dir - (True|False) destination directory defaults to
sandbox_dir (overrides "to_dir" arg)
mode - (copy|download) - determines the protocol that will be used
to copy the files to the destination location
@return:
list - a list of paths that were checked out
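@example:
A minimal sketch checking out the latest 'model' files to a local
directory (the search key and directory below are hypothetical):
[code]
search_key = 'prod/asset?project=sample3d&code=chr001'
paths = server.checkout(search_key, context='model', to_dir='C:/sandbox/chr001')
print paths
[/code]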
'''
if not os.path.isdir(to_dir):
raise TacticApiException("[%s] does not exist or is not a directory" % to_dir)
to_dir = to_dir.replace("\\","/")
#repo_paths = my.server.checkout(my.ticket, search_key, context, version, file_type, level_key)
paths = my.server.checkout(my.ticket, search_key, context, version,
file_type, level_key)
client_lib_paths = paths['client_lib_paths']
sandbox_paths = paths['sandbox_paths']
web_paths = paths['web_paths']
to_paths = []
for i, client_lib_path in enumerate(client_lib_paths):
if to_sandbox_dir:
to_path = sandbox_paths[i]
filename = os.path.basename(to_path)
else:
filename = os.path.basename(client_lib_path)
to_path = "%s/%s" % (to_dir, filename)
to_paths.append(to_path)
# copy the file from the repo
to_dir = os.path.dirname(to_path)
if not os.path.exists(to_dir):
os.makedirs(to_dir)
if mode == 'copy':
if os.path.exists(client_lib_path):
if os.path.isdir(client_lib_path):
shutil.copytree(client_lib_path, to_path)
else:
shutil.copy(client_lib_path, to_path)
else:
raise TacticApiException("Path [%s] does not exist"
% client_lib_path)
elif mode == 'download':
web_path = web_paths[i]
my.download(web_path, to_dir=to_dir, filename=filename)
else:
raise TacticApiException("Checkout mode [%s] not supported"
% mode)
return to_paths
def lock_sobject(my, search_key, context):
'''Locks the context for checking in and out. Locking a context
prevents the ability to checkout or checkin to that context for a
particular sobject.
@params
search_key - the search key of the sobject
context - the context that will be blocked
@return
None
'''
return my.server.lock_sobject(my.ticket, search_key, context)
def unlock_sobject(my, search_key, context):
'''Unlocks the context for checking in and out. Locking a context
prevents the ability to checkout or checkin to that context for a
particular sobject.
@params
search_key - the search key of the sobject
context - the context that will be unblocked
@return
None
'''
return my.server.unlock_sobject(my.ticket, search_key, context)
def query_snapshots(my, filters=None, columns=None, order_bys=[], show_retired=False, limit=None, offset=None, single=False, include_paths=False, include_full_xml=False, include_paths_dict=False, include_parent=False, include_files=False):
'''API Function: query_snapshots(filters=None, columns=None, order_bys=[], show_retired=False, limit=None, offset=None, single=False, include_paths=False, include_full_xml=False, include_paths_dict=False, include_parent=False, include_files=False)
A thin wrapper around query() that is specific to querying snapshots,
with some useful flags that only apply to snapshots
@params:
ticket - authentication ticket
filters - (optional) an array of filters to alter the search
columns - (optional) an array of columns whose values should be
retrieved
order_bys - (optional) an array of order_by to alter the search
show_retired - (optional) - sets whether retired sobjects are also
returned
limit - sets the maximum number of results returned
single - returns a single sobject that is not wrapped up in an array
include_paths - flag to specify whether to include a __paths__ property
containing a list of all paths in the dependent snapshots
include_paths_dict - flag to specify whether to include a
__paths_dict__ property containing a dict of all paths in the
dependent snapshots
include_full_xml - flag to return the full xml definition of a snapshot
include_parent - includes all of the parent attributes in a __parent__ dictionary
include_files - includes all of the file objects referenced in the
snapshots
@return:
list of snapshots
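@example:
A minimal sketch querying the latest 'model' snapshots with their file
objects included (the filter format and columns below are assumptions):
[code]
filters = [('context', 'model'), ('is_latest', True)]
snapshots = server.query_snapshots(filters=filters, include_files=True, limit=10)
[/code]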
'''
return my.server.query_snapshots(my.ticket, filters, columns, order_bys,
show_retired, limit, offset, single,
include_paths, include_full_xml,
include_paths_dict, include_parent,
include_files)
def get_snapshot(my, search_key, context="publish", version='-1',
revision=None, level_key=None, include_paths=False,
include_full_xml=False, include_paths_dict=False,
include_files=False, include_web_paths_dict=False,
versionless=False, process=None):
'''API Function: get_snapshot(search_key, context="publish", version='-1', level_key=None, include_paths=False, include_full_xml=False, include_paths_dict=False, include_files=False, include_web_paths_dict=False, versionless=False)
Method to retrieve an sobject's snapshot
Retrieve the latest snapshot
@param:
search_key - unique identifier of sobject whose snapshot we are
looking for
@keyparam:
process - the process of the snapshot
context - the context of the snapshot
version - snapshot version
revision - snapshot revision
level_key - the unique identifier of the level in the form of a search key
include_paths - flag to include a list of paths to the files in this
snapshot.
include_full_xml - whether to include full xml in the return
include_paths_dict - flag to specify whether to include a
__paths_dict__ property containing a dict of all paths in the
dependent snapshots
include_web_paths_dict - flag to specify whether to include a
__web_paths_dict__ property containing a dict of all web paths in
the returned snapshots
include_files - includes all of the file objects referenced in the
snapshots
versionless - boolean to return the versionless snapshot, which takes a version of -1 (latest) or 0 (current)
@return:
dictionary - the resulting snapshot
@example:
[code]
search_key = 'prod/asset?project=sample3d&code=chr001'
snapshot = server.get_snapshot(search_key, context='icon', include_files=True)
[/code]
[code]
# get the versionless snapshot
search_key = 'prod/asset?project=sample3d&code=chr001'
snapshot = server.get_snapshot(search_key, context='anim', include_paths_dict=True, versionless=True)
[/code]
'''
return my.server.get_snapshot(my.ticket, search_key, context, version,
revision, level_key, include_paths,
include_full_xml, include_paths_dict,
include_files, include_web_paths_dict,
versionless, process)
def get_full_snapshot_xml(my, snapshot_code):
'''API Function: get_full_snapshot_xml(snapshot_code)
Retrieve a full snapshot xml. This snapshot definition
contains all the information about a snapshot in xml
@param:
snapshot_code - unique code of snapshot
@return:
string - the resulting snapshot xml
'''
return my.server.get_full_snapshot_xml(my.ticket, snapshot_code)
def set_current_snapshot(my, snapshot_code):
'''API Function: set_current_snapshot(snapshot_code)
Set this snapshot as a "current" snapshot
@param:
snapshot_code - unique code of snapshot
@return:
string - the resulting snapshot xml
'''
return my.server.set_current_snapshot(my.ticket, snapshot_code)
def get_dependencies(my, snapshot_code, mode='explicit', tag='main',
include_paths=False, include_paths_dict=False,
include_files=False, repo_mode='client_repo',
show_retired=False):
'''API Function: get_dependencies(snapshot_code, mode='explicit', tag='main', include_paths=False, include_paths_dict=False, include_files=False, repo_mode='client_repo', show_retired=False):
Return the dependent snapshots of a certain tag
@params:
snapshot_code - unique code of a snapshot
@keyparams:
mode - explicit (get version as defined in snapshot)
- latest
- current
tag - retrieve only dependencies that have this named tag
include_paths - flag to specify whether to include a __paths__ property
containing all of the paths in the dependent snapshots
include_paths_dict - flag to specify whether to include a
__paths_dict__ property containing a dict of all paths in the
dependent snapshots
include_files - includes all of the file objects referenced in the
snapshots
repo_mode - client_repo, web, lib, relative
show_retired - defaults to False so that it doesn't show retired dependencies
@return:
a list of snapshots
'''
return my.server.get_dependencies(my.ticket, snapshot_code, mode, tag,
include_paths, include_paths_dict,
include_files, repo_mode,
show_retired)
def get_all_dependencies(my, snapshot_code, mode='explicit', type='ref',
include_paths=False, include_paths_dict=False,
include_files=False, repo_mode='client_repo',
show_retired=False):
'''API Function: get_all_dependencies(snapshot_code, mode='explicit', type='ref', include_paths=False, include_paths_dict=False, include_files=False, repo_mode='client_repo', show_retired=False):
Retrieve the latest dependent snapshots of the given snapshot
@param:
snapshot_code - the unique code of the snapshot
@keyparam:
mode - explicit (get version as defined in snapshot)
- latest
- current
type - one of ref or input_ref
include_paths - flag to specify whether to include a __paths__ property
containing all of the paths in the dependent snapshots
include_paths_dict - flag to specify whether to include a
__paths_dict__ property containing a dict of all paths in the
dependent snapshots
include_files - includes all of the file objects referenced in the
snapshots
repo_mode - client_repo, web, lib, relative
show_retired - defaults to False so that it doesn't show retired dependencies
@return:
list - snapshots
'''
return my.server.get_all_dependencies(my.ticket, snapshot_code, mode,
type, include_paths,
include_paths_dict, include_files,
repo_mode, show_retired)
#
# Task methods
#
def create_task(my, search_key, process="publish", subcontext=None,
description=None, bid_start_date=None, bid_end_date=None,
bid_duration=None, assigned=None):
'''API Function: create_task(search_key, process="publish", subcontext=None, description=None, bid_start_date=None, bid_end_date=None, bid_duration=None, assigned=None)
Create a task for a particular sobject
@param:
search_key - the key identifying a type of sobject as registered in
the search_type table.
@keyparam:
process - process that this task belongs to
subcontext - the subcontext of the process (context = process/subcontext)
description - detailed description of the task
bid_start_date - the expected start date for this task
bid_end_date - the expected end date for this task
bid_duration - the expected duration for this task
assigned - the user assigned to this task
@return:
dictionary - task created
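@example:
A minimal sketch creating a 'model' task for a hypothetical asset and
assigning it to a hypothetical user:
[code]
search_key = 'prod/asset?project=sample3d&code=chr001'
task = server.create_task(search_key, process='model', description='Model the character', assigned='joe')
[/code]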
'''
return my.server.create_task(my.ticket, search_key, process, subcontext,
description, bid_start_date, bid_end_date,
bid_duration, assigned)
def add_initial_tasks(my, search_key, pipeline_code=None, processes=[],
skip_duplicate=True, offset=0):
'''API Function: add_initial_tasks(search_key, pipeline_code=None, processes=[], skip_duplicate=True, offset=0)
Add initial tasks to an sobject
@param:
search_key - the key identifying a type of sobject as registered in
the search_type table.
@keyparam:
pipeline_code - override the sobject's pipeline and use this one instead
processes - create tasks for the given list of processes
skip_duplicate - boolean to skip duplicated task
offset - a number to offset the start date from today's date
@return:
list - tasks created
'''
return my.server.add_initial_tasks(my.ticket, search_key, pipeline_code,
processes, skip_duplicate, offset)
def get_input_tasks(my, search_key):
'''API Function: get_input_tasks(search_key)
Get the input tasks of a task based on the pipeline
associated with the sobject parent of the task
@param:
search_key - the key identifying an sobject as registered in
the search_type table.
@return:
list of input tasks
'''
return my.server.get_input_tasks(my.ticket, search_key)
def get_output_tasks(my, search_key):
'''API Function: get_output_tasks(search_key)
Get the output tasks of a task based on the pipeline
associated with the sobject parent of the task
@param:
search_key - the key identifying an sobject as registered in
the search_type table.
@return:
list of output tasks
'''
return my.server.get_output_tasks(my.ticket, search_key)
#
# Note methods
#
def create_note(my, search_key, note, process="publish", subcontext=None,
user=None):
'''API Function: create_note(search_key, note, process="publish", subcontext=None, user=None)
Add a note for a particular sobject
@params:
search_key - the key identifying a type of sobject as registered in
the search_type table.
note - detailed description of the task
process - process that this task belongs to
subcontext - the subcontext of the process (context = process/subcontext)
user - the user the note is attached to
@return
note that was created
'''
return my.server.create_note(my.ticket, search_key, process, subcontext,
note, user)
#
# Pipeline methods
#
def get_pipeline_xml(my, search_key):
'''API Function: get_pipeline_xml(search_key)
DEPRECATED: use get_pipeline_xml_info()
Retrieve the pipeline of a specific sobject. The pipeline
returned is an xml document and an optional dictionary of information.
@param:
search_key - a unique identifier key representing an sobject
@return:
dictionary - xml and the optional hierarchy info
'''
return my.server.get_pipeline_xml(my.ticket, search_key)
def get_pipeline_processes(my, search_key, recurse=False):
'''API Function: get_pipeline_processes(search_key, recurse=False)
DEPRECATED: use get_pipeline_processes_info()
Retrieve the pipeline processes information of a specific sobject.
@param:
search_key - a unique identifier key representing an sobject
@keyparams:
recurse - boolean to control whether to display sub pipeline processes
@return:
list - process names of the pipeline
'''
return my.server.get_pipeline_processes(my.ticket, search_key, recurse)
def get_pipeline_xml_info(my, search_key, include_hierarchy=False):
'''API Function: get_pipeline_xml_info(search_key, include_hierarchy=False)
Retrieve the pipeline of a specific sobject. The pipeline
returned is an xml document and an optional dictionary of information.
@param:
search_key - a unique identifier key representing an sobject
@keyparam:
include_hierarchy - include a list of dictionary with key info on each process of the pipeline
@return:
dictionary - xml and the optional hierarchy info
'''
return my.server.get_pipeline_xml_info(my.ticket, search_key,
include_hierarchy)
def get_pipeline_processes_info(my, search_key, recurse=False,
related_process=None):
'''API Function: get_pipeline_processes_info(search_key, recurse=False, related_process=None)
Retrieve the pipeline processes information of a specific sobject. It provides information from the perspective of a particular process if related_process is specified.
@param:
search_key - a unique identifier key representing an sobject
@keyparams:
recurse - boolean to control whether to display sub pipeline processes
related_process - given a process, it shows the input and output processes and contexts
@return:
dictionary - process names of the pipeline or a dictionary if related_process is specified
'''
return my.server.get_pipeline_processes_info(my.ticket, search_key,
recurse, related_process)
def execute_pipeline(my, pipeline_xml, package):
'''API Function: execute_pipeline(pipeline_xml, package)
Spawn an execution of a pipeline as delivered from
'get_pipeline_xml()'. The pipeline is an xml document that describes
a set of processes and their handlers
@param:
pipeline_xml - an xml document describing a standard Tactic pipeline.
package - a dictionary of data delivered to the handlers
@return:
instance - a reference to the interpreter
'''
# execute the pipeline
from interpreter import PipelineInterpreter
interpreter = PipelineInterpreter(pipeline_xml)
interpreter.set_server(my)
interpreter.set_package(package)
interpreter.execute()
return interpreter
def commit_session(my, session_xml, pid):
'''Takes a session xml and commits it. Also handles transfer to old
style xml data. Generally, this is executed through the application
package: tactic_client_lib/application/common/introspect.py. However,
this can be done manually if the proper session xml is provided.
@params
ticket - authentication ticket
session_xml - an xml document representing the session. This document
format is described below
@return
session_content object
The session_xml takes the form:
<session>
<ref search_key="prod/shot?project=bar&code=joe" context="model" version="3" revision="2" tactic_node="tactic_joe"/>
</session>
'''
return my.server.commit_session(my.ticket, session_xml, pid)
#
# Directory methods
#
def get_paths(my, search_key, context="publish", version=-1, file_type='main', level_key=None, single=False, versionless=False):
'''API Function: get_paths( search_key, context="publish", version=-1, file_type='main', level_key=None, single=False, versionless=False)
Get paths from an sobject
@params:
search_key - a unique identifier key representing an sobject
@keyparams:
context - context of the snapshot
version - version of the snapshot
file_type - file type defined for the file node in the snapshot
level_key - the unique identifier of the level that this
was checked into
single - If set to True, the first of each path set is returned
versionless - boolean to return the versionless snapshot, which takes a version of -1 (latest) or 0 (current)
@return
A dictionary of lists representing various paths. The paths returned
are as follows:
- client_lib_paths: all the paths to the repository relative to the client
- lib_paths: all the paths to the repository relative to the server
- sandbox_paths: all of the paths mapped to the sandbox
- web: all of the paths relative to the http server
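@example:
A minimal sketch retrieving the client repository paths for the latest
'model' checkin (the search key below is hypothetical):
[code]
search_key = 'prod/asset?project=sample3d&code=chr001'
paths = server.get_paths(search_key, context='model')
client_lib_paths = paths.get('client_lib_paths')
[/code]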
'''
return my.server.get_paths(my.ticket, search_key, context, version,
file_type, level_key, single, versionless)
def get_base_dirs(my):
'''API Function: get_base_dirs()
Get all of the base directories defined on the server
@return:
dictionary of all the important configured base directories
with their keys
'''
return my.server.get_base_dirs(my.ticket)
def get_plugin_dir(my, plugin):
'''API Function: get_plugin_dir(plugin)
Return the web path for the specified plugin
@params:
plugin - plugin name
@return:
string - the web path for the specified plugin
'''
return my.server.get_plugin_dir(my.ticket, plugin)
def get_handoff_dir(my):
'''API Function: get_handoff_dir()
Return a temporary path that files can be copied to
@return:
string - the directory to copy a file to handoff to TACTIC
without having to go through http protocol
'''
if my.handoff_dir:
return my.handoff_dir
handoff_dir = my.server.get_handoff_dir(my.ticket)
if not os.path.exists(handoff_dir):
os.makedirs(handoff_dir)
my.handoff_dir = handoff_dir
return handoff_dir
def clear_upload_dir(my):
'''API Function: clear_upload_dir()
Clear the upload directory to ensure clean checkins
@param:
None
@keyparam:
None
@return:
None
'''
return my.server.clear_upload_dir(my.ticket)
def get_client_dir(my, snapshot_code, file_type='main', mode='client_repo'):
'''API Function: get_client_dir(snapshot_code, file_type='main', mode='client_repo')
Get a dir segment from a snapshot
@param:
snapshot_code - the unique code of the snapshot
@keyparam:
file_type - each file in a snapshot is identified by a file type.
This parameter specifies which type. Defaults to 'main'
mode - Forces the type of folder path returned to use the value from the
appropriate tactic_<SERVER_OS>-conf.xml configuration file.
Values include 'lib', 'web', 'local_repo', 'sandbox', 'client_repo', 'relative'
lib = the NFS asset directory from the server point of view
web = the http asset directory from the client point of view
local_repo = the local sync of the TACTIC repository
sandbox = the local sandbox (work area) designated by TACTIC
client_repo (default) = the asset directory from the client point of view
If there is no value for win32_client_repo_dir or linux_client_repo_dir
in the config, then the value for asset_base_dir will be used instead.
relative = the relative directory without any base
@return:
string - directory segment for a snapshot and file type
@example:
If the tactic_<SERVER_OS>-conf.xml configuration file contains the following:
[code]
<win32_client_repo_dir>T:/assets</win32_client_repo_dir>
[/code]
and if the call to the method is as follows:
[code]
snapshot = server.create_snapshot(search_key, context)
code = snapshot.get('code')
server.get_path_from_snapshot(snapshot.get('code'))
[/code]
Then, on a Windows client, get_client_dir() will return:
[code]
T:/assets/sample3d/asset/chr/chr003/scenes
[/code]
'''
return my.server.get_client_dir(my.ticket, snapshot_code,
file_type, mode)
def get_path_from_snapshot(my, snapshot_code, file_type='main',
mode='client_repo'):
'''API Function: get_path_from_snapshot(snapshot_code, file_type='main', mode='client_repo')
Get a full path from a snapshot
@param:
snapshot_code - the unique code / search_key of the snapshot
@keyparam:
file_type - each file in a snapshot is identified by a file type.
This parameter specifies which type. Defaults to 'main'
mode - Forces the type of folder path returned to use the value from the
appropriate tactic_<SERVER_OS>-conf.xml configuration file.
Values include 'lib', 'web', 'local_repo', 'sandbox', 'client_repo', 'relative'
lib = the NFS asset directory from the server point of view
web = the http asset directory from the client point of view
local_repo = the local sync of the TACTIC repository
sandbox = the local sandbox (work area) designated by TACTIC
client_repo (default) = the asset directory from the client point of view
If there is no value for win32_client_repo_dir or linux_client_repo_dir
in the config, then the value for asset_base_dir will be used instead.
relative = the relative directory without any base
@return:
string - the directory to copy a file to handoff to Tactic without having to
go through http protocol
@example:
If the tactic_<SERVER_OS>-conf.xml configuration file contains the following:
[code]
<win32_client_repo_dir>T:/assets</win32_client_repo_dir>
[/code]
and if the call to the method is as follows:
[code]
snapshot = server.create_snapshot(search_key, context)
code = snapshot.get('code')
server.get_path_from_snapshot(snapshot.get('code'))
# in a trigger
snapshot_key = my.get_input_value("search_key")
server.get_path_from_snapshot(snapshot_key)
[/code]
Then, on a Windows client, get_path_from_snapshot() will return:
[code]
T:/assets/sample3d/asset/chr/chr003/scenes/chr003_rig_v003.txt
[/code]
'''
return my.server.get_path_from_snapshot(my.ticket, snapshot_code, file_type, mode)
def get_expanded_paths_from_snapshot(my, snapshot_code, file_type='main'):
'''API Function: get_expanded_paths_from_snapshot(snapshot_code, file_type='main')
Return the expanded path of a snapshot (used for
ranges of files)
@param:
snapshot_code - the unique code of the snapshot
@keyparam:
file_type - each file in a snapshot is identified by a file type.
This parameter specifies which type. Defaults to 'main'
@return:
string - path
'''
return my.server.get_expanded_paths_from_snapshot(my.ticket,
snapshot_code, file_type)
def get_all_paths_from_snapshot(my, snapshot_code, mode='client_repo',
expand_paths=False, filename_mode='',file_types=[]):
'''API Function: get_all_paths_from_snapshot(snapshot_code, mode='client_repo', expand_paths=False, filename_mode='')
Get all paths from snapshot
@param:
snapshot_code - the unique code of the snapshot
@keyparam:
mode - forces the type of folder path returned to use the value from the
appropriate tactic_<SERVER_OS>-conf.xml configuration file.
Values include 'lib', 'web', 'local_repo', 'sandbox', 'client_repo', 'relative'
lib = the NFS asset directory from the server point of view
web = the http asset directory from the client point of view
local_repo = the local sync of the TACTIC repository
sandbox = the local sandbox (work area) designated by TACTIC
client_repo (default) = the asset directory from the client point of view
If there is no value for win32_client_repo_dir or linux_client_repo_dir
in the config, then the value for asset_base_dir will be used instead.
relative = the relative directory without any base
expand_paths - expand the paths of a sequence check-in or for a directory check-in, it will list the contents of the directory as well
filename_mode - source or '', where source reveals the source_path of the check-in
file_types - list: only return files in snapshot with these types
@return:
list - paths
'''
return my.server.get_all_paths_from_snapshot(my.ticket, snapshot_code,
mode, expand_paths,
filename_mode, file_types)
def get_preallocated_path(my, snapshot_code, file_type='main', file_name='', mkdir=True, protocol='client_repo', ext='', checkin_type='strict'):
'''API Function: get_preallocated_path(snapshot_code, file_type='main', file_name='', mkdir=True, protocol='client_repo', ext='', checkin_type='strict')
Get the preallocated path for this snapshot. It assumes that
this checkin actually exists in the repository and will create virtual
entities to simulate a checkin. This method can be used to determine
where a checkin will go. However, the snapshot must exist
using create_snapshot() or some other method. For a pure virtual naming
simulator, use get_virtual_snapshot_path().
@param:
snapshot_code - the code of a preallocated snapshot. This can be
create by get_snapshot()
@keyparam:
file_type - the type of file that will be checked in. Some naming
conventions make use of this information to separate directories
for different file types
file_name - the desired file name of the preallocation. This information
may be ignored by the naming convention or it may use this as a
base for the final file name
mkdir - an option which determines whether the directory of the
preallocation should be created
protocol - It's either client_repo, sandbox, or None. It determines whether the
path is from a client or server perspective
ext - force the extension of the file name returned
checkin_type - strict, auto, or '' can be used. A naming entry in the naming conventions, if found, will be used to determine the checkin type
@return:
string - the path where add_file() expects the file to be checked into
@example:
it saves time if you get the path and copy it to the final destination first.
[code]
snapshot = my.server.create_snapshot(search_key, context)
snapshot_code = snapshot.get('code')
file_name = 'input_file_name.txt'
orig_path = 'C:/input_file_name.txt'
path = my.server.get_preallocated_path(snapshot_code, file_type, file_name)
# the path where it is supposed to go is generated
new_dir = os.path.dirname(path)
if not os.path.exists(new_dir):
os.makedirs(new_dir)
shutil.copy(orig_path, path)
my.server.add_file(snapshot_code, path, file_type, mode='preallocate')
[/code]
'''
return my.server.get_preallocated_path(my.ticket, snapshot_code, file_type, file_name, mkdir, protocol, ext, checkin_type)
def get_virtual_snapshot_path(my, search_key, context="publish", snapshot_type="file", level_key=None, file_type='main', file_name='', mkdirs=False, protocol='client_repo', ext='', checkin_type='strict'):
'''API Function: get_virtual_snapshot_path(search_key, context, snapshot_type="file", level_key=None, file_type='main', file_name='', mkdirs=False, protocol='client_repo', ext='', checkin_type='strict')
Create a virtual snapshot and returns a path that this snapshot
would generate through the naming conventions. This is most useful
for testing naming conventions.
@param:
snapshot creation:
-----------------
search_key - a unique identifier key representing an sobject
context - the context of the checkin
@keyparam:
snapshot_type - [optional] describes what kind of a snapshot this is.
More information about a snapshot type can be found in the
prod/snapshot_type sobject
description - [optional] optional description for this checkin
level_key - the unique identifier of the level that this
is to be checked into
@keyparam:
path creation:
--------------
file_type - the type of file that will be checked in. Some naming
conventions make use of this information to separate directories
for different file types
file_name - the desired file name of the preallocation. This information
may be ignored by the naming convention or it may use this as a
base for the final file name
mkdirs - an option which determines whether the directory of the
preallocation should be created
protocol - It's either client_repo, sandbox, or None. It determines whether the
path is from a client or server perspective
ext - force the extension of the file name returned
checkin_type - strict, auto, '' can be used to preset the checkin_type
@return:
string - path as determined by the naming conventions
'''
return my.server.get_virtual_snapshot_path(my.ticket, search_key, context, snapshot_type, level_key, file_type, file_name, mkdirs, protocol, ext, checkin_type)
# NOTE: this is very specific to the Maya tools and can be considered
# deprecated
def get_md5_info(my, md5_list, new_paths, parent_code, texture_cls,
file_group_dict, project_code, mode):
'''API Function: get_md5_info(md5_list, new_paths, parent_code, texture_cls, file_group_dict, project_code, mode)
Get md5 info for a given list of texture paths, mainly returning if this md5 is a match or not
@param:
md5_list - md5_list
new_paths - list of file_paths
parent_code - parent code
texture_cls - Texture or ShotTexture
file_group_dict - file group dictionary storing all the file groups
project_code - project_code
mode - texture matching mode (md5, file_name)
@return:
dictionary - a dictionary of path and a subdictionary of is_match, repo_file_code, repo_path, repo_file_range
'''
return my.server.get_md5_info(my.ticket, md5_list, new_paths,
parent_code, texture_cls, file_group_dict,
project_code, mode )
#
# UI methods
#
def get_widget(my, class_name, args={}, values={}):
'''API Function: get_widget(class_name, args={}, values={})
Get a defined widget
@params:
class_name - the fully qualified class name of the widget
@keyparams:
args - keyword arguments required to create a specific widget
values - form values that are passed in from the interface
@return:
string - html form of the widget
@example:
class_name = 'tactic.ui.panel.TableLayoutWdg'
args = {
'view': 'task_list',
'search_type': 'sthpw/task',
}
filter = [{"prefix":"main_body","main_body_enabled":"on","main_body_column":"project_code","main_body_relation":"is","main_body_value":"{$PROJECT}"}, {"prefix":"main_body","main_body_enabled":"on","main_body_column":"search_type","main_body_relation":"is not","main_body_value":"sthpw/project"}]
from simplejson import dumps
values = {'json': dumps(filter)}
widget_html = server.get_widget(class_name, args, values)
'''
return my.server.get_widget(my.ticket, class_name, args, values)
def class_exists(my, class_path):
'''determines if a class exists on the server
@params
class_path - fully qualified python class path
@return
boolean: true if class exists and can be seen
'''
return my.server.class_exists(my.ticket, class_path)
def execute_python_script(my, script_path, kwargs={}):
'''API Function: execute_python_script(script_path, kwargs)
Execute a python script defined in Script Editor
@param:
script_path - script path in Script Editor, e.g. test/eval_sobj
@keyparam:
kwargs - keyword arguments for this script
@return:
dictionary - returned data structure
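@example:
A minimal sketch running the script path shown above with a keyword
argument (the argument name is hypothetical):
[code]
result = server.execute_python_script('test/eval_sobj', kwargs={'search_key': search_key})
[/code]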
'''
return my.server.execute_python_script(my.ticket, script_path, kwargs)
def execute_cmd(my, class_name, args={}, values={}):
'''API Function: execute_cmd(class_name, args={}, values={})
Execute a command
@param:
class_name - the fully qualified class name of the widget
@keyparam:
args - keyword arguments required to create a specific widget
values - form values that are passed in from the interface
@return:
string - description of command
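@example:
A minimal sketch executing a server-side command class (the class name
and arguments below are hypothetical):
[code]
class_name = 'my_plugin.MyCustomCmd'
args = {'search_key': search_key, 'note': 'automated update'}
server.execute_cmd(class_name, args=args)
[/code]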
'''
return my.server.execute_cmd(my.ticket, class_name, args, values)
def execute_transaction(my, transaction_xml, file_mode=None):
'''Run a tactic transaction as defined by the instructions in the
given transaction xml. The format of the xml is identical to
the format of how transactions are stored internally
@params
ticket - authentication ticket
transaction_xml - transaction instructions
@return
None
@usage
transaction_xml = """<?xml version='1.0' encoding='UTF-8'?>
<transaction>
<sobject search_type="project/asset?project=gbs"
search_code="shot01" action="update">
<column name="description" from="" to="Big Money Shot"/>
</sobject>
</transaction>
"""
server.execute_transaction(transaction_xml)
'''
return my.server.execute_transaction(my.ticket, transaction_xml, file_mode)
#
# Widget Config methods
#
def set_config_definition(my, search_type, element_name, config_xml="",
login=None):
'''API Function: set_config_definition(search_type, element_name, config_xml="", login=None)
Set the widget configuration definition for an element
@param:
search_type - search type that this config relates to
element_name - name of the element
@keyparam:
config_xml - The configuration xml to be set
login - A user's login name, if specifically choosing one
@return:
True on success, exception message on failure
'''
return my.server.set_config_definition(my.ticket, search_type,
element_name, config_xml, login)
def get_config_definition(my, search_type, view, element_name,
personal=False):
'''API Function: get_config_definition(search_type, view, element_name, personal=False)
Get the widget configuration definition for an element
@param:
search_type - search type that this config relates to
view - view to look for the element
element_name - name of the element
@keyparam:
personal - True if it is a personal definition
@return:
string - xml of the configuration
'''
return my.server.get_config_definition(my.ticket, search_type, view, element_name, personal)
def update_config(my, search_type, view, element_names):
'''API Function: update_config(search_type, view, element_names)
Update the widget configuration like ordering for a view
@param:
search_type - search type that this config relates to
view - view to look for the element
element_names - element names in a list
@return:
string - updated config xml snippet
'''
return my.server.update_config(my.ticket, search_type, view,
element_names)
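# Usage sketch (hypothetical element ordering for the 'task_list' view used in
# the get_widget example above; assumes a connected ``server`` stub):
#
#   new_xml = server.update_config('sthpw/task', 'task_list',
#                                  ['code', 'description', 'status'])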
def add_config_element(my, search_type, view, name, class_name=None,
display_options={}, action_class_name=None,
action_options={}, element_attrs={},login=None,
unique=True, auto_unique_name=False,
auto_unique_view=False):
'''API Function: add_config_element(search_type, view, name, class_name=None, display_options={}, action_class_name=None, action_options={}, element_attrs={},login=None, unique=True, auto_unique_name=False, auto_unique_view=False)
This method adds an element into a config. It is used by various
UI components to add a new widget element to a particular view.
@param:
search_type - the search type that this config belongs to
view - the specific view of the search type
name - the name of the element
@keyparam:
class_name - the fully qualified class of the display
action_class_name - the fully qualified class of the action
display_options - keyword options in a dictionary to construct the specific display
action_options - keyword options in a dictionary to construct the specific action
element_attrs - element attributes in a dictionary
login - login name if it is for a specific user
unique - add a unique element if True; update the element if False.
auto_unique_name - auto generate a unique element and display view name
auto_unique_view - auto generate a unique display view name
@return:
boolean - True
@example:
This will add a new element to the "characters" view for a 3D asset
[code]
search_type = 'prod/asset'
view = 'characters'
class_name = 'tactic.ui.common.SimpleElementWdg'
server.add_config_element(search_type, view, class_name)
[/code]
This will add a new element named "user" to the "definition" view. It contains detailed display and action nodes
[code]
data_dict = {} # some data here
search_type = 'prod/asset'
server.add_config_element(search_type, 'definition', 'user', class_name = data_dict['class_name'], display_options=data_dict['display_options'], element_attrs=data_dict['element_attrs'], unique=True, action_class_name=data_dict['action_class_name'], action_options=data_dict['action_options'])
[/code]
'''
return my.server.add_config_element(my.ticket, search_type, view, name,
class_name, display_options,
action_class_name, action_options,
element_attrs, login, unique,
auto_unique_name, auto_unique_view)
def _setup(my, protocol="xmlrpc"):
# if this is being run in the tactic server, have the option
# to use TACTIC code directly
if protocol == 'local':
# import some server libraries
from pyasm.biz import Project
from pyasm.common import Environment
from pyasm.prod.service import ApiXMLRPC
from pyasm.web import WebContainer
# set the ticket
security = Environment.get_security()
if not security:
raise TacticApiException("Security not initialized. This may be because you are running the client API in 'local' mode without run initializing Batch")
# set the project
project_code = Project.get_project_code()
my.set_project(project_code)
# set the ticket
ticket = security.get_ticket_key()
my.set_ticket(ticket)
# set the protocol to local for the api class
# note ticket has to be set first
my.server = ApiXMLRPC()
my.server.set_protocol(protocol)
# if server name has already been set, use that one
if my.server_name:
my.has_server = True
return
web = WebContainer.get_web()
if web:
my.server_name = web.get_http_host()
if my.server_name:
my.has_server = True
else:
# else guess that it is localhost
my.server_name = "localhost"
my.has_server = True
return
elif protocol =='xmlrpc':
# get the env variables
env_user = os.environ.get('TACTIC_USER')
env_password = os.environ.get('TACTIC_PASSWORD')
env_server = os.environ.get('TACTIC_SERVER')
env_ticket = os.environ.get('TACTIC_TICKET')
env_project = os.environ.get('TACTIC_PROJECT')
# if all three are set, then it is not necessary to look at
# the resource file
if not (env_server and (env_user or env_ticket) and env_project):
# need to scope by user
# this is dealt with in get_resource_path already
#if not my.login:
# my.login = getpass.getuser()
file_path = my.get_resource_path()
if not os.path.exists(file_path):
msg = "[%s] does not exist yet. There is not enough information to authenticate the server. Either set the appropriate environment variables or run get_ticket.py" %file_path
raise TacticApiException(msg)
# try to open the resource file
file = open(file_path)
lines = file.readlines()
file.close()
rc_server = None
rc_ticket = None
rc_project = None
rc_login = None
for line in lines:
line = line.strip()
if line.startswith("#"):
continue
name, value = line.split("=")
if name == "server":
#my.set_server(value)
rc_server = value
elif name == "ticket":
#my.set_ticket(value)
rc_ticket = value
elif name == "project":
#my.set_project(value)
rc_project = value
elif name == "login":
#my.set_project(value)
rc_login = value
# these have to be issued in the correct order
if rc_server:
my.set_server(rc_server)
if rc_project:
my.set_project(rc_project)
if rc_ticket:
# get the project
project = my.get_project()
# set a default if one does not exist
if not project:
my.set_project("admin")
my.set_ticket(rc_ticket)
if rc_login:
my.login = rc_login
# override with any environment variables that are set
if env_server:
my.set_server(env_server)
if env_project:
my.set_project(env_project)
if env_user:
# try to get a ticket with a set password
ticket = my.get_ticket(env_user, env_password)
my.set_ticket(ticket)
if env_ticket:
my.set_ticket(env_ticket)
#my.server.set_protocol(protocol)
#
# Doc methods
#
def get_doc_link(my, alias):
return my.server.get_doc_link(my.ticket, alias)
#
# API/Server Version functions
#
def get_release_version(my):
# DEPRECATED
print "WARNING: Deprecated function 'get_release_version'"
return my.server.get_release_version(my.ticket)
def get_server_version(my):
'''API Function: get_server_version()
@return:
string - server version'''
return my.server.get_server_version(my.ticket)
def get_server_api_version(my):
'''API Function: get_server_api_version()
@return:
string - server API version'''
version = my.server.get_server_api_version(my.ticket)
return version
def get_client_version(my):
'''API Function: get_client_version()
@return:
string - Version of TACTIC that this client came from'''
# may use pkg_resources in 2.6
if '.zip' in __file__:
import zipfile
parts = __file__.split('.zip')
zip_name = '%s.zip'%parts[0]
if zipfile.is_zipfile(zip_name):
z = zipfile.ZipFile(zip_name)
version = z.read('pyasm/application/common/interpreter/tactic_client_lib/VERSION')
version = version.strip()
z.close()
else:
dir = os.path.dirname(__file__)
f = open('%s/VERSION' % dir, 'r')
version = f.readline().strip()
f.close()
return version
def get_client_api_version(my):
'''API Function: get_client_api_version()
@return:
string - client api version'''
# may use pkg_resources in 2.6
if '.zip' in __file__:
import zipfile
parts = __file__.split('.zip')
zip_name = '%s.zip'%parts[0]
if zipfile.is_zipfile(zip_name):
z = zipfile.ZipFile(zip_name)
version = z.read('pyasm/application/common/interpreter/tactic_client_lib/VERSION_API')
version = version.strip()
z.close()
else:
dir = os.path.dirname(__file__)
f = open('%s/VERSION_API' % dir, 'r')
version = f.readline().strip()
f.close()
return version
server = None
def get(cls, protocol='', setup=True):
'''get function which treats the server stub as a singleton'''
try:
from pyasm.common import Container
server = Container.get("TacticServerStub")
if not server:
from pyasm.common import Environment
app_server = Environment.get_app_server()
if protocol:
server = TacticServerStub(protocol=protocol, setup=setup)
elif app_server in ["batch", "xmlrpc"]:
server = TacticServerStub(protocol='local', setup=setup)
else:
server = TacticServerStub(setup=setup)
Container.put("TacticServerStub", server)
return server
except ImportError, e:
if not cls.server:
cls.server = TacticServerStub(protocol='xmlrpc', setup=setup)
return cls.server
get = classmethod(get)
def set(cls, server=None):
try:
from pyasm.common import Container
Container.put("TacticServerStub", server)
except ImportError:
cls.server = server
set = classmethod(set)
#
# Objects
#
class Command(object):
def get_description(my):
return "No description"
def execute_cmd(my):
my.server = TacticServerStub()
my.server.start(my.get_description())
try:
my.execute()
except Exception, e:
my.server.abort()
raise
else:
my.server.finish()
def execute(my):
my.execute()
class Search(object):
pass
class SObject(dict):
def get_search_key(my):
return my['__search_key__']
| epl-1.0 |
JaronArmiger/tenshi | angel-player/src/chrome/content/angelic/test.py | 11 | 4815 | #!/usr/bin/env python
# Licensed to Pioneers in Engineering under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Pioneers in Engineering licenses
# this file to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
from __future__ import print_function
import os
import subprocess as subp
import shutil
import argparse
import re
import traceback
import sys
def is_subpath_of_set(path, pathset):
part_path = ''
for part in path.split(os.path.sep):
part_path = os.path.join(part_path, part)
if part_path in pathset:
return True
return False
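# Illustration (not part of the original script): with pathset = {'tests/tmp'},
# is_subpath_of_set('tests/tmp/foo', pathset) walks 'tests' -> 'tests/tmp' and
# returns True, while is_subpath_of_set('tests/other', pathset) returns False.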
def setup():
os.chdir('tests')
try:
os.mkdir('tmp')
except OSError as e:
# One reason we would get here is that the tmp directory was not
# cleaned up. Delete the directory and try again.
shutil.rmtree('tmp')
os.mkdir('tmp')
os.chdir('tmp')
def cleanup():
os.chdir(os.pardir)
shutil.rmtree('tmp')
os.chdir(os.pardir)
def get_test(path):
return os.path.join(os.pardir, os.pardir, path)
def get_test_base(root, path):
return os.path.join(root, os.path.dirname(path))
EXT_TO_CMD = {'py': ['python'], 'js': ['node', '--harmony'], 'sh': ['bash']}
def get_tests():
'''
Get all files under the tests directory, skipping directories that have
test files with the same name.
'''
tests = set()
dirs_to_skip = set(['tests/tmp'])
for dirpath, dirnames, filenames in os.walk('tests', topdown=True):
if not is_subpath_of_set(dirpath, dirs_to_skip):
for filename in filenames:
if filename[0] == '.':
continue
fullname = os.path.join(dirpath, filename)
tests.add(fullname)
base, ext = os.path.splitext(fullname)
dirs_to_skip.add(base)
return tests
def run_test(name, root, failed_tests, stdout_logs):
_, ext = os.path.splitext(name)
cmd = EXT_TO_CMD[ext[1:]]
args = cmd + [get_test(name),
root,
get_test_base(root, name)]
p = subp.Popen(args,
stdout=subp.PIPE,
stderr=subp.STDOUT)
stdout_logs[name] = p.communicate()
ret = p.returncode
if ret == 0:
print('.', end='')
else:
failed_tests.append(name)
print('x', end='')
# Flush out the . or x we just printed, instead of waiting for a
# newline to flush them.
sys.stdout.flush()
def main():
parser = argparse.ArgumentParser(description='Run angelic tests.')
parser.add_argument('--no-cleanup', action='store_true')
parser.add_argument('--verbose', action='store_true')
parser.add_argument('--matching', action='store', default='')
args = parser.parse_args()
pattern = None
if args.matching:
pattern = re.compile(args.matching)
tests = get_tests()
failed_tests = []
stdout_logs = dict()
root = os.getcwd()
tests_run = 0
setup()
try:
for test in sorted(tests):
if not pattern or pattern.search(test):
tests_run += 1
try:
run_test(test, root, failed_tests, stdout_logs)
except KeyboardInterrupt as e:
print('Encountered exception while running test {}:'
.format(test))
traceback.print_exc()
if args.verbose:
print(stdout_logs[test][0].decode())
except Exception as e:
print('Encountered exception while running tests:')
traceback.print_exc()
finally:
if not args.no_cleanup:
cleanup()
if not failed_tests:
print()
print('OK (Ran {0} tests)'.format(tests_run))
else:
print()
for failure in failed_tests:
print('FAILED:', failure)
print(' BEGIN TEST OUTPUT '.center(80, '*'))
print(stdout_logs[failure][0].decode(), end='')
print(' END TEST OUTPUT '.center(80, '*'))
print()
print('TEST FAILED ({0}/{1} tests failed)'
.format(len(failed_tests), tests_run))
if __name__ == '__main__':
main()
| apache-2.0 |
binhqnguyen/lena-local | src/visualizer/visualizer/higcontainer.py | 189 | 3560 | import gtk
import gobject
try:
from gazpacho.widgets.base.base import SimpleContainerAdaptor
except ImportError:
pass
#root_library = 'hig'
class HIGContainer(gtk.Bin):
__gtype_name__ = 'HIGContainer'
__gproperties__ = {
'title': (str, 'Group Title', 'the group title',
'', gobject.PARAM_READWRITE|gobject.PARAM_CONSTRUCT),
}
def __init__(self, title=None):
self.__title_text = None
gtk.widget_push_composite_child()
self.__title = gobject.new(gtk.Label, visible=True, xalign=0, yalign=0.5)
self.__indent = gobject.new(gtk.Label, visible=True, label=' ')
gtk.widget_pop_composite_child()
gtk.Bin.__init__(self)
self.__title.set_parent(self)
self.__indent.set_parent(self)
if title is not None:
self.props.title = title
def do_size_request(self, requisition):
title_req = gtk.gdk.Rectangle(0, 0, *self.__title.size_request())
indent_req = gtk.gdk.Rectangle(0, 0, *self.__indent.size_request())
if self.child is None:
child_req = gtk.gdk.Rectangle()
else:
child_req = gtk.gdk.Rectangle(0, 0, *self.child.size_request())
requisition.height = (title_req.height + 6 +
max(child_req.height, indent_req.height))
requisition.width = max(title_req.width, indent_req.width + child_req.width)
def do_size_allocate(self, allocation):
self.allocation = allocation
## title
title_req = gtk.gdk.Rectangle(0, 0, *self.__title.get_child_requisition())
title_alloc = gtk.gdk.Rectangle()
title_alloc.x = allocation.x
title_alloc.y = allocation.y
title_alloc.width = min(title_req.width, allocation.width)
title_alloc.height = min(title_req.height, allocation.height)
self.__title.size_allocate(title_alloc)
## child
if self.child is None:
return
indent_req = gtk.gdk.Rectangle(0, 0, *self.__indent.get_child_requisition())
child_req = gtk.gdk.Rectangle(0, 0, *self.child.get_child_requisition())
child_alloc = gtk.gdk.Rectangle()
child_alloc.x = allocation.x + indent_req.width
child_alloc.y = allocation.y + title_alloc.height + 6
child_alloc.width = allocation.width - indent_req.width
child_alloc.height = allocation.height - 6 - title_alloc.height
self.child.size_allocate(child_alloc)
def do_forall(self, internal, callback, data):
if internal:
callback(self.__title, data)
callback(self.__indent, data)
if self.child is not None:
callback(self.child, data)
def do_set_property(self, pspec, value):
if pspec.name == 'title':
self.__title.set_markup('<span weight="bold">%s</span>' %
gobject.markup_escape_text(value))
self.__title_text = value
else:
raise AttributeError, 'unknown property %s' % pspec.name
def do_get_property(self, pspec):
if pspec.name == 'title':
return self.__title_text
else:
raise AttributeError, 'unknown property %s' % pspec.name
if __name__ == '__main__':
frame = gtk.Frame()
group = gobject.new(HIGContainer, title="Hello")
frame.add(group)
check = gtk.CheckButton("foobar")
group.add(check)
w = gtk.Window()
w.add(frame)
w.show_all()
w.connect("destroy", lambda w: gtk.main_quit())
gtk.main()
| gpl-2.0 |
dednal/chromium.src | net/data/ssl/scripts/crlsetutil.py | 75 | 5856 | #!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This utility takes a JSON input that describes a CRLSet and produces a
CRLSet from it.
The input is taken on stdin and is a dict with the following keys:
- BlockedBySPKI: An array of strings, where each string is a filename
containing a PEM certificate, from which an SPKI will be extracted.
- BlockedByHash: A dict of string to an array of ints, where the string is
a filename containing a PEM format certificate, and the ints are the
serial numbers. The listed serial numbers will be blocked when issued by
the given certificate.
For example:
{
"BlockedBySPKI": ["/tmp/blocked-certificate"],
"BlockedByHash": {
"/tmp/intermediate-certificate": [1, 2, 3]
}
}
"""
import hashlib
import json
import optparse
import struct
import sys
def _pem_cert_to_binary(pem_filename):
"""Decodes the first PEM-encoded certificate in a given file into binary
Args:
pem_filename: A filename that contains a PEM-encoded certificate. It may
contain additional data (keys, textual representation) which will be
ignored
Returns:
A byte array containing the decoded certificate data
"""
base64 = ""
started = False
with open(pem_filename, 'r') as pem_file:
for line in pem_file:
if not started:
if line.startswith('-----BEGIN CERTIFICATE'):
started = True
else:
if line.startswith('-----END CERTIFICATE'):
break
base64 += line[:-1].strip()
return base64.decode('base64')
def _parse_asn1_element(der_bytes):
"""Parses a DER-encoded tag/Length/Value into its component parts
Args:
der_bytes: A DER-encoded ASN.1 data type
Returns:
A tuple of the ASN.1 tag value, the length of the ASN.1 header that was
read, the full element bytes (header plus value), and then any data from
der_bytes that was not part of the Tag/Length/Value element.
"""
tag = ord(der_bytes[0])
length = ord(der_bytes[1])
header_length = 2
if length & 0x80:
num_length_bytes = length & 0x7f
length = 0
for i in xrange(2, 2 + num_length_bytes):
length <<= 8
length += ord(der_bytes[i])
header_length = 2 + num_length_bytes
contents = der_bytes[:header_length + length]
rest = der_bytes[header_length + length:]
return (tag, header_length, contents, rest)
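# Worked example (illustrative, not part of the original utility): for the DER
# encoding of a SEQUENCE holding two INTEGERs,
#
#   der = b'\x30\x06\x02\x01\x05\x02\x01\x07'
#   tag, header_length, contents, rest = _parse_asn1_element(der)
#   # tag == 0x30 (SEQUENCE), header_length == 2,
#   # contents == der (the full element, header plus value), rest == b''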
class ASN1Iterator(object):
"""Iterator that parses and iterates through a ASN.1 DER structure"""
def __init__(self, contents):
self._tag = 0
self._header_length = 0
self._rest = None
self._contents = contents
self.step_into()
def step_into(self):
"""Begins processing the inner contents of the next ASN.1 element"""
(self._tag, self._header_length, self._contents, self._rest) = (
_parse_asn1_element(self._contents[self._header_length:]))
def step_over(self):
"""Skips/ignores the next ASN.1 element"""
(self._tag, self._header_length, self._contents, self._rest) = (
_parse_asn1_element(self._rest))
def tag(self):
"""Returns the ASN.1 tag of the current element"""
return self._tag
def contents(self):
"""Returns the raw data of the current element"""
return self._contents
def _der_cert_to_spki(der_bytes):
"""Returns the subjectPublicKeyInfo of a DER-encoded certificate
Args:
der_bytes: A DER-encoded certificate (RFC 5280)
Returns:
A byte array containing the subjectPublicKeyInfo
"""
iterator = ASN1Iterator(der_bytes)
iterator.step_into() # enter certificate structure
iterator.step_into() # enter TBSCertificate
iterator.step_over() # over version
iterator.step_over() # over serial
iterator.step_over() # over signature algorithm
iterator.step_over() # over issuer name
iterator.step_over() # over validity
iterator.step_over() # over subject name
return iterator.contents()
def pem_cert_file_to_spki_hash(pem_filename):
"""Gets the SHA-256 hash of the subjectPublicKeyInfo of a cert in a file
Args:
pem_filename: A file containing a PEM-encoded certificate.
Returns:
The SHA-256 hash of the first certificate in the file, as a byte sequence
"""
return hashlib.sha256(
_der_cert_to_spki(_pem_cert_to_binary(pem_filename))).digest()
def main():
parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option('-o', '--output',
help='Specifies the output file. The default is stdout.')
options, _ = parser.parse_args()
outfile = sys.stdout
if options.output and options.output != '-':
outfile = open(options.output, 'wb')
config = json.load(sys.stdin)
blocked_spkis = [
pem_cert_file_to_spki_hash(pem_file).encode('base64').strip()
for pem_file in config.get('BlockedBySPKI', [])]
parents = {
pem_cert_file_to_spki_hash(pem_file): serials
for pem_file, serials in config.get('BlockedByHash', {}).iteritems()
}
header_json = {
'Version': 0,
'ContentType': 'CRLSet',
'Sequence': 0,
'DeltaFrom': 0,
'NumParents': len(parents),
'BlockedSPKIs': blocked_spkis,
}
header = json.dumps(header_json)
outfile.write(struct.pack('<H', len(header)))
outfile.write(header)
for spki, serials in sorted(parents.iteritems()):
outfile.write(spki)
outfile.write(struct.pack('<I', len(serials)))
for serial in serials:
raw_serial = []
if not serial:
raw_serial = ['\x00']
else:
while serial:
raw_serial.insert(0, chr(serial & 0xff))
serial >>= 8
outfile.write(struct.pack('<B', len(raw_serial)))
outfile.write(''.join(raw_serial))
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
apagac/cfme_tests | cfme/scripting/appliance.py | 2 | 6850 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Script to encrypt config files.
Usage:
scripts/encrypt_conf.py confname1 confname2 ... confnameN
scripts/encrypt_conf.py credentials
"""
from functools import partial
import click
from cached_property import cached_property
from cfme.utils.conf import cfme_data
from cfme.utils.conf import env
def get_appliance(appliance_ip):
"""Checks an appliance is not None and if so, loads the appropriate things"""
from cfme.utils.appliance import IPAppliance, load_appliances_from_config, stack
if not appliance_ip:
app = load_appliances_from_config(env)[0]
else:
app = IPAppliance(hostname=appliance_ip)
stack.push(app) # ensure safety from bad code, phase out later
return app
@click.group(help='Helper commands for appliances')
def main():
"""Main appliance group"""
pass
@main.command('upgrade', help='Upgrades an appliance to latest Z-stream')
@click.argument('appliance-ip', default=None, required=False)
@click.option('--cfme-only', is_flag=True, help='Upgrade cfme packages only')
@click.option('--update-to', default='5.9.z', help='Supported versions 5.9.z,'
' 5.10.z (.z means latest and default is 5.9.z)') # leaving 59z support for upgrades
def upgrade_appliance(appliance_ip, cfme_only, update_to):
"""Upgrades an appliance"""
supported_version_repo_map = {
'5.9.z': 'update_url_59', '5.10.z': 'update_url_510',
}
assert update_to in supported_version_repo_map, "{} is not a supported version".format(
update_to
)
update_url = supported_version_repo_map[update_to]
if appliance_ip:
print('Connecting to {}'.format(appliance_ip))
else:
print('Fetching appliance from env.local.yaml')
app = get_appliance(appliance_ip)
assert app.version > '5.7', "{} is not supported, must be 5.7 or higher".format(app.version)
print('Extending appliance partitions')
app.db.extend_partition()
urls = cfme_data['basic_info'][update_url]
print('Adding update repo to appliance')
app.ssh_client.run_command(
"curl {} -o /etc/yum.repos.d/update.repo".format(urls)
)
cfme = '-y'
if cfme_only:
cfme = 'cfme -y'
print('Stopping EVM')
app.evmserverd.stop()
print('Running yum update')
result = app.ssh_client.run_command('yum update {}'.format(cfme), timeout=3600)
assert result.success, "update failed {}".format(result.output)
print('Running database migration')
app.db.migrate()
app.db.automate_reset()
print('Restarting postgres service')
app.db_service.restart()
print('Starting EVM')
app.evmserverd.start()
print('Waiting for webui')
app.wait_for_web_ui()
print('Appliance upgrade completed')
@main.command('migrate', help='Restores/migrates database from file or downloaded')
@click.argument('appliance-ip', default=None, required=True)
@click.option('--db-url', default=None, help='Download a backup file')
@click.option('--keys-url', default=None, help='URL for matching db v2key and GUID if available')
@click.option('--backup', default=None, help='Location of local backup file, including file name')
def backup_migrate(appliance_ip, db_url, keys_url, backup):
"""Restores and migrates database backup on an appliance"""
print('Connecting to {}'.format(appliance_ip))
app = get_appliance(appliance_ip)
if db_url:
print('Downloading database backup')
result = app.ssh_client.run_command(
'curl -o "/evm_db.backup" "{}"'.format(db_url), timeout=30)
assert result.success, "Failed to download database: {}".format(result.output)
backup = '/evm_db.backup'
else:
backup = backup
print('Stopping EVM')
app.evmserverd.stop()
print('Dropping/Creating database')
app.db.drop()
app.db.create()
print('Restoring database from backup')
result = app.ssh_client.run_command(
'pg_restore -v --dbname=vmdb_production {}'.format(backup), timeout=600)
assert result.success, "Failed to restore new database: {}".format(result.output)
print('Running database migration')
app.db.migrate()
app.db.automate_reset()
if keys_url:
result = app.ssh_client.run_command(
'curl -o "/var/www/miq/vmdb/certs/v2_key" "{}v2_key"'.format(keys_url), timeout=15)
assert result.success, "Failed to download v2_key: {}".format(result.output)
result = app.ssh_client.run_command(
'curl -o "/var/www/miq/vmdb/GUID" "{}GUID"'.format(keys_url), timeout=15)
assert result.success, "Failed to download GUID: {}".format(result.output)
else:
app.db.fix_auth_key()
app.db.fix_auth_dbyml()
print('Restarting postgres service')
app.db_service.restart()
print('Starting EVM')
app.evmserverd.start()
print('Waiting for webui')
app.wait_for_web_ui()
print('Appliance database migration completed')
@main.command('reboot', help='Reboots the appliance')
@click.argument('appliance_ip', default=None, required=False)
@click.option('--wait-for-ui', is_flag=True, default=True)
def reboot_appliance(appliance_ip, wait_for_ui):
"""Reboots an appliance"""
app = get_appliance(appliance_ip)
app.reboot(wait_for_ui)
@main.command('setup-webmks', help='Setups VMware WebMKS on an appliance by downloading'
'and extracting SDK to required location')
@click.argument('appliance_ip', default=None, required=False)
def config_webmks(appliance_ip):
appliance = get_appliance(appliance_ip)
server_settings = appliance.server.settings
server_settings.update_vmware_console({'console_type': 'VMware WebMKS'})
roles = server_settings.server_roles_db
if 'websocket' in roles and not roles['websocket']:
server_settings.enable_server_roles('websocket')
# Useful Properties
methods_to_install = [
'is_db_enabled',
'managed_provider_names',
'miqqe_version',
'os_version',
'swap',
'miqqe_patch_applied']
def fn(method, *args, **kwargs):
"""Helper to access the right properties"""
from cfme.utils.appliance import IPAppliance
appliance_ip = kwargs.get('appliance_ip', None)
app = get_appliance(appliance_ip)
descriptor = getattr(IPAppliance, method)
if isinstance(descriptor, (cached_property, property)):
out = getattr(app, method)
else:
out = getattr(app, method)(*args, **kwargs)
if out is not None:
print(out)
for method in methods_to_install:
command = click.Command(
method.replace('_', '-'),
short_help='Returns the {} property'.format(method),
callback=partial(fn, method), params=[
click.Argument(['appliance_ip'], default=None, required=False)])
main.add_command(command)
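# Invocation sketch (hypothetical appliance IP; the commands come from the explicit
# @main.command definitions above plus the ones generated from methods_to_install):
#
#   python appliance.py reboot 10.1.2.3
#   python appliance.py is-db-enabled 10.1.2.3
#   python appliance.py upgrade 10.1.2.3 --update-to 5.10.z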
if __name__ == "__main__":
main()
| gpl-2.0 |
glemaitre/UnbalancedDataset | imblearn/ensemble/tests/test_classifier.py | 2 | 17981 | """Test the module ensemble classifiers."""
# Authors: Guillaume Lemaitre <[email protected]>
# Christos Aridas
# License: MIT
import numpy as np
from sklearn.datasets import load_iris, make_hastie_10_2
from sklearn.model_selection import (GridSearchCV, ParameterGrid,
train_test_split)
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.feature_selection import SelectKBest
from sklearn.utils.testing import (assert_array_equal,
assert_array_almost_equal,
assert_raises,
assert_warns,
assert_warns_message)
from imblearn.datasets import make_imbalance
from imblearn.ensemble import BalancedBaggingClassifier
from imblearn.pipeline import make_pipeline
from imblearn.under_sampling import RandomUnderSampler
iris = load_iris()
def test_balanced_bagging_classifier():
# Check classification for various parameter settings.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BalancedBaggingClassifier(
base_estimator=base_estimator,
random_state=0,
**params).fit(X_train, y_train).predict(X_test)
def test_bootstrap_samples():
# Test that bootstrapping samples generate non-perfect base estimators.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
base_estimator = DecisionTreeClassifier().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
# disable the resampling by passing an empty dictionary.
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
max_samples=1.0,
bootstrap=False,
n_estimators=10,
ratio={},
random_state=0).fit(X_train, y_train)
assert (ensemble.score(X_train, y_train) ==
base_estimator.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
max_samples=1.0,
bootstrap=True,
random_state=0).fit(X_train, y_train)
assert (ensemble.score(X_train, y_train) <
base_estimator.score(X_train, y_train))
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
max_features=1.0,
bootstrap_features=False,
random_state=0).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert np.unique(features).shape[0] == X.shape[1]
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
max_features=1.0,
bootstrap_features=True,
random_state=0).fit(X_train, y_train)
unique_features = [np.unique(features).shape[0]
for features in ensemble.estimators_features_]
assert np.median(unique_features) < X.shape[1]
def test_probability():
# Predict probabilities.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BalancedBaggingClassifier(
base_estimator=DecisionTreeClassifier(),
random_state=0).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BalancedBaggingClassifier(
base_estimator=LogisticRegression(),
random_state=0,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BalancedBaggingClassifier(
base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=0).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert abs(test_score - clf.oob_score_) < 0.1
# Test with few estimators
assert_warns(UserWarning,
BalancedBaggingClassifier(
base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=0).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
clf1 = BalancedBaggingClassifier(
base_estimator=KNeighborsClassifier(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=0).fit(X_train, y_train)
clf2 = make_pipeline(RandomUnderSampler(
random_state=clf1.estimators_[0].steps[0][1].random_state),
KNeighborsClassifier()).fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50})
base = DecisionTreeClassifier()
# Test n_estimators
assert_raises(ValueError,
BalancedBaggingClassifier(base, n_estimators=1.5).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, n_estimators=-1).fit, X, y)
# Test max_samples
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_samples="foobar").fit,
X, y)
# Test max_features
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BalancedBaggingClassifier(base, max_features="foobar").fit,
X, y)
# Test support of decision_function
assert not (hasattr(BalancedBaggingClassifier(base).fit(X, y),
'decision_function'))
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target.copy()
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BalancedBaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0)
ensemble = BalancedBaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_.steps[-1][1],
DecisionTreeClassifier)
ensemble = BalancedBaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_.steps[-1][1],
DecisionTreeClassifier)
ensemble = BalancedBaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert isinstance(ensemble.base_estimator_.steps[-1][1],
Perceptron)
def test_bagging_with_pipeline():
X, y = make_imbalance(iris.data, iris.target, ratio={0: 20, 1: 25, 2: 50},
random_state=0)
estimator = BalancedBaggingClassifier(
make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(X, y).predict(X)
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BalancedBaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert len(clf_ws) == n_estimators
clf_no_ws = BalancedBaggingClassifier(n_estimators=10,
random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert (set([pipe.steps[-1][1].random_state for pipe in clf_ws]) ==
set([pipe.steps[-1][1].random_state for pipe in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
# Test if warm start'ed second fit with smaller n_estimators raises error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True,
random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators"
" does not", clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BalancedBaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BalancedBaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BalancedBaggingClassifier(n_estimators=5, warm_start=True,
oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BalancedBaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
def test_oob_score_consistency():
# Make sure OOB scores are identical when random_state, estimator, and
# training data are fixed and fitting is done twice
X, y = make_hastie_10_2(n_samples=200, random_state=1)
bagging = BalancedBaggingClassifier(KNeighborsClassifier(),
max_samples=0.5,
max_features=0.5, oob_score=True,
random_state=1)
assert bagging.fit(X, y).oob_score_ == bagging.fit(X, y).oob_score_
# FIXME: uncomment when #9723 is merged in scikit-learn
# def test_estimators_samples():
# # Check that format of estimators_samples_ is correct and that results
# # generated at fit time can be identically reproduced at a later time
# # using data saved in object attributes.
# X, y = make_hastie_10_2(n_samples=200, random_state=1)
# # remap the y outside of the BalancedBaggingclassifier
# # _, y = np.unique(y, return_inverse=True)
# bagging = BalancedBaggingClassifier(LogisticRegression(),
# max_samples=0.5,
# max_features=0.5, random_state=1,
# bootstrap=False)
# bagging.fit(X, y)
# # Get relevant attributes
# estimators_samples = bagging.estimators_samples_
# estimators_features = bagging.estimators_features_
# estimators = bagging.estimators_
# # Test for correct formatting
# assert len(estimators_samples) == len(estimators)
# assert len(estimators_samples[0]) == len(X)
# assert estimators_samples[0].dtype.kind == 'b'
# # Re-fit single estimator to test for consistent sampling
# estimator_index = 0
# estimator_samples = estimators_samples[estimator_index]
# estimator_features = estimators_features[estimator_index]
# estimator = estimators[estimator_index]
# X_train = (X[estimator_samples])[:, estimator_features]
# y_train = y[estimator_samples]
# orig_coefs = estimator.steps[-1][1].coef_
# estimator.fit(X_train, y_train)
# new_coefs = estimator.steps[-1][1].coef_
# assert_array_almost_equal(orig_coefs, new_coefs)
def test_max_samples_consistency():
# Make sure validated max_samples and original max_samples are identical
# when valid integer max_samples supplied by user
max_samples = 100
X, y = make_hastie_10_2(n_samples=2*max_samples, random_state=1)
bagging = BalancedBaggingClassifier(KNeighborsClassifier(),
max_samples=max_samples,
max_features=0.5, random_state=1)
bagging.fit(X, y)
assert bagging._max_samples == max_samples
| mit |
iddqd1/django-cms | cms/models/titlemodels.py | 50 | 5426 | # -*- coding: utf-8 -*-
from datetime import timedelta
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from cms.constants import PUBLISHER_STATE_DIRTY
from cms.models.managers import TitleManager
from cms.models.pagemodel import Page
from cms.utils.helpers import reversion_register
@python_2_unicode_compatible
class Title(models.Model):
language = models.CharField(_("language"), max_length=15, db_index=True)
title = models.CharField(_("title"), max_length=255)
page_title = models.CharField(_("title"), max_length=255, blank=True, null=True,
help_text=_("overwrite the title (html title tag)"))
menu_title = models.CharField(_("title"), max_length=255, blank=True, null=True,
help_text=_("overwrite the title in the menu"))
meta_description = models.TextField(_("description"), max_length=155, blank=True, null=True,
help_text=_("The text displayed in search engines."))
slug = models.SlugField(_("slug"), max_length=255, db_index=True, unique=False)
path = models.CharField(_("Path"), max_length=255, db_index=True)
has_url_overwrite = models.BooleanField(_("has url overwrite"), default=False, db_index=True, editable=False)
redirect = models.CharField(_("redirect"), max_length=2048, blank=True, null=True)
page = models.ForeignKey(Page, verbose_name=_("page"), related_name="title_set")
creation_date = models.DateTimeField(_("creation date"), editable=False, default=timezone.now)
# Publisher fields
published = models.BooleanField(_("is published"), blank=True, default=False)
publisher_is_draft = models.BooleanField(default=True, editable=False, db_index=True)
# This is misnamed - the one-to-one relation is populated on both ends
publisher_public = models.OneToOneField('self', related_name='publisher_draft', null=True, editable=False)
publisher_state = models.SmallIntegerField(default=0, editable=False, db_index=True)
objects = TitleManager()
class Meta:
unique_together = (('language', 'page'),)
app_label = 'cms'
def __str__(self):
return u"%s (%s, %s)" % (self.title, self.slug, self.language)
def update_path(self):
# Build path from parent page's path and slug
slug = u'%s' % self.slug
if not self.has_url_overwrite:
self.path = u'%s' % slug
if self.page.parent_id:
parent_page = self.page.parent_id
parent_title = Title.objects.get_title(parent_page, language=self.language, language_fallback=True)
if parent_title:
self.path = u'%s/%s' % (parent_title.path, slug)
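# Illustrative sketch of the path building above (hypothetical slugs): if the
# parent page's title for this language has path 'services' and this title's
# slug is 'web-design', update_path() sets self.path to 'services/web-design';
# a root-level title simply gets self.path = 'web-design'.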
@property
def overwrite_url(self):
"""Return overwritten url, or None
"""
if self.has_url_overwrite:
return self.path
return None
def is_dirty(self):
return self.publisher_state == PUBLISHER_STATE_DIRTY
def save_base(self, *args, **kwargs):
"""Overridden save_base. If an instance is draft, and was changed, mark
it as dirty.
Dirty flag is used for changed nodes identification when publish method
takes place. After current changes are published, state is set back to
PUBLISHER_STATE_DEFAULT (in publish method).
"""
keep_state = getattr(self, '_publisher_keep_state', None)
# Published pages should always have a publication date
# if the page is published we set the publish date if not set yet.
if self.page.publication_date is None and self.published:
self.page.publication_date = timezone.now() - timedelta(seconds=5)
if self.publisher_is_draft and not keep_state and self.is_new_dirty():
self.publisher_state = PUBLISHER_STATE_DIRTY
if keep_state:
delattr(self, '_publisher_keep_state')
ret = super(Title, self).save_base(*args, **kwargs)
return ret
def is_new_dirty(self):
if self.pk:
fields = [
'title', 'page_title', 'menu_title', 'meta_description', 'slug', 'has_url_overwrite', 'redirect'
]
try:
old_title = Title.objects.get(pk=self.pk)
except Title.DoesNotExist:
return True
for field in fields:
old_val = getattr(old_title, field)
new_val = getattr(self, field)
if not old_val == new_val:
return True
return False
return True
class EmptyTitle(object):
def __init__(self, language):
self.language = language
"""Empty title object, can be returned from Page.get_title_obj() if required
title object doesn't exists.
"""
title = ""
slug = ""
path = ""
meta_description = ""
redirect = ""
has_url_overwrite = False
application_urls = ""
menu_title = ""
page_title = ""
published = False
@property
def overwrite_url(self):
return None
def _reversion():
exclude_fields = ['publisher_is_draft', 'publisher_public', 'publisher_state']
reversion_register(
Title,
exclude_fields=exclude_fields
)
_reversion() | bsd-3-clause |
savfod/ptset | ptset.py | 1 | 14571 | #!/usr/bin/python3
DESCRIPTION = '''
Program for ptset drawing.
Some examples of usage:
python ptset.py -h
python ptset.py --curve 9 --draw_points A,0.199,True B,0.412,True
python ptset.py --curve 8 --draw_points A,-0.36,True X1,-0.26 B,0,True X2,0.26 C,0.36,True
python ptset.py --curve 7 --tangent_curve
python ptset.py --curve 4 --points_count 50
'''
from vec import Vec
from line import Line
from curve import Curve
from drawer import Drawer
import argparse
import math
import random
import sys
RADIUS = 2
RAD_ADD = 3
COUNT = None # getting from params
START_X = -1
FINISH_X = 1
FILL1 = "green"
FILL2 = "blue"
FILL3 = "red"
drawer = None # useful for debugging
DRAW_POINTS = [] # getting from params
POINTS_MULTIPLIER = 1000
def init_tk_drawer():
global drawer
drawer = Drawer()
return drawer.tk, drawer
def vertical_line(x):
return Line(Vec(x, 0), Vec(0, 1))
class Interface:
def __init__(self, drawer, function, tangent_function=None):
self.drawer = drawer
self.function = function
self.tangent_function = tangent_function
self.prev_points = {"tang_point":None, "tang_pair_point":None}
self.current_index = 0
self.to_remove = []
points = []
for i in range(COUNT):
x = START_X + (FINISH_X-START_X)*i/float(COUNT-1)
y = function(x)
points.append(Vec(x,y))
self.curve = Curve(points, closed=False)
self.drawer.draw_curve(self.curve, fill=FILL1)
semiplane = []
HOR_COUNT = COUNT * 2
def i_to_x(i):
return START_X + (FINISH_X-START_X)*i/float(HOR_COUNT-1)
def j_to_x(j):
return START_X + (FINISH_X-START_X)*j/float(VER_COUNT-1)
VER_COUNT = COUNT * 2
for j in range(VER_COUNT):
semiplane.append([])
px = j_to_x(j)
px_line = vertical_line(px)
for i in range(HOR_COUNT):
tx = i_to_x(i)
ty = function(tx)
T = Vec(tx, ty)
dx = 0.001
der = (function(tx + dx) - function(tx - dx))/(2 * dx)
tangent = Line(T, Vec(1, der))
t_value = px_line.intersect(tangent).y
t_value_2 = ty + der * (px - tx)
# print(t_value, t_value_2)
semiplane_value = self.function(px) < t_value
semiplane[-1].append(semiplane_value)
# self.drawer.draw_circle(Vec(px,tx), r=1, img_index=1)
#draw edges
def draw_edge(i1, i2, j1, j2):
def to_vec(i, j):
return Vec(i_to_x(i), j_to_x(j))
self.drawer.draw_line(to_vec(i1, j1), to_vec(i2, j2), fill=FILL2, img_index=2, width=2)
self.drawer.draw_line(to_vec(i1, j1), to_vec(i2, j2), fill=FILL2, img_index=3, width=2)
for i in range(VER_COUNT - 1):
for j in range(HOR_COUNT - 1):
four_value = (
semiplane[i][j],
semiplane[i+1][j],
semiplane[i][j+1],
semiplane[i+1][j+1]
)
#horizontal_edges
if four_value == (True, True, False, False):
draw_edge(i, i+1, j, j)
elif four_value == (False, False, True, True):
draw_edge(i, i+1, j+1, j+1)
#vertical_edges
elif four_value == (True, False, True, False):
draw_edge(i, i, j, j+1)
elif four_value == (False, True, False, True):
draw_edge(i+1, i+1, j, j+1)
#diagonal_edge
else:
d1 = four_value[0], four_value[3]
d2 = four_value[1], four_value[2]
if d1 == (True, True) and False in d2:
draw_edge(i, i+1, j, j+1)
elif d2 == (True, True) and False in d1:
draw_edge(i, i+1, j+1, j)
DIAG_COUNT = COUNT // 5
def diag_x(i):
return START_X + (FINISH_X-START_X)*i/float(DIAG_COUNT-1)
for i in range(DIAG_COUNT):
x1 = diag_x(i)
x2 = diag_x(i+1)
self.drawer.draw_line(Vec(x1, x1), Vec(x2, x2), width=3, fill=FILL1, img_index=2)
self.drawer.draw_line(Vec(x1, x1), Vec(x2, x2), width=3, fill=FILL1, img_index=3)
self.points = []
self.is_drawing = True
self.selected_point = None
self.tangent_points = self.calc_tangent_points(function)
# for x in [-0.65, -0.45, -0.25, -0.05]:
# x -= 0.02
# self.drawer.draw_line(Vec(x, 0.2), Vec(x, 0.4), img_index=2, fill=FILL3, width=1)
def calc_tangent_points(self, function):
DIFF = 0.1
max_skip = (FINISH_X - START_X)*3 / float(POINTS_MULTIPLIER*COUNT)
average_skip = (FINISH_X - START_X) / float(POINTS_MULTIPLIER*COUNT)
min_skip = (FINISH_X - START_X) / float(5*POINTS_MULTIPLIER*COUNT)
points = [START_X]
while points[-1] < FINISH_X:
x = points[-1]
der2 = (function(x - DIFF) + function(x + DIFF) - 2*function(x)) / DIFF**2
skip = 100 * average_skip / (abs(der2)**2 + 0.00001)
# if min_skip < skip < max_skip:
# print ("Success") #DEBUG. TO CALC GOOD COEFFICIENT
# else:
# if min_skip < skip:
# print("Small")
# else:
# print("Big")
skip = min(skip, max_skip)
skip = max(min_skip, skip)
points.append(x + skip)
return points
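# Sketch of the adaptive step above (illustrative numbers): skip is
# 100 * average_skip / (der2**2 + eps), then clamped to [min_skip, max_skip],
# so flat stretches of the curve (der2 ~ 0) advance by max_skip while sharp
# bends (e.g. der2 ~ 10 gives skip ~ average_skip) are sampled more densely.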
def draw_point(self, x, label, with_tangent=False):
l = vertical_line(x)
points = self.curve.find_intersections(l)
p = points[0]
tangents = self.curve.find_intersections_tangents(l)
t = tangents[0]
self.drawer.draw_circle(p, fill=FILL2, label=label)
if with_tangent:
self.drawer.draw_line(t.start - t.vec*(10/abs(t.vec)), t.start + t.vec*(10/abs(t.vec)), dash=[8,4])
self.drawer.draw_line(Vec(p.x, p.x) - Vec(10,0), Vec(p.x, p.x) + Vec(10,0) , img_index=2, dash=[8,4])
self.drawer.draw_circle(Vec(p.x, p.x), fill=FILL2, img_index=2, label=label)
def draw_pic(self):
self.is_drawing = True
self.drawer.tk.after(10, self.draw_pic_iteration)
def parse_str(s):
try:
parts = s.strip().split(",")
if len(parts) == 2:
parts = parts + [""] # bool("") == False
return parts[0].strip(), float(parts[1]), bool(parts[2])
except:
raise ValueError('Unexpected point params. Expected string in format label,x_coordinate[,draw_tangent]. E.g. "A,0" or "B,-0.5,True"')
if DRAW_POINTS:
for s in DRAW_POINTS:
label, x, with_tangent = parse_str(s)
self.draw_point(x, label, with_tangent)
def image2(self, vec):
return Vec(vec.x + 2, vec.y)
def draw_pic_iteration(self):
self.drawer.remove_tmp()
if self.current_index + 1 < len(self.tangent_points):
self.current_index += 1
else:
self.current_index = 0
for k in self.prev_points.keys():
self.prev_points[k] = None
i = self.current_index
skip = self.tangent_points[i+1] - self.tangent_points[i] if i+1 < len(self.tangent_points) else self.tangent_points[i] - self.tangent_points[i-1]
x = self.tangent_points[i] + random.random()*skip
# print("iteration, x=", x)
l = vertical_line(x)
self.drawer.draw_line(Vec(START_X,x), Vec(FINISH_X,x), tmp_object=True, img_index=2)
tangents = self.curve.find_intersections_tangents(l)
points = self.curve.find_intersections(l)
if len(tangents) == 1:
self.drawer.draw_line(tangents[0].start - tangents[0].vec*(10/abs(tangents[0].vec)), tangents[0].start + tangents[0].vec*(10/abs(tangents[0].vec)), tmp_object=True)
self.drawer.draw_circle(points[0], r=RAD_ADD+RADIUS, fill=FILL1, tmp_object=True)
points = self.curve.find_intersections(tangents[0])
for (ind,p) in enumerate(points):
self.drawer.draw_circle(p, r=RAD_ADD+ind+RADIUS, fill=FILL2, tmp_object=True)
# self.drawer.draw_circle(Vec(p.x, x), img_index=2)
# self.drawer.draw_circle(Vec(p.x, x), img_index=3)
self.drawer.draw_circle(Vec(p.x, x), r=(RAD_ADD+ind)+RADIUS, fill=FILL2, img_index=2, tmp_object=True)
self.drawer.draw_circle(Vec(p.x, x), r=(RAD_ADD+ind)+RADIUS, fill=FILL2, img_index=3, tmp_object=True)
if self.tangent_function:
l2 = vertical_line(self.tangent_function(x))
tang_p = tangents[0].intersect(l2)
self.drawer.draw_circle(Vec(tang_p.x, x), r=2*RADIUS, fill=FILL3, img_index=2, tmp_object=True)
self.drawer.draw_circle(Vec(tang_p.x, x), r=2*RADIUS, fill=FILL3, img_index=3, tmp_object=True)
#self.drawer.draw_circle(Vec(tang_p.x, x), r=RADIUS//2, fill=FILL3, img_index=2)
if self.prev_points["tang_pair_point"]:
self.drawer.draw_line(self.prev_points["tang_pair_point"], Vec(tang_p.x, x), fill=FILL3, img_index=2)
self.prev_points["tang_pair_point"] = Vec(tang_p.x, x)
self.drawer.draw_circle(tang_p, r=2*RADIUS, fill=FILL3, tmp_object=True)
# self.drawer.draw_circle(tang_p, r=RADIUS//2, fill=FILL3)
if self.prev_points["tang_point"]:
self.drawer.draw_line(self.prev_points["tang_point"], tang_p, fill=FILL3)
self.prev_points["tang_point"] = Vec(tang_p.x, tang_p.y)
else:
#print(x, len(tangents), len(points))
pass
self.drawer.draw_circle(Vec(x,x), r=RAD_ADD+RADIUS, fill=FILL1, img_index=2, tmp_object=True)
self.drawer.draw_circle(Vec(x,x), r=RAD_ADD+RADIUS, fill=FILL1, img_index=3, tmp_object=True)
# self.drawer.draw_circle(Vec(x,x), fill=FILL1, img_index=2)
# self.drawer.draw_circle(Vec(x,x), fill=FILL1, img_index=3)
if self.is_drawing:
self.drawer.tk.after(10, self.draw_pic_iteration)
# for v in self.drawer.canvases.values():
# v.update_idletasks()
def start_drawing(self, event):
self.is_drawing = True
self.draw_pic()
# self.add_point(event.x, event.y)
def stop_drawing(self, event):
self.is_drawing = False
def remove_tmp(self):
self.is_drawing = False
self.drawer.remove_tmp()
def zoom(self, event):
print("Hello windows/macos! Not-tested scaling.")
self.drawer.scale(1.1 ** event.delta, event.x, event.y)
def zoom_in(self, event):
self.drawer.scale(1.1, event.x, event.y)
def zoom_out(self, event):
self.drawer.scale(1.1 ** (-1), event.x, event.y)
def parse_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=DESCRIPTION,
)
# parser.add_argument('--rounds', type=int, default=2, help='how many rounds each pair plays')
parser.add_argument('--curve', type=int, default="9", help='curve function index')
parser.add_argument('--points_multiplier', type=int, default="2", help='density multiplier for tangent-point sampling (higher is denser and slower)')
parser.add_argument('--tangent_curve', action="store_true", help='draw tangent curve')
parser.add_argument('--points_count', type=int, default=180, help='how many points to use (more points is slower)')
# parser.add_argument('--cyclic', action="store_true", default="False", help='draw tangent curve')
# parser.add_argument('--draw_points', action="store_true", default=False, help='draw selected points')
parser.add_argument('--draw_points', nargs="+", help='draw selected points. format: label,x_coordinate[,draw_tangent]')
parsed_args = parser.parse_args()
global POINTS_MULTIPLIER
POINTS_MULTIPLIER = parsed_args.points_multiplier
global DRAW_POINTS
DRAW_POINTS = parsed_args.draw_points
global COUNT
COUNT = parsed_args.points_count
return parsed_args
def func(x, ind=9):
# several types of prepared functions
x *= 2
if ind == 1:
return (x**6 - 5*x**4 + 6*x**2 - 1)/2
elif ind == 2:
return (x**6 - 5*x**4 + 6*x**2 - 1)/2/(1 + (2*x)**8)
elif ind == 3:
return (128*x**8 - 256*x**6 + 160*x**4 - 32*x**2 + 1)
elif ind == 4:
return (128*x**8 - 256*x**6 + 160*x**4 - 32*x**2 + 1)/(1 + 128*x**12)
elif ind == 5:
return (x**6 - 5*x**4 + 6*x**2 - 1)/2
elif ind == 6:
x = 1.3*x
return (15*x**5 - 29*x**3 + 7*x)/(3 + 30*x**10) + 0.01
elif ind == 7:
return (x**3 - x) / (10*x**4 + 1)
elif ind == 8:
return (x) / (10*x**6 + 1) + 0.01
elif ind == 9:
        # special curve whose point set contains isolated closed curves
x *= 10
x += 2
x1 = x + 8
x2 = x - 8
x3 = x2 + 3.5
res = 1/(0.01*x1**6 + 0.03*x1**2 + 0.8) \
- 1/(0.01*x2**6 - 0.01*(x3)**2 + 0.8) \
- 0.04
return res / 2
elif ind == 10:
x = 2*x
return (x)/(0.1*x**6 + 0.8) - x/(10*x**2 + 1) + 0.01
else:
raise ValueError("no function with such index")
def main():
args = parse_args()
tk, drawer = init_tk_drawer()
def function(x):
return func(x, args.curve)
tang_func = (lambda x: x+2/(100*x**2 + 4)) if args.tangent_curve else None
interface = Interface(drawer, function, tang_func)
# interface.is_drawing = args.cyclic
tk.bind("<Button-1>", interface.start_drawing)
# tk.bind("<ButtonRelease-1>", interface.stop_drawing)
# tk.bind("<Motion>", interface.draw)
tk.bind("<ButtonRelease-2>", interface.stop_drawing)
tk.bind("<ButtonRelease-3>", interface.stop_drawing)
tk.bind("<MouseWheel>", interface.zoom)
tk.bind("<Button-4>", interface.zoom_in)
tk.bind("<Button-5>", interface.zoom_out)
# tk.focus_set() #comment this line for image without (with pale) edge
tk.bind("<Escape>", lambda x: interface.remove_tmp())
tk.after(100, lambda: interface.start_drawing(None))
tk.mainloop()
if __name__ == "__main__":
main()
| mit |
nwchandler/ansible | lib/ansible/cli/__init__.py | 15 | 33623 | # (c) 2012-2014, Michael DeHaan <[email protected]>
# (c) 2016, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import getpass
import operator
import optparse
import os
import subprocess
import re
import sys
import time
import yaml
from abc import ABCMeta, abstractmethod
import ansible
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.inventory.manager import InventoryManager
from ansible.module_utils.six import with_metaclass, string_types
from ansible.module_utils._text import to_bytes, to_text
from ansible.parsing.dataloader import DataLoader
from ansible.release import __version__
from ansible.utils.path import unfrackpath
from ansible.utils.vars import load_extra_vars, load_options_vars
from ansible.vars.manager import VariableManager
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class SortedOptParser(optparse.OptionParser):
'''Optparser which sorts the options by opt before outputting --help'''
def format_help(self, formatter=None, epilog=None):
self.option_list.sort(key=operator.methodcaller('get_opt_string'))
return optparse.OptionParser.format_help(self, formatter=None)
# Note: Inherit from SortedOptParser so that we get our format_help method
class InvalidOptsParser(SortedOptParser):
'''Ignore invalid options.
Meant for the special case where we need to take care of help and version
but may not know the full range of options yet. (See it in use in set_action)
'''
def __init__(self, parser):
# Since this is special purposed to just handle help and version, we
# take a pre-existing option parser here and set our options from
# that. This allows us to give accurate help based on the given
# option parser.
SortedOptParser.__init__(self, usage=parser.usage,
option_list=parser.option_list,
option_class=parser.option_class,
conflict_handler=parser.conflict_handler,
description=parser.description,
formatter=parser.formatter,
add_help_option=False,
prog=parser.prog,
epilog=parser.epilog)
self.version = parser.version
def _process_long_opt(self, rargs, values):
try:
optparse.OptionParser._process_long_opt(self, rargs, values)
except optparse.BadOptionError:
pass
def _process_short_opts(self, rargs, values):
try:
optparse.OptionParser._process_short_opts(self, rargs, values)
except optparse.BadOptionError:
pass
class CLI(with_metaclass(ABCMeta, object)):
''' code behind bin/ansible* programs '''
VALID_ACTIONS = []
_ITALIC = re.compile(r"I\(([^)]+)\)")
_BOLD = re.compile(r"B\(([^)]+)\)")
_MODULE = re.compile(r"M\(([^)]+)\)")
_URL = re.compile(r"U\(([^)]+)\)")
_CONST = re.compile(r"C\(([^)]+)\)")
PAGER = 'less'
# -F (quit-if-one-screen) -R (allow raw ansi control chars)
# -S (chop long lines) -X (disable termcap init and de-init)
LESS_OPTS = 'FRSX'
def __init__(self, args, callback=None):
"""
Base init method for all command line programs
"""
self.args = args
self.options = None
self.parser = None
self.action = None
self.callback = callback
def set_action(self):
"""
Get the action the user wants to execute from the sys argv list.
"""
for i in range(0, len(self.args)):
arg = self.args[i]
if arg in self.VALID_ACTIONS:
self.action = arg
del self.args[i]
break
if not self.action:
# if we're asked for help or version, we don't need an action.
# have to use a special purpose Option Parser to figure that out as
# the standard OptionParser throws an error for unknown options and
# without knowing action, we only know of a subset of the options
# that could be legal for this command
tmp_parser = InvalidOptsParser(self.parser)
tmp_options, tmp_args = tmp_parser.parse_args(self.args)
            if not ((hasattr(tmp_options, 'help') and tmp_options.help) or
                    (hasattr(tmp_options, 'version') and tmp_options.version)):
raise AnsibleOptionsError("Missing required action")
def execute(self):
"""
Actually runs a child defined method using the execute_<action> pattern
"""
fn = getattr(self, "execute_%s" % self.action)
fn()
@abstractmethod
def run(self):
"""Run the ansible command
Subclasses must implement this method. It does the actual work of
running an Ansible command.
"""
display.vv(self.parser.get_version())
if C.CONFIG_FILE:
display.v(u"Using %s as config file" % to_text(C.CONFIG_FILE))
else:
display.v(u"No config file found; using defaults")
@staticmethod
def ask_vault_passwords():
''' prompt for vault password and/or password change '''
vault_pass = None
try:
vault_pass = getpass.getpass(prompt="Vault password: ")
except EOFError:
pass
# enforce no newline chars at the end of passwords
if vault_pass:
vault_pass = to_bytes(vault_pass, errors='surrogate_or_strict', nonstring='simplerepr').strip()
return vault_pass
@staticmethod
def ask_new_vault_passwords():
new_vault_pass = None
try:
new_vault_pass = getpass.getpass(prompt="New Vault password: ")
new_vault_pass2 = getpass.getpass(prompt="Confirm New Vault password: ")
if new_vault_pass != new_vault_pass2:
raise AnsibleError("Passwords do not match")
except EOFError:
pass
if new_vault_pass:
new_vault_pass = to_bytes(new_vault_pass, errors='surrogate_or_strict', nonstring='simplerepr').strip()
return new_vault_pass
def ask_passwords(self):
''' prompt for connection and become passwords if needed '''
op = self.options
sshpass = None
becomepass = None
become_prompt = ''
try:
if op.ask_pass:
sshpass = getpass.getpass(prompt="SSH password: ")
become_prompt = "%s password[defaults to SSH password]: " % op.become_method.upper()
if sshpass:
sshpass = to_bytes(sshpass, errors='strict', nonstring='simplerepr')
else:
become_prompt = "%s password: " % op.become_method.upper()
if op.become_ask_pass:
becomepass = getpass.getpass(prompt=become_prompt)
if op.ask_pass and becomepass == '':
becomepass = sshpass
if becomepass:
becomepass = to_bytes(becomepass)
except EOFError:
pass
return (sshpass, becomepass)
def normalize_become_options(self):
''' this keeps backwards compatibility with sudo/su self.options '''
self.options.become_ask_pass = self.options.become_ask_pass or self.options.ask_sudo_pass or self.options.ask_su_pass or C.DEFAULT_BECOME_ASK_PASS
self.options.become_user = self.options.become_user or self.options.sudo_user or self.options.su_user or C.DEFAULT_BECOME_USER
def _dep(which):
display.deprecated('The %s command line option has been deprecated in favor of the "become" command line arguments' % which, '2.6')
if self.options.become:
pass
elif self.options.sudo:
self.options.become = True
self.options.become_method = 'sudo'
_dep('sudo')
elif self.options.su:
self.options.become = True
self.options.become_method = 'su'
_dep('su')
# other deprecations:
if self.options.ask_sudo_pass or self.options.sudo_user:
_dep('sudo')
if self.options.ask_su_pass or self.options.su_user:
_dep('su')
def validate_conflicts(self, vault_opts=False, runas_opts=False, fork_opts=False):
''' check for conflicting options '''
op = self.options
if vault_opts:
# Check for vault related conflicts
if (op.ask_vault_pass and op.vault_password_file):
self.parser.error("--ask-vault-pass and --vault-password-file are mutually exclusive")
if runas_opts:
# Check for privilege escalation conflicts
if ((op.su or op.su_user) and (op.sudo or op.sudo_user) or
(op.su or op.su_user) and (op.become or op.become_user) or
(op.sudo or op.sudo_user) and (op.become or op.become_user)):
self.parser.error("Sudo arguments ('--sudo', '--sudo-user', and '--ask-sudo-pass') and su arguments ('--su', '--su-user', and '--ask-su-pass') "
"and become arguments ('--become', '--become-user', and '--ask-become-pass') are exclusive of each other")
if fork_opts:
if op.forks < 1:
self.parser.error("The number of processes (--forks) must be >= 1")
@staticmethod
def unfrack_paths(option, opt, value, parser):
if isinstance(value, string_types):
setattr(parser.values, option.dest, [unfrackpath(x) for x in value.split(os.pathsep)])
elif isinstance(value, list):
setattr(parser.values, option.dest, [unfrackpath(x) for x in value])
else:
pass # FIXME: should we raise options error?
@staticmethod
def unfrack_path(option, opt, value, parser):
setattr(parser.values, option.dest, unfrackpath(value))
@staticmethod
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False,
async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False,
runas_prompt_opts=False, desc=None):
''' create an options parser for most ansible scripts '''
# base opts
parser = SortedOptParser(usage, version=CLI.version("%prog"), description=desc, epilog=epilog)
parser.add_option('-v', '--verbose', dest='verbosity', default=C.DEFAULT_VERBOSITY, action="count",
help="verbose mode (-vvv for more, -vvvv to enable connection debugging)")
if inventory_opts:
parser.add_option('-i', '--inventory', '--inventory-file', dest='inventory', action="append",
help="specify inventory host path (default=[%s]) or comma separated host list. "
"--inventory-file is deprecated" % C.DEFAULT_HOST_LIST)
parser.add_option('--list-hosts', dest='listhosts', action='store_true',
help='outputs a list of matching hosts; does not execute anything else')
parser.add_option('-l', '--limit', default=C.DEFAULT_SUBSET, dest='subset',
help='further limit selected hosts to an additional pattern')
if module_opts:
parser.add_option('-M', '--module-path', dest='module_path', default=None,
help="prepend path(s) to module library (default=%s)" % C.DEFAULT_MODULE_PATH,
action="callback", callback=CLI.unfrack_path, type='str')
if runtask_opts:
parser.add_option('-e', '--extra-vars', dest="extra_vars", action="append",
help="set additional variables as key=value or YAML/JSON, if filename prepend with @", default=[])
if fork_opts:
parser.add_option('-f', '--forks', dest='forks', default=C.DEFAULT_FORKS, type='int',
help="specify number of parallel processes to use (default=%s)" % C.DEFAULT_FORKS)
if vault_opts:
parser.add_option('--ask-vault-pass', default=C.DEFAULT_ASK_VAULT_PASS, dest='ask_vault_pass', action='store_true',
help='ask for vault password')
parser.add_option('--vault-password-file', default=C.DEFAULT_VAULT_PASSWORD_FILE, dest='vault_password_file',
help="vault password file", action="callback", callback=CLI.unfrack_path, type='string')
parser.add_option('--new-vault-password-file', dest='new_vault_password_file',
help="new vault password file for rekey", action="callback", callback=CLI.unfrack_path, type='string')
parser.add_option('--output', default=None, dest='output_file',
help='output file name for encrypt or decrypt; use - for stdout',
action="callback", callback=CLI.unfrack_path, type='string')
if subset_opts:
parser.add_option('-t', '--tags', dest='tags', default=[], action='append',
help="only run plays and tasks tagged with these values")
parser.add_option('--skip-tags', dest='skip_tags', default=[], action='append',
help="only run plays and tasks whose tags do not match these values")
if output_opts:
parser.add_option('-o', '--one-line', dest='one_line', action='store_true',
help='condense output')
parser.add_option('-t', '--tree', dest='tree', default=None,
help='log output to this directory')
if connect_opts:
connect_group = optparse.OptionGroup(parser, "Connection Options", "control as whom and how to connect to hosts")
connect_group.add_option('-k', '--ask-pass', default=C.DEFAULT_ASK_PASS, dest='ask_pass', action='store_true',
help='ask for connection password')
connect_group.add_option('--private-key', '--key-file', default=C.DEFAULT_PRIVATE_KEY_FILE, dest='private_key_file',
help='use this file to authenticate the connection', action="callback", callback=CLI.unfrack_path, type='string')
connect_group.add_option('-u', '--user', default=C.DEFAULT_REMOTE_USER, dest='remote_user',
help='connect as this user (default=%s)' % C.DEFAULT_REMOTE_USER)
connect_group.add_option('-c', '--connection', dest='connection', default=C.DEFAULT_TRANSPORT,
help="connection type to use (default=%s)" % C.DEFAULT_TRANSPORT)
connect_group.add_option('-T', '--timeout', default=C.DEFAULT_TIMEOUT, type='int', dest='timeout',
help="override the connection timeout in seconds (default=%s)" % C.DEFAULT_TIMEOUT)
connect_group.add_option('--ssh-common-args', default='', dest='ssh_common_args',
help="specify common arguments to pass to sftp/scp/ssh (e.g. ProxyCommand)")
connect_group.add_option('--sftp-extra-args', default='', dest='sftp_extra_args',
help="specify extra arguments to pass to sftp only (e.g. -f, -l)")
connect_group.add_option('--scp-extra-args', default='', dest='scp_extra_args',
help="specify extra arguments to pass to scp only (e.g. -l)")
connect_group.add_option('--ssh-extra-args', default='', dest='ssh_extra_args',
help="specify extra arguments to pass to ssh only (e.g. -R)")
parser.add_option_group(connect_group)
runas_group = None
rg = optparse.OptionGroup(parser, "Privilege Escalation Options", "control how and which user you become as on target hosts")
if runas_opts:
runas_group = rg
            # The privileged user defaults to root later on, so that we can detect here whether this option was explicitly given
runas_group.add_option("-s", "--sudo", default=C.DEFAULT_SUDO, action="store_true", dest='sudo',
help="run operations with sudo (nopasswd) (deprecated, use become)")
runas_group.add_option('-U', '--sudo-user', dest='sudo_user', default=None,
help='desired sudo user (default=root) (deprecated, use become)')
runas_group.add_option('-S', '--su', default=C.DEFAULT_SU, action='store_true',
help='run operations with su (deprecated, use become)')
runas_group.add_option('-R', '--su-user', default=None,
help='run operations with su as this user (default=%s) (deprecated, use become)' % C.DEFAULT_SU_USER)
# consolidated privilege escalation (become)
runas_group.add_option("-b", "--become", default=C.DEFAULT_BECOME, action="store_true", dest='become',
help="run operations with become (does not imply password prompting)")
runas_group.add_option('--become-method', dest='become_method', default=C.DEFAULT_BECOME_METHOD, type='choice', choices=C.BECOME_METHODS,
help="privilege escalation method to use (default=%s), valid choices: [ %s ]" %
(C.DEFAULT_BECOME_METHOD, ' | '.join(C.BECOME_METHODS)))
runas_group.add_option('--become-user', default=None, dest='become_user', type='string',
help='run operations as this user (default=%s)' % C.DEFAULT_BECOME_USER)
if runas_opts or runas_prompt_opts:
if not runas_group:
runas_group = rg
runas_group.add_option('--ask-sudo-pass', default=C.DEFAULT_ASK_SUDO_PASS, dest='ask_sudo_pass', action='store_true',
help='ask for sudo password (deprecated, use become)')
runas_group.add_option('--ask-su-pass', default=C.DEFAULT_ASK_SU_PASS, dest='ask_su_pass', action='store_true',
help='ask for su password (deprecated, use become)')
runas_group.add_option('-K', '--ask-become-pass', default=False, dest='become_ask_pass', action='store_true',
help='ask for privilege escalation password')
if runas_group:
parser.add_option_group(runas_group)
if async_opts:
parser.add_option('-P', '--poll', default=C.DEFAULT_POLL_INTERVAL, type='int', dest='poll_interval',
help="set the poll interval if using -B (default=%s)" % C.DEFAULT_POLL_INTERVAL)
parser.add_option('-B', '--background', dest='seconds', type='int', default=0,
help='run asynchronously, failing after X seconds (default=N/A)')
if check_opts:
parser.add_option("-C", "--check", default=False, dest='check', action='store_true',
help="don't make any changes; instead, try to predict some of the changes that may occur")
parser.add_option('--syntax-check', dest='syntax', action='store_true',
help="perform a syntax check on the playbook, but do not execute it")
parser.add_option("-D", "--diff", default=False, dest='diff', action='store_true',
help="when changing (small) files and templates, show the differences in those files; works great with --check")
if meta_opts:
parser.add_option('--force-handlers', default=C.DEFAULT_FORCE_HANDLERS, dest='force_handlers', action='store_true',
help="run handlers even if a task fails")
parser.add_option('--flush-cache', dest='flush_cache', action='store_true',
help="clear the fact cache")
return parser
@abstractmethod
def parse(self):
"""Parse the command line args
This method parses the command line arguments. It uses the parser
stored in the self.parser attribute and saves the args and options in
self.args and self.options respectively.
Subclasses need to implement this method. They will usually create
a base_parser, add their own options to the base_parser, and then call
this method to do the actual parsing. An implementation will look
something like this::
def parse(self):
parser = super(MyCLI, self).base_parser(usage="My Ansible CLI", inventory_opts=True)
parser.add_option('--my-option', dest='my_option', action='store')
self.parser = parser
super(MyCLI, self).parse()
# If some additional transformations are needed for the
# arguments and options, do it here.
"""
self.options, self.args = self.parser.parse_args(self.args[1:])
# process tags
if hasattr(self.options, 'tags') and not self.options.tags:
# optparse defaults does not do what's expected
self.options.tags = ['all']
if hasattr(self.options, 'tags') and self.options.tags:
if not C.MERGE_MULTIPLE_CLI_TAGS:
if len(self.options.tags) > 1:
display.deprecated('Specifying --tags multiple times on the command line currently uses the last specified value. '
'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
version=2.5, removed=False)
self.options.tags = [self.options.tags[-1]]
tags = set()
for tag_set in self.options.tags:
for tag in tag_set.split(u','):
tags.add(tag.strip())
self.options.tags = list(tags)
# process skip_tags
if hasattr(self.options, 'skip_tags') and self.options.skip_tags:
if not C.MERGE_MULTIPLE_CLI_TAGS:
if len(self.options.skip_tags) > 1:
display.deprecated('Specifying --skip-tags multiple times on the command line currently uses the last specified value. '
'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
version=2.5, removed=False)
self.options.skip_tags = [self.options.skip_tags[-1]]
skip_tags = set()
for tag_set in self.options.skip_tags:
for tag in tag_set.split(u','):
skip_tags.add(tag.strip())
self.options.skip_tags = list(skip_tags)
# process inventory options
if hasattr(self.options, 'inventory'):
if self.options.inventory:
# should always be list
if isinstance(self.options.inventory, string_types):
self.options.inventory = [self.options.inventory]
# Ensure full paths when needed
self.options.inventory = [unfrackpath(opt) if ',' not in opt else opt for opt in self.options.inventory]
else:
# set default if it exists
if os.path.exists(C.DEFAULT_HOST_LIST):
self.options.inventory = [C.DEFAULT_HOST_LIST]
@staticmethod
def version(prog):
''' return ansible version '''
result = "{0} {1}".format(prog, __version__)
gitinfo = CLI._gitinfo()
if gitinfo:
result = result + " {0}".format(gitinfo)
result += "\n config file = %s" % C.CONFIG_FILE
if C.DEFAULT_MODULE_PATH is None:
cpath = "Default w/o overrides"
else:
cpath = C.DEFAULT_MODULE_PATH
result = result + "\n configured module search path = %s" % cpath
result = result + "\n ansible python module location = %s" % ':'.join(ansible.__path__)
result = result + "\n executable location = %s" % sys.argv[0]
result = result + "\n python version = %s" % ''.join(sys.version.splitlines())
return result
@staticmethod
def version_info(gitinfo=False):
''' return full ansible version info '''
if gitinfo:
            # expensive call, use with care
ansible_version_string = CLI.version('')
else:
ansible_version_string = __version__
ansible_version = ansible_version_string.split()[0]
ansible_versions = ansible_version.split('.')
for counter in range(len(ansible_versions)):
if ansible_versions[counter] == "":
ansible_versions[counter] = 0
try:
ansible_versions[counter] = int(ansible_versions[counter])
except:
pass
if len(ansible_versions) < 3:
for counter in range(len(ansible_versions), 3):
ansible_versions.append(0)
return {'string': ansible_version_string.strip(),
'full': ansible_version,
'major': ansible_versions[0],
'minor': ansible_versions[1],
'revision': ansible_versions[2]}
@staticmethod
def _git_repo_info(repo_path):
''' returns a string containing git branch, commit id and commit date '''
result = None
if os.path.exists(repo_path):
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
                    # The gitdir referenced by the .git file may be an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path[:-4], gitdir)
except (IOError, AttributeError):
return ''
f = open(os.path.join(repo_path, "HEAD"))
line = f.readline().rstrip("\n")
if line.startswith("ref:"):
branch_path = os.path.join(repo_path, line[5:])
else:
branch_path = None
f.close()
if branch_path and os.path.exists(branch_path):
branch = '/'.join(line.split('/')[2:])
f = open(branch_path)
commit = f.readline()[:10]
f.close()
else:
# detached HEAD
commit = line[:10]
branch = 'detached HEAD'
branch_path = os.path.join(repo_path, "HEAD")
date = time.localtime(os.stat(branch_path).st_mtime)
if time.daylight == 0:
offset = time.timezone
else:
offset = time.altzone
result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit, time.strftime("%Y/%m/%d %H:%M:%S", date), int(offset / -36))
else:
result = ''
return result
@staticmethod
def _gitinfo():
basedir = os.path.join(os.path.dirname(__file__), '..', '..', '..')
repo_path = os.path.join(basedir, '.git')
result = CLI._git_repo_info(repo_path)
submodules = os.path.join(basedir, '.gitmodules')
if not os.path.exists(submodules):
return result
f = open(submodules)
for line in f:
tokens = line.strip().split(' ')
if tokens[0] == 'path':
submodule_path = tokens[2]
submodule_info = CLI._git_repo_info(os.path.join(basedir, submodule_path, '.git'))
if not submodule_info:
submodule_info = ' not found - use git submodule update --init ' + submodule_path
result += "\n {0}: {1}".format(submodule_path, submodule_info)
f.close()
return result
def pager(self, text):
''' find reasonable way to display text '''
# this is a much simpler form of what is in pydoc.py
if not sys.stdout.isatty():
display.display(text, screen_only=True)
elif 'PAGER' in os.environ:
if sys.platform == 'win32':
display.display(text, screen_only=True)
else:
self.pager_pipe(text, os.environ['PAGER'])
else:
p = subprocess.Popen('less --version', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.communicate()
if p.returncode == 0:
self.pager_pipe(text, 'less')
else:
display.display(text, screen_only=True)
@staticmethod
def pager_pipe(text, cmd):
''' pipe text through a pager '''
if 'LESS' not in os.environ:
os.environ['LESS'] = CLI.LESS_OPTS
try:
cmd = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=sys.stdout)
cmd.communicate(input=to_bytes(text))
except IOError:
pass
except KeyboardInterrupt:
pass
@classmethod
def tty_ify(cls, text):
t = cls._ITALIC.sub("`" + r"\1" + "'", text) # I(word) => `word'
t = cls._BOLD.sub("*" + r"\1" + "*", t) # B(word) => *word*
t = cls._MODULE.sub("[" + r"\1" + "]", t) # M(word) => [word]
t = cls._URL.sub(r"\1", t) # U(word) => word
t = cls._CONST.sub("`" + r"\1" + "'", t) # C(word) => `word'
return t
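    # Illustrative example of the substitutions above (not part of the original source):
    #   CLI.tty_ify("Use M(copy) to set C(dest)") -> "Use [copy] to set `dest'"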
@staticmethod
def read_vault_password_file(vault_password_file, loader):
"""
Read a vault password from a file or if executable, execute the script and
retrieve password from STDOUT
"""
this_path = os.path.realpath(os.path.expanduser(vault_password_file))
if not os.path.exists(this_path):
raise AnsibleError("The vault password file %s was not found" % this_path)
if loader.is_executable(this_path):
try:
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
except OSError as e:
raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, "
"remove the executable bit from the file." % (' '.join(this_path), e))
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("Vault password script %s returned non-zero (%s): %s" % (this_path, p.returncode, p.stderr))
vault_pass = stdout.strip(b'\r\n')
else:
try:
f = open(this_path, "rb")
vault_pass = f.read().strip()
f.close()
except (OSError, IOError) as e:
raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e))
return vault_pass
@staticmethod
def _play_prereqs(options):
# all needs loader
loader = DataLoader()
# vault
b_vault_pass = None
if options.vault_password_file:
# read vault_pass from a file
b_vault_pass = CLI.read_vault_password_file(options.vault_password_file, loader=loader)
elif options.ask_vault_pass:
b_vault_pass = CLI.ask_vault_passwords()
if b_vault_pass is not None:
loader.set_vault_password(b_vault_pass)
# create the inventory, and filter it based on the subset specified (if any)
inventory = InventoryManager(loader=loader, sources=options.inventory)
# create the variable manager, which will be shared throughout
# the code, ensuring a consistent view of global variables
variable_manager = VariableManager(loader=loader, inventory=inventory)
# load vars from cli options
variable_manager.extra_vars = load_extra_vars(loader=loader, options=options)
variable_manager.options_vars = load_options_vars(options, CLI.version_info(gitinfo=False))
return loader, inventory, variable_manager
| gpl-3.0 |
kaustubh-kabra/modified-xen | tools/python/logging/logging-0.4.9.2/test/log_test12.py | 42 | 1951 | #!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""
A test harness for the logging module. Tests HTTPHandler.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import sys, string, logging, logging.handlers
def main():
import pdb
host = "localhost:%d" % logging.handlers.DEFAULT_HTTP_LOGGING_PORT
gh = logging.handlers.HTTPHandler(host, '/log', 'GET')
ph = logging.handlers.HTTPHandler(host, '/log', 'POST')
logger = logging.getLogger("log_test12")
logger.propagate = 0
logger.addHandler(gh)
logger.addHandler(ph)
logging.getLogger("").setLevel(logging.DEBUG)
logger.info("Jackdaws love my big %s of %s", "sphinx", "quartz")
logger.debug("Pack my %s with twelve dozen %s", "box", "liquor jugs")
gh.close()
ph.close()
logger.removeHandler(gh)
logger.removeHandler(ph)
if __name__ == "__main__":
main()
| gpl-2.0 |
markpudd/logistic_regression | logisticReg.py | 1 | 1904 | # Helper functions to do logistic regression
# To use these helpers, minimise the logRegCost function; pass logRegGrad as its derivative (gradient)
#
import cv2
import numpy as np
import scipy.io as sio
import csv as csv
from sklearn.preprocessing import normalize
def featureNormalize(data):
mu = data.mean(0)
    data_norm = data - mu
    sigma = np.std(data_norm, axis=0, ddof=1)
    data_norm = data_norm / sigma
    return data_norm
def addFirstOnes(data):
return np.concatenate((np.ones((np.size(data,0),1)),data),1)
def sigmoid(z):
return 1/(1+np.exp(-z))
def logRegGrad(theta, data_x, data_y, lamb):
m = float(np.size(data_y))
theta=np.array([theta]).T
temp = np.array(theta);
temp[0] = 0;
ha = data_x.dot(theta)
h=sigmoid(ha);
grad = 1/m * ((h-data_y).T.dot(data_x)).T;
grad = grad + ((lamb/m)*temp);
return grad.T[0]
def logRegCost(theta, data_x, data_y, lamb):
m = float(np.size(data_y))
theta=np.array([theta]).T
ha = data_x.dot(theta)
h=sigmoid(ha);
J = 1/m *((-data_y.T.dot(np.log(h))-(1-data_y.T).dot(np.log(1-h))));
temp = np.array(theta);
temp[0] = 0; # because we don't add anything for j = 0
J = J + (lamb/(2*m))*sum(np.power(temp,2));
return J[0,0]
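# A minimal usage sketch (not part of the original file). It assumes a raw
# feature matrix "raw_x" and an m-by-1 label array "data_y" already exist, and
# uses scipy.optimize to minimise logRegCost with logRegGrad supplying the
# gradient:
#
#   from scipy import optimize
#   data_x = addFirstOnes(featureNormalize(raw_x))
#   initial_theta = np.zeros(np.size(data_x, 1))
#   lamb = 1.0
#   res = optimize.minimize(logRegCost, initial_theta, jac=logRegGrad,
#                           args=(data_x, data_y, lamb), method='BFGS')
#   predictions = predict(res.x, data_x)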
def predict(theta, data_x):
n = np.size(data_x,1)
theta=np.array([theta]).T
ha = data_x.dot(theta)
p=sigmoid(ha);
for i in range(0,np.size(data_x,0)):
if p[i]>=0.5:
p[i]=1
else:
p[i]=0
return p
def testError(theta, data_x,data_y):
m = float(np.size(data_y))
sum =0
p=predict(theta, data_x);
for i in range(0,np.size(data_x,0)):
if p[i,0]==1 and data_y[0,i]==0:
sum = sum+1;
elif p[i,0]==0 and data_y[0,i]==1:
sum = sum+1;
return 1/m * sum
| mit |
alex/boto | boto/sns/__init__.py | 131 | 2117 | # Copyright (c) 2010-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010-2011, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# this is here for backward compatibility
# originally, the SNSConnection class was defined here
from boto.sns.connection import SNSConnection
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the SNS service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo` instances
"""
return get_regions('sns', connection_cls=SNSConnection)
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.sns.connection.SNSConnection`.
:type: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.sns.connection.SNSConnection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
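# Example usage (illustrative):
#
#   import boto.sns
#   conn = boto.sns.connect_to_region('us-east-1')
#   if conn is None:
#       raise ValueError('Invalid or unavailable region name')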
| mit |
tinloaf/home-assistant | homeassistant/components/media_player/cast.py | 3 | 24944 | """
Provide functionality to interact with Cast devices on the network.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/media_player.cast/
"""
import asyncio
import logging
import threading
from typing import Optional, Tuple
import attr
import voluptuous as vol
from homeassistant.components.cast import DOMAIN as CAST_DOMAIN
from homeassistant.components.media_player import (
MEDIA_TYPE_MOVIE, MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW, PLATFORM_SCHEMA,
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, MediaPlayerDevice)
from homeassistant.const import (
CONF_HOST, EVENT_HOMEASSISTANT_STOP, STATE_IDLE, STATE_OFF, STATE_PAUSED,
STATE_PLAYING)
from homeassistant.core import callback
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, dispatcher_send)
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
import homeassistant.util.dt as dt_util
DEPENDENCIES = ('cast',)
_LOGGER = logging.getLogger(__name__)
CONF_IGNORE_CEC = 'ignore_cec'
CAST_SPLASH = 'https://home-assistant.io/images/cast/splash.png'
DEFAULT_PORT = 8009
SUPPORT_CAST = SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_NEXT_TRACK | SUPPORT_PLAY_MEDIA | SUPPORT_STOP | SUPPORT_PLAY
# Stores a threading.Lock that is held by the internal pychromecast discovery.
INTERNAL_DISCOVERY_RUNNING_KEY = 'cast_discovery_running'
# Stores all ChromecastInfo we encountered through discovery or config as a set
# If we find a chromecast with a new host, the old one will be removed again.
KNOWN_CHROMECAST_INFO_KEY = 'cast_known_chromecasts'
# Stores UUIDs of cast devices that were added as entities. Doesn't store
# None UUIDs.
ADDED_CAST_DEVICES_KEY = 'cast_added_cast_devices'
# Dispatcher signal fired with a ChromecastInfo every time we discover a new
# Chromecast or receive it through configuration
SIGNAL_CAST_DISCOVERED = 'cast_discovered'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_IGNORE_CEC, default=[]):
vol.All(cv.ensure_list, [cv.string]),
})
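# An illustrative configuration.yaml entry for this (deprecated) platform-style
# setup; the host address below is an assumption, not a value from this module:
#
#   media_player:
#     - platform: cast
#       host: 192.168.1.10
#       ignore_cec:
#         - Living Room TV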
@attr.s(slots=True, frozen=True)
class ChromecastInfo:
"""Class to hold all data about a chromecast for creating connections.
This also has the same attributes as the mDNS fields by zeroconf.
"""
host = attr.ib(type=str)
port = attr.ib(type=int)
uuid = attr.ib(type=Optional[str], converter=attr.converters.optional(str),
default=None) # always convert UUID to string if not None
manufacturer = attr.ib(type=str, default='')
model_name = attr.ib(type=str, default='')
friendly_name = attr.ib(type=Optional[str], default=None)
@property
def is_audio_group(self) -> bool:
"""Return if this is an audio group."""
return self.port != DEFAULT_PORT
@property
def is_information_complete(self) -> bool:
"""Return if all information is filled out."""
return all(attr.astuple(self))
@property
def host_port(self) -> Tuple[str, int]:
"""Return the host+port tuple."""
return self.host, self.port
def _fill_out_missing_chromecast_info(info: ChromecastInfo) -> ChromecastInfo:
"""Fill out missing attributes of ChromecastInfo using blocking HTTP."""
if info.is_information_complete or info.is_audio_group:
# We have all information, no need to check HTTP API. Or this is an
# audio group, so checking via HTTP won't give us any new information.
return info
# Fill out missing information via HTTP dial.
from pychromecast import dial
http_device_status = dial.get_device_status(info.host)
if http_device_status is None:
# HTTP dial didn't give us any new information.
return info
return ChromecastInfo(
host=info.host, port=info.port,
uuid=(info.uuid or http_device_status.uuid),
friendly_name=(info.friendly_name or http_device_status.friendly_name),
manufacturer=(info.manufacturer or http_device_status.manufacturer),
model_name=(info.model_name or http_device_status.model_name)
)
def _discover_chromecast(hass: HomeAssistantType, info: ChromecastInfo):
if info in hass.data[KNOWN_CHROMECAST_INFO_KEY]:
_LOGGER.debug("Discovered previous chromecast %s", info)
return
# Either discovered completely new chromecast or a "moved" one.
info = _fill_out_missing_chromecast_info(info)
_LOGGER.debug("Discovered chromecast %s", info)
if info.uuid is not None:
# Remove previous cast infos with same uuid from known chromecasts.
same_uuid = set(x for x in hass.data[KNOWN_CHROMECAST_INFO_KEY]
if info.uuid == x.uuid)
hass.data[KNOWN_CHROMECAST_INFO_KEY] -= same_uuid
hass.data[KNOWN_CHROMECAST_INFO_KEY].add(info)
dispatcher_send(hass, SIGNAL_CAST_DISCOVERED, info)
def _setup_internal_discovery(hass: HomeAssistantType) -> None:
"""Set up the pychromecast internal discovery."""
if INTERNAL_DISCOVERY_RUNNING_KEY not in hass.data:
hass.data[INTERNAL_DISCOVERY_RUNNING_KEY] = threading.Lock()
if not hass.data[INTERNAL_DISCOVERY_RUNNING_KEY].acquire(blocking=False):
# Internal discovery is already running
return
import pychromecast
def internal_callback(name):
"""Handle zeroconf discovery of a new chromecast."""
mdns = listener.services[name]
_discover_chromecast(hass, ChromecastInfo(
host=mdns[0],
port=mdns[1],
uuid=mdns[2],
model_name=mdns[3],
friendly_name=mdns[4],
))
_LOGGER.debug("Starting internal pychromecast discovery.")
listener, browser = pychromecast.start_discovery(internal_callback)
def stop_discovery(event):
"""Stop discovery of new chromecasts."""
_LOGGER.debug("Stopping internal pychromecast discovery.")
pychromecast.stop_discovery(browser)
hass.data[INTERNAL_DISCOVERY_RUNNING_KEY].release()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_discovery)
@callback
def _async_create_cast_device(hass: HomeAssistantType,
info: ChromecastInfo):
"""Create a CastDevice Entity from the chromecast object.
Returns None if the cast device has already been added.
"""
if info.uuid is None:
# Found a cast without UUID, we don't store it because we won't be able
# to update it anyway.
return CastDevice(info)
# Found a cast with UUID
added_casts = hass.data[ADDED_CAST_DEVICES_KEY]
if info.uuid in added_casts:
# Already added this one, the entity will take care of moved hosts
# itself
return None
# -> New cast device
added_casts.add(info.uuid)
return CastDevice(info)
async def async_setup_platform(hass: HomeAssistantType, config: ConfigType,
async_add_entities, discovery_info=None):
"""Set up thet Cast platform.
Deprecated.
"""
_LOGGER.warning(
'Setting configuration for Cast via platform is deprecated. '
'Configure via Cast component instead.')
await _async_setup_platform(
hass, config, async_add_entities, discovery_info)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Cast from a config entry."""
config = hass.data[CAST_DOMAIN].get('media_player', {})
if not isinstance(config, list):
config = [config]
# no pending task
done, _ = await asyncio.wait([
_async_setup_platform(hass, cfg, async_add_entities, None)
for cfg in config])
if any([task.exception() for task in done]):
raise PlatformNotReady
async def _async_setup_platform(hass: HomeAssistantType, config: ConfigType,
async_add_entities, discovery_info):
"""Set up the cast platform."""
import pychromecast
# Import CEC IGNORE attributes
pychromecast.IGNORE_CEC += config.get(CONF_IGNORE_CEC, [])
hass.data.setdefault(ADDED_CAST_DEVICES_KEY, set())
hass.data.setdefault(KNOWN_CHROMECAST_INFO_KEY, set())
info = None
if discovery_info is not None:
info = ChromecastInfo(host=discovery_info['host'],
port=discovery_info['port'])
elif CONF_HOST in config:
info = ChromecastInfo(host=config[CONF_HOST],
port=DEFAULT_PORT)
@callback
def async_cast_discovered(discover: ChromecastInfo) -> None:
"""Handle discovery of a new chromecast."""
if info is not None and info.host_port != discover.host_port:
# Not our requested cast device.
return
cast_device = _async_create_cast_device(hass, discover)
if cast_device is not None:
async_add_entities([cast_device])
remove_handler = async_dispatcher_connect(
hass, SIGNAL_CAST_DISCOVERED, async_cast_discovered)
# Re-play the callback for all past chromecasts, store the objects in
# a list to avoid concurrent modification resulting in exception.
for chromecast in list(hass.data[KNOWN_CHROMECAST_INFO_KEY]):
async_cast_discovered(chromecast)
if info is None or info.is_audio_group:
# If we were a) explicitly told to enable discovery or
# b) have an audio group cast device, we need internal discovery.
hass.async_add_job(_setup_internal_discovery, hass)
else:
info = await hass.async_add_job(_fill_out_missing_chromecast_info,
info)
if info.friendly_name is None:
_LOGGER.debug("Cannot retrieve detail information for chromecast"
" %s, the device may not be online", info)
remove_handler()
raise PlatformNotReady
hass.async_add_job(_discover_chromecast, hass, info)
class CastStatusListener:
"""Helper class to handle pychromecast status callbacks.
Necessary because a CastDevice entity can create a new socket client
and therefore callbacks from multiple chromecast connections can
potentially arrive. This class allows invalidating past chromecast objects.
"""
def __init__(self, cast_device, chromecast):
"""Initialize the status listener."""
self._cast_device = cast_device
self._valid = True
chromecast.register_status_listener(self)
chromecast.socket_client.media_controller.register_status_listener(
self)
chromecast.register_connection_listener(self)
def new_cast_status(self, cast_status):
"""Handle reception of a new CastStatus."""
if self._valid:
self._cast_device.new_cast_status(cast_status)
def new_media_status(self, media_status):
"""Handle reception of a new MediaStatus."""
if self._valid:
self._cast_device.new_media_status(media_status)
def new_connection_status(self, connection_status):
"""Handle reception of a new ConnectionStatus."""
if self._valid:
self._cast_device.new_connection_status(connection_status)
def invalidate(self):
"""Invalidate this status listener.
All following callbacks won't be forwarded.
"""
self._valid = False
class CastDevice(MediaPlayerDevice):
"""Representation of a Cast device on the network.
This class is the holder of the pychromecast.Chromecast object and its
    socket client. It therefore handles all reconnects and audio-group
    "elected leader" changes itself.
"""
def __init__(self, cast_info):
"""Initialize the cast device."""
self._cast_info = cast_info # type: ChromecastInfo
self._chromecast = None # type: Optional[pychromecast.Chromecast]
self.cast_status = None
self.media_status = None
self.media_status_received = None
self._available = False # type: bool
self._status_listener = None # type: Optional[CastStatusListener]
async def async_added_to_hass(self):
"""Create chromecast object when added to hass."""
@callback
def async_cast_discovered(discover: ChromecastInfo):
"""Handle discovery of new Chromecast."""
if self._cast_info.uuid is None:
# We can't handle empty UUIDs
return
if self._cast_info.uuid != discover.uuid:
# Discovered is not our device.
return
_LOGGER.debug("Discovered chromecast with same UUID: %s", discover)
self.hass.async_create_task(self.async_set_cast_info(discover))
async def async_stop(event):
"""Disconnect socket on Home Assistant stop."""
await self._async_disconnect()
async_dispatcher_connect(self.hass, SIGNAL_CAST_DISCOVERED,
async_cast_discovered)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, async_stop)
self.hass.async_create_task(self.async_set_cast_info(self._cast_info))
async def async_will_remove_from_hass(self) -> None:
"""Disconnect Chromecast object when removed."""
await self._async_disconnect()
if self._cast_info.uuid is not None:
# Remove the entity from the added casts so that it can dynamically
# be re-added again.
self.hass.data[ADDED_CAST_DEVICES_KEY].remove(self._cast_info.uuid)
async def async_set_cast_info(self, cast_info):
"""Set the cast information and set up the chromecast object."""
import pychromecast
old_cast_info = self._cast_info
self._cast_info = cast_info
if self._chromecast is not None:
if old_cast_info.host_port == cast_info.host_port:
_LOGGER.debug("No connection related update: %s",
cast_info.host_port)
return
await self._async_disconnect()
# pylint: disable=protected-access
_LOGGER.debug("Connecting to cast device %s", cast_info)
chromecast = await self.hass.async_add_job(
pychromecast._get_chromecast_from_host, (
cast_info.host, cast_info.port, cast_info.uuid,
cast_info.model_name, cast_info.friendly_name
))
self._chromecast = chromecast
self._status_listener = CastStatusListener(self, chromecast)
# Initialise connection status as connected because we can only
# register the connection listener *after* the initial connection
# attempt. If the initial connection failed, we would never reach
# this code anyway.
self._available = True
self.cast_status = chromecast.status
self.media_status = chromecast.media_controller.status
_LOGGER.debug("Connection successful!")
self.async_schedule_update_ha_state()
async def _async_disconnect(self):
"""Disconnect Chromecast object if it is set."""
if self._chromecast is None:
# Can't disconnect if not connected.
return
_LOGGER.debug("Disconnecting from chromecast socket.")
self._available = False
self.async_schedule_update_ha_state()
await self.hass.async_add_job(self._chromecast.disconnect)
self._invalidate()
self.async_schedule_update_ha_state()
def _invalidate(self):
"""Invalidate some attributes."""
self._chromecast = None
self.cast_status = None
self.media_status = None
self.media_status_received = None
if self._status_listener is not None:
self._status_listener.invalidate()
self._status_listener = None
# ========== Callbacks ==========
def new_cast_status(self, cast_status):
"""Handle updates of the cast status."""
self.cast_status = cast_status
self.schedule_update_ha_state()
def new_media_status(self, media_status):
"""Handle updates of the media status."""
self.media_status = media_status
self.media_status_received = dt_util.utcnow()
self.schedule_update_ha_state()
def new_connection_status(self, connection_status):
"""Handle updates of connection status."""
from pychromecast.socket_client import CONNECTION_STATUS_CONNECTED, \
CONNECTION_STATUS_DISCONNECTED
_LOGGER.debug("Received cast device connection status: %s",
connection_status.status)
if connection_status.status == CONNECTION_STATUS_DISCONNECTED:
self._available = False
self._invalidate()
self.schedule_update_ha_state()
return
new_available = connection_status.status == CONNECTION_STATUS_CONNECTED
if new_available != self._available:
# Connection status callbacks happen often when disconnected.
# Only update state when availability changed to put less pressure
# on state machine.
_LOGGER.debug("Cast device availability changed: %s",
connection_status.status)
self._available = new_available
self.schedule_update_ha_state()
# ========== Service Calls ==========
def turn_on(self):
"""Turn on the cast device."""
import pychromecast
if not self._chromecast.is_idle:
# Already turned on
return
if self._chromecast.app_id is not None:
# Quit the previous app before starting splash screen
self._chromecast.quit_app()
        # The only way we can turn the Chromecast on is by launching an app
self._chromecast.play_media(CAST_SPLASH,
pychromecast.STREAM_TYPE_BUFFERED)
def turn_off(self):
"""Turn off the cast device."""
self._chromecast.quit_app()
def mute_volume(self, mute):
"""Mute the volume."""
self._chromecast.set_volume_muted(mute)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._chromecast.set_volume(volume)
def media_play(self):
"""Send play command."""
self._chromecast.media_controller.play()
def media_pause(self):
"""Send pause command."""
self._chromecast.media_controller.pause()
def media_stop(self):
"""Send stop command."""
self._chromecast.media_controller.stop()
def media_previous_track(self):
"""Send previous track command."""
self._chromecast.media_controller.rewind()
def media_next_track(self):
"""Send next track command."""
self._chromecast.media_controller.skip()
def media_seek(self, position):
"""Seek the media to a specific location."""
self._chromecast.media_controller.seek(position)
def play_media(self, media_type, media_id, **kwargs):
"""Play media from a URL."""
self._chromecast.media_controller.play_media(media_id, media_type)
# ========== Properties ==========
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._cast_info.friendly_name
@property
def device_info(self):
"""Return information about the device."""
cast_info = self._cast_info
if cast_info.model_name == "Google Cast Group":
return None
return {
'name': cast_info.friendly_name,
'identifiers': {
(CAST_DOMAIN, cast_info.uuid.replace('-', ''))
},
'model': cast_info.model_name,
'manufacturer': cast_info.manufacturer,
}
@property
def state(self):
"""Return the state of the player."""
if self.media_status is None:
return None
if self.media_status.player_is_playing:
return STATE_PLAYING
if self.media_status.player_is_paused:
return STATE_PAUSED
if self.media_status.player_is_idle:
return STATE_IDLE
if self._chromecast is not None and self._chromecast.is_idle:
return STATE_OFF
return None
@property
def available(self):
"""Return True if the cast device is connected."""
return self._available
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self.cast_status.volume_level if self.cast_status else None
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self.cast_status.volume_muted if self.cast_status else None
@property
def media_content_id(self):
"""Content ID of current playing media."""
return self.media_status.content_id if self.media_status else None
@property
def media_content_type(self):
"""Content type of current playing media."""
if self.media_status is None:
return None
if self.media_status.media_is_tvshow:
return MEDIA_TYPE_TVSHOW
if self.media_status.media_is_movie:
return MEDIA_TYPE_MOVIE
if self.media_status.media_is_musictrack:
return MEDIA_TYPE_MUSIC
return None
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return self.media_status.duration if self.media_status else None
@property
def media_image_url(self):
"""Image url of current playing media."""
if self.media_status is None:
return None
images = self.media_status.images
return images[0].url if images and images[0].url else None
@property
def media_title(self):
"""Title of current playing media."""
return self.media_status.title if self.media_status else None
@property
def media_artist(self):
"""Artist of current playing media (Music track only)."""
return self.media_status.artist if self.media_status else None
@property
def media_album(self):
"""Album of current playing media (Music track only)."""
return self.media_status.album_name if self.media_status else None
@property
def media_album_artist(self):
"""Album artist of current playing media (Music track only)."""
return self.media_status.album_artist if self.media_status else None
@property
def media_track(self):
"""Track number of current playing media (Music track only)."""
return self.media_status.track if self.media_status else None
@property
def media_series_title(self):
"""Return the title of the series of current playing media."""
return self.media_status.series_title if self.media_status else None
@property
def media_season(self):
"""Season of current playing media (TV Show only)."""
return self.media_status.season if self.media_status else None
@property
def media_episode(self):
"""Episode of current playing media (TV Show only)."""
return self.media_status.episode if self.media_status else None
@property
def app_id(self):
"""Return the ID of the current running app."""
return self._chromecast.app_id if self._chromecast else None
@property
def app_name(self):
"""Name of the current running app."""
return self._chromecast.app_display_name if self._chromecast else None
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_CAST
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self.media_status is None or \
not (self.media_status.player_is_playing or
self.media_status.player_is_paused or
self.media_status.player_is_idle):
return None
return self.media_status.current_time
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
return self.media_status_received
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
return self._cast_info.uuid
| apache-2.0 |
Juniper/nova | nova/virt/powervm/host.py | 2 | 2812 | # Copyright 2014, 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova import conf as cfg
from nova.objects import fields
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Power VM hypervisor info
# Normally, the hypervisor version is a string in the form of '8.0.0' and
# converted to an int with nova.virt.utils.convert_version_to_int() however
# there isn't currently a mechanism to retrieve the exact version.
# Complicating this is the fact that nova conductor only allows live migration
# from the source host to the destination if the source is equal to or less
# than the destination version. PowerVM live migration limitations are
# checked by the PowerVM capabilities flags and not specific version levels.
# For that reason, we'll just publish the major level.
IBM_POWERVM_HYPERVISOR_VERSION = 8
# The types of LPARS that are supported.
POWERVM_SUPPORTED_INSTANCES = [
(fields.Architecture.PPC64, fields.HVType.PHYP, fields.VMMode.HVM),
(fields.Architecture.PPC64LE, fields.HVType.PHYP, fields.VMMode.HVM)]
def build_host_resource_from_ms(ms_w):
"""Build the host resource dict from a ManagedSystem PowerVM wrapper.
:param ms_w: The pypowervm System wrapper describing the managed system.
"""
data = {}
# Calculate the vcpus
proc_units = ms_w.proc_units_configurable
pu_used = float(proc_units) - float(ms_w.proc_units_avail)
data['vcpus'] = int(math.ceil(float(proc_units)))
data['vcpus_used'] = int(math.ceil(pu_used))
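    # Illustrative example: with 16.0 configurable processor units of which
    # 10.5 are still available, pu_used = 5.5, so vcpus = 16 and vcpus_used = 6.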
data['memory_mb'] = ms_w.memory_configurable
data['memory_mb_used'] = (ms_w.memory_configurable -
ms_w.memory_free)
data["hypervisor_type"] = fields.HVType.PHYP
data["hypervisor_version"] = IBM_POWERVM_HYPERVISOR_VERSION
data["hypervisor_hostname"] = CONF.host
data["cpu_info"] = jsonutils.dumps({'vendor': 'ibm', 'arch': 'ppc64'})
data["numa_topology"] = None
data["supported_instances"] = POWERVM_SUPPORTED_INSTANCES
stats = {'proc_units': '%.2f' % float(proc_units),
'proc_units_used': '%.2f' % pu_used,
'memory_region_size': ms_w.memory_region_size}
data["stats"] = stats
return data
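# Illustrative sketch (not part of the original module): how the proc-unit
# arithmetic above maps onto the resource dict, using a hypothetical stand-in
# for the pypowervm ManagedSystem wrapper (CONF.host must be readable, as in a
# normal nova runtime).
def _example_host_resource():
    class FakeManagedSystem(object):
        proc_units_configurable = '20.0'
        proc_units_avail = '17.5'
        memory_configurable = 65536
        memory_free = 32768
        memory_region_size = 256
    data = build_host_resource_from_ms(FakeManagedSystem())
    # vcpus == ceil(20.0) == 20; vcpus_used == ceil(20.0 - 17.5) == 3;
    # memory_mb_used == 65536 - 32768 == 32768
    return data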
| apache-2.0 |
raildo/nova | nova/tests/unit/test_cinder.py | 14 | 6997 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.v1 import client as cinder_client_v1
from cinderclient.v2 import client as cinder_client_v2
from requests_mock.contrib import fixture
from testtools import matchers
from nova import context
from nova import exception
from nova import test
from nova.volume import cinder
_image_metadata = {
'kernel_id': 'fake',
'ramdisk_id': 'fake'
}
class BaseCinderTestCase(object):
def setUp(self):
super(BaseCinderTestCase, self).setUp()
cinder.reset_globals()
self.requests = self.useFixture(fixture.Fixture())
self.api = cinder.API()
self.context = context.RequestContext('username',
'project_id',
auth_token='token',
service_catalog=self.CATALOG)
def flags(self, *args, **kwargs):
super(BaseCinderTestCase, self).flags(*args, **kwargs)
cinder.reset_globals()
def create_client(self):
return cinder.cinderclient(self.context)
def test_context_with_catalog(self):
self.assertEqual(self.URL, self.create_client().client.get_endpoint())
def test_cinder_http_retries(self):
retries = 42
self.flags(http_retries=retries, group='cinder')
self.assertEqual(retries, self.create_client().client.connect_retries)
def test_cinder_api_insecure(self):
# The True/False negation is awkward, but better for the client
# to pass us insecure=True and we check verify_cert == False
self.flags(insecure=True, group='cinder')
self.assertFalse(self.create_client().client.session.verify)
def test_cinder_http_timeout(self):
timeout = 123
self.flags(timeout=timeout, group='cinder')
self.assertEqual(timeout, self.create_client().client.session.timeout)
def test_cinder_api_cacert_file(self):
cacert = "/etc/ssl/certs/ca-certificates.crt"
self.flags(cafile=cacert, group='cinder')
self.assertEqual(cacert, self.create_client().client.session.verify)
class CinderTestCase(BaseCinderTestCase, test.NoDBTestCase):
"""Test case for cinder volume v1 api."""
URL = "http://localhost:8776/v1/project_id"
CATALOG = [{
"type": "volumev2",
"name": "cinderv2",
"endpoints": [{"publicURL": URL}]
}]
def create_client(self):
c = super(CinderTestCase, self).create_client()
self.assertIsInstance(c, cinder_client_v1.Client)
return c
def stub_volume(self, **kwargs):
volume = {
'display_name': None,
'display_description': None,
"attachments": [],
"availability_zone": "cinder",
"created_at": "2012-09-10T00:00:00.000000",
"id": '00000000-0000-0000-0000-000000000000',
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
"bootable": "true"
}
volume.update(kwargs)
return volume
def test_cinder_endpoint_template(self):
endpoint = 'http://other_host:8776/v1/%(project_id)s'
self.flags(endpoint_template=endpoint, group='cinder')
self.assertEqual('http://other_host:8776/v1/project_id',
self.create_client().client.endpoint_override)
def test_get_non_existing_volume(self):
self.requests.get(self.URL + '/volumes/nonexisting',
status_code=404)
self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
'nonexisting')
def test_volume_with_image_metadata(self):
v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata)
m = self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertThat(m.last_request.path,
matchers.EndsWith('/volumes/5678'))
self.assertIn('volume_image_metadata', volume)
self.assertEqual(_image_metadata, volume['volume_image_metadata'])
class CinderV2TestCase(BaseCinderTestCase, test.NoDBTestCase):
"""Test case for cinder volume v2 api."""
URL = "http://localhost:8776/v2/project_id"
CATALOG = [{
"type": "volumev2",
"name": "cinder",
"endpoints": [{"publicURL": URL}]
}]
def setUp(self):
super(CinderV2TestCase, self).setUp()
cinder.CONF.set_override('catalog_info',
'volumev2:cinder:publicURL', group='cinder')
self.addCleanup(cinder.CONF.reset)
def create_client(self):
c = super(CinderV2TestCase, self).create_client()
self.assertIsInstance(c, cinder_client_v2.Client)
return c
def stub_volume(self, **kwargs):
volume = {
'name': None,
'description': None,
"attachments": [],
"availability_zone": "cinderv2",
"created_at": "2013-08-10T00:00:00.000000",
"id": '00000000-0000-0000-0000-000000000000',
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
"bootable": "true"
}
volume.update(kwargs)
return volume
def test_cinder_endpoint_template(self):
endpoint = 'http://other_host:8776/v2/%(project_id)s'
self.flags(endpoint_template=endpoint, group='cinder')
self.assertEqual('http://other_host:8776/v2/project_id',
self.create_client().client.endpoint_override)
def test_get_non_existing_volume(self):
self.requests.get(self.URL + '/volumes/nonexisting',
status_code=404)
self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
'nonexisting')
def test_volume_with_image_metadata(self):
v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata)
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertIn('volume_image_metadata', volume)
self.assertEqual(_image_metadata, volume['volume_image_metadata'])
| apache-2.0 |
whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/twisted/python/_shellcomp.py | 15 | 24327 | # -*- test-case-name: twisted.python.test.test_shellcomp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
No public APIs are provided by this module. Internal use only.
This module implements dynamic tab-completion for any command that uses
twisted.python.usage. Currently, only zsh is supported. Bash support may
be added in the future.
Maintainer: Eric P. Mangold - twisted AT teratorn DOT org
In order for zsh completion to take place the shell must be able to find an
appropriate "stub" file ("completion function") that invokes this code and
displays the results to the user.
The stub used for Twisted commands is in the file C{twisted-completion.zsh},
which is also included in the official Zsh distribution at
C{Completion/Unix/Command/_twisted}. Use this file as a basis for completion
functions for your own commands. You should only need to change the first line
to something like C{#compdef mycommand}.
The main public documentation exists in the L{twisted.python.usage.Options}
docstring, the L{twisted.python.usage.Completions} docstring, and the
Options howto.
"""
import itertools, getopt, inspect
from twisted.python import reflect, util, usage
def shellComplete(config, cmdName, words, shellCompFile):
"""
Perform shell completion.
A completion function (shell script) is generated for the requested
shell and written to C{shellCompFile}, typically C{stdout}. The result
is then eval'd by the shell to produce the desired completions.
@type config: L{twisted.python.usage.Options}
@param config: The L{twisted.python.usage.Options} instance to generate
completions for.
@type cmdName: C{str}
@param cmdName: The name of the command we're generating completions for.
In the case of zsh, this is used to print an appropriate
"#compdef $CMD" line at the top of the output. This is
not necessary for the functionality of the system, but it
helps in debugging, since the output we produce is properly
formed and may be saved in a file and used as a stand-alone
completion function.
@type words: C{list} of C{str}
@param words: The raw command-line words passed to use by the shell
stub function. argv[0] has already been stripped off.
@type shellCompFile: C{file}
@param shellCompFile: The file to write completion data to.
"""
# shellName is provided for forward-compatibility. It is not used,
# since we currently only support zsh.
shellName, position = words[-1].split(":")
position = int(position)
# zsh gives the completion position ($CURRENT) as a 1-based index,
# and argv[0] has already been stripped off, so we subtract 2 to
# get the real 0-based index.
position -= 2
cWord = words[position]
# since the user may hit TAB at any time, we may have been called with an
# incomplete command-line that would generate getopt errors if parsed
# verbatim. However, we must do *some* parsing in order to determine if
# there is a specific subcommand that we need to provide completion for.
# So, to make the command-line more sane we work backwards from the
# current completion position and strip off all words until we find one
# that "looks" like a subcommand. It may in fact be the argument to a
# normal command-line option, but that won't matter for our purposes.
while position >= 1:
if words[position - 1].startswith("-"):
position -= 1
else:
break
words = words[:position]
subCommands = getattr(config, 'subCommands', None)
if subCommands:
# OK, this command supports sub-commands, so lets see if we have been
# given one.
# If the command-line arguments are not valid then we won't be able to
# sanely detect the sub-command, so just generate completions as if no
# sub-command was found.
args = None
try:
opts, args = getopt.getopt(words,
config.shortOpt, config.longOpt)
except getopt.error:
pass
if args:
# yes, we have a subcommand. Try to find it.
for (cmd, short, parser, doc) in config.subCommands:
if args[0] == cmd or args[0] == short:
subOptions = parser()
subOptions.parent = config
gen = ZshSubcommandBuilder(subOptions, config, cmdName,
shellCompFile)
gen.write()
return
    # sub-command not given, or did not match any known sub-command names
genSubs = True
if cWord.startswith("-"):
# optimization: if the current word being completed starts
# with a hyphen then it can't be a sub-command, so skip
# the expensive generation of the sub-command list
genSubs = False
gen = ZshBuilder(config, cmdName, shellCompFile)
gen.write(genSubs=genSubs)
else:
gen = ZshBuilder(config, cmdName, shellCompFile)
gen.write()
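# Illustrative sketch (not part of the original module): how the zsh stub ends
# up invoking shellComplete(). argv[0] has already been stripped and the
# trailing word carries "<shell>:<1-based cursor position>". The command and
# option names below are hypothetical.
def _example_shell_complete():
    import io
    class ExampleOptions(usage.Options):
        optFlags = [["verbose", "v", "Be more verbose"]]
    out = io.BytesIO()
    # The user typed "mycmd --ver<TAB>"; the cursor sits on the second word.
    shellComplete(ExampleOptions(), "mycmd", ["--ver", "zsh:2"], out)
    # out.getvalue() now holds the "_arguments ..." code eval'd by the stub.
    return out.getvalue()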
class SubcommandAction(usage.Completer):
def _shellCode(self, optName, shellType):
if shellType == usage._ZSH:
return '*::subcmd:->subcmd'
raise NotImplementedError("Unknown shellType %r" % (shellType,))
class ZshBuilder(object):
"""
Constructs zsh code that will complete options for a given usage.Options
instance, possibly including a list of subcommand names.
Completions for options to subcommands won't be generated because this
class will never be used if the user is completing options for a specific
subcommand. (See L{ZshSubcommandBuilder} below)
@type options: L{twisted.python.usage.Options}
@ivar options: The L{twisted.python.usage.Options} instance defined for this
command.
@type cmdName: C{str}
@ivar cmdName: The name of the command we're generating completions for.
@type file: C{file}
@ivar file: The C{file} to write the completion function to.
"""
def __init__(self, options, cmdName, file):
self.options = options
self.cmdName = cmdName
self.file = file
def write(self, genSubs=True):
"""
Generate the completion function and write it to the output file
@return: L{None}
@type genSubs: C{bool}
@param genSubs: Flag indicating whether or not completions for the list
of subcommand should be generated. Only has an effect
if the C{subCommands} attribute has been defined on the
L{twisted.python.usage.Options} instance.
"""
if genSubs and getattr(self.options, 'subCommands', None) is not None:
gen = ZshArgumentsGenerator(self.options, self.cmdName, self.file)
gen.extraActions.insert(0, SubcommandAction())
gen.write()
self.file.write(b'local _zsh_subcmds_array\n_zsh_subcmds_array=(\n')
for (cmd, short, parser, desc) in self.options.subCommands:
self.file.write(
b'\"' + cmd.encode('utf-8') + b':' + desc.encode('utf-8') +b'\"\n')
self.file.write(b")\n\n")
self.file.write(b'_describe "sub-command" _zsh_subcmds_array\n')
else:
gen = ZshArgumentsGenerator(self.options, self.cmdName, self.file)
gen.write()
class ZshSubcommandBuilder(ZshBuilder):
"""
Constructs zsh code that will complete options for a given usage.Options
instance, and also for a single sub-command. This will only be used in
the case where the user is completing options for a specific subcommand.
@type subOptions: L{twisted.python.usage.Options}
@ivar subOptions: The L{twisted.python.usage.Options} instance defined for
the sub command.
"""
def __init__(self, subOptions, *args):
self.subOptions = subOptions
ZshBuilder.__init__(self, *args)
def write(self):
"""
Generate the completion function and write it to the output file
@return: L{None}
"""
gen = ZshArgumentsGenerator(self.options, self.cmdName, self.file)
gen.extraActions.insert(0, SubcommandAction())
gen.write()
gen = ZshArgumentsGenerator(self.subOptions, self.cmdName, self.file)
gen.write()
class ZshArgumentsGenerator(object):
"""
Generate a call to the zsh _arguments completion function
based on data in a usage.Options instance
@type options: L{twisted.python.usage.Options}
@ivar options: The L{twisted.python.usage.Options} instance to generate for
@type cmdName: C{str}
@ivar cmdName: The name of the command we're generating completions for.
@type file: C{file}
@ivar file: The C{file} to write the completion function to
The following non-constructor variables are populated by this class
with data gathered from the C{Options} instance passed in, and its
base classes.
@type descriptions: C{dict}
@ivar descriptions: A dict mapping long option names to alternate
descriptions. When this variable is defined, the descriptions
contained here will override those descriptions provided in the
optFlags and optParameters variables.
@type multiUse: C{list}
@ivar multiUse: An iterable containing those long option names which may
appear on the command line more than once. By default, options will
only be completed one time.
@type mutuallyExclusive: C{list} of C{tuple}
@ivar mutuallyExclusive: A sequence of sequences, with each sub-sequence
containing those long option names that are mutually exclusive. That is,
those options that cannot appear on the command line together.
@type optActions: C{dict}
@ivar optActions: A dict mapping long option names to shell "actions".
These actions define what may be completed as the argument to the
given option, and should be given as instances of
L{twisted.python.usage.Completer}.
Callables may instead be given for the values in this dict. The
callable should accept no arguments, and return a C{Completer}
instance used as the action.
@type extraActions: C{list} of C{twisted.python.usage.Completer}
@ivar extraActions: Extra arguments are those arguments typically
appearing at the end of the command-line, which are not associated
with any particular named option. That is, the arguments that are
given to the parseArgs() method of your usage.Options subclass.
"""
def __init__(self, options, cmdName, file):
self.options = options
self.cmdName = cmdName
self.file = file
self.descriptions = {}
self.multiUse = set()
self.mutuallyExclusive = []
self.optActions = {}
self.extraActions = []
for cls in reversed(inspect.getmro(options.__class__)):
data = getattr(cls, 'compData', None)
if data:
self.descriptions.update(data.descriptions)
self.optActions.update(data.optActions)
self.multiUse.update(data.multiUse)
self.mutuallyExclusive.extend(data.mutuallyExclusive)
# I don't see any sane way to aggregate extraActions, so just
# take the one at the top of the MRO (nearest the `options'
# instance).
if data.extraActions:
self.extraActions = data.extraActions
aCL = reflect.accumulateClassList
optFlags = []
optParams = []
aCL(options.__class__, 'optFlags', optFlags)
aCL(options.__class__, 'optParameters', optParams)
for i, optList in enumerate(optFlags):
if len(optList) != 3:
optFlags[i] = util.padTo(3, optList)
for i, optList in enumerate(optParams):
if len(optList) != 5:
optParams[i] = util.padTo(5, optList)
self.optFlags = optFlags
self.optParams = optParams
paramNameToDefinition = {}
for optList in optParams:
paramNameToDefinition[optList[0]] = optList[1:]
self.paramNameToDefinition = paramNameToDefinition
flagNameToDefinition = {}
for optList in optFlags:
flagNameToDefinition[optList[0]] = optList[1:]
self.flagNameToDefinition = flagNameToDefinition
allOptionsNameToDefinition = {}
allOptionsNameToDefinition.update(paramNameToDefinition)
allOptionsNameToDefinition.update(flagNameToDefinition)
self.allOptionsNameToDefinition = allOptionsNameToDefinition
self.addAdditionalOptions()
# makes sure none of the Completions metadata references
# option names that don't exist. (great for catching typos)
self.verifyZshNames()
self.excludes = self.makeExcludesDict()
def write(self):
"""
Write the zsh completion code to the file given to __init__
@return: L{None}
"""
self.writeHeader()
self.writeExtras()
self.writeOptions()
self.writeFooter()
def writeHeader(self):
"""
This is the start of the code that calls _arguments
@return: L{None}
"""
self.file.write(b'#compdef ' + self.cmdName.encode('utf-8') +
b'\n\n'
b'_arguments -s -A "-*" \\\n')
def writeOptions(self):
"""
Write out zsh code for each option in this command
@return: L{None}
"""
optNames = list(self.allOptionsNameToDefinition.keys())
optNames.sort()
for longname in optNames:
self.writeOpt(longname)
def writeExtras(self):
"""
Write out completion information for extra arguments appearing on the
command-line. These are extra positional arguments not associated
with a named option. That is, the stuff that gets passed to
Options.parseArgs().
@return: L{None}
@raises: ValueError: if C{Completer} with C{repeat=True} is found and
is not the last item in the C{extraActions} list.
"""
for i, action in enumerate(self.extraActions):
# a repeatable action must be the last action in the list
if action._repeat and i != len(self.extraActions) - 1:
raise ValueError("Completer with repeat=True must be "
"last item in Options.extraActions")
self.file.write(
escape(action._shellCode('', usage._ZSH)).encode('utf-8'))
self.file.write(b' \\\n')
def writeFooter(self):
"""
Write the last bit of code that finishes the call to _arguments
@return: L{None}
"""
self.file.write(b'&& return 0\n')
def verifyZshNames(self):
"""
Ensure that none of the option names given in the metadata are typoed
@return: L{None}
@raise ValueError: Raised if unknown option names have been found.
"""
def err(name):
raise ValueError("Unknown option name \"%s\" found while\n"
"examining Completions instances on %s" % (
name, self.options))
for name in itertools.chain(self.descriptions, self.optActions,
self.multiUse):
if name not in self.allOptionsNameToDefinition:
err(name)
for seq in self.mutuallyExclusive:
for name in seq:
if name not in self.allOptionsNameToDefinition:
err(name)
def excludeStr(self, longname, buildShort=False):
"""
Generate an "exclusion string" for the given option
@type longname: C{str}
@param longname: The long option name (e.g. "verbose" instead of "v")
@type buildShort: C{bool}
@param buildShort: May be True to indicate we're building an excludes
string for the short option that corresponds to the given long opt.
@return: The generated C{str}
"""
if longname in self.excludes:
exclusions = self.excludes[longname].copy()
else:
exclusions = set()
# if longname isn't a multiUse option (can't appear on the cmd line more
# than once), then we have to exclude the short option if we're
# building for the long option, and vice versa.
if longname not in self.multiUse:
if buildShort is False:
short = self.getShortOption(longname)
if short is not None:
exclusions.add(short)
else:
exclusions.add(longname)
if not exclusions:
return ''
strings = []
for optName in exclusions:
if len(optName) == 1:
# short option
strings.append("-" + optName)
else:
strings.append("--" + optName)
strings.sort() # need deterministic order for reliable unit-tests
return "(%s)" % " ".join(strings)
def makeExcludesDict(self):
"""
@return: A C{dict} that maps each option name appearing in
        self.mutuallyExclusive to a list of those option names that it is
mutually exclusive with (can't appear on the cmd line with).
"""
#create a mapping of long option name -> single character name
longToShort = {}
for optList in itertools.chain(self.optParams, self.optFlags):
if optList[1] != None:
longToShort[optList[0]] = optList[1]
excludes = {}
for lst in self.mutuallyExclusive:
for i, longname in enumerate(lst):
tmp = set(lst[:i] + lst[i+1:])
for name in tmp.copy():
if name in longToShort:
tmp.add(longToShort[name])
if longname in excludes:
excludes[longname] = excludes[longname].union(tmp)
else:
excludes[longname] = tmp
return excludes
def writeOpt(self, longname):
"""
Write out the zsh code for the given argument. This is just part of the
one big call to _arguments
@type longname: C{str}
@param longname: The long option name (e.g. "verbose" instead of "v")
@return: L{None}
"""
if longname in self.flagNameToDefinition:
# It's a flag option. Not one that takes a parameter.
longField = "--%s" % longname
else:
longField = "--%s=" % longname
short = self.getShortOption(longname)
if short != None:
shortField = "-" + short
else:
shortField = ''
descr = self.getDescription(longname)
descriptionField = descr.replace("[", "\[")
descriptionField = descriptionField.replace("]", "\]")
descriptionField = '[%s]' % descriptionField
actionField = self.getAction(longname)
if longname in self.multiUse:
multiField = '*'
else:
multiField = ''
longExclusionsField = self.excludeStr(longname)
if short:
#we have to write an extra line for the short option if we have one
shortExclusionsField = self.excludeStr(longname, buildShort=True)
self.file.write(escape('%s%s%s%s%s' % (shortExclusionsField,
multiField, shortField, descriptionField, actionField)).encode('utf-8'))
self.file.write(b' \\\n')
self.file.write(escape('%s%s%s%s%s' % (longExclusionsField,
multiField, longField, descriptionField, actionField)).encode('utf-8'))
self.file.write(b' \\\n')
def getAction(self, longname):
"""
Return a zsh "action" string for the given argument
@return: C{str}
"""
if longname in self.optActions:
if callable(self.optActions[longname]):
action = self.optActions[longname]()
else:
action = self.optActions[longname]
return action._shellCode(longname, usage._ZSH)
if longname in self.paramNameToDefinition:
return ':%s:_files' % (longname,)
return ''
def getDescription(self, longname):
"""
Return the description to be used for this argument
@return: C{str}
"""
#check if we have an alternate descr for this arg, and if so use it
if longname in self.descriptions:
return self.descriptions[longname]
#otherwise we have to get it from the optFlags or optParams
try:
descr = self.flagNameToDefinition[longname][1]
except KeyError:
try:
descr = self.paramNameToDefinition[longname][2]
except KeyError:
descr = None
if descr is not None:
return descr
# let's try to get it from the opt_foo method doc string if there is one
longMangled = longname.replace('-', '_') # this is what t.p.usage does
obj = getattr(self.options, 'opt_%s' % longMangled, None)
if obj is not None:
descr = descrFromDoc(obj)
if descr is not None:
return descr
return longname # we really ought to have a good description to use
def getShortOption(self, longname):
"""
Return the short option letter or None
@return: C{str} or L{None}
"""
optList = self.allOptionsNameToDefinition[longname]
return optList[0] or None
def addAdditionalOptions(self):
"""
Add additional options to the optFlags and optParams lists.
These will be defined by 'opt_foo' methods of the Options subclass
@return: L{None}
"""
methodsDict = {}
reflect.accumulateMethods(self.options, methodsDict, 'opt_')
methodToShort = {}
for name in methodsDict.copy():
if len(name) == 1:
methodToShort[methodsDict[name]] = name
del methodsDict[name]
for methodName, methodObj in methodsDict.items():
longname = methodName.replace('_', '-') # t.p.usage does this
# if this option is already defined by the optFlags or
# optParameters then we don't want to override that data
if longname in self.allOptionsNameToDefinition:
continue
descr = self.getDescription(longname)
short = None
if methodObj in methodToShort:
short = methodToShort[methodObj]
reqArgs = methodObj.__func__.__code__.co_argcount
if reqArgs == 2:
self.optParams.append([longname, short, None, descr])
self.paramNameToDefinition[longname] = [short, None, descr]
self.allOptionsNameToDefinition[longname] = [short, None, descr]
else:
# reqArgs must equal 1. self.options would have failed
# to instantiate if it had opt_ methods with bad signatures.
self.optFlags.append([longname, short, descr])
self.flagNameToDefinition[longname] = [short, descr]
self.allOptionsNameToDefinition[longname] = [short, None, descr]
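# Illustrative sketch (not part of the original module): an Options subclass
# carrying the Completions metadata that ZshArgumentsGenerator consumes. The
# option names and the file-glob action below are hypothetical.
def _example_completions_metadata():
    class ExampleOptions(usage.Options):
        optFlags = [["quiet", "q", "Suppress output"]]
        optParameters = [["logfile", "l", None, "Path to the log file"]]
        compData = usage.Completions(
            descriptions={"logfile": "Where to write the log"},
            multiUse=["quiet"],
            optActions={"logfile": usage.CompleteFiles("*.log")})
    return ExampleOptions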
def descrFromDoc(obj):
"""
Generate an appropriate description from docstring of the given object
"""
if obj.__doc__ is None or obj.__doc__.isspace():
return None
lines = [x.strip() for x in obj.__doc__.split("\n")
if x and not x.isspace()]
return " ".join(lines)
def escape(x):
"""
Shell escape the given string
Implementation borrowed from now-deprecated commands.mkarg() in the stdlib
"""
if '\'' not in x:
return '\'' + x + '\''
s = '"'
for c in x:
if c in '\\$"`':
s = s + '\\'
s = s + c
s = s + '"'
return s
| mit |
MadCat34/Sick-Beard | sickbeard/clients/requests/status_codes.py | 252 | 3043 | # -*- coding: utf-8 -*-
from .structures import LookupDict
_codes = {
# Informational.
100: ('continue',),
101: ('switching_protocols',),
102: ('processing',),
103: ('checkpoint',),
122: ('uri_too_long', 'request_uri_too_long'),
200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
201: ('created',),
202: ('accepted',),
203: ('non_authoritative_info', 'non_authoritative_information'),
204: ('no_content',),
205: ('reset_content', 'reset'),
206: ('partial_content', 'partial'),
207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
208: ('im_used',),
# Redirection.
300: ('multiple_choices',),
301: ('moved_permanently', 'moved', '\\o-'),
302: ('found',),
303: ('see_other', 'other'),
304: ('not_modified',),
305: ('use_proxy',),
306: ('switch_proxy',),
307: ('temporary_redirect', 'temporary_moved', 'temporary'),
308: ('resume_incomplete', 'resume'),
# Client Error.
400: ('bad_request', 'bad'),
401: ('unauthorized',),
402: ('payment_required', 'payment'),
403: ('forbidden',),
404: ('not_found', '-o-'),
405: ('method_not_allowed', 'not_allowed'),
406: ('not_acceptable',),
407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
408: ('request_timeout', 'timeout'),
409: ('conflict',),
410: ('gone',),
411: ('length_required',),
412: ('precondition_failed', 'precondition'),
413: ('request_entity_too_large',),
414: ('request_uri_too_large',),
415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
417: ('expectation_failed',),
418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
422: ('unprocessable_entity', 'unprocessable'),
423: ('locked',),
424: ('failed_dependency', 'dependency'),
425: ('unordered_collection', 'unordered'),
426: ('upgrade_required', 'upgrade'),
428: ('precondition_required', 'precondition'),
429: ('too_many_requests', 'too_many'),
431: ('header_fields_too_large', 'fields_too_large'),
444: ('no_response', 'none'),
449: ('retry_with', 'retry'),
450: ('blocked_by_windows_parental_controls', 'parental_controls'),
499: ('client_closed_request',),
# Server Error.
500: ('internal_server_error', 'server_error', '/o\\', '✗'),
501: ('not_implemented',),
502: ('bad_gateway',),
503: ('service_unavailable', 'unavailable'),
504: ('gateway_timeout',),
505: ('http_version_not_supported', 'http_version'),
506: ('variant_also_negotiates',),
507: ('insufficient_storage',),
509: ('bandwidth_limit_exceeded', 'bandwidth'),
510: ('not_extended',),
}
codes = LookupDict(name='status_codes')
for (code, titles) in list(_codes.items()):
for title in titles:
setattr(codes, title, code)
if not title.startswith('\\'):
setattr(codes, title.upper(), code)
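# Illustrative sketch (not part of the original module): after the loop above,
# both the lower-case and upper-case aliases resolve to the numeric code.
def _example_lookup():
    assert codes.ok == codes.OK == 200
    assert codes.not_found == 404 and codes.NOT_FOUND == 404
    return codes.teapot  # 418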
| gpl-3.0 |
richardgroves/namebench | nb_third_party/jinja2/environment.py | 199 | 43213 | # -*- coding: utf-8 -*-
"""
jinja2.environment
~~~~~~~~~~~~~~~~~~
Provides a class that holds runtime and parsing time options.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from jinja2 import nodes
from jinja2.defaults import *
from jinja2.lexer import get_lexer, TokenStream
from jinja2.parser import Parser
from jinja2.optimizer import optimize
from jinja2.compiler import generate
from jinja2.runtime import Undefined, new_context
from jinja2.exceptions import TemplateSyntaxError, TemplateNotFound, \
TemplatesNotFound
from jinja2.utils import import_string, LRUCache, Markup, missing, \
concat, consume, internalcode, _encode_filename
# for direct template usage we have up to ten living environments
_spontaneous_environments = LRUCache(10)
# the function to create jinja traceback objects. This is dynamically
# imported on the first exception in the exception handler.
_make_traceback = None
def get_spontaneous_environment(*args):
"""Return a new spontaneous environment. A spontaneous environment is an
    unnamed and inaccessible (in theory) environment that is used for
templates generated from a string and not from the file system.
"""
try:
env = _spontaneous_environments.get(args)
except TypeError:
return Environment(*args)
if env is not None:
return env
_spontaneous_environments[args] = env = Environment(*args)
env.shared = True
return env
def create_cache(size):
"""Return the cache class for the given size."""
if size == 0:
return None
if size < 0:
return {}
return LRUCache(size)
def copy_cache(cache):
"""Create an empty copy of the given cache."""
if cache is None:
return None
elif type(cache) is dict:
return {}
return LRUCache(cache.capacity)
def load_extensions(environment, extensions):
"""Load the extensions from the list and bind it to the environment.
    Returns a dict of instantiated extensions.
"""
result = {}
for extension in extensions:
if isinstance(extension, basestring):
extension = import_string(extension)
result[extension.identifier] = extension(environment)
return result
def _environment_sanity_check(environment):
"""Perform a sanity check on the environment."""
assert issubclass(environment.undefined, Undefined), 'undefined must ' \
'be a subclass of undefined because filters depend on it.'
assert environment.block_start_string != \
environment.variable_start_string != \
environment.comment_start_string, 'block, variable and comment ' \
'start strings must be different'
assert environment.newline_sequence in ('\r', '\r\n', '\n'), \
'newline_sequence set to unknown line ending string.'
return environment
class Environment(object):
r"""The core component of Jinja is the `Environment`. It contains
important shared variables like configuration, filters, tests,
globals and others. Instances of this class may be modified if
they are not shared and if no template was loaded so far.
Modifications on environments after the first template was loaded
will lead to surprising effects and undefined behavior.
Here the possible initialization parameters:
`block_start_string`
The string marking the begin of a block. Defaults to ``'{%'``.
`block_end_string`
The string marking the end of a block. Defaults to ``'%}'``.
`variable_start_string`
The string marking the begin of a print statement.
Defaults to ``'{{'``.
`variable_end_string`
The string marking the end of a print statement. Defaults to
``'}}'``.
`comment_start_string`
The string marking the begin of a comment. Defaults to ``'{#'``.
`comment_end_string`
The string marking the end of a comment. Defaults to ``'#}'``.
`line_statement_prefix`
If given and a string, this will be used as prefix for line based
statements. See also :ref:`line-statements`.
`line_comment_prefix`
If given and a string, this will be used as prefix for line based
        comments. See also :ref:`line-statements`.
.. versionadded:: 2.2
`trim_blocks`
If this is set to ``True`` the first newline after a block is
removed (block, not variable tag!). Defaults to `False`.
`newline_sequence`
The sequence that starts a newline. Must be one of ``'\r'``,
``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
useful default for Linux and OS X systems as well as web
applications.
`extensions`
List of Jinja extensions to use. This can either be import paths
as strings or extension classes. For more information have a
look at :ref:`the extensions documentation <jinja-extensions>`.
`optimized`
should the optimizer be enabled? Default is `True`.
`undefined`
:class:`Undefined` or a subclass of it that is used to represent
undefined values in the template.
`finalize`
A callable that can be used to process the result of a variable
expression before it is output. For example one can convert
`None` implicitly into an empty string here.
`autoescape`
If set to true the XML/HTML autoescaping feature is enabled by
default. For more details about auto escaping see
:class:`~jinja2.utils.Markup`. As of Jinja 2.4 this can also
be a callable that is passed the template name and has to
return `True` or `False` depending on autoescape should be
enabled by default.
.. versionchanged:: 2.4
`autoescape` can now be a function
`loader`
The template loader for this environment.
`cache_size`
The size of the cache. Per default this is ``50`` which means
that if more than 50 templates are loaded the loader will clean
out the least recently used template. If the cache size is set to
``0`` templates are recompiled all the time, if the cache size is
``-1`` the cache will not be cleaned.
`auto_reload`
Some loaders load templates from locations where the template
sources may change (ie: file system or database). If
`auto_reload` is set to `True` (default) every time a template is
requested the loader checks if the source changed and if yes, it
will reload the template. For higher performance it's possible to
disable that.
`bytecode_cache`
If set to a bytecode cache object, this object will provide a
cache for the internal Jinja bytecode so that templates don't
have to be parsed if they were not changed.
See :ref:`bytecode-cache` for more information.
"""
#: if this environment is sandboxed. Modifying this variable won't make
#: the environment sandboxed though. For a real sandboxed environment
#: have a look at jinja2.sandbox
sandboxed = False
#: True if the environment is just an overlay
overlayed = False
#: the environment this environment is linked to if it is an overlay
linked_to = None
#: shared environments have this set to `True`. A shared environment
#: must not be modified
shared = False
#: these are currently EXPERIMENTAL undocumented features.
exception_handler = None
exception_formatter = None
def __init__(self,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False,
loader=None,
cache_size=50,
auto_reload=True,
bytecode_cache=None):
# !!Important notice!!
# The constructor accepts quite a few arguments that should be
# passed by keyword rather than position. However it's important to
# not change the order of arguments because it's used at least
# internally in those cases:
        # - spontaneous environments (i18n extension and Template)
# - unittests
# If parameter changes are required only add parameters at the end
# and don't change the arguments (or the defaults!) of the arguments
# existing already.
# lexer / parser information
self.block_start_string = block_start_string
self.block_end_string = block_end_string
self.variable_start_string = variable_start_string
self.variable_end_string = variable_end_string
self.comment_start_string = comment_start_string
self.comment_end_string = comment_end_string
self.line_statement_prefix = line_statement_prefix
self.line_comment_prefix = line_comment_prefix
self.trim_blocks = trim_blocks
self.newline_sequence = newline_sequence
# runtime information
self.undefined = undefined
self.optimized = optimized
self.finalize = finalize
self.autoescape = autoescape
# defaults
self.filters = DEFAULT_FILTERS.copy()
self.tests = DEFAULT_TESTS.copy()
self.globals = DEFAULT_NAMESPACE.copy()
# set the loader provided
self.loader = loader
self.bytecode_cache = None
self.cache = create_cache(cache_size)
self.bytecode_cache = bytecode_cache
self.auto_reload = auto_reload
# load extensions
self.extensions = load_extensions(self, extensions)
_environment_sanity_check(self)
def extend(self, **attributes):
"""Add the items to the instance of the environment if they do not exist
yet. This is used by :ref:`extensions <writing-extensions>` to register
callbacks and configuration values without breaking inheritance.
"""
for key, value in attributes.iteritems():
if not hasattr(self, key):
setattr(self, key, value)
def overlay(self, block_start_string=missing, block_end_string=missing,
variable_start_string=missing, variable_end_string=missing,
comment_start_string=missing, comment_end_string=missing,
line_statement_prefix=missing, line_comment_prefix=missing,
trim_blocks=missing, extensions=missing, optimized=missing,
undefined=missing, finalize=missing, autoescape=missing,
loader=missing, cache_size=missing, auto_reload=missing,
bytecode_cache=missing):
"""Create a new overlay environment that shares all the data with the
current environment except of cache and the overridden attributes.
Extensions cannot be removed for an overlayed environment. An overlayed
environment automatically gets all the extensions of the environment it
is linked to plus optional extra extensions.
Creating overlays should happen after the initial environment was set
up completely. Not all attributes are truly linked, some are just
copied over so modifications on the original environment may not shine
through.
"""
args = dict(locals())
del args['self'], args['cache_size'], args['extensions']
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.overlayed = True
rv.linked_to = self
for key, value in args.iteritems():
if value is not missing:
setattr(rv, key, value)
if cache_size is not missing:
rv.cache = create_cache(cache_size)
else:
rv.cache = copy_cache(self.cache)
rv.extensions = {}
for key, value in self.extensions.iteritems():
rv.extensions[key] = value.bind(rv)
if extensions is not missing:
            rv.extensions.update(load_extensions(rv, extensions))
return _environment_sanity_check(rv)
lexer = property(get_lexer, doc="The lexer for this environment.")
def iter_extensions(self):
"""Iterates over the extensions by priority."""
return iter(sorted(self.extensions.values(),
key=lambda x: x.priority))
def getitem(self, obj, argument):
"""Get an item or attribute of an object but prefer the item."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, basestring):
try:
attr = str(argument)
except:
pass
else:
try:
return getattr(obj, attr)
except AttributeError:
pass
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Get an item or attribute of an object but prefer the attribute.
Unlike :meth:`getitem` the attribute *must* be a bytestring.
"""
try:
return getattr(obj, attribute)
except AttributeError:
pass
try:
return obj[attribute]
except (TypeError, LookupError, AttributeError):
return self.undefined(obj=obj, name=attribute)
@internalcode
def parse(self, source, name=None, filename=None):
"""Parse the sourcecode and return the abstract syntax tree. This
tree of nodes is used by the compiler to convert the template into
executable source- or bytecode. This is useful for debugging or to
extract information from templates.
If you are :ref:`developing Jinja2 extensions <writing-extensions>`
this gives you a good overview of the node tree generated.
"""
try:
return self._parse(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def _parse(self, source, name, filename):
"""Internal parsing function used by `parse` and `compile`."""
return Parser(self, source, name, _encode_filename(filename)).parse()
def lex(self, source, name=None, filename=None):
"""Lex the given sourcecode and return a generator that yields
tokens as tuples in the form ``(lineno, token_type, value)``.
This can be useful for :ref:`extension development <writing-extensions>`
and debugging templates.
This does not perform preprocessing. If you want the preprocessing
of the extensions to be applied you have to filter source through
the :meth:`preprocess` method.
"""
source = unicode(source)
try:
return self.lexer.tokeniter(source, name, filename)
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def preprocess(self, source, name=None, filename=None):
"""Preprocesses the source with all extensions. This is automatically
called for all parsing and compiling methods but *not* for :meth:`lex`
because there you usually only want the actual source tokenized.
"""
return reduce(lambda s, e: e.preprocess(s, name, filename),
self.iter_extensions(), unicode(source))
def _tokenize(self, source, name, filename=None, state=None):
"""Called by the parser to do the preprocessing and filtering
for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
"""
source = self.preprocess(source, name, filename)
stream = self.lexer.tokenize(source, name, filename, state)
for ext in self.iter_extensions():
stream = ext.filter_stream(stream)
if not isinstance(stream, TokenStream):
stream = TokenStream(stream, name, filename)
return stream
@internalcode
def compile(self, source, name=None, filename=None, raw=False,
defer_init=False):
"""Compile a node or template source code. The `name` parameter is
the load name of the template after it was joined using
:meth:`join_path` if necessary, not the filename on the file system.
the `filename` parameter is the estimated filename of the template on
the file system. If the template came from a database or memory this
can be omitted.
The return value of this method is a python code object. If the `raw`
parameter is `True` the return value will be a string with python
code equivalent to the bytecode returned otherwise. This method is
mainly used internally.
        `defer_init` is used internally to aid the module code generator. This
causes the generated code to be able to import without the global
environment variable to be set.
.. versionadded:: 2.4
`defer_init` parameter added.
"""
source_hint = None
try:
if isinstance(source, basestring):
source_hint = source
source = self._parse(source, name, filename)
if self.optimized:
source = optimize(source, self)
source = generate(source, self, name, filename,
defer_init=defer_init)
if raw:
return source
if filename is None:
filename = '<template>'
else:
filename = _encode_filename(filename)
return compile(source, filename, 'exec')
except TemplateSyntaxError:
exc_info = sys.exc_info()
self.handle_exception(exc_info, source_hint=source)
def compile_expression(self, source, undefined_to_none=True):
"""A handy helper method that returns a callable that accepts keyword
arguments that appear as variables in the expression. If called it
returns the result of the expression.
This is useful if applications want to use the same rules as Jinja
in template "configuration files" or similar situations.
Example usage:
>>> env = Environment()
>>> expr = env.compile_expression('foo == 42')
>>> expr(foo=23)
False
>>> expr(foo=42)
True
Per default the return value is converted to `None` if the
expression returns an undefined value. This can be changed
by setting `undefined_to_none` to `False`.
>>> env.compile_expression('var')() is None
True
>>> env.compile_expression('var', undefined_to_none=False)()
Undefined
.. versionadded:: 2.1
"""
parser = Parser(self, source, state='variable')
exc_info = None
try:
expr = parser.parse_expression()
if not parser.stream.eos:
raise TemplateSyntaxError('chunk after expression',
parser.stream.current.lineno,
None, None)
expr.set_environment(self)
except TemplateSyntaxError:
exc_info = sys.exc_info()
if exc_info is not None:
self.handle_exception(exc_info, source_hint=source)
body = [nodes.Assign(nodes.Name('result', 'store'), expr, lineno=1)]
template = self.from_string(nodes.Template(body, lineno=1))
return TemplateExpression(template, undefined_to_none)
def compile_templates(self, target, extensions=None, filter_func=None,
zip='deflated', log_function=None,
ignore_errors=True, py_compile=False):
"""Compiles all the templates the loader can find, compiles them
and stores them in `target`. If `zip` is `None`, instead of in a
        zipfile, the templates will be stored in a directory.
By default a deflate zip algorithm is used, to switch to
the stored algorithm, `zip` can be set to ``'stored'``.
`extensions` and `filter_func` are passed to :meth:`list_templates`.
Each template returned will be compiled to the target folder or
zipfile.
By default template compilation errors are ignored. In case a
log function is provided, errors are logged. If you want template
syntax errors to abort the compilation you can set `ignore_errors`
to `False` and you will get an exception on syntax errors.
If `py_compile` is set to `True` .pyc files will be written to the
target instead of standard .py files.
.. versionadded:: 2.4
"""
from jinja2.loaders import ModuleLoader
if log_function is None:
log_function = lambda x: None
if py_compile:
import imp, struct, marshal
py_header = imp.get_magic() + \
u'\xff\xff\xff\xff'.encode('iso-8859-15')
def write_file(filename, data, mode):
if zip:
info = ZipInfo(filename)
info.external_attr = 0755 << 16L
zip_file.writestr(info, data)
else:
f = open(os.path.join(target, filename), mode)
try:
f.write(data)
finally:
f.close()
if zip is not None:
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
zip_file = ZipFile(target, 'w', dict(deflated=ZIP_DEFLATED,
stored=ZIP_STORED)[zip])
log_function('Compiling into Zip archive "%s"' % target)
else:
if not os.path.isdir(target):
os.makedirs(target)
log_function('Compiling into folder "%s"' % target)
try:
for name in self.list_templates(extensions, filter_func):
source, filename, _ = self.loader.get_source(self, name)
try:
code = self.compile(source, name, filename, True, True)
except TemplateSyntaxError, e:
if not ignore_errors:
raise
log_function('Could not compile "%s": %s' % (name, e))
continue
filename = ModuleLoader.get_module_filename(name)
if py_compile:
c = compile(code, _encode_filename(filename), 'exec')
write_file(filename + 'c', py_header +
marshal.dumps(c), 'wb')
log_function('Byte-compiled "%s" as %s' %
(name, filename + 'c'))
else:
write_file(filename, code, 'w')
log_function('Compiled "%s" as %s' % (name, filename))
finally:
if zip:
zip_file.close()
log_function('Finished compiling templates')
def list_templates(self, extensions=None, filter_func=None):
"""Returns a list of templates for this environment. This requires
that the loader supports the loader's
:meth:`~BaseLoader.list_templates` method.
If there are other files in the template folder besides the
actual templates, the returned list can be filtered. There are two
ways: either `extensions` is set to a list of file extensions for
templates, or a `filter_func` can be provided which is a callable that
is passed a template name and should return `True` if it should end up
in the result list.
If the loader does not support that, a :exc:`TypeError` is raised.
"""
x = self.loader.list_templates()
if extensions is not None:
if filter_func is not None:
raise TypeError('either extensions or filter_func '
'can be passed, but not both')
filter_func = lambda x: '.' in x and \
x.rsplit('.', 1)[1] in extensions
if filter_func is not None:
x = filter(filter_func, x)
return x
def handle_exception(self, exc_info=None, rendered=False, source_hint=None):
"""Exception handling helper. This is used internally to either raise
rewritten exceptions or return a rendered traceback for the template.
"""
global _make_traceback
if exc_info is None:
exc_info = sys.exc_info()
# the debugging module is imported when it's used for the first time.
# we're doing a lot of stuff there and for applications that do not
# get any exceptions in template rendering there is no need to load
# all of that.
if _make_traceback is None:
from jinja2.debug import make_traceback as _make_traceback
traceback = _make_traceback(exc_info, source_hint)
if rendered and self.exception_formatter is not None:
return self.exception_formatter(traceback)
if self.exception_handler is not None:
self.exception_handler(traceback)
exc_type, exc_value, tb = traceback.standard_exc_info
raise exc_type, exc_value, tb
def join_path(self, template, parent):
"""Join a template with the parent. By default all the lookups are
relative to the loader root so this method returns the `template`
parameter unchanged, but if the paths should be relative to the
parent template, this function can be used to calculate the real
template name.
Subclasses may override this method and implement template path
joining here.
"""
return template
@internalcode
def _load_template(self, name, globals):
if self.loader is None:
raise TypeError('no loader for this environment specified')
if self.cache is not None:
template = self.cache.get(name)
if template is not None and (not self.auto_reload or \
template.is_up_to_date):
return template
template = self.loader.load(self, name, globals)
if self.cache is not None:
self.cache[name] = template
return template
@internalcode
def get_template(self, name, parent=None, globals=None):
"""Load a template from the loader. If a loader is configured this
        method asks the loader for the template and returns a :class:`Template`.
If the `parent` parameter is not `None`, :meth:`join_path` is called
to get the real template name before loading.
The `globals` parameter can be used to provide template wide globals.
These variables are available in the context at render time.
If the template does not exist a :exc:`TemplateNotFound` exception is
raised.
.. versionchanged:: 2.4
If `name` is a :class:`Template` object it is returned from the
function unchanged.
"""
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
return self._load_template(name, self.make_globals(globals))
@internalcode
def select_template(self, names, parent=None, globals=None):
"""Works like :meth:`get_template` but tries a number of templates
before it fails. If it cannot find any of the templates, it will
raise a :exc:`TemplatesNotFound` exception.
.. versionadded:: 2.3
.. versionchanged:: 2.4
If `names` contains a :class:`Template` object it is returned
from the function unchanged.
"""
if not names:
raise TemplatesNotFound(message=u'Tried to select from an empty list '
u'of templates.')
globals = self.make_globals(globals)
for name in names:
if isinstance(name, Template):
return name
if parent is not None:
name = self.join_path(name, parent)
try:
return self._load_template(name, globals)
except TemplateNotFound:
pass
raise TemplatesNotFound(names)
@internalcode
def get_or_select_template(self, template_name_or_list,
parent=None, globals=None):
"""Does a typecheck and dispatches to :meth:`select_template`
if an iterable of template names is given, otherwise to
:meth:`get_template`.
.. versionadded:: 2.3
"""
if isinstance(template_name_or_list, basestring):
return self.get_template(template_name_or_list, parent, globals)
elif isinstance(template_name_or_list, Template):
return template_name_or_list
return self.select_template(template_name_or_list, parent, globals)
def from_string(self, source, globals=None, template_class=None):
"""Load a template from a string. This parses the source given and
returns a :class:`Template` object.
"""
globals = self.make_globals(globals)
cls = template_class or self.template_class
return cls.from_code(self, self.compile(source), globals, None)
def make_globals(self, d):
"""Return a dict for the globals."""
if not d:
return self.globals
return dict(self.globals, **d)
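# Illustrative usage sketch (not part of the original module): a typical round
# trip through an Environment with an in-memory loader. The template name and
# variables are hypothetical.
def _example_environment_usage():
    from jinja2 import Environment, DictLoader
    env = Environment(loader=DictLoader({'hello.html': u'Hello {{ name }}!'}))
    template = env.get_template('hello.html')
    rendered = template.render(name=u'World')            # u'Hello World!'
    inline = env.from_string(u'{{ 40 + 2 }}').render()   # u'42'
    return rendered, inline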
class Template(object):
"""The central template object. This class represents a compiled template
and is used to evaluate it.
Normally the template object is generated from an :class:`Environment` but
it also has a constructor that makes it possible to create a template
instance directly using the constructor. It takes the same arguments as
the environment constructor but it's not possible to specify a loader.
Every template object has a few methods and members that are guaranteed
to exist. However it's important that a template object should be
considered immutable. Modifications on the object are not supported.
Template objects created from the constructor rather than an environment
do have an `environment` attribute that points to a temporary environment
that is probably shared with other templates created with the constructor
and compatible settings.
>>> template = Template('Hello {{ name }}!')
>>> template.render(name='John Doe')
u'Hello John Doe!'
>>> stream = template.stream(name='John Doe')
>>> stream.next()
u'Hello John Doe!'
>>> stream.next()
Traceback (most recent call last):
...
StopIteration
"""
def __new__(cls, source,
block_start_string=BLOCK_START_STRING,
block_end_string=BLOCK_END_STRING,
variable_start_string=VARIABLE_START_STRING,
variable_end_string=VARIABLE_END_STRING,
comment_start_string=COMMENT_START_STRING,
comment_end_string=COMMENT_END_STRING,
line_statement_prefix=LINE_STATEMENT_PREFIX,
line_comment_prefix=LINE_COMMENT_PREFIX,
trim_blocks=TRIM_BLOCKS,
newline_sequence=NEWLINE_SEQUENCE,
extensions=(),
optimized=True,
undefined=Undefined,
finalize=None,
autoescape=False):
env = get_spontaneous_environment(
block_start_string, block_end_string, variable_start_string,
variable_end_string, comment_start_string, comment_end_string,
line_statement_prefix, line_comment_prefix, trim_blocks,
newline_sequence, frozenset(extensions), optimized, undefined,
finalize, autoescape, None, 0, False, None)
return env.from_string(source, template_class=cls)
@classmethod
def from_code(cls, environment, code, globals, uptodate=None):
"""Creates a template object from compiled code and the globals. This
is used by the loaders and environment to create a template object.
"""
namespace = {
'environment': environment,
'__file__': code.co_filename
}
exec code in namespace
rv = cls._from_namespace(environment, namespace, globals)
rv._uptodate = uptodate
return rv
@classmethod
def from_module_dict(cls, environment, module_dict, globals):
"""Creates a template object from a module. This is used by the
module loader to create a template object.
.. versionadded:: 2.4
"""
return cls._from_namespace(environment, module_dict, globals)
@classmethod
def _from_namespace(cls, environment, namespace, globals):
t = object.__new__(cls)
t.environment = environment
t.globals = globals
t.name = namespace['name']
t.filename = namespace['__file__']
t.blocks = namespace['blocks']
# render function and module
t.root_render_func = namespace['root']
t._module = None
# debug and loader helpers
t._debug_info = namespace['debug_info']
t._uptodate = None
# store the reference
namespace['environment'] = environment
namespace['__jinja_template__'] = t
return t
def render(self, *args, **kwargs):
"""This method accepts the same arguments as the `dict` constructor:
A dict, a dict subclass or some keyword arguments. If no arguments
are given the context will be empty. These two calls do the same::
template.render(knights='that say nih')
template.render({'knights': 'that say nih'})
This will return the rendered template as unicode string.
"""
vars = dict(*args, **kwargs)
try:
return concat(self.root_render_func(self.new_context(vars)))
except:
exc_info = sys.exc_info()
return self.environment.handle_exception(exc_info, True)
def stream(self, *args, **kwargs):
"""Works exactly like :meth:`generate` but returns a
:class:`TemplateStream`.
"""
return TemplateStream(self.generate(*args, **kwargs))
def generate(self, *args, **kwargs):
"""For very large templates it can be useful to not render the whole
template at once but evaluate each statement after another and yield
piece for piece. This method basically does exactly that and returns
a generator that yields one item after another as unicode strings.
It accepts the same arguments as :meth:`render`.
"""
vars = dict(*args, **kwargs)
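# If rendering raises, fall through to the environment's exception handler
# and yield whatever it returns (it usually re-raises with a rewritten
# traceback); the `else: return` makes sure the handler only runs when an
# exception actually occurred.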
try:
for event in self.root_render_func(self.new_context(vars)):
yield event
except:
exc_info = sys.exc_info()
else:
return
yield self.environment.handle_exception(exc_info, True)
def new_context(self, vars=None, shared=False, locals=None):
"""Create a new :class:`Context` for this template. The vars
provided will be passed to the template. By default the globals
are added to the context. If shared is set to `True` the data
is passed as is to the context without adding the globals.
`locals` can be a dict of local variables for internal usage.
"""
return new_context(self.environment, self.name, self.blocks,
vars, shared, self.globals, locals)
def make_module(self, vars=None, shared=False, locals=None):
"""This method works like the :attr:`module` attribute when called
without arguments but it will evaluate the template on every call
rather than caching it. It's also possible to provide
a dict which is then used as context. The arguments are the same
as for the :meth:`new_context` method.
"""
return TemplateModule(self, self.new_context(vars, shared, locals))
@property
def module(self):
"""The template as module. This is used for imports in the
template runtime but is also useful if one wants to access
exported template variables from the Python layer:
>>> t = Template('{% macro foo() %}42{% endmacro %}23')
>>> unicode(t.module)
u'23'
>>> t.module.foo()
u'42'
"""
if self._module is not None:
return self._module
self._module = rv = self.make_module()
return rv
def get_corresponding_lineno(self, lineno):
"""Return the source line number of a line number in the
generated bytecode as they are not in sync.
"""
for template_line, code_line in reversed(self.debug_info):
if code_line <= lineno:
return template_line
return 1
@property
def is_up_to_date(self):
"""If this variable is `False` there is a newer version available."""
if self._uptodate is None:
return True
return self._uptodate()
@property
def debug_info(self):
"""The debug info mapping."""
return [tuple(map(int, x.split('='))) for x in
self._debug_info.split('&')]
def __repr__(self):
if self.name is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.name)
return '<%s %s>' % (self.__class__.__name__, name)
class TemplateModule(object):
"""Represents an imported template. All the exported names of the
template are available as attributes on this object. Additionally,
converting it to a unicode or byte string renders the contents.
"""
def __init__(self, template, context):
self._body_stream = list(template.root_render_func(context))
self.__dict__.update(context.get_exported())
self.__name__ = template.name
def __html__(self):
return Markup(concat(self._body_stream))
def __str__(self):
return unicode(self).encode('utf-8')
# unicode goes after __str__ because we configured 2to3 to rename
# __unicode__ to __str__. because the 2to3 tree is not designed to
# remove nodes from it, we leave the above __str__ around and let
# it override at runtime.
def __unicode__(self):
return concat(self._body_stream)
def __repr__(self):
if self.__name__ is None:
name = 'memory:%x' % id(self)
else:
name = repr(self.__name__)
return '<%s %s>' % (self.__class__.__name__, name)
class TemplateExpression(object):
"""The :meth:`jinja2.Environment.compile_expression` method returns an
instance of this object. It encapsulates the expression-like access
to the template with an expression it wraps.
"""
def __init__(self, template, undefined_to_none):
self._template = template
self._undefined_to_none = undefined_to_none
def __call__(self, *args, **kwargs):
context = self._template.new_context(dict(*args, **kwargs))
consume(self._template.root_render_func(context))
rv = context.vars['result']
if self._undefined_to_none and isinstance(rv, Undefined):
rv = None
return rv
class TemplateStream(object):
"""A template stream works pretty much like an ordinary python generator
but it can buffer multiple items to reduce the number of total iterations.
By default the output is unbuffered, which means that one unicode string
is yielded for every unbuffered instruction in the template.
If buffering is enabled with a buffer size of 5, five items are combined
into a new unicode string. This is mainly useful if you are streaming
big templates to a client via WSGI which flushes after each iteration.
"""
def __init__(self, gen):
self._gen = gen
self.disable_buffering()
def dump(self, fp, encoding=None, errors='strict'):
"""Dump the complete stream into a file or file-like object.
By default unicode strings are written; if you want to encode
before writing, specify an `encoding`.
Example usage::
Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
"""
close = False
if isinstance(fp, basestring):
fp = file(fp, 'w')
close = True
try:
if encoding is not None:
iterable = (x.encode(encoding, errors) for x in self)
else:
iterable = self
if hasattr(fp, 'writelines'):
fp.writelines(iterable)
else:
for item in iterable:
fp.write(item)
finally:
if close:
fp.close()
def disable_buffering(self):
"""Disable the output buffering."""
self._next = self._gen.next
self.buffered = False
def enable_buffering(self, size=5):
"""Enable buffering. Buffer `size` items before yielding them."""
if size <= 1:
raise ValueError('buffer size too small')
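# The wrapped generator pulls items until `size` non-empty strings have
# been collected (empty strings are buffered but not counted), yields them
# concatenated into one unicode string, and flushes any partial buffer when
# the underlying generator is exhausted.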
def generator(next):
buf = []
c_size = 0
push = buf.append
while 1:
try:
while c_size < size:
c = next()
push(c)
if c:
c_size += 1
except StopIteration:
if not c_size:
return
yield concat(buf)
del buf[:]
c_size = 0
self.buffered = True
self._next = generator(self._gen.next).next
def __iter__(self):
return self
def next(self):
return self._next()
# hook in default template class. if anyone reads this comment: ignore that
# it's possible to use custom templates ;-)
Environment.template_class = Template
| apache-2.0 |
smartshark/serverSHARK | smartshark/urls.py | 1 | 1884 | from django.conf.urls import url
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.views import login, logout
from smartshark.views import analysis, common, collection, visualizations, remote
urlpatterns = [
# Frontend
url(r'^login/$', login, name='mysite_login'),
url(r'^logout/$', logout, {'next_page': reverse_lazy('index')}, name='mysite_logout'),
url(r'^$', common.index, name='index'),
url(r'^documentation/$', common.documentation, name='documentation'),
url(r'^visualizations/overview/$', visualizations.overview, name='overview'),
url(r'^spark/submit/$', analysis.spark_submit, name='spark_submit'),
# Backend
url(r'^smartshark/project/collection/choose/$', collection.choose_plugins, name='choose_plugins'),
url(r'^smartshark/project/collection/start/$', collection.start_collection, name='collection_start'),
url(r'^smartshark/project/delete/$', collection.delete_project_data, name='project_delete_data'),
url(r'^admin/smartshark/project/plugin_status/(?P<id>[0-9]+)$', common.plugin_status, name='plugin_status'),
url(r'^admin/smartshark/project/plugin_execution/(?P<id>[0-9]+)$', common.plugin_execution_status, name='plugin_execution_status'),
url(r'^admin/smartshark/project/job/(?P<id>[0-9]+)/(?P<type>[a-z]+)$', common.job_output, name='job_output'),
url(r'^smartshark/plugin/install/$', collection.install, name='install'),
url(r'^smartshark/plugin/github/install', collection.installgithub, name='view'),
# remote additions
url(r'^remote/test/$', remote.test_connection, name='remote_test_connection'),
url(r'^remote/plugin/$', remote.list_plugins, name='remote_list_plugins'),
url(r'^remote/argument/$', remote.list_arguments, name='remote_list_plugin_arguments'),
url(r'^remote/collect/$', remote.start_collection, name='remote_start_collection')
]
| apache-2.0 |
pawaranand/phr_frappe | frappe/website/statics.py | 17 | 5584 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os, time
def sync_statics(rebuild=False):
s = sync()
s.verbose = True
# s.start(rebuild)
# frappe.db.commit()
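# Sync continuously; `rebuild` is only honoured on the first pass and is
# reset below so later passes are incremental syncs every two seconds.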
while True:
s.start(rebuild)
frappe.db.commit()
time.sleep(2)
rebuild = False
class sync(object):
def __init__(self, verbose=False):
self.verbose = verbose
def start(self, rebuild=False):
self.synced = []
self.synced_paths = []
self.updated = 0
if rebuild:
frappe.db.sql("delete from `tabWeb Page` where ifnull(template_path, '')!=''")
for app in frappe.get_installed_apps():
self.sync_for_app(app)
self.cleanup()
def sync_for_app(self, app):
self.statics_path = frappe.get_app_path(app, "templates", "statics")
if os.path.exists(self.statics_path):
for basepath, folders, files in os.walk(self.statics_path):
self.sync_folder(basepath, folders, files)
def sync_folder(self, basepath, folders, files):
self.get_index_txt(basepath, files)
index_found = self.sync_index_page(basepath, files)
if not index_found and basepath!=self.statics_path:
# not synced either by generator or by index.html
return
if self.index:
self.sync_using_given_index(basepath, folders, files)
else:
self.sync_alphabetically(basepath, folders, [filename for filename in files if filename.endswith('html') or filename.endswith('md')])
def get_index_txt(self, basepath, files):
self.index = []
if "index.txt" in files:
with open(os.path.join(basepath, "index.txt"), "r") as indexfile:
self.index = indexfile.read().splitlines()
def sync_index_page(self, basepath, files):
for extn in ("md", "html"):
fname = "index." + extn
if fname in files:
self.sync_file(fname, os.path.join(basepath, fname), None)
return True
def sync_using_given_index(self, basepath, folders, files):
for i, page_name in enumerate(self.index):
if page_name in folders:
# for folder, sync inner index first (so that idx is set)
for extn in ("md", "html"):
path = os.path.join(basepath, page_name, "index." + extn)
if os.path.exists(path):
self.sync_file("index." + extn, path, i)
break
# other files
if page_name + ".md" in files:
self.sync_file(page_name + ".md", os.path.join(basepath, page_name + ".md"), i)
elif page_name + ".html" in files:
self.sync_file(page_name + ".html", os.path.join(basepath, page_name + ".html"), i)
else:
if page_name not in folders:
print page_name + " not found in " + basepath
def sync_alphabetically(self, basepath, folders, files):
files.sort()
for fname in files:
page_name = fname.rsplit(".", 1)[0]
if not (page_name=="index" and basepath!=self.statics_path):
self.sync_file(fname, os.path.join(basepath, fname), None)
def sync_file(self, fname, template_path, priority):
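# The route is the file's path relative to the statics folder without its
# extension; an index file stands for its parent folder, so its route is the
# folder itself.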
route = os.path.relpath(template_path, self.statics_path).rsplit(".", 1)[0]
if fname.rsplit(".", 1)[0]=="index" and \
os.path.dirname(template_path) != self.statics_path:
route = os.path.dirname(route)
parent_web_page = frappe.db.sql("""select name from `tabWeb Page` where
page_name=%s and ifnull(parent_website_route, '')=ifnull(%s, '')""",
(os.path.basename(os.path.dirname(route)), os.path.dirname(os.path.dirname(route))))
parent_web_page = parent_web_page and parent_web_page[0][0] or ""
page_name = os.path.basename(route)
published = 1
idx = priority
if (parent_web_page, page_name) in self.synced:
return
title = self.get_title(template_path)
if not frappe.db.get_value("Web Page", {"template_path":template_path}):
web_page = frappe.new_doc("Web Page")
web_page.page_name = page_name
web_page.parent_web_page = parent_web_page
web_page.template_path = template_path
web_page.title = title
web_page.published = published
web_page.idx = idx
web_page.from_website_sync = True
web_page.insert()
if self.verbose: print "Inserted: " + web_page.name
else:
web_page = frappe.get_doc("Web Page", {"template_path":template_path})
dirty = False
for key in ("parent_web_page", "title", "template_path", "published", "idx"):
if web_page.get(key) != locals().get(key):
web_page.set(key, locals().get(key))
dirty = True
if dirty:
web_page.from_website_sync = True
web_page.save()
if self.verbose: print "Updated: " + web_page.name
self.synced.append((parent_web_page, page_name))
def get_title(self, fpath):
title = os.path.basename(fpath).rsplit(".", 1)[0]
if title =="index":
title = os.path.basename(os.path.dirname(fpath))
title = title.replace("-", " ").replace("_", " ").title()
with open(fpath, "r") as f:
content = unicode(f.read().strip(), "utf-8")
if content.startswith("# "):
title = content.splitlines()[0][2:]
if "<!-- title:" in content:
title = content.split("<!-- title:", 1)[1].split("-->", 1)[0].strip()
return title
def cleanup(self):
if self.synced:
# delete static web pages that are not in immediate list
for static_page in frappe.db.sql("""select name, page_name, parent_web_page
from `tabWeb Page` where ifnull(template_path,'')!=''""", as_dict=1):
if (static_page.parent_web_page, static_page.page_name) not in self.synced:
frappe.delete_doc("Web Page", static_page.name, force=1)
else:
# delete all static web pages
frappe.delete_doc("Web Page", frappe.db.sql_list("""select name
from `tabWeb Page`
where ifnull(template_path,'')!=''"""), force=1)
| mit |
kbran420/android-quill | jni/libhpdf-2.3.0RC2/if/python/demo/slide_show_demo.py | 32 | 6231 | ###
## * << Haru Free PDF Library 2.0.6 >> -- slideshow_demo.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <[email protected]>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
import sys
path=os.path.normpath(os.path.split(os.path.realpath(__file__))[0]+'\..'*up)
if path not in sys.path:
sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
import random
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
global pdf
printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
detail_no)
HPDF_Free (pdf)
sys.exit(1)
def print_page (page, caption, font, style, prev, next):
r = random.random()
g = random.random()
b = random.random()
rect=HPDF_Rect()
HPDF_Page_SetWidth (page, 800)
HPDF_Page_SetHeight (page, 600)
HPDF_Page_SetRGBFill (page, r, g, b)
HPDF_Page_Rectangle (page, 0, 0, 800, 600)
HPDF_Page_Fill (page)
HPDF_Page_SetRGBFill (page, 1.0 - r, 1.0 - g, 1.0 - b)
HPDF_Page_SetFontAndSize (page, font, 30)
HPDF_Page_BeginText (page)
HPDF_Page_SetTextMatrix (page, 0.8, 0.0, 0.0, 1.0, 0.0, 0.0)
HPDF_Page_TextOut (page, 50, 530, caption)
HPDF_Page_SetTextMatrix (page, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0)
HPDF_Page_SetFontAndSize (page, font, 20)
HPDF_Page_TextOut (page, 55, 300,
"Type \"Ctrl+L\" in order to return from full screen mode.")
HPDF_Page_EndText (page)
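# The numeric arguments to SetSlideShow are (presumably) the page display
# time and the transition duration, both in seconds.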
HPDF_Page_SetSlideShow (page, style, 5.0, 1.0)
HPDF_Page_SetFontAndSize (page, font, 20)
if (next):
HPDF_Page_BeginText (page)
HPDF_Page_TextOut (page, 680, 50, "Next=>")
HPDF_Page_EndText (page)
rect.left = 680
rect.right = 750
rect.top = 70
rect.bottom = 50
dst = HPDF_Page_CreateDestination (next)
HPDF_Destination_SetFit(dst)
annot = HPDF_Page_CreateLinkAnnot (page, rect, dst)
HPDF_LinkAnnot_SetBorderStyle (annot, 0, 0, 0)
HPDF_LinkAnnot_SetHighlightMode (annot, HPDF_ANNOT_INVERT_BOX)
if (prev):
HPDF_Page_BeginText (page)
HPDF_Page_TextOut (page, 50, 50, "<=Prev")
HPDF_Page_EndText (page)
rect.left = 50
rect.right = 110
rect.top = 70
rect.bottom = 50
dst = HPDF_Page_CreateDestination (prev)
HPDF_Destination_SetFit(dst)
annot = HPDF_Page_CreateLinkAnnot (page, rect, dst)
HPDF_LinkAnnot_SetBorderStyle (annot, 0, 0, 0)
HPDF_LinkAnnot_SetHighlightMode (annot, HPDF_ANNOT_INVERT_BOX)
def main():
global pdf
page=[None for i in range(17)]
fname=os.path.realpath(sys.argv[0])
fname=fname[:fname.rfind('.')]+'.pdf'
pdf = HPDF_New (error_handler, NULL)
if (not pdf):
printf ("error: cannot create PdfDoc object\n")
return 1
# create default-font
font = HPDF_GetFont (pdf, "Courier", NULL)
# Add 17 pages to the document.
page[0] = HPDF_AddPage (pdf)
page[1] = HPDF_AddPage (pdf)
page[2] = HPDF_AddPage (pdf)
page[3] = HPDF_AddPage (pdf)
page[4] = HPDF_AddPage (pdf)
page[5] = HPDF_AddPage (pdf)
page[6] = HPDF_AddPage (pdf)
page[7] = HPDF_AddPage (pdf)
page[8] = HPDF_AddPage (pdf)
page[9] = HPDF_AddPage (pdf)
page[10] = HPDF_AddPage (pdf)
page[11] = HPDF_AddPage (pdf)
page[12] = HPDF_AddPage (pdf)
page[13] = HPDF_AddPage (pdf)
page[14] = HPDF_AddPage (pdf)
page[15] = HPDF_AddPage (pdf)
page[16] = HPDF_AddPage (pdf)
print_page(page[0], "HPDF_TS_WIPE_RIGHT", font,
HPDF_TS_WIPE_RIGHT, NULL, page[1])
print_page(page[1], "HPDF_TS_WIPE_UP", font,
HPDF_TS_WIPE_UP, page[0], page[2])
print_page(page[2], "HPDF_TS_WIPE_LEFT", font,
HPDF_TS_WIPE_LEFT, page[1], page[3])
print_page(page[3], "HPDF_TS_WIPE_DOWN", font,
HPDF_TS_WIPE_DOWN, page[2], page[4])
print_page(page[4], "HPDF_TS_BARN_DOORS_HORIZONTAL_OUT", font,
HPDF_TS_BARN_DOORS_HORIZONTAL_OUT, page[3], page[5])
print_page(page[5], "HPDF_TS_BARN_DOORS_HORIZONTAL_IN", font,
HPDF_TS_BARN_DOORS_HORIZONTAL_IN, page[4], page[6])
print_page(page[6], "HPDF_TS_BARN_DOORS_VERTICAL_OUT", font,
HPDF_TS_BARN_DOORS_VERTICAL_OUT, page[5], page[7])
print_page(page[7], "HPDF_TS_BARN_DOORS_VERTICAL_IN", font,
HPDF_TS_BARN_DOORS_VERTICAL_IN, page[6], page[8])
print_page(page[8], "HPDF_TS_BOX_OUT", font,
HPDF_TS_BOX_OUT, page[7], page[9])
print_page(page[9], "HPDF_TS_BOX_IN", font,
HPDF_TS_BOX_IN, page[8], page[10])
print_page(page[10], "HPDF_TS_BLINDS_HORIZONTAL", font,
HPDF_TS_BLINDS_HORIZONTAL, page[9], page[11])
print_page(page[11], "HPDF_TS_BLINDS_VERTICAL", font,
HPDF_TS_BLINDS_VERTICAL, page[10], page[12])
print_page(page[12], "HPDF_TS_DISSOLVE", font,
HPDF_TS_DISSOLVE, page[11], page[13])
print_page(page[13], "HPDF_TS_GLITTER_RIGHT", font,
HPDF_TS_GLITTER_RIGHT, page[12], page[14])
print_page(page[14], "HPDF_TS_GLITTER_DOWN", font,
HPDF_TS_GLITTER_DOWN, page[13], page[15])
print_page(page[15], "HPDF_TS_GLITTER_TOP_LEFT_TO_BOTTOM_RIGHT", font,
HPDF_TS_GLITTER_TOP_LEFT_TO_BOTTOM_RIGHT, page[14], page[16])
print_page(page[16], "HPDF_TS_REPLACE", font,
HPDF_TS_REPLACE, page[15], NULL)
HPDF_SetPageMode (pdf, HPDF_PAGE_MODE_FULL_SCREEN)
# save the document to a file
HPDF_SaveToFile (pdf, fname)
# clean up
HPDF_Free (pdf)
return 0
main() | gpl-3.0 |
thepug/Speeqe | speeqeweb/speeqe/templatetags/splib.py | 1 | 1687 | from django.contrib.sites.models import Site
from django.template import Library, Node
import speeqeweb.xmpp.muc as muc
import speeqeweb.settings as settings
register = Library()
@register.simple_tag
def current_domain():
return settings.HTTP_DOMAIN
#return all active muc rooms
class ActiveRoomsNode(Node):
""" return all active muc rooms """
def render(self, context):
try:
context['rooms'] = muc.listrooms()[:5]
except:
pass
return ''
@register.tag(name="show_rooms")
def show_rooms(parser,token):
return ActiveRoomsNode()
class Room:
pass
class FeaturedRoomsNode(Node):
def __init__(self):
"""do I need this?"""
pass
def render(self, context):
try:
featured_rooms = []
for key in settings.FEATURED_ROOMS.keys():
room = Room()
room.name = key
room.url = settings.FEATURED_ROOMS[key]
featured_rooms.append(room)
context['featuredrooms'] = featured_rooms
except:
pass
return ''
@register.tag(name="show_featured_rooms")
def show_featured_rooms(parser,token):
return FeaturedRoomsNode()
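# Illustrative only: FEATURED_ROOMS is assumed to map a room name to the URL
# it should link to, e.g. in settings.py:
#
# FEATURED_ROOMS = {'lobby': 'http://rooms.example.com/lobby/'}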
@register.simple_tag
def help_email():
return settings.HELP_EMAIL
class DnsRoomNamesNode(Node):
""" return setting that the dns trick for room names is being used """
def render(self, context):
try:
context['dns_room_names'] = settings.DNS_ROOM_NAMES
except:
pass
return ''
@register.tag(name="use_dns_room_names")
def use_dns_room_names(parser,token):
return DnsRoomNamesNode()
| agpl-3.0 |
calberti/models | resnet/resnet_main.py | 2 | 7036 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet Train/Eval module.
"""
import sys
import time
import cifar_input
import numpy as np
import resnet_model
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('dataset', 'cifar10', 'cifar10 or cifar100.')
tf.app.flags.DEFINE_string('mode', 'train', 'train or eval.')
tf.app.flags.DEFINE_string('train_data_path', '', 'Filename for training data.')
tf.app.flags.DEFINE_string('eval_data_path', '', 'Filename for eval data')
tf.app.flags.DEFINE_integer('image_size', 32, 'Image side length.')
tf.app.flags.DEFINE_string('train_dir', '',
'Directory to keep training outputs.')
tf.app.flags.DEFINE_string('eval_dir', '',
'Directory to keep eval outputs.')
tf.app.flags.DEFINE_integer('eval_batch_count', 50,
'Number of batches to eval.')
tf.app.flags.DEFINE_bool('eval_once', False,
'Whether evaluate the model only once.')
tf.app.flags.DEFINE_string('log_root', '',
'Directory to keep the checkpoints. Should be a '
'parent directory of FLAGS.train_dir/eval_dir.')
tf.app.flags.DEFINE_integer('num_gpus', 0,
'Number of gpus used for training. (0 or 1)')
def train(hps):
"""Training loop."""
images, labels = cifar_input.build_input(
FLAGS.dataset, FLAGS.train_data_path, hps.batch_size, FLAGS.mode)
model = resnet_model.ResNet(hps, images, labels, FLAGS.mode)
model.build_graph()
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir)
sv = tf.train.Supervisor(logdir=FLAGS.log_root,
is_chief=True,
summary_op=None,
save_summaries_secs=60,
save_model_secs=300,
global_step=model.global_step)
sess = sv.prepare_or_wait_for_session()
step = 0
total_prediction = 0
correct_prediction = 0
precision = 0.0
lrn_rate = 0.1
while not sv.should_stop():
(_, summaries, loss, predictions, truth, train_step) = sess.run(
[model.train_op, model.summaries, model.cost, model.predictions,
model.labels, model.global_step],
feed_dict={model.lrn_rate: lrn_rate})
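# Piecewise-constant learning rate schedule keyed off the global step:
# 0.1 until 40k steps, then 0.01, 0.001 and finally 0.0001.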
if train_step < 40000:
lrn_rate = 0.1
elif train_step < 60000:
lrn_rate = 0.01
elif train_step < 80000:
lrn_rate = 0.001
else:
lrn_rate = 0.0001
predictions = np.argmax(predictions, axis=1)
truth = np.argmax(truth, axis=1)
for (t, p) in zip(truth, predictions):
if t == p:
correct_prediction += 1
total_prediction += 1
precision = float(correct_prediction) / total_prediction
correct_prediction = total_prediction = 0
step += 1
if step % 100 == 0:
precision_summ = tf.Summary()
precision_summ.value.add(
tag='Precision', simple_value=precision)
summary_writer.add_summary(precision_summ, train_step)
summary_writer.add_summary(summaries, train_step)
tf.logging.info('loss: %.3f, precision: %.3f\n' % (loss, precision))
summary_writer.flush()
sv.Stop()
def evaluate(hps):
"""Eval loop."""
images, labels = cifar_input.build_input(
FLAGS.dataset, FLAGS.eval_data_path, hps.batch_size, FLAGS.mode)
model = resnet_model.ResNet(hps, images, labels, FLAGS.mode)
model.build_graph()
saver = tf.train.Saver()
summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
tf.train.start_queue_runners(sess)
best_precision = 0.0
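# Poll the log directory once a minute, restore the newest checkpoint and
# evaluate FLAGS.eval_batch_count batches against it, tracking the best
# precision seen so far.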
while True:
time.sleep(60)
try:
ckpt_state = tf.train.get_checkpoint_state(FLAGS.log_root)
except tf.errors.OutOfRangeError as e:
tf.logging.error('Cannot restore checkpoint: %s', e)
continue
if not (ckpt_state and ckpt_state.model_checkpoint_path):
tf.logging.info('No model to eval yet at %s', FLAGS.log_root)
continue
tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)
saver.restore(sess, ckpt_state.model_checkpoint_path)
total_prediction, correct_prediction = 0, 0
for _ in xrange(FLAGS.eval_batch_count):
(summaries, loss, predictions, truth, train_step) = sess.run(
[model.summaries, model.cost, model.predictions,
model.labels, model.global_step])
best_predictions = np.argmax(predictions, axis=1)
truth = np.argmax(truth, axis=1)
for (t, p) in zip(truth, best_predictions):
if t == p:
correct_prediction += 1
total_prediction += 1
precision = 1.0 * correct_prediction / total_prediction
best_precision = max(precision, best_precision)
precision_summ = tf.Summary()
precision_summ.value.add(
tag='Precision', simple_value=precision)
summary_writer.add_summary(precision_summ, train_step)
best_precision_summ = tf.Summary()
best_precision_summ.value.add(
tag='Best Precision', simple_value=best_precision)
summary_writer.add_summary(best_precision_summ, train_step)
summary_writer.add_summary(summaries, train_step)
tf.logging.info('loss: %.3f, precision: %.3f, best precision: %.3f\n' %
(loss, precision, best_precision))
summary_writer.flush()
if FLAGS.eval_once:
break
def main(_):
if FLAGS.num_gpus == 0:
dev = '/cpu:0'
elif FLAGS.num_gpus == 1:
dev = '/gpu:0'
else:
raise ValueError('Only support 0 or 1 gpu.')
if FLAGS.mode == 'train':
batch_size = 128
elif FLAGS.mode == 'eval':
batch_size = 100
if FLAGS.dataset == 'cifar10':
num_classes = 10
elif FLAGS.dataset == 'cifar100':
num_classes = 100
hps = resnet_model.HParams(batch_size=batch_size,
num_classes=num_classes,
min_lrn_rate=0.0001,
lrn_rate=0.1,
num_residual_units=5,
use_bottleneck=False,
weight_decay_rate=0.0002,
relu_leakiness=0.1,
optimizer='mom')
with tf.device(dev):
if FLAGS.mode == 'train':
train(hps)
elif FLAGS.mode == 'eval':
evaluate(hps)
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
thurt/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_datetime.py | 52 | 133995 | """Test date/time type.
See http://www.zope.org/Members/fdrake/DateTimeWiki/TestCases
"""
import os
import pickle
import cPickle
import unittest
from test import test_support
from datetime import MINYEAR, MAXYEAR
from datetime import timedelta
from datetime import tzinfo
from datetime import time
from datetime import date, datetime
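# Exercise every combination of pickle/cPickle as pickler and unpickler
# across protocols 0, 1 and 2 (2 * 2 * 3 = 12 combinations).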
pickle_choices = [(pickler, unpickler, proto)
for pickler in pickle, cPickle
for unpickler in pickle, cPickle
for proto in range(3)]
assert len(pickle_choices) == 2*2*3
# An arbitrary collection of objects of non-datetime types, for testing
# mixed-type comparisons.
OTHERSTUFF = (10, 10L, 34.5, "abc", {}, [], ())
#############################################################################
# module tests
class TestModule(unittest.TestCase):
def test_constants(self):
import datetime
self.assertEqual(datetime.MINYEAR, 1)
self.assertEqual(datetime.MAXYEAR, 9999)
#############################################################################
# tzinfo tests
class FixedOffset(tzinfo):
def __init__(self, offset, name, dstoffset=42):
if isinstance(offset, int):
offset = timedelta(minutes=offset)
if isinstance(dstoffset, int):
dstoffset = timedelta(minutes=dstoffset)
self.__offset = offset
self.__name = name
self.__dstoffset = dstoffset
def __repr__(self):
return self.__name.lower()
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return self.__dstoffset
class PicklableFixedOffset(FixedOffset):
def __init__(self, offset=None, name=None, dstoffset=None):
FixedOffset.__init__(self, offset, name, dstoffset)
class TestTZInfo(unittest.TestCase):
def test_non_abstractness(self):
# In order to allow subclasses to get pickled, the C implementation
# wasn't able to get away with having __init__ raise
# NotImplementedError.
useless = tzinfo()
dt = datetime.max
self.assertRaises(NotImplementedError, useless.tzname, dt)
self.assertRaises(NotImplementedError, useless.utcoffset, dt)
self.assertRaises(NotImplementedError, useless.dst, dt)
def test_subclass_must_override(self):
class NotEnough(tzinfo):
def __init__(self, offset, name):
self.__offset = offset
self.__name = name
self.failUnless(issubclass(NotEnough, tzinfo))
ne = NotEnough(3, "NotByALongShot")
self.failUnless(isinstance(ne, tzinfo))
dt = datetime.now()
self.assertRaises(NotImplementedError, ne.tzname, dt)
self.assertRaises(NotImplementedError, ne.utcoffset, dt)
self.assertRaises(NotImplementedError, ne.dst, dt)
def test_normal(self):
fo = FixedOffset(3, "Three")
self.failUnless(isinstance(fo, tzinfo))
for dt in datetime.now(), None:
self.assertEqual(fo.utcoffset(dt), timedelta(minutes=3))
self.assertEqual(fo.tzname(dt), "Three")
self.assertEqual(fo.dst(dt), timedelta(minutes=42))
def test_pickling_base(self):
# There's no point to pickling tzinfo objects on their own (they
# carry no data), but they need to be picklable anyway else
# concrete subclasses can't be pickled.
orig = tzinfo.__new__(tzinfo)
self.failUnless(type(orig) is tzinfo)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.failUnless(type(derived) is tzinfo)
def test_pickling_subclass(self):
# Make sure we can pickle/unpickle an instance of a subclass.
offset = timedelta(minutes=-300)
orig = PicklableFixedOffset(offset, 'cookie')
self.failUnless(isinstance(orig, tzinfo))
self.failUnless(type(orig) is PicklableFixedOffset)
self.assertEqual(orig.utcoffset(None), offset)
self.assertEqual(orig.tzname(None), 'cookie')
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.failUnless(isinstance(derived, tzinfo))
self.failUnless(type(derived) is PicklableFixedOffset)
self.assertEqual(derived.utcoffset(None), offset)
self.assertEqual(derived.tzname(None), 'cookie')
#############################################################################
# Base class for testing a particular aspect of timedelta, time, date and
# datetime comparisons.
class HarmlessMixedComparison:
# Test that __eq__ and __ne__ don't complain for mixed-type comparisons.
# Subclasses must define 'theclass', and theclass(1, 1, 1) must be a
# legit constructor.
def test_harmless_mixed_comparison(self):
me = self.theclass(1, 1, 1)
self.failIf(me == ())
self.failUnless(me != ())
self.failIf(() == me)
self.failUnless(() != me)
self.failUnless(me in [1, 20L, [], me])
self.failIf(me not in [1, 20L, [], me])
self.failUnless([] in [me, 1, 20L, []])
self.failIf([] not in [me, 1, 20L, []])
def test_harmful_mixed_comparison(self):
me = self.theclass(1, 1, 1)
self.assertRaises(TypeError, lambda: me < ())
self.assertRaises(TypeError, lambda: me <= ())
self.assertRaises(TypeError, lambda: me > ())
self.assertRaises(TypeError, lambda: me >= ())
self.assertRaises(TypeError, lambda: () < me)
self.assertRaises(TypeError, lambda: () <= me)
self.assertRaises(TypeError, lambda: () > me)
self.assertRaises(TypeError, lambda: () >= me)
self.assertRaises(TypeError, cmp, (), me)
self.assertRaises(TypeError, cmp, me, ())
#############################################################################
# timedelta tests
class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase):
theclass = timedelta
def test_constructor(self):
eq = self.assertEqual
td = timedelta
# Check keyword args to constructor
eq(td(), td(weeks=0, days=0, hours=0, minutes=0, seconds=0,
milliseconds=0, microseconds=0))
eq(td(1), td(days=1))
eq(td(0, 1), td(seconds=1))
eq(td(0, 0, 1), td(microseconds=1))
eq(td(weeks=1), td(days=7))
eq(td(days=1), td(hours=24))
eq(td(hours=1), td(minutes=60))
eq(td(minutes=1), td(seconds=60))
eq(td(seconds=1), td(milliseconds=1000))
eq(td(milliseconds=1), td(microseconds=1000))
# Check float args to constructor
eq(td(weeks=1.0/7), td(days=1))
eq(td(days=1.0/24), td(hours=1))
eq(td(hours=1.0/60), td(minutes=1))
eq(td(minutes=1.0/60), td(seconds=1))
eq(td(seconds=0.001), td(milliseconds=1))
eq(td(milliseconds=0.001), td(microseconds=1))
def test_computations(self):
eq = self.assertEqual
td = timedelta
a = td(7) # One week
b = td(0, 60) # One minute
c = td(0, 0, 1000) # One millisecond
eq(a+b+c, td(7, 60, 1000))
eq(a-b, td(6, 24*3600 - 60))
eq(-a, td(-7))
eq(+a, td(7))
eq(-b, td(-1, 24*3600 - 60))
eq(-c, td(-1, 24*3600 - 1, 999000))
eq(abs(a), a)
eq(abs(-a), a)
eq(td(6, 24*3600), a)
eq(td(0, 0, 60*1000000), b)
eq(a*10, td(70))
eq(a*10, 10*a)
eq(a*10L, 10*a)
eq(b*10, td(0, 600))
eq(10*b, td(0, 600))
eq(b*10L, td(0, 600))
eq(c*10, td(0, 0, 10000))
eq(10*c, td(0, 0, 10000))
eq(c*10L, td(0, 0, 10000))
eq(a*-1, -a)
eq(b*-2, -b-b)
eq(c*-2, -c+-c)
eq(b*(60*24), (b*60)*24)
eq(b*(60*24), (60*b)*24)
eq(c*1000, td(0, 1))
eq(1000*c, td(0, 1))
eq(a//7, td(1))
eq(b//10, td(0, 6))
eq(c//1000, td(0, 0, 1))
eq(a//10, td(0, 7*24*360))
eq(a//3600000, td(0, 0, 7*24*1000))
def test_disallowed_computations(self):
a = timedelta(42)
# Add/sub ints, longs, floats should be illegal
for i in 1, 1L, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# Mul/div by float isn't supported.
x = 2.3
self.assertRaises(TypeError, lambda: a*x)
self.assertRaises(TypeError, lambda: x*a)
self.assertRaises(TypeError, lambda: a/x)
self.assertRaises(TypeError, lambda: x/a)
self.assertRaises(TypeError, lambda: a // x)
self.assertRaises(TypeError, lambda: x // a)
# Division of int by timedelta doesn't make sense.
# Division by zero doesn't make sense.
for zero in 0, 0L:
self.assertRaises(TypeError, lambda: zero // a)
self.assertRaises(ZeroDivisionError, lambda: a // zero)
def test_basic_attributes(self):
days, seconds, us = 1, 7, 31
td = timedelta(days, seconds, us)
self.assertEqual(td.days, days)
self.assertEqual(td.seconds, seconds)
self.assertEqual(td.microseconds, us)
def test_carries(self):
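# The pieces below cancel out except for a single microsecond:
# 100 days - 7 weeks - 51*24 hours = 0 days, and
# -3 minutes + 12 seconds + (3*60 - 12) seconds = 0, leaving just the +1 us.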
t1 = timedelta(days=100,
weeks=-7,
hours=-24*(100-49),
minutes=-3,
seconds=12,
microseconds=(3*60 - 12) * 1e6 + 1)
t2 = timedelta(microseconds=1)
self.assertEqual(t1, t2)
def test_hash_equality(self):
t1 = timedelta(days=100,
weeks=-7,
hours=-24*(100-49),
minutes=-3,
seconds=12,
microseconds=(3*60 - 12) * 1000000)
t2 = timedelta()
self.assertEqual(hash(t1), hash(t2))
t1 += timedelta(weeks=7)
t2 += timedelta(days=7*7)
self.assertEqual(t1, t2)
self.assertEqual(hash(t1), hash(t2))
d = {t1: 1}
d[t2] = 2
self.assertEqual(len(d), 1)
self.assertEqual(d[t1], 2)
def test_pickling(self):
args = 12, 34, 56
orig = timedelta(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_compare(self):
t1 = timedelta(2, 3, 4)
t2 = timedelta(2, 3, 4)
self.failUnless(t1 == t2)
self.failUnless(t1 <= t2)
self.failUnless(t1 >= t2)
self.failUnless(not t1 != t2)
self.failUnless(not t1 < t2)
self.failUnless(not t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
t2 = timedelta(*args) # this is larger than t1
self.failUnless(t1 < t2)
self.failUnless(t2 > t1)
self.failUnless(t1 <= t2)
self.failUnless(t2 >= t1)
self.failUnless(t1 != t2)
self.failUnless(t2 != t1)
self.failUnless(not t1 == t2)
self.failUnless(not t2 == t1)
self.failUnless(not t1 > t2)
self.failUnless(not t2 < t1)
self.failUnless(not t1 >= t2)
self.failUnless(not t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 <= badarg)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_str(self):
td = timedelta
eq = self.assertEqual
eq(str(td(1)), "1 day, 0:00:00")
eq(str(td(-1)), "-1 day, 0:00:00")
eq(str(td(2)), "2 days, 0:00:00")
eq(str(td(-2)), "-2 days, 0:00:00")
eq(str(td(hours=12, minutes=58, seconds=59)), "12:58:59")
eq(str(td(hours=2, minutes=3, seconds=4)), "2:03:04")
eq(str(td(weeks=-30, hours=23, minutes=12, seconds=34)),
"-210 days, 23:12:34")
eq(str(td(milliseconds=1)), "0:00:00.001000")
eq(str(td(microseconds=3)), "0:00:00.000003")
eq(str(td(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)),
"999999999 days, 23:59:59.999999")
def test_roundtrip(self):
for td in (timedelta(days=999999999, hours=23, minutes=59,
seconds=59, microseconds=999999),
timedelta(days=-999999999),
timedelta(days=1, seconds=2, microseconds=3)):
# Verify td -> string -> td identity.
s = repr(td)
self.failUnless(s.startswith('datetime.'))
s = s[9:]
td2 = eval(s)
self.assertEqual(td, td2)
# Verify identity via reconstructing from pieces.
td2 = timedelta(td.days, td.seconds, td.microseconds)
self.assertEqual(td, td2)
def test_resolution_info(self):
self.assert_(isinstance(timedelta.min, timedelta))
self.assert_(isinstance(timedelta.max, timedelta))
self.assert_(isinstance(timedelta.resolution, timedelta))
self.assert_(timedelta.max > timedelta.min)
self.assertEqual(timedelta.min, timedelta(-999999999))
self.assertEqual(timedelta.max, timedelta(999999999, 24*3600-1, 1e6-1))
self.assertEqual(timedelta.resolution, timedelta(0, 0, 1))
def test_overflow(self):
tiny = timedelta.resolution
td = timedelta.min + tiny
td -= tiny # no problem
self.assertRaises(OverflowError, td.__sub__, tiny)
self.assertRaises(OverflowError, td.__add__, -tiny)
td = timedelta.max - tiny
td += tiny # no problem
self.assertRaises(OverflowError, td.__add__, tiny)
self.assertRaises(OverflowError, td.__sub__, -tiny)
self.assertRaises(OverflowError, lambda: -timedelta.max)
def test_microsecond_rounding(self):
td = timedelta
eq = self.assertEqual
# Single-field rounding.
eq(td(milliseconds=0.4/1000), td(0)) # rounds to 0
eq(td(milliseconds=-0.4/1000), td(0)) # rounds to 0
eq(td(milliseconds=0.6/1000), td(microseconds=1))
eq(td(milliseconds=-0.6/1000), td(microseconds=-1))
# Rounding due to contributions from more than one field.
us_per_hour = 3600e6
us_per_day = us_per_hour * 24
eq(td(days=.4/us_per_day), td(0))
eq(td(hours=.2/us_per_hour), td(0))
eq(td(days=.4/us_per_day, hours=.2/us_per_hour), td(microseconds=1))
eq(td(days=-.4/us_per_day), td(0))
eq(td(hours=-.2/us_per_hour), td(0))
eq(td(days=-.4/us_per_day, hours=-.2/us_per_hour), td(microseconds=-1))
def test_massive_normalization(self):
td = timedelta(microseconds=-1)
self.assertEqual((td.days, td.seconds, td.microseconds),
(-1, 24*3600-1, 999999))
def test_bool(self):
self.failUnless(timedelta(1))
self.failUnless(timedelta(0, 1))
self.failUnless(timedelta(0, 0, 1))
self.failUnless(timedelta(microseconds=1))
self.failUnless(not timedelta(0))
def test_subclass_timedelta(self):
class T(timedelta):
@staticmethod
def from_td(td):
return T(td.days, td.seconds, td.microseconds)
def as_hours(self):
sum = (self.days * 24 +
self.seconds / 3600.0 +
self.microseconds / 3600e6)
return round(sum)
t1 = T(days=1)
self.assert_(type(t1) is T)
self.assertEqual(t1.as_hours(), 24)
t2 = T(days=-1, seconds=-3600)
self.assert_(type(t2) is T)
self.assertEqual(t2.as_hours(), -25)
t3 = t1 + t2
self.assert_(type(t3) is timedelta)
t4 = T.from_td(t3)
self.assert_(type(t4) is T)
self.assertEqual(t3.days, t4.days)
self.assertEqual(t3.seconds, t4.seconds)
self.assertEqual(t3.microseconds, t4.microseconds)
self.assertEqual(str(t3), str(t4))
self.assertEqual(t4.as_hours(), -1)
#############################################################################
# date tests
class TestDateOnly(unittest.TestCase):
# Tests here won't pass if also run on datetime objects, so don't
# subclass this to test datetimes too.
def test_delta_non_days_ignored(self):
dt = date(2000, 1, 2)
delta = timedelta(days=1, hours=2, minutes=3, seconds=4,
microseconds=5)
days = timedelta(delta.days)
self.assertEqual(days, timedelta(1))
dt2 = dt + delta
self.assertEqual(dt2, dt + days)
dt2 = delta + dt
self.assertEqual(dt2, dt + days)
dt2 = dt - delta
self.assertEqual(dt2, dt - days)
delta = -delta
days = timedelta(delta.days)
self.assertEqual(days, timedelta(-2))
dt2 = dt + delta
self.assertEqual(dt2, dt + days)
dt2 = delta + dt
self.assertEqual(dt2, dt + days)
dt2 = dt - delta
self.assertEqual(dt2, dt - days)
class SubclassDate(date):
sub_var = 1
class TestDate(HarmlessMixedComparison, unittest.TestCase):
# Tests here should pass for both dates and datetimes, except for a
# few tests that TestDateTime overrides.
theclass = date
def test_basic_attributes(self):
dt = self.theclass(2002, 3, 1)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
def test_roundtrip(self):
for dt in (self.theclass(1, 2, 3),
self.theclass.today()):
# Verify dt -> string -> date identity.
s = repr(dt)
self.failUnless(s.startswith('datetime.'))
s = s[9:]
dt2 = eval(s)
self.assertEqual(dt, dt2)
# Verify identity via reconstructing from pieces.
dt2 = self.theclass(dt.year, dt.month, dt.day)
self.assertEqual(dt, dt2)
def test_ordinal_conversions(self):
# Check some fixed values.
for y, m, d, n in [(1, 1, 1, 1), # calendar origin
(1, 12, 31, 365),
(2, 1, 1, 366),
# first example from "Calendrical Calculations"
(1945, 11, 12, 710347)]:
d = self.theclass(y, m, d)
self.assertEqual(n, d.toordinal())
fromord = self.theclass.fromordinal(n)
self.assertEqual(d, fromord)
if hasattr(fromord, "hour"):
# if we're checking something fancier than a date, verify
# the extra fields have been zeroed out
self.assertEqual(fromord.hour, 0)
self.assertEqual(fromord.minute, 0)
self.assertEqual(fromord.second, 0)
self.assertEqual(fromord.microsecond, 0)
# Check first and last days of year spottily across the whole
# range of years supported.
for year in xrange(MINYEAR, MAXYEAR+1, 7):
# Verify (year, 1, 1) -> ordinal -> y, m, d is identity.
d = self.theclass(year, 1, 1)
n = d.toordinal()
d2 = self.theclass.fromordinal(n)
self.assertEqual(d, d2)
# Verify that moving back a day gets to the end of year-1.
if year > 1:
d = self.theclass.fromordinal(n-1)
d2 = self.theclass(year-1, 12, 31)
self.assertEqual(d, d2)
self.assertEqual(d2.toordinal(), n-1)
# Test every day in a leap-year and a non-leap year.
dim = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
for year, isleap in (2000, True), (2002, False):
n = self.theclass(year, 1, 1).toordinal()
for month, maxday in zip(range(1, 13), dim):
if month == 2 and isleap:
maxday += 1
for day in range(1, maxday+1):
d = self.theclass(year, month, day)
self.assertEqual(d.toordinal(), n)
self.assertEqual(d, self.theclass.fromordinal(n))
n += 1
def test_extreme_ordinals(self):
a = self.theclass.min
a = self.theclass(a.year, a.month, a.day) # get rid of time parts
aord = a.toordinal()
b = a.fromordinal(aord)
self.assertEqual(a, b)
self.assertRaises(ValueError, lambda: a.fromordinal(aord - 1))
b = a + timedelta(days=1)
self.assertEqual(b.toordinal(), aord + 1)
self.assertEqual(b, self.theclass.fromordinal(aord + 1))
a = self.theclass.max
a = self.theclass(a.year, a.month, a.day) # get rid of time parts
aord = a.toordinal()
b = a.fromordinal(aord)
self.assertEqual(a, b)
self.assertRaises(ValueError, lambda: a.fromordinal(aord + 1))
b = a - timedelta(days=1)
self.assertEqual(b.toordinal(), aord - 1)
self.assertEqual(b, self.theclass.fromordinal(aord - 1))
def test_bad_constructor_arguments(self):
# bad years
self.theclass(MINYEAR, 1, 1) # no exception
self.theclass(MAXYEAR, 1, 1) # no exception
self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
# bad months
self.theclass(2000, 1, 1) # no exception
self.theclass(2000, 12, 1) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
# bad days
self.theclass(2000, 2, 29) # no exception
self.theclass(2004, 2, 29) # no exception
self.theclass(2400, 2, 29) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
def test_hash_equality(self):
d = self.theclass(2000, 12, 31)
# same thing
e = self.theclass(2000, 12, 31)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(2001, 1, 1)
# same thing
e = self.theclass(2001, 1, 1)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_computations(self):
a = self.theclass(2002, 1, 31)
b = self.theclass(1956, 1, 31)
diff = a-b
self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
self.assertEqual(diff.seconds, 0)
self.assertEqual(diff.microseconds, 0)
day = timedelta(1)
week = timedelta(7)
a = self.theclass(2002, 3, 2)
self.assertEqual(a + day, self.theclass(2002, 3, 3))
self.assertEqual(day + a, self.theclass(2002, 3, 3))
self.assertEqual(a - day, self.theclass(2002, 3, 1))
self.assertEqual(-day + a, self.theclass(2002, 3, 1))
self.assertEqual(a + week, self.theclass(2002, 3, 9))
self.assertEqual(a - week, self.theclass(2002, 2, 23))
self.assertEqual(a + 52*week, self.theclass(2003, 3, 1))
self.assertEqual(a - 52*week, self.theclass(2001, 3, 3))
self.assertEqual((a + week) - a, week)
self.assertEqual((a + day) - a, day)
self.assertEqual((a - week) - a, -week)
self.assertEqual((a - day) - a, -day)
self.assertEqual(a - (a + week), -week)
self.assertEqual(a - (a + day), -day)
self.assertEqual(a - (a - week), week)
self.assertEqual(a - (a - day), day)
# Add/sub ints, longs, floats should be illegal
for i in 1, 1L, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# delta - date is senseless.
self.assertRaises(TypeError, lambda: day - a)
# mixing date and (delta or date) via * or // is senseless
self.assertRaises(TypeError, lambda: day * a)
self.assertRaises(TypeError, lambda: a * day)
self.assertRaises(TypeError, lambda: day // a)
self.assertRaises(TypeError, lambda: a // day)
self.assertRaises(TypeError, lambda: a * a)
self.assertRaises(TypeError, lambda: a // a)
# date + date is senseless
self.assertRaises(TypeError, lambda: a + a)
def test_overflow(self):
tiny = self.theclass.resolution
dt = self.theclass.min + tiny
dt -= tiny # no problem
self.assertRaises(OverflowError, dt.__sub__, tiny)
self.assertRaises(OverflowError, dt.__add__, -tiny)
dt = self.theclass.max - tiny
dt += tiny # no problem
self.assertRaises(OverflowError, dt.__add__, tiny)
self.assertRaises(OverflowError, dt.__sub__, -tiny)
def test_fromtimestamp(self):
import time
# Try an arbitrary fixed value.
year, month, day = 1999, 9, 19
ts = time.mktime((year, month, day, 0, 0, 0, 0, 0, -1))
d = self.theclass.fromtimestamp(ts)
self.assertEqual(d.year, year)
self.assertEqual(d.month, month)
self.assertEqual(d.day, day)
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.fromtimestamp,
insane)
def test_today(self):
import time
# We claim that today() is like fromtimestamp(time.time()), so
# prove it.
for dummy in range(3):
today = self.theclass.today()
ts = time.time()
todayagain = self.theclass.fromtimestamp(ts)
if today == todayagain:
break
# There are several legit reasons that could fail:
# 1. It recently became midnight, between the today() and the
# time() calls.
# 2. The platform time() has such fine resolution that we'll
# never get the same value twice.
# 3. The platform time() has poor resolution, and we just
# happened to call today() right before a resolution quantum
# boundary.
# 4. The system clock got fiddled between calls.
# In any case, wait a little while and try again.
time.sleep(0.1)
# It worked or it didn't. If it didn't, assume it's reason #2, and
# let the test pass if they're within half a second of each other.
self.failUnless(today == todayagain or
abs(todayagain - today) < timedelta(seconds=0.5))
def test_weekday(self):
for i in range(7):
# March 4, 2002 is a Monday
self.assertEqual(self.theclass(2002, 3, 4+i).weekday(), i)
self.assertEqual(self.theclass(2002, 3, 4+i).isoweekday(), i+1)
# January 2, 1956 is a Monday
self.assertEqual(self.theclass(1956, 1, 2+i).weekday(), i)
self.assertEqual(self.theclass(1956, 1, 2+i).isoweekday(), i+1)
def test_isocalendar(self):
# Check examples from
# http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
for i in range(7):
d = self.theclass(2003, 12, 22+i)
self.assertEqual(d.isocalendar(), (2003, 52, i+1))
d = self.theclass(2003, 12, 29) + timedelta(i)
self.assertEqual(d.isocalendar(), (2004, 1, i+1))
d = self.theclass(2004, 1, 5+i)
self.assertEqual(d.isocalendar(), (2004, 2, i+1))
d = self.theclass(2009, 12, 21+i)
self.assertEqual(d.isocalendar(), (2009, 52, i+1))
d = self.theclass(2009, 12, 28) + timedelta(i)
self.assertEqual(d.isocalendar(), (2009, 53, i+1))
d = self.theclass(2010, 1, 4+i)
self.assertEqual(d.isocalendar(), (2010, 1, i+1))
def test_iso_long_years(self):
# Calculate long ISO years and compare to table from
# http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
ISO_LONG_YEARS_TABLE = """
4 32 60 88
9 37 65 93
15 43 71 99
20 48 76
26 54 82
105 133 161 189
111 139 167 195
116 144 172
122 150 178
128 156 184
201 229 257 285
207 235 263 291
212 240 268 296
218 246 274
224 252 280
303 331 359 387
308 336 364 392
314 342 370 398
320 348 376
325 353 381
"""
iso_long_years = map(int, ISO_LONG_YEARS_TABLE.split())
iso_long_years.sort()
L = []
for i in range(400):
d = self.theclass(2000+i, 12, 31)
d1 = self.theclass(1600+i, 12, 31)
self.assertEqual(d.isocalendar()[1:], d1.isocalendar()[1:])
if d.isocalendar()[1] == 53:
L.append(i)
self.assertEqual(L, iso_long_years)
def test_isoformat(self):
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02")
def test_ctime(self):
t = self.theclass(2002, 3, 2)
self.assertEqual(t.ctime(), "Sat Mar 2 00:00:00 2002")
def test_strftime(self):
t = self.theclass(2005, 3, 2)
self.assertEqual(t.strftime("m:%m d:%d y:%y"), "m:03 d:02 y:05")
self.assertEqual(t.strftime(""), "") # SF bug #761337
self.assertEqual(t.strftime('x'*1000), 'x'*1000) # SF bug #1556784
self.assertRaises(TypeError, t.strftime) # needs an arg
self.assertRaises(TypeError, t.strftime, "one", "two") # too many args
self.assertRaises(TypeError, t.strftime, 42) # arg wrong type
# test that unicode input is allowed (issue 2782)
self.assertEqual(t.strftime(u"%m"), "03")
# A naive object replaces %z and %Z w/ empty strings.
self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")
#make sure that invalid format specifiers are handled correctly
#self.assertRaises(ValueError, t.strftime, "%e")
#self.assertRaises(ValueError, t.strftime, "%")
#self.assertRaises(ValueError, t.strftime, "%#")
#oh well, some systems just ignore those invalid ones.
#at least, exercise them to make sure that no crashes
#are generated
for f in ["%e", "%", "%#"]:
try:
t.strftime(f)
except ValueError:
pass
#check that this standard extension works
t.strftime("%f")
def test_format(self):
dt = self.theclass(2007, 9, 10)
self.assertEqual(dt.__format__(''), str(dt))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(2007, 9, 10)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(2007, 9, 10)
self.assertEqual(b.__format__(''), str(dt))
for fmt in ["m:%m d:%d y:%y",
"m:%m d:%d y:%y H:%H M:%M S:%S",
"%z %Z",
]:
self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_resolution_info(self):
self.assert_(isinstance(self.theclass.min, self.theclass))
self.assert_(isinstance(self.theclass.max, self.theclass))
self.assert_(isinstance(self.theclass.resolution, timedelta))
self.assert_(self.theclass.max > self.theclass.min)
def test_extreme_timedelta(self):
big = self.theclass.max - self.theclass.min
# 3652058 days, 23 hours, 59 minutes, 59 seconds, 999999 microseconds
n = (big.days*24*3600 + big.seconds)*1000000 + big.microseconds
# n == 315537897599999999 ~= 2**58.13
justasbig = timedelta(0, 0, n)
self.assertEqual(big, justasbig)
self.assertEqual(self.theclass.min + big, self.theclass.max)
self.assertEqual(self.theclass.max - big, self.theclass.min)
def test_timetuple(self):
for i in range(7):
# January 2, 1956 is a Monday (0)
d = self.theclass(1956, 1, 2+i)
t = d.timetuple()
self.assertEqual(t, (1956, 1, 2+i, 0, 0, 0, i, 2+i, -1))
# February 1, 1956 is a Wednesday (2)
d = self.theclass(1956, 2, 1+i)
t = d.timetuple()
self.assertEqual(t, (1956, 2, 1+i, 0, 0, 0, (2+i)%7, 32+i, -1))
# March 1, 1956 is a Thursday (3), and is the 31+29+1 = 61st day
# of the year.
d = self.theclass(1956, 3, 1+i)
t = d.timetuple()
self.assertEqual(t, (1956, 3, 1+i, 0, 0, 0, (3+i)%7, 61+i, -1))
self.assertEqual(t.tm_year, 1956)
self.assertEqual(t.tm_mon, 3)
self.assertEqual(t.tm_mday, 1+i)
self.assertEqual(t.tm_hour, 0)
self.assertEqual(t.tm_min, 0)
self.assertEqual(t.tm_sec, 0)
self.assertEqual(t.tm_wday, (3+i)%7)
self.assertEqual(t.tm_yday, 61+i)
self.assertEqual(t.tm_isdst, -1)
def test_pickling(self):
args = 6, 7, 23
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_compare(self):
t1 = self.theclass(2, 3, 4)
t2 = self.theclass(2, 3, 4)
self.failUnless(t1 == t2)
self.failUnless(t1 <= t2)
self.failUnless(t1 >= t2)
self.failUnless(not t1 != t2)
self.failUnless(not t1 < t2)
self.failUnless(not t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
t2 = self.theclass(*args) # this is larger than t1
self.failUnless(t1 < t2)
self.failUnless(t2 > t1)
self.failUnless(t1 <= t2)
self.failUnless(t2 >= t1)
self.failUnless(t1 != t2)
self.failUnless(t2 != t1)
self.failUnless(not t1 == t2)
self.failUnless(not t2 == t1)
self.failUnless(not t1 > t2)
self.failUnless(not t2 < t1)
self.failUnless(not t1 >= t2)
self.failUnless(not t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_mixed_compare(self):
our = self.theclass(2000, 4, 5)
self.assertRaises(TypeError, cmp, our, 1)
self.assertRaises(TypeError, cmp, 1, our)
class AnotherDateTimeClass(object):
def __cmp__(self, other):
# Return "equal" so calling this can't be confused with
# compare-by-address (which never says "equal" for distinct
# objects).
return 0
__hash__ = None # Silence Py3k warning
# This still errors, because date and datetime comparison raise
# TypeError instead of NotImplemented when they don't know what to
# do, in order to stop comparison from falling back to the default
# compare-by-address.
their = AnotherDateTimeClass()
self.assertRaises(TypeError, cmp, our, their)
# Oops: The next stab raises TypeError in the C implementation,
# but not in the Python implementation of datetime. The difference
# is due to that the Python implementation defines __cmp__ but
# the C implementation defines tp_richcompare. This is more pain
# to fix than it's worth, so commenting out the test.
# self.assertEqual(cmp(their, our), 0)
# But date and datetime comparison return NotImplemented instead if the
# other object has a timetuple attr. This gives the other object a
# chance to do the comparison.
class Comparable(AnotherDateTimeClass):
def timetuple(self):
return ()
their = Comparable()
self.assertEqual(cmp(our, their), 0)
self.assertEqual(cmp(their, our), 0)
self.failUnless(our == their)
self.failUnless(their == our)
def test_bool(self):
# All dates are considered true.
self.failUnless(self.theclass.min)
self.failUnless(self.theclass.max)
def test_strftime_out_of_range(self):
# For nasty technical reasons, we can't handle years before 1900.
cls = self.theclass
self.assertEqual(cls(1900, 1, 1).strftime("%Y"), "1900")
for y in 1, 49, 51, 99, 100, 1000, 1899:
self.assertRaises(ValueError, cls(y, 1, 1).strftime, "%Y")
def test_replace(self):
cls = self.theclass
args = [1, 2, 3]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_subclass_date(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.year + self.month
args = 2003, 4, 14
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.toordinal(), dt2.toordinal())
self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month - 7)
def test_pickling_subclass_date(self):
args = 6, 7, 23
orig = SubclassDate(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_backdoor_resistance(self):
# For fast unpickling, the constructor accepts a pickle string.
# This is a low-overhead backdoor. A user can (by intent or
# mistake) pass a string directly, which (if it's the right length)
# will get treated like a pickle, and bypass the normal sanity
# checks in the constructor. This can create insane objects.
# The constructor doesn't want to burn the time to validate all
# fields, but does check the month field. This stops, e.g.,
# datetime.datetime('1995-03-25') from yielding an insane object.
base = '1995-03-25'
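# The probe string has to be the length of the packed pickle data (4 bytes
# for date, 10 for datetime); byte 2 is the month field in both layouts.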
if not issubclass(self.theclass, datetime):
base = base[:4]
for month_byte in '9', chr(0), chr(13), '\xff':
self.assertRaises(TypeError, self.theclass,
base[:2] + month_byte + base[3:])
for ord_byte in range(1, 13):
# This shouldn't blow up because of the month byte alone. If
# the implementation changes to do more-careful checking, it may
# blow up because other fields are insane.
self.theclass(base[:2] + chr(ord_byte) + base[3:])
#############################################################################
# datetime tests
class SubclassDatetime(datetime):
sub_var = 1
class TestDateTime(TestDate):
theclass = datetime
def test_basic_attributes(self):
dt = self.theclass(2002, 3, 1, 12, 0)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 0)
self.assertEqual(dt.second, 0)
self.assertEqual(dt.microsecond, 0)
def test_basic_attributes_nonzero(self):
# Make sure all attributes are non-zero so bugs in
# bit-shifting access show up.
dt = self.theclass(2002, 3, 1, 12, 59, 59, 8000)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 59)
self.assertEqual(dt.second, 59)
self.assertEqual(dt.microsecond, 8000)
def test_roundtrip(self):
for dt in (self.theclass(1, 2, 3, 4, 5, 6, 7),
self.theclass.now()):
# Verify dt -> string -> datetime identity.
s = repr(dt)
self.failUnless(s.startswith('datetime.'))
s = s[9:]
dt2 = eval(s)
self.assertEqual(dt, dt2)
# Verify identity via reconstructing from pieces.
dt2 = self.theclass(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.microsecond)
self.assertEqual(dt, dt2)
def test_isoformat(self):
t = self.theclass(2, 3, 2, 4, 5, 1, 123)
self.assertEqual(t.isoformat(), "0002-03-02T04:05:01.000123")
self.assertEqual(t.isoformat('T'), "0002-03-02T04:05:01.000123")
self.assertEqual(t.isoformat(' '), "0002-03-02 04:05:01.000123")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 04:05:01.000123")
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat('T'), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat(' '), "0002-03-02 00:00:00")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 00:00:00")
def test_format(self):
dt = self.theclass(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(dt.__format__(''), str(dt))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(2007, 9, 10, 4, 5, 1, 123)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(2007, 9, 10, 4, 5, 1, 123)
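# An empty format spec falls back to str(); B overrides strftime but not
# __str__, so the result matches str(dt) (same field values).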
self.assertEqual(b.__format__(''), str(dt))
for fmt in ["m:%m d:%d y:%y",
"m:%m d:%d y:%y H:%H M:%M S:%S",
"%z %Z",
]:
self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_more_ctime(self):
# Test fields that TestDate doesn't touch.
import time
t = self.theclass(2002, 3, 2, 18, 3, 5, 123)
self.assertEqual(t.ctime(), "Sat Mar 2 18:03:05 2002")
# Oops! The next line fails on Win2K under MSVC 6, so it's commented
# out. The difference is that t.ctime() produces " 2" for the day,
# but platform ctime() produces "02" for the day. According to
# C99, t.ctime() is correct here.
# self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
# So test a case where that difference doesn't matter.
t = self.theclass(2002, 3, 22, 18, 3, 5, 123)
self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
def test_tz_independent_comparing(self):
dt1 = self.theclass(2002, 3, 1, 9, 0, 0)
dt2 = self.theclass(2002, 3, 1, 10, 0, 0)
dt3 = self.theclass(2002, 3, 1, 9, 0, 0)
self.assertEqual(dt1, dt3)
self.assert_(dt2 > dt3)
# Make sure comparison doesn't forget microseconds, and isn't done
# via comparing a float timestamp (an IEEE double doesn't have enough
# precision to span microsecond resolution across years 1 thru 9999,
# so comparing via timestamp necessarily calls some distinct values
# equal).
dt1 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999998)
us = timedelta(microseconds=1)
dt2 = dt1 + us
self.assertEqual(dt2 - dt1, us)
self.assert_(dt1 < dt2)
def test_strftime_with_bad_tzname_replace(self):
# Verify that strftime raises TypeError (instead of crashing) when
# tzinfo.tzname().replace() returns a non-string.
class MyTzInfo(FixedOffset):
def tzname(self, dt):
class MyStr(str):
def replace(self, *args):
return None
return MyStr('name')
t = self.theclass(2005, 3, 2, 0, 0, 0, 0, MyTzInfo(3, 'name'))
self.assertRaises(TypeError, t.strftime, '%Z')
def test_bad_constructor_arguments(self):
# bad years
self.theclass(MINYEAR, 1, 1) # no exception
self.theclass(MAXYEAR, 1, 1) # no exception
self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
# bad months
self.theclass(2000, 1, 1) # no exception
self.theclass(2000, 12, 1) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
# bad days
self.theclass(2000, 2, 29) # no exception
self.theclass(2004, 2, 29) # no exception
self.theclass(2400, 2, 29) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
# bad hours
self.theclass(2000, 1, 31, 0) # no exception
self.theclass(2000, 1, 31, 23) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 24)
# bad minutes
self.theclass(2000, 1, 31, 23, 0) # no exception
self.theclass(2000, 1, 31, 23, 59) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 60)
# bad seconds
self.theclass(2000, 1, 31, 23, 59, 0) # no exception
self.theclass(2000, 1, 31, 23, 59, 59) # no exception
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, -1)
self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, 60)
# bad microseconds
self.theclass(2000, 1, 31, 23, 59, 59, 0) # no exception
self.theclass(2000, 1, 31, 23, 59, 59, 999999) # no exception
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, 23, 59, 59, -1)
self.assertRaises(ValueError, self.theclass,
2000, 1, 31, 23, 59, 59,
1000000)
def test_hash_equality(self):
d = self.theclass(2000, 12, 31, 23, 30, 17)
e = self.theclass(2000, 12, 31, 23, 30, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(2001, 1, 1, 0, 5, 17)
e = self.theclass(2001, 1, 1, 0, 5, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_computations(self):
a = self.theclass(2002, 1, 31)
b = self.theclass(1956, 1, 31)
diff = a-b
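# 46 years apart, plus one extra day per leap year in between;
# range(1956, 2002, 4) lists them (1956..2000, and 2000 is a leap year).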
self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
self.assertEqual(diff.seconds, 0)
self.assertEqual(diff.microseconds, 0)
a = self.theclass(2002, 3, 2, 17, 6)
millisec = timedelta(0, 0, 1000)
hour = timedelta(0, 3600)
day = timedelta(1)
week = timedelta(7)
self.assertEqual(a + hour, self.theclass(2002, 3, 2, 18, 6))
self.assertEqual(hour + a, self.theclass(2002, 3, 2, 18, 6))
self.assertEqual(a + 10*hour, self.theclass(2002, 3, 3, 3, 6))
self.assertEqual(a - hour, self.theclass(2002, 3, 2, 16, 6))
self.assertEqual(-hour + a, self.theclass(2002, 3, 2, 16, 6))
self.assertEqual(a - hour, a + -hour)
self.assertEqual(a - 20*hour, self.theclass(2002, 3, 1, 21, 6))
self.assertEqual(a + day, self.theclass(2002, 3, 3, 17, 6))
self.assertEqual(a - day, self.theclass(2002, 3, 1, 17, 6))
self.assertEqual(a + week, self.theclass(2002, 3, 9, 17, 6))
self.assertEqual(a - week, self.theclass(2002, 2, 23, 17, 6))
self.assertEqual(a + 52*week, self.theclass(2003, 3, 1, 17, 6))
self.assertEqual(a - 52*week, self.theclass(2001, 3, 3, 17, 6))
self.assertEqual((a + week) - a, week)
self.assertEqual((a + day) - a, day)
self.assertEqual((a + hour) - a, hour)
self.assertEqual((a + millisec) - a, millisec)
self.assertEqual((a - week) - a, -week)
self.assertEqual((a - day) - a, -day)
self.assertEqual((a - hour) - a, -hour)
self.assertEqual((a - millisec) - a, -millisec)
self.assertEqual(a - (a + week), -week)
self.assertEqual(a - (a + day), -day)
self.assertEqual(a - (a + hour), -hour)
self.assertEqual(a - (a + millisec), -millisec)
self.assertEqual(a - (a - week), week)
self.assertEqual(a - (a - day), day)
self.assertEqual(a - (a - hour), hour)
self.assertEqual(a - (a - millisec), millisec)
self.assertEqual(a + (week + day + hour + millisec),
self.theclass(2002, 3, 10, 18, 6, 0, 1000))
self.assertEqual(a + (week + day + hour + millisec),
(((a + week) + day) + hour) + millisec)
self.assertEqual(a - (week + day + hour + millisec),
self.theclass(2002, 2, 22, 16, 5, 59, 999000))
self.assertEqual(a - (week + day + hour + millisec),
(((a - week) - day) - hour) - millisec)
# Add/sub ints, longs, floats should be illegal
for i in 1, 1L, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# delta - datetime is senseless.
self.assertRaises(TypeError, lambda: day - a)
# mixing datetime and (delta or datetime) via * or // is senseless
self.assertRaises(TypeError, lambda: day * a)
self.assertRaises(TypeError, lambda: a * day)
self.assertRaises(TypeError, lambda: day // a)
self.assertRaises(TypeError, lambda: a // day)
self.assertRaises(TypeError, lambda: a * a)
self.assertRaises(TypeError, lambda: a // a)
# datetime + datetime is senseless
self.assertRaises(TypeError, lambda: a + a)
def test_pickling(self):
args = 6, 7, 23, 20, 59, 1, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_more_pickling(self):
a = self.theclass(2003, 2, 7, 16, 48, 37, 444116)
s = pickle.dumps(a)
b = pickle.loads(s)
self.assertEqual(b.year, 2003)
self.assertEqual(b.month, 2)
self.assertEqual(b.day, 7)
def test_pickling_subclass_datetime(self):
args = 6, 7, 23, 20, 59, 1, 64**2
orig = SubclassDatetime(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_more_compare(self):
# The test_compare() inherited from TestDate covers the error cases.
# We just want to test lexicographic ordering on the members datetime
# has that date lacks.
args = [2000, 11, 29, 20, 58, 16, 999998]
t1 = self.theclass(*args)
t2 = self.theclass(*args)
self.failUnless(t1 == t2)
self.failUnless(t1 <= t2)
self.failUnless(t1 >= t2)
self.failUnless(not t1 != t2)
self.failUnless(not t1 < t2)
self.failUnless(not t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for i in range(len(args)):
newargs = args[:]
newargs[i] = args[i] + 1
t2 = self.theclass(*newargs) # this is larger than t1
self.failUnless(t1 < t2)
self.failUnless(t2 > t1)
self.failUnless(t1 <= t2)
self.failUnless(t2 >= t1)
self.failUnless(t1 != t2)
self.failUnless(t2 != t1)
self.failUnless(not t1 == t2)
self.failUnless(not t2 == t1)
self.failUnless(not t1 > t2)
self.failUnless(not t2 < t1)
self.failUnless(not t1 >= t2)
self.failUnless(not t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
# A helper for timestamp constructor tests.
def verify_field_equality(self, expected, got):
self.assertEqual(expected.tm_year, got.year)
self.assertEqual(expected.tm_mon, got.month)
self.assertEqual(expected.tm_mday, got.day)
self.assertEqual(expected.tm_hour, got.hour)
self.assertEqual(expected.tm_min, got.minute)
self.assertEqual(expected.tm_sec, got.second)
def test_fromtimestamp(self):
import time
ts = time.time()
expected = time.localtime(ts)
got = self.theclass.fromtimestamp(ts)
self.verify_field_equality(expected, got)
def test_utcfromtimestamp(self):
import time
ts = time.time()
expected = time.gmtime(ts)
got = self.theclass.utcfromtimestamp(ts)
self.verify_field_equality(expected, got)
def test_microsecond_rounding(self):
# Test that fromtimestamp() rounds up floats that fall less than
# one microsecond below an integral timestamp.
self.assertEquals(self.theclass.fromtimestamp(0.9999999),
self.theclass.fromtimestamp(1))
def test_insane_fromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.fromtimestamp,
insane)
def test_insane_utcfromtimestamp(self):
# It's possible that some platform maps time_t to double,
# and that this test will fail there. This test should
# exempt such platforms (provided they return reasonable
# results!).
for insane in -1e200, 1e200:
self.assertRaises(ValueError, self.theclass.utcfromtimestamp,
insane)
def test_negative_float_fromtimestamp(self):
# Windows doesn't accept negative timestamps
if os.name == "nt":
return
# The result is tz-dependent; at least test that this doesn't
# fail (like it did before bug 1646728 was fixed).
self.theclass.fromtimestamp(-1.05)
def test_negative_float_utcfromtimestamp(self):
# Windows doesn't accept negative timestamps
if os.name == "nt":
return
d = self.theclass.utcfromtimestamp(-1.05)
self.assertEquals(d, self.theclass(1969, 12, 31, 23, 59, 58, 950000))
def test_utcnow(self):
import time
# Call it a success if utcnow() and utcfromtimestamp() are within
# a second of each other.
tolerance = timedelta(seconds=1)
for dummy in range(3):
from_now = self.theclass.utcnow()
from_timestamp = self.theclass.utcfromtimestamp(time.time())
if abs(from_timestamp - from_now) <= tolerance:
break
# Else try again a few times.
self.failUnless(abs(from_timestamp - from_now) <= tolerance)
def test_strptime(self):
import _strptime
string = '2004-12-01 13:02:47.197'
format = '%Y-%m-%d %H:%M:%S.%f'
result, frac = _strptime._strptime(string, format)
expected = self.theclass(*(result[0:6]+(frac,)))
got = self.theclass.strptime(string, format)
self.assertEqual(expected, got)
def test_more_timetuple(self):
# This tests fields beyond those tested by the TestDate.test_timetuple.
t = self.theclass(2004, 12, 31, 6, 22, 33)
self.assertEqual(t.timetuple(), (2004, 12, 31, 6, 22, 33, 4, 366, -1))
self.assertEqual(t.timetuple(),
(t.year, t.month, t.day,
t.hour, t.minute, t.second,
t.weekday(),
t.toordinal() - date(t.year, 1, 1).toordinal() + 1,
-1))
tt = t.timetuple()
self.assertEqual(tt.tm_year, t.year)
self.assertEqual(tt.tm_mon, t.month)
self.assertEqual(tt.tm_mday, t.day)
self.assertEqual(tt.tm_hour, t.hour)
self.assertEqual(tt.tm_min, t.minute)
self.assertEqual(tt.tm_sec, t.second)
self.assertEqual(tt.tm_wday, t.weekday())
self.assertEqual(tt.tm_yday, t.toordinal() -
date(t.year, 1, 1).toordinal() + 1)
self.assertEqual(tt.tm_isdst, -1)
def test_more_strftime(self):
# This tests fields beyond those tested by the TestDate.test_strftime.
t = self.theclass(2004, 12, 31, 6, 22, 33, 47)
self.assertEqual(t.strftime("%m %d %y %f %S %M %H %j"),
"12 31 04 000047 33 22 06 366")
def test_extract(self):
dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
self.assertEqual(dt.date(), date(2002, 3, 4))
self.assertEqual(dt.time(), time(18, 45, 3, 1234))
def test_combine(self):
d = date(2002, 3, 4)
t = time(18, 45, 3, 1234)
expected = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
combine = self.theclass.combine
dt = combine(d, t)
self.assertEqual(dt, expected)
dt = combine(time=t, date=d)
self.assertEqual(dt, expected)
self.assertEqual(d, dt.date())
self.assertEqual(t, dt.time())
self.assertEqual(dt, combine(dt.date(), dt.time()))
self.assertRaises(TypeError, combine) # need an arg
self.assertRaises(TypeError, combine, d) # need two args
self.assertRaises(TypeError, combine, t, d) # args reversed
self.assertRaises(TypeError, combine, d, t, 1) # too many args
self.assertRaises(TypeError, combine, "date", "time") # wrong types
def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4, 5, 6, 7]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4),
("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_astimezone(self):
# Pretty boring! The TZ test is more interesting here. astimezone()
# simply can't be applied to a naive object.
dt = self.theclass.now()
f = FixedOffset(44, "")
self.assertRaises(TypeError, dt.astimezone) # not enough args
self.assertRaises(TypeError, dt.astimezone, f, f) # too many args
self.assertRaises(TypeError, dt.astimezone, dt) # arg wrong type
self.assertRaises(ValueError, dt.astimezone, f) # naive
self.assertRaises(ValueError, dt.astimezone, tz=f) # naive
class Bogus(tzinfo):
def utcoffset(self, dt): return None
def dst(self, dt): return timedelta(0)
bog = Bogus()
self.assertRaises(ValueError, dt.astimezone, bog) # naive
class AlsoBogus(tzinfo):
def utcoffset(self, dt): return timedelta(0)
def dst(self, dt): return None
alsobog = AlsoBogus()
self.assertRaises(ValueError, dt.astimezone, alsobog) # also naive
def test_subclass_datetime(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.year + self.month + self.second
args = 2003, 4, 14, 12, 13, 41
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.toordinal(), dt2.toordinal())
self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month +
dt1.second - 7)
class SubclassTime(time):
sub_var = 1
class TestTime(HarmlessMixedComparison, unittest.TestCase):
theclass = time
def test_basic_attributes(self):
t = self.theclass(12, 0)
self.assertEqual(t.hour, 12)
self.assertEqual(t.minute, 0)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 0)
def test_basic_attributes_nonzero(self):
# Make sure all attributes are non-zero so bugs in
# bit-shifting access show up.
t = self.theclass(12, 59, 59, 8000)
self.assertEqual(t.hour, 12)
self.assertEqual(t.minute, 59)
self.assertEqual(t.second, 59)
self.assertEqual(t.microsecond, 8000)
def test_roundtrip(self):
t = self.theclass(1, 2, 3, 4)
# Verify t -> string -> time identity.
s = repr(t)
self.failUnless(s.startswith('datetime.'))
s = s[9:]
t2 = eval(s)
self.assertEqual(t, t2)
# Verify identity via reconstructing from pieces.
t2 = self.theclass(t.hour, t.minute, t.second,
t.microsecond)
self.assertEqual(t, t2)
def test_comparing(self):
args = [1, 2, 3, 4]
t1 = self.theclass(*args)
t2 = self.theclass(*args)
self.failUnless(t1 == t2)
self.failUnless(t1 <= t2)
self.failUnless(t1 >= t2)
self.failUnless(not t1 != t2)
self.failUnless(not t1 < t2)
self.failUnless(not t1 > t2)
self.assertEqual(cmp(t1, t2), 0)
self.assertEqual(cmp(t2, t1), 0)
for i in range(len(args)):
newargs = args[:]
newargs[i] = args[i] + 1
t2 = self.theclass(*newargs) # this is larger than t1
self.failUnless(t1 < t2)
self.failUnless(t2 > t1)
self.failUnless(t1 <= t2)
self.failUnless(t2 >= t1)
self.failUnless(t1 != t2)
self.failUnless(t2 != t1)
self.failUnless(not t1 == t2)
self.failUnless(not t2 == t1)
self.failUnless(not t1 > t2)
self.failUnless(not t2 < t1)
self.failUnless(not t1 >= t2)
self.failUnless(not t2 <= t1)
self.assertEqual(cmp(t1, t2), -1)
self.assertEqual(cmp(t2, t1), 1)
for badarg in OTHERSTUFF:
self.assertEqual(t1 == badarg, False)
self.assertEqual(t1 != badarg, True)
self.assertEqual(badarg == t1, False)
self.assertEqual(badarg != t1, True)
self.assertRaises(TypeError, lambda: t1 <= badarg)
self.assertRaises(TypeError, lambda: t1 < badarg)
self.assertRaises(TypeError, lambda: t1 > badarg)
self.assertRaises(TypeError, lambda: t1 >= badarg)
self.assertRaises(TypeError, lambda: badarg <= t1)
self.assertRaises(TypeError, lambda: badarg < t1)
self.assertRaises(TypeError, lambda: badarg > t1)
self.assertRaises(TypeError, lambda: badarg >= t1)
def test_bad_constructor_arguments(self):
# bad hours
self.theclass(0, 0) # no exception
self.theclass(23, 0) # no exception
self.assertRaises(ValueError, self.theclass, -1, 0)
self.assertRaises(ValueError, self.theclass, 24, 0)
# bad minutes
self.theclass(23, 0) # no exception
self.theclass(23, 59) # no exception
self.assertRaises(ValueError, self.theclass, 23, -1)
self.assertRaises(ValueError, self.theclass, 23, 60)
# bad seconds
self.theclass(23, 59, 0) # no exception
self.theclass(23, 59, 59) # no exception
self.assertRaises(ValueError, self.theclass, 23, 59, -1)
self.assertRaises(ValueError, self.theclass, 23, 59, 60)
# bad microseconds
self.theclass(23, 59, 59, 0) # no exception
self.theclass(23, 59, 59, 999999) # no exception
self.assertRaises(ValueError, self.theclass, 23, 59, 59, -1)
self.assertRaises(ValueError, self.theclass, 23, 59, 59, 1000000)
def test_hash_equality(self):
d = self.theclass(23, 30, 17)
e = self.theclass(23, 30, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(0, 5, 17)
e = self.theclass(0, 5, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_isoformat(self):
t = self.theclass(4, 5, 1, 123)
self.assertEqual(t.isoformat(), "04:05:01.000123")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass()
self.assertEqual(t.isoformat(), "00:00:00")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=1)
self.assertEqual(t.isoformat(), "00:00:00.000001")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=10)
self.assertEqual(t.isoformat(), "00:00:00.000010")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=100)
self.assertEqual(t.isoformat(), "00:00:00.000100")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=1000)
self.assertEqual(t.isoformat(), "00:00:00.001000")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=10000)
self.assertEqual(t.isoformat(), "00:00:00.010000")
self.assertEqual(t.isoformat(), str(t))
t = self.theclass(microsecond=100000)
self.assertEqual(t.isoformat(), "00:00:00.100000")
self.assertEqual(t.isoformat(), str(t))
def test_1653736(self):
# verify it doesn't accept extra keyword arguments
t = self.theclass(second=1)
self.assertRaises(TypeError, t.isoformat, foo=3)
def test_strftime(self):
t = self.theclass(1, 2, 3, 4)
self.assertEqual(t.strftime('%H %M %S %f'), "01 02 03 000004")
# A naive object replaces %z and %Z with empty strings.
self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")
def test_format(self):
t = self.theclass(1, 2, 3, 4)
self.assertEqual(t.__format__(''), str(t))
# check that a derived class's __str__() gets called
class A(self.theclass):
def __str__(self):
return 'A'
a = A(1, 2, 3, 4)
self.assertEqual(a.__format__(''), 'A')
# check that a derived class's strftime gets called
class B(self.theclass):
def strftime(self, format_spec):
return 'B'
b = B(1, 2, 3, 4)
self.assertEqual(b.__format__(''), str(t))
for fmt in ['%H %M %S',
]:
self.assertEqual(t.__format__(fmt), t.strftime(fmt))
self.assertEqual(a.__format__(fmt), t.strftime(fmt))
self.assertEqual(b.__format__(fmt), 'B')
def test_str(self):
self.assertEqual(str(self.theclass(1, 2, 3, 4)), "01:02:03.000004")
self.assertEqual(str(self.theclass(10, 2, 3, 4000)), "10:02:03.004000")
self.assertEqual(str(self.theclass(0, 2, 3, 400000)), "00:02:03.400000")
self.assertEqual(str(self.theclass(12, 2, 3, 0)), "12:02:03")
self.assertEqual(str(self.theclass(23, 15, 0, 0)), "23:15:00")
def test_repr(self):
name = 'datetime.' + self.theclass.__name__
self.assertEqual(repr(self.theclass(1, 2, 3, 4)),
"%s(1, 2, 3, 4)" % name)
self.assertEqual(repr(self.theclass(10, 2, 3, 4000)),
"%s(10, 2, 3, 4000)" % name)
self.assertEqual(repr(self.theclass(0, 2, 3, 400000)),
"%s(0, 2, 3, 400000)" % name)
self.assertEqual(repr(self.theclass(12, 2, 3, 0)),
"%s(12, 2, 3)" % name)
self.assertEqual(repr(self.theclass(23, 15, 0, 0)),
"%s(23, 15)" % name)
def test_resolution_info(self):
self.assert_(isinstance(self.theclass.min, self.theclass))
self.assert_(isinstance(self.theclass.max, self.theclass))
self.assert_(isinstance(self.theclass.resolution, timedelta))
self.assert_(self.theclass.max > self.theclass.min)
def test_pickling(self):
args = 20, 59, 16, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_pickling_subclass_time(self):
args = 20, 59, 16, 64**2
orig = SubclassTime(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
def test_bool(self):
cls = self.theclass
self.failUnless(cls(1))
self.failUnless(cls(0, 1))
self.failUnless(cls(0, 0, 1))
self.failUnless(cls(0, 0, 0, 1))
self.failUnless(not cls(0))
self.failUnless(not cls())
def test_replace(self):
cls = self.theclass
args = [1, 2, 3, 4]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Out of bounds.
base = cls(1)
self.assertRaises(ValueError, base.replace, hour=24)
self.assertRaises(ValueError, base.replace, minute=-1)
self.assertRaises(ValueError, base.replace, second=100)
self.assertRaises(ValueError, base.replace, microsecond=1000000)
def test_subclass_time(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.second
args = 4, 5, 6
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.isoformat(), dt2.isoformat())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
def test_backdoor_resistance(self):
# see TestDate.test_backdoor_resistance().
base = '2:59.0'
for hour_byte in ' ', '9', chr(24), '\xff':
self.assertRaises(TypeError, self.theclass,
hour_byte + base[1:])
# A mixin for classes with a tzinfo= argument. Subclasses must define
# theclass as a class attribute, and theclass(1, 1, 1, tzinfo=whatever)
# must be legit (which is true for time and datetime).
class TZInfoBase:
def test_argument_passing(self):
cls = self.theclass
# A datetime passes itself on, a time passes None.
class introspective(tzinfo):
def tzname(self, dt): return dt and "real" or "none"
def utcoffset(self, dt):
return timedelta(minutes = dt and 42 or -42)
dst = utcoffset
obj = cls(1, 2, 3, tzinfo=introspective())
expected = cls is time and "none" or "real"
self.assertEqual(obj.tzname(), expected)
expected = timedelta(minutes=(cls is time and -42 or 42))
self.assertEqual(obj.utcoffset(), expected)
self.assertEqual(obj.dst(), expected)
def test_bad_tzinfo_classes(self):
cls = self.theclass
self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=12)
class NiceTry(object):
def __init__(self): pass
def utcoffset(self, dt): pass
self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=NiceTry)
class BetterTry(tzinfo):
def __init__(self): pass
def utcoffset(self, dt): pass
b = BetterTry()
t = cls(1, 1, 1, tzinfo=b)
self.failUnless(t.tzinfo is b)
def test_utc_offset_out_of_bounds(self):
class Edgy(tzinfo):
def __init__(self, offset):
self.offset = timedelta(minutes=offset)
def utcoffset(self, dt):
return self.offset
cls = self.theclass
for offset, legit in ((-1440, False),
(-1439, True),
(1439, True),
(1440, False)):
if cls is time:
t = cls(1, 2, 3, tzinfo=Edgy(offset))
elif cls is datetime:
t = cls(6, 6, 6, 1, 2, 3, tzinfo=Edgy(offset))
else:
assert 0, "impossible"
if legit:
aofs = abs(offset)
h, m = divmod(aofs, 60)
tag = "%c%02d:%02d" % (offset < 0 and '-' or '+', h, m)
if isinstance(t, datetime):
t = t.timetz()
self.assertEqual(str(t), "01:02:03" + tag)
else:
self.assertRaises(ValueError, str, t)
def test_tzinfo_classes(self):
cls = self.theclass
class C1(tzinfo):
def utcoffset(self, dt): return None
def dst(self, dt): return None
def tzname(self, dt): return None
for t in (cls(1, 1, 1),
cls(1, 1, 1, tzinfo=None),
cls(1, 1, 1, tzinfo=C1())):
self.failUnless(t.utcoffset() is None)
self.failUnless(t.dst() is None)
self.failUnless(t.tzname() is None)
class C3(tzinfo):
def utcoffset(self, dt): return timedelta(minutes=-1439)
def dst(self, dt): return timedelta(minutes=1439)
def tzname(self, dt): return "aname"
t = cls(1, 1, 1, tzinfo=C3())
self.assertEqual(t.utcoffset(), timedelta(minutes=-1439))
self.assertEqual(t.dst(), timedelta(minutes=1439))
self.assertEqual(t.tzname(), "aname")
# Wrong types.
class C4(tzinfo):
def utcoffset(self, dt): return "aname"
def dst(self, dt): return 7
def tzname(self, dt): return 0
t = cls(1, 1, 1, tzinfo=C4())
self.assertRaises(TypeError, t.utcoffset)
self.assertRaises(TypeError, t.dst)
self.assertRaises(TypeError, t.tzname)
# Offset out of range.
class C6(tzinfo):
def utcoffset(self, dt): return timedelta(hours=-24)
def dst(self, dt): return timedelta(hours=24)
t = cls(1, 1, 1, tzinfo=C6())
self.assertRaises(ValueError, t.utcoffset)
self.assertRaises(ValueError, t.dst)
# Not a whole number of minutes.
class C7(tzinfo):
def utcoffset(self, dt): return timedelta(seconds=61)
def dst(self, dt): return timedelta(microseconds=-81)
t = cls(1, 1, 1, tzinfo=C7())
self.assertRaises(ValueError, t.utcoffset)
self.assertRaises(ValueError, t.dst)
def test_aware_compare(self):
cls = self.theclass
# Ensure that utcoffset() gets ignored if the comparands have
# the same tzinfo member.
class OperandDependentOffset(tzinfo):
def utcoffset(self, t):
if t.minute < 10:
# d0 and d1 equal after adjustment
return timedelta(minutes=t.minute)
else:
# d2 off in the weeds
return timedelta(minutes=59)
base = cls(8, 9, 10, tzinfo=OperandDependentOffset())
d0 = base.replace(minute=3)
d1 = base.replace(minute=9)
d2 = base.replace(minute=11)
for x in d0, d1, d2:
for y in d0, d1, d2:
got = cmp(x, y)
expected = cmp(x.minute, y.minute)
self.assertEqual(got, expected)
# However, if they're different members, utcoffset is not ignored.
# Note that a time can't actually have an operand-dependent offset,
# though (and time.utcoffset() passes None to tzinfo.utcoffset()),
# so skip this test for time.
if cls is not time:
d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
for x in d0, d1, d2:
for y in d0, d1, d2:
got = cmp(x, y)
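# After each operand's own offset is applied, d0 and d1 both equal the
# base while d2 lands 48 minutes earlier, so d2 compares less than either.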
if (x is d0 or x is d1) and (y is d0 or y is d1):
expected = 0
elif x is y is d2:
expected = 0
elif x is d2:
expected = -1
else:
assert y is d2
expected = 1
self.assertEqual(got, expected)
# Testing time objects with a non-None tzinfo.
class TestTimeTZ(TestTime, TZInfoBase, unittest.TestCase):
theclass = time
def test_empty(self):
t = self.theclass()
self.assertEqual(t.hour, 0)
self.assertEqual(t.minute, 0)
self.assertEqual(t.second, 0)
self.assertEqual(t.microsecond, 0)
self.failUnless(t.tzinfo is None)
def test_zones(self):
est = FixedOffset(-300, "EST", 1)
utc = FixedOffset(0, "UTC", -2)
met = FixedOffset(60, "MET", 3)
t1 = time( 7, 47, tzinfo=est)
t2 = time(12, 47, tzinfo=utc)
t3 = time(13, 47, tzinfo=met)
t4 = time(microsecond=40)
t5 = time(microsecond=40, tzinfo=utc)
self.assertEqual(t1.tzinfo, est)
self.assertEqual(t2.tzinfo, utc)
self.assertEqual(t3.tzinfo, met)
self.failUnless(t4.tzinfo is None)
self.assertEqual(t5.tzinfo, utc)
self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
self.failUnless(t4.utcoffset() is None)
self.assertRaises(TypeError, t1.utcoffset, "no args")
self.assertEqual(t1.tzname(), "EST")
self.assertEqual(t2.tzname(), "UTC")
self.assertEqual(t3.tzname(), "MET")
self.failUnless(t4.tzname() is None)
self.assertRaises(TypeError, t1.tzname, "no args")
self.assertEqual(t1.dst(), timedelta(minutes=1))
self.assertEqual(t2.dst(), timedelta(minutes=-2))
self.assertEqual(t3.dst(), timedelta(minutes=3))
self.failUnless(t4.dst() is None)
self.assertRaises(TypeError, t1.dst, "no args")
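# t1, t2 and t3 all denote 12:47 UTC once their offsets are applied, so
# they must compare equal and hash equal.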
self.assertEqual(hash(t1), hash(t2))
self.assertEqual(hash(t1), hash(t3))
self.assertEqual(hash(t2), hash(t3))
self.assertEqual(t1, t2)
self.assertEqual(t1, t3)
self.assertEqual(t2, t3)
self.assertRaises(TypeError, lambda: t4 == t5) # mixed tz-aware & naive
self.assertRaises(TypeError, lambda: t4 < t5) # mixed tz-aware & naive
self.assertRaises(TypeError, lambda: t5 < t4) # mixed tz-aware & naive
self.assertEqual(str(t1), "07:47:00-05:00")
self.assertEqual(str(t2), "12:47:00+00:00")
self.assertEqual(str(t3), "13:47:00+01:00")
self.assertEqual(str(t4), "00:00:00.000040")
self.assertEqual(str(t5), "00:00:00.000040+00:00")
self.assertEqual(t1.isoformat(), "07:47:00-05:00")
self.assertEqual(t2.isoformat(), "12:47:00+00:00")
self.assertEqual(t3.isoformat(), "13:47:00+01:00")
self.assertEqual(t4.isoformat(), "00:00:00.000040")
self.assertEqual(t5.isoformat(), "00:00:00.000040+00:00")
d = 'datetime.time'
self.assertEqual(repr(t1), d + "(7, 47, tzinfo=est)")
self.assertEqual(repr(t2), d + "(12, 47, tzinfo=utc)")
self.assertEqual(repr(t3), d + "(13, 47, tzinfo=met)")
self.assertEqual(repr(t4), d + "(0, 0, 0, 40)")
self.assertEqual(repr(t5), d + "(0, 0, 0, 40, tzinfo=utc)")
self.assertEqual(t1.strftime("%H:%M:%S %%Z=%Z %%z=%z"),
"07:47:00 %Z=EST %z=-0500")
self.assertEqual(t2.strftime("%H:%M:%S %Z %z"), "12:47:00 UTC +0000")
self.assertEqual(t3.strftime("%H:%M:%S %Z %z"), "13:47:00 MET +0100")
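# A deliberately nasty tzname: %Z inserts the name verbatim (its embedded
# %-codes are not re-expanded), while %z still reflects the -1439 minute
# offset as -2359.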
yuck = FixedOffset(-1439, "%z %Z %%z%%Z")
t1 = time(23, 59, tzinfo=yuck)
self.assertEqual(t1.strftime("%H:%M %%Z='%Z' %%z='%z'"),
"23:59 %Z='%z %Z %%z%%Z' %z='-2359'")
# Check that an invalid tzname result raises an exception.
class Badtzname(tzinfo):
def tzname(self, dt): return 42
t = time(2, 3, 4, tzinfo=Badtzname())
self.assertEqual(t.strftime("%H:%M:%S"), "02:03:04")
self.assertRaises(TypeError, t.strftime, "%Z")
def test_hash_edge_cases(self):
# Offsets that overflow a basic time.
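# In each pair the two times normalize to the same UTC value (the
# adjustment wraps past midnight), so equal objects must hash equal.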
t1 = self.theclass(0, 1, 2, 3, tzinfo=FixedOffset(1439, ""))
t2 = self.theclass(0, 0, 2, 3, tzinfo=FixedOffset(1438, ""))
self.assertEqual(hash(t1), hash(t2))
t1 = self.theclass(23, 58, 6, 100, tzinfo=FixedOffset(-1000, ""))
t2 = self.theclass(23, 48, 6, 100, tzinfo=FixedOffset(-1010, ""))
self.assertEqual(hash(t1), hash(t2))
def test_pickling(self):
# Try one without a tzinfo.
args = 20, 59, 16, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
# Try one with a tzinfo.
tinfo = PicklableFixedOffset(-300, 'cookie')
orig = self.theclass(5, 6, 7, tzinfo=tinfo)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.failUnless(isinstance(derived.tzinfo, PicklableFixedOffset))
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
def test_more_bool(self):
# Test cases with non-None tzinfo.
cls = self.theclass
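# An aware time is false exactly when subtracting its utcoffset leaves
# midnight; the cases below exercise both sides of that rule.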
t = cls(0, tzinfo=FixedOffset(-300, ""))
self.failUnless(t)
t = cls(5, tzinfo=FixedOffset(-300, ""))
self.failUnless(t)
t = cls(5, tzinfo=FixedOffset(300, ""))
self.failUnless(not t)
t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, ""))
self.failUnless(not t)
# Mostly ensuring this doesn't overflow internally.
t = cls(0, tzinfo=FixedOffset(23*60 + 59, ""))
self.failUnless(t)
# But this should yield a value error -- the utcoffset is bogus.
t = cls(0, tzinfo=FixedOffset(24*60, ""))
self.assertRaises(ValueError, lambda: bool(t))
# Likewise.
t = cls(0, tzinfo=FixedOffset(-24*60, ""))
self.assertRaises(ValueError, lambda: bool(t))
def test_replace(self):
cls = self.theclass
z100 = FixedOffset(100, "+100")
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, z100]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8),
("tzinfo", zm200)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.failUnless(base2.tzinfo is None)
self.failUnless(base2.tzname() is None)
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.failUnless(base.tzinfo is base3.tzinfo)
# Out of bounds.
base = cls(1)
self.assertRaises(ValueError, base.replace, hour=24)
self.assertRaises(ValueError, base.replace, minute=-1)
self.assertRaises(ValueError, base.replace, second=100)
self.assertRaises(ValueError, base.replace, microsecond=1000000)
def test_mixed_compare(self):
t1 = time(1, 2, 3)
t2 = time(1, 2, 3)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=None)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(None, ""))
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(0, ""))
self.assertRaises(TypeError, lambda: t1 == t2)
# For time objects with identical tzinfo members, utcoffset() is ignored.
class Varies(tzinfo):
def __init__(self):
self.offset = timedelta(minutes=22)
def utcoffset(self, t):
self.offset += timedelta(minutes=1)
return self.offset
v = Varies()
t1 = t2.replace(tzinfo=v)
t2 = t2.replace(tzinfo=v)
self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
self.assertEqual(t1, t2)
# But if they're not identical, it isn't ignored.
t2 = t2.replace(tzinfo=Varies())
self.failUnless(t1 < t2) # t1's offset counter still going up
def test_subclass_timetz(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.second
args = 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
# Testing datetime objects with a non-None tzinfo.
class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
theclass = datetime
def test_trivial(self):
dt = self.theclass(1, 2, 3, 4, 5, 6, 7)
self.assertEqual(dt.year, 1)
self.assertEqual(dt.month, 2)
self.assertEqual(dt.day, 3)
self.assertEqual(dt.hour, 4)
self.assertEqual(dt.minute, 5)
self.assertEqual(dt.second, 6)
self.assertEqual(dt.microsecond, 7)
self.assertEqual(dt.tzinfo, None)
def test_even_more_compare(self):
# The test_compare() and test_more_compare() inherited from TestDate
# and TestDateTime covered non-tzinfo cases.
# Smallest possible after UTC adjustment.
t1 = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
# Largest possible after UTC adjustment.
t2 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, ""))
# Make sure those compare correctly, and w/o overflow.
self.failUnless(t1 < t2)
self.failUnless(t1 != t2)
self.failUnless(t2 > t1)
self.failUnless(t1 == t1)
self.failUnless(t2 == t2)
# Equal after adjustment.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""))
t2 = self.theclass(2, 1, 1, 3, 13, tzinfo=FixedOffset(3*60+13+2, ""))
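# Both normalize to 0001-12-31 23:58 UTC, so they compare equal.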
self.assertEqual(t1, t2)
# Change t1 not to subtract a minute, and t1 should be larger.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(0, ""))
self.failUnless(t1 > t2)
# Change t1 to subtract 2 minutes, and t1 should be smaller.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(2, ""))
self.failUnless(t1 < t2)
# Back to the original t1, but make seconds resolve it.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
second=1)
self.failUnless(t1 > t2)
# Likewise, but make microseconds resolve it.
t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
microsecond=1)
self.failUnless(t1 > t2)
# Make t2 naive and it should fail.
t2 = self.theclass.min
self.assertRaises(TypeError, lambda: t1 == t2)
self.assertEqual(t2, t2)
# It's also naive if it has tzinfo but tzinfo.utcoffset() is None.
class Naive(tzinfo):
def utcoffset(self, dt): return None
t2 = self.theclass(5, 6, 7, tzinfo=Naive())
self.assertRaises(TypeError, lambda: t1 == t2)
self.assertEqual(t2, t2)
# OTOH, it's OK to compare two of these mixing the two ways of being
# naive.
t1 = self.theclass(5, 6, 7)
self.assertEqual(t1, t2)
# Try a bogus utcoffset.
class Bogus(tzinfo):
def utcoffset(self, dt):
return timedelta(minutes=1440) # out of bounds
t1 = self.theclass(2, 2, 2, tzinfo=Bogus())
t2 = self.theclass(2, 2, 2, tzinfo=FixedOffset(0, ""))
self.assertRaises(ValueError, lambda: t1 == t2)
def test_pickling(self):
# Try one without a tzinfo.
args = 6, 7, 23, 20, 59, 1, 64**2
orig = self.theclass(*args)
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
# Try one with a tzinfo.
tinfo = PicklableFixedOffset(-300, 'cookie')
orig = self.theclass(*args, **{'tzinfo': tinfo})
derived = self.theclass(1, 1, 1, tzinfo=FixedOffset(0, "", 0))
for pickler, unpickler, proto in pickle_choices:
green = pickler.dumps(orig, proto)
derived = unpickler.loads(green)
self.assertEqual(orig, derived)
self.failUnless(isinstance(derived.tzinfo,
PicklableFixedOffset))
self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
self.assertEqual(derived.tzname(), 'cookie')
def test_extreme_hashes(self):
# If an attempt is made to hash these via subtracting the offset
# then hashing a datetime object, OverflowError results. The
# Python implementation used to blow up here.
t = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
hash(t)
t = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, ""))
hash(t)
# OTOH, an OOB offset should blow up.
t = self.theclass(5, 5, 5, tzinfo=FixedOffset(-1440, ""))
self.assertRaises(ValueError, hash, t)
def test_zones(self):
est = FixedOffset(-300, "EST")
utc = FixedOffset(0, "UTC")
met = FixedOffset(60, "MET")
t1 = datetime(2002, 3, 19, 7, 47, tzinfo=est)
t2 = datetime(2002, 3, 19, 12, 47, tzinfo=utc)
t3 = datetime(2002, 3, 19, 13, 47, tzinfo=met)
self.assertEqual(t1.tzinfo, est)
self.assertEqual(t2.tzinfo, utc)
self.assertEqual(t3.tzinfo, met)
self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
self.assertEqual(t1.tzname(), "EST")
self.assertEqual(t2.tzname(), "UTC")
self.assertEqual(t3.tzname(), "MET")
self.assertEqual(hash(t1), hash(t2))
self.assertEqual(hash(t1), hash(t3))
self.assertEqual(hash(t2), hash(t3))
self.assertEqual(t1, t2)
self.assertEqual(t1, t3)
self.assertEqual(t2, t3)
self.assertEqual(str(t1), "2002-03-19 07:47:00-05:00")
self.assertEqual(str(t2), "2002-03-19 12:47:00+00:00")
self.assertEqual(str(t3), "2002-03-19 13:47:00+01:00")
d = 'datetime.datetime(2002, 3, 19, '
self.assertEqual(repr(t1), d + "7, 47, tzinfo=est)")
self.assertEqual(repr(t2), d + "12, 47, tzinfo=utc)")
self.assertEqual(repr(t3), d + "13, 47, tzinfo=met)")
def test_combine(self):
met = FixedOffset(60, "MET")
d = date(2002, 3, 4)
tz = time(18, 45, 3, 1234, tzinfo=met)
dt = datetime.combine(d, tz)
self.assertEqual(dt, datetime(2002, 3, 4, 18, 45, 3, 1234,
tzinfo=met))
def test_extract(self):
met = FixedOffset(60, "MET")
dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234, tzinfo=met)
self.assertEqual(dt.date(), date(2002, 3, 4))
self.assertEqual(dt.time(), time(18, 45, 3, 1234))
self.assertEqual(dt.timetz(), time(18, 45, 3, 1234, tzinfo=met))
def test_tz_aware_arithmetic(self):
import random
now = self.theclass.now()
tz55 = FixedOffset(-330, "west 5:30")
timeaware = now.time().replace(tzinfo=tz55)
nowaware = self.theclass.combine(now.date(), timeaware)
self.failUnless(nowaware.tzinfo is tz55)
self.assertEqual(nowaware.timetz(), timeaware)
# Can't mix aware and non-aware.
self.assertRaises(TypeError, lambda: now - nowaware)
self.assertRaises(TypeError, lambda: nowaware - now)
# And adding datetime's doesn't make sense, aware or not.
self.assertRaises(TypeError, lambda: now + nowaware)
self.assertRaises(TypeError, lambda: nowaware + now)
self.assertRaises(TypeError, lambda: nowaware + nowaware)
# Subtracting should yield 0.
self.assertEqual(now - now, timedelta(0))
self.assertEqual(nowaware - nowaware, timedelta(0))
# Adding a delta should preserve tzinfo.
delta = timedelta(weeks=1, minutes=12, microseconds=5678)
nowawareplus = nowaware + delta
self.failUnless(nowaware.tzinfo is tz55)
nowawareplus2 = delta + nowaware
self.failUnless(nowawareplus2.tzinfo is tz55)
self.assertEqual(nowawareplus, nowawareplus2)
# that - delta should be what we started with, and that - what we
# started with should be delta.
diff = nowawareplus - delta
self.failUnless(diff.tzinfo is tz55)
self.assertEqual(nowaware, diff)
self.assertRaises(TypeError, lambda: delta - nowawareplus)
self.assertEqual(nowawareplus - nowaware, delta)
# Make up a random timezone.
tzr = FixedOffset(random.randrange(-1439, 1440), "randomtimezone")
# Attach it to nowawareplus.
nowawareplus = nowawareplus.replace(tzinfo=tzr)
self.failUnless(nowawareplus.tzinfo is tzr)
# Make sure the difference takes the timezone adjustments into account.
got = nowaware - nowawareplus
# Expected: (nowaware base - nowaware offset) -
# (nowawareplus base - nowawareplus offset) =
# (nowaware base - nowawareplus base) +
# (nowawareplus offset - nowaware offset) =
# -delta + nowawareplus offset - nowaware offset
expected = nowawareplus.utcoffset() - nowaware.utcoffset() - delta
self.assertEqual(got, expected)
# Try max possible difference.
min = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, "min"))
max = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
tzinfo=FixedOffset(-1439, "max"))
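# Aware subtraction works on UTC values, so the extreme offsets stretch
# the naive max - min span by an extra 2*1439 minutes.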
maxdiff = max - min
self.assertEqual(maxdiff, self.theclass.max - self.theclass.min +
timedelta(minutes=2*1439))
def test_tzinfo_now(self):
meth = self.theclass.now
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth()
# Try with and without naming the keyword.
off42 = FixedOffset(42, "42")
another = meth(off42)
again = meth(tz=off42)
self.failUnless(another.tzinfo is again.tzinfo)
self.assertEqual(another.utcoffset(), timedelta(minutes=42))
# Bad argument with and w/o naming the keyword.
self.assertRaises(TypeError, meth, 16)
self.assertRaises(TypeError, meth, tzinfo=16)
# Bad keyword name.
self.assertRaises(TypeError, meth, tinfo=off42)
# Too many args.
self.assertRaises(TypeError, meth, off42, off42)
# We don't know which time zone we're in, and don't have a tzinfo
# class to represent it, so seeing whether a tz argument actually
# does a conversion is tricky.
weirdtz = FixedOffset(timedelta(hours=15, minutes=58), "weirdtz", 0)
utc = FixedOffset(0, "utc", 0)
for dummy in range(3):
now = datetime.now(weirdtz)
self.failUnless(now.tzinfo is weirdtz)
utcnow = datetime.utcnow().replace(tzinfo=utc)
now2 = utcnow.astimezone(weirdtz)
if abs(now - now2) < timedelta(seconds=30):
break
# Else the code is broken, or more than 30 seconds passed between
# calls; assuming the latter, just try again.
else:
# Three strikes and we're out.
self.fail("utcnow(), now(tz), or astimezone() may be broken")
def test_tzinfo_fromtimestamp(self):
import time
meth = self.theclass.fromtimestamp
ts = time.time()
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth(ts)
# Try with and without naming the keyword.
off42 = FixedOffset(42, "42")
another = meth(ts, off42)
again = meth(ts, tz=off42)
self.failUnless(another.tzinfo is again.tzinfo)
self.assertEqual(another.utcoffset(), timedelta(minutes=42))
# Bad argument with and w/o naming the keyword.
self.assertRaises(TypeError, meth, ts, 16)
self.assertRaises(TypeError, meth, ts, tzinfo=16)
# Bad keyword name.
self.assertRaises(TypeError, meth, ts, tinfo=off42)
# Too many args.
self.assertRaises(TypeError, meth, ts, off42, off42)
# Too few args.
self.assertRaises(TypeError, meth)
# Try to make sure tz= actually does some conversion.
timestamp = 1000000000
utcdatetime = datetime.utcfromtimestamp(timestamp)
# In POSIX (epoch 1970), that's 2001-09-09 01:46:40 UTC, give or take.
# But on some flavor of Mac, it's nowhere near that. So we can't have
# any idea here what time that actually is, we can only test that
# relative changes match.
utcoffset = timedelta(hours=-15, minutes=39) # arbitrary, but not zero
tz = FixedOffset(utcoffset, "tz", 0)
expected = utcdatetime + utcoffset
got = datetime.fromtimestamp(timestamp, tz)
self.assertEqual(expected, got.replace(tzinfo=None))
def test_tzinfo_utcnow(self):
meth = self.theclass.utcnow
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth()
# Try with and without naming the keyword; for whatever reason,
# utcnow() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, off42)
self.assertRaises(TypeError, meth, tzinfo=off42)
def test_tzinfo_utcfromtimestamp(self):
import time
meth = self.theclass.utcfromtimestamp
ts = time.time()
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth(ts)
# Try with and without naming the keyword; for whatever reason,
# utcfromtimestamp() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, ts, off42)
self.assertRaises(TypeError, meth, ts, tzinfo=off42)
def test_tzinfo_timetuple(self):
# TestDateTime tested most of this. datetime adds a twist to the
# DST flag.
class DST(tzinfo):
def __init__(self, dstvalue):
if isinstance(dstvalue, int):
dstvalue = timedelta(minutes=dstvalue)
self.dstvalue = dstvalue
def dst(self, dt):
return self.dstvalue
cls = self.theclass
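# Expected tm_isdst: a non-zero dst() delta maps to 1, a zero delta to 0,
# and None to -1 (unknown).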
for dstvalue, flag in (-33, 1), (33, 1), (0, 0), (None, -1):
d = cls(1, 1, 1, 10, 20, 30, 40, tzinfo=DST(dstvalue))
t = d.timetuple()
self.assertEqual(1, t.tm_year)
self.assertEqual(1, t.tm_mon)
self.assertEqual(1, t.tm_mday)
self.assertEqual(10, t.tm_hour)
self.assertEqual(20, t.tm_min)
self.assertEqual(30, t.tm_sec)
self.assertEqual(0, t.tm_wday)
self.assertEqual(1, t.tm_yday)
self.assertEqual(flag, t.tm_isdst)
# dst() returns wrong type.
self.assertRaises(TypeError, cls(1, 1, 1, tzinfo=DST("x")).timetuple)
# dst() at the edge.
self.assertEqual(cls(1,1,1, tzinfo=DST(1439)).timetuple().tm_isdst, 1)
self.assertEqual(cls(1,1,1, tzinfo=DST(-1439)).timetuple().tm_isdst, 1)
# dst() out of range.
self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(1440)).timetuple)
self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(-1440)).timetuple)
def test_utctimetuple(self):
class DST(tzinfo):
def __init__(self, dstvalue):
if isinstance(dstvalue, int):
dstvalue = timedelta(minutes=dstvalue)
self.dstvalue = dstvalue
def dst(self, dt):
return self.dstvalue
cls = self.theclass
# This can't work: DST didn't implement utcoffset.
self.assertRaises(NotImplementedError,
cls(1, 1, 1, tzinfo=DST(0)).utcoffset)
class UOFS(DST):
def __init__(self, uofs, dofs=None):
DST.__init__(self, dofs)
self.uofs = timedelta(minutes=uofs)
def utcoffset(self, dt):
return self.uofs
# Ensure tm_isdst is 0 regardless of what dst() says: DST is never
# in effect for a UTC time.
for dstvalue in -33, 33, 0, None:
d = cls(1, 2, 3, 10, 20, 30, 40, tzinfo=UOFS(-53, dstvalue))
t = d.utctimetuple()
self.assertEqual(d.year, t.tm_year)
self.assertEqual(d.month, t.tm_mon)
self.assertEqual(d.day, t.tm_mday)
self.assertEqual(11, t.tm_hour) # 20 min + 53 min = 1 hr 13 min
self.assertEqual(13, t.tm_min)
self.assertEqual(d.second, t.tm_sec)
self.assertEqual(d.weekday(), t.tm_wday)
self.assertEqual(d.toordinal() - date(1, 1, 1).toordinal() + 1,
t.tm_yday)
self.assertEqual(0, t.tm_isdst)
# At the edges, UTC adjustment can normalize into years out-of-range
# for a datetime object. Ensure that a correct timetuple is
# created anyway.
tiny = cls(MINYEAR, 1, 1, 0, 0, 37, tzinfo=UOFS(1439))
# That goes back 1 minute less than a full day.
t = tiny.utctimetuple()
self.assertEqual(t.tm_year, MINYEAR-1)
self.assertEqual(t.tm_mon, 12)
self.assertEqual(t.tm_mday, 31)
self.assertEqual(t.tm_hour, 0)
self.assertEqual(t.tm_min, 1)
self.assertEqual(t.tm_sec, 37)
self.assertEqual(t.tm_yday, 366) # "year 0" is a leap year
self.assertEqual(t.tm_isdst, 0)
huge = cls(MAXYEAR, 12, 31, 23, 59, 37, 999999, tzinfo=UOFS(-1439))
# That goes forward 1 minute less than a full day.
t = huge.utctimetuple()
self.assertEqual(t.tm_year, MAXYEAR+1)
self.assertEqual(t.tm_mon, 1)
self.assertEqual(t.tm_mday, 1)
self.assertEqual(t.tm_hour, 23)
self.assertEqual(t.tm_min, 58)
self.assertEqual(t.tm_sec, 37)
self.assertEqual(t.tm_yday, 1)
self.assertEqual(t.tm_isdst, 0)
def test_tzinfo_isoformat(self):
zero = FixedOffset(0, "+00:00")
plus = FixedOffset(220, "+03:40")
minus = FixedOffset(-231, "-03:51")
unknown = FixedOffset(None, "")
cls = self.theclass
datestr = '0001-02-03'
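# Each FixedOffset name above equals its ISO 8601 suffix, so d.tzname()
# doubles as the expected offset string; the naive and unknown-offset
# cases contribute an empty suffix.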
for ofs in None, zero, plus, minus, unknown:
for us in 0, 987001:
d = cls(1, 2, 3, 4, 5, 59, us, tzinfo=ofs)
timestr = '04:05:59' + (us and '.987001' or '')
ofsstr = ofs is not None and d.tzname() or ''
tailstr = timestr + ofsstr
iso = d.isoformat()
self.assertEqual(iso, datestr + 'T' + tailstr)
self.assertEqual(iso, d.isoformat('T'))
self.assertEqual(d.isoformat('k'), datestr + 'k' + tailstr)
self.assertEqual(str(d), datestr + ' ' + tailstr)
def test_replace(self):
cls = self.theclass
z100 = FixedOffset(100, "+100")
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, 5, 6, 7, z100]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4),
("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8),
("tzinfo", zm200)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.failUnless(base2.tzinfo is None)
self.failUnless(base2.tzname() is None)
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.failUnless(base.tzinfo is base3.tzinfo)
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
def test_more_astimezone(self):
# The inherited test_astimezone covered some trivial and error cases.
fnone = FixedOffset(None, "None")
f44m = FixedOffset(44, "44")
fm5h = FixedOffset(-timedelta(hours=5), "m300")
dt = self.theclass.now(tz=f44m)
self.failUnless(dt.tzinfo is f44m)
# Replacing with degenerate tzinfo raises an exception.
self.assertRaises(ValueError, dt.astimezone, fnone)
# Ditto with None tz.
self.assertRaises(TypeError, dt.astimezone, None)
# Replacing with same tzinfo makes no change.
x = dt.astimezone(dt.tzinfo)
self.failUnless(x.tzinfo is f44m)
self.assertEqual(x.date(), dt.date())
self.assertEqual(x.time(), dt.time())
# Replacing with different tzinfo does adjust.
got = dt.astimezone(fm5h)
self.failUnless(got.tzinfo is fm5h)
self.assertEqual(got.utcoffset(), timedelta(hours=-5))
expected = dt - dt.utcoffset() # in effect, convert to UTC
expected += fm5h.utcoffset(dt) # and from there to local time
expected = expected.replace(tzinfo=fm5h) # and attach new tzinfo
self.assertEqual(got.date(), expected.date())
self.assertEqual(got.time(), expected.time())
self.assertEqual(got.timetz(), expected.timetz())
self.failUnless(got.tzinfo is expected.tzinfo)
self.assertEqual(got, expected)
def test_aware_subtract(self):
cls = self.theclass
# Ensure that utcoffset() is ignored when the operands have the
# same tzinfo member.
class OperandDependentOffset(tzinfo):
def utcoffset(self, t):
if t.minute < 10:
# d0 and d1 equal after adjustment
return timedelta(minutes=t.minute)
else:
# d2 off in the weeds
return timedelta(minutes=59)
base = cls(8, 9, 10, 11, 12, 13, 14, tzinfo=OperandDependentOffset())
d0 = base.replace(minute=3)
d1 = base.replace(minute=9)
d2 = base.replace(minute=11)
for x in d0, d1, d2:
for y in d0, d1, d2:
got = x - y
expected = timedelta(minutes=x.minute - y.minute)
self.assertEqual(got, expected)
# OTOH, if the tzinfo members are distinct, utcoffsets aren't
# ignored.
base = cls(8, 9, 10, 11, 12, 13, 14)
d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
for x in d0, d1, d2:
for y in d0, d1, d2:
got = x - y
if (x is d0 or x is d1) and (y is d0 or y is d1):
expected = timedelta(0)
elif x is y is d2:
expected = timedelta(0)
elif x is d2:
expected = timedelta(minutes=(11-59)-0)
else:
assert y is d2
expected = timedelta(minutes=0-(11-59))
self.assertEqual(got, expected)
def test_mixed_compare(self):
t1 = datetime(1, 2, 3, 4, 5, 6, 7)
t2 = datetime(1, 2, 3, 4, 5, 6, 7)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=None)
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(None, ""))
self.assertEqual(t1, t2)
t2 = t2.replace(tzinfo=FixedOffset(0, ""))
self.assertRaises(TypeError, lambda: t1 == t2)
# In datetime w/ identical tzinfo objects, utcoffset is ignored.
class Varies(tzinfo):
def __init__(self):
self.offset = timedelta(minutes=22)
def utcoffset(self, t):
self.offset += timedelta(minutes=1)
return self.offset
v = Varies()
t1 = t2.replace(tzinfo=v)
t2 = t2.replace(tzinfo=v)
self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
self.assertEqual(t1, t2)
# But if they're not identical, it isn't ignored.
t2 = t2.replace(tzinfo=Varies())
self.failUnless(t1 < t2) # t1's offset counter still going up
def test_subclass_datetimetz(self):
class C(self.theclass):
theAnswer = 42
def __new__(cls, *args, **kws):
temp = kws.copy()
extra = temp.pop('extra')
result = self.theclass.__new__(cls, *args, **temp)
result.extra = extra
return result
def newmeth(self, start):
return start + self.hour + self.year
args = 2002, 12, 31, 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
dt1 = self.theclass(*args)
dt2 = C(*args, **{'extra': 7})
self.assertEqual(dt2.__class__, C)
self.assertEqual(dt2.theAnswer, 42)
self.assertEqual(dt2.extra, 7)
self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.year - 7)
# Pain to set up DST-aware tzinfo classes.
def first_sunday_on_or_after(dt):
days_to_go = 6 - dt.weekday()
if days_to_go:
dt += timedelta(days_to_go)
return dt
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
DAY = timedelta(days=1)
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct,
# which is the first Sunday on or after Oct 25. Because we view 1:MM as
# being standard time on that day, there is no spelling in local time of
# the last hour of DST (that's 1:MM DST, but 1:MM is taken as standard time).
DSTEND = datetime(1, 10, 25, 1)
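# Worked example (using the 2002 data exercised further below): April 1, 2002
# is a Monday, so first_sunday_on_or_after(DSTSTART.replace(year=2002)) is
# April 7, 2002 02:00 -- the dston value in TestTimezoneConversions. October
# 25, 2002 is a Friday, so the end rule yields October 27, 2002 01:00 (dstoff).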
class USTimeZone(tzinfo):
def __init__(self, hours, reprname, stdname, dstname):
self.stdoffset = timedelta(hours=hours)
self.reprname = reprname
self.stdname = stdname
self.dstname = dstname
def __repr__(self):
return self.reprname
def tzname(self, dt):
if self.dst(dt):
return self.dstname
else:
return self.stdname
def utcoffset(self, dt):
return self.stdoffset + self.dst(dt)
def dst(self, dt):
if dt is None or dt.tzinfo is None:
# An exception instead may be sensible here, in one or more of
# the cases.
return ZERO
assert dt.tzinfo is self
# Find first Sunday in April.
start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
assert start.weekday() == 6 and start.month == 4 and start.day <= 7
# Find last Sunday in October.
end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
assert end.weekday() == 6 and end.month == 10 and end.day >= 25
# Can't compare naive to aware objects, so strip the timezone from
# dt first.
if start <= dt.replace(tzinfo=None) < end:
return HOUR
else:
return ZERO
Eastern = USTimeZone(-5, "Eastern", "EST", "EDT")
Central = USTimeZone(-6, "Central", "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific = USTimeZone(-8, "Pacific", "PST", "PDT")
utc_real = FixedOffset(0, "UTC", 0)
# For better test coverage, we want another flavor of UTC that's west of
# the Eastern and Pacific timezones.
utc_fake = FixedOffset(-12*60, "UTCfake", 0)
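# Usage sketch (assumed sample dates, not part of the original suite): with the
# rules above, Eastern is UTC-4 in midsummer and UTC-5 in midwinter, e.g.
#   datetime(2002, 7, 4, 12, tzinfo=Eastern).utcoffset() == timedelta(hours=-4)
#   datetime(2002, 7, 4, 12, tzinfo=Eastern).tzname()    == "EDT"
#   datetime(2002, 1, 4, 12, tzinfo=Eastern).utcoffset() == timedelta(hours=-5)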
class TestTimezoneConversions(unittest.TestCase):
# The DST switch times for 2002, in std time.
dston = datetime(2002, 4, 7, 2)
dstoff = datetime(2002, 10, 27, 1)
theclass = datetime
# Check a time that's inside DST.
def checkinside(self, dt, tz, utc, dston, dstoff):
self.assertEqual(dt.dst(), HOUR)
# Conversion to our own timezone is always an identity.
self.assertEqual(dt.astimezone(tz), dt)
asutc = dt.astimezone(utc)
there_and_back = asutc.astimezone(tz)
# Conversion to UTC and back isn't always an identity here,
# because there are redundant spellings (in local time) of
# UTC time when DST begins: the clock jumps from 1:59:59
# to 3:00:00, and a local time of 2:MM:SS doesn't really
# make sense then. The classes above treat 2:MM:SS as
# daylight time then (it's "after 2am"), really an alias
# for 1:MM:SS standard time. The latter form is what
# conversion back from UTC produces.
if dt.date() == dston.date() and dt.hour == 2:
# We're in the redundant hour, and coming back from
# UTC gives the 1:MM:SS standard-time spelling.
self.assertEqual(there_and_back + HOUR, dt)
# Although during was considered to be in daylight
# time, there_and_back is not.
self.assertEqual(there_and_back.dst(), ZERO)
# They're the same times in UTC.
self.assertEqual(there_and_back.astimezone(utc),
dt.astimezone(utc))
else:
# We're not in the redundant hour.
self.assertEqual(dt, there_and_back)
# Because we have a redundant spelling when DST begins, there is
# (unfortunately) an hour when DST ends that can't be spelled at all in
# local time. When DST ends, the clock jumps from 1:59 back to 1:00
# again. The hour 1:MM DST has no spelling then: 1:MM is taken to be
# standard time. 1:MM DST == 0:MM EST, but 0:MM is taken to be
# daylight time. The hour 1:MM daylight == 0:MM standard can't be
# expressed in local time. Nevertheless, we want conversion back
# from UTC to mimic the local clock's "repeat an hour" behavior.
nexthour_utc = asutc + HOUR
nexthour_tz = nexthour_utc.astimezone(tz)
if dt.date() == dstoff.date() and dt.hour == 0:
# We're in the hour before the last DST hour. The last DST hour
# is ineffable. We want the conversion back to repeat 1:MM.
self.assertEqual(nexthour_tz, dt.replace(hour=1))
nexthour_utc += HOUR
nexthour_tz = nexthour_utc.astimezone(tz)
self.assertEqual(nexthour_tz, dt.replace(hour=1))
else:
self.assertEqual(nexthour_tz - dt, HOUR)
# Check a time that's outside DST.
def checkoutside(self, dt, tz, utc):
self.assertEqual(dt.dst(), ZERO)
# Conversion to our own timezone is always an identity.
self.assertEqual(dt.astimezone(tz), dt)
# Converting to UTC and back is an identity too.
asutc = dt.astimezone(utc)
there_and_back = asutc.astimezone(tz)
self.assertEqual(dt, there_and_back)
def convert_between_tz_and_utc(self, tz, utc):
dston = self.dston.replace(tzinfo=tz)
# Because 1:MM on the day DST ends is taken as being standard time,
# there is no spelling in tz for the last hour of daylight time.
# For purposes of the test, the last hour of DST is 0:MM, which is
# taken as being daylight time (and 1:MM is taken as being standard
# time).
dstoff = self.dstoff.replace(tzinfo=tz)
for delta in (timedelta(weeks=13),
DAY,
HOUR,
timedelta(minutes=1),
timedelta(microseconds=1)):
self.checkinside(dston, tz, utc, dston, dstoff)
for during in dston + delta, dstoff - delta:
self.checkinside(during, tz, utc, dston, dstoff)
self.checkoutside(dstoff, tz, utc)
for outside in dston - delta, dstoff + delta:
self.checkoutside(outside, tz, utc)
def test_easy(self):
# Despite the name of this test, the endcases are excruciating.
self.convert_between_tz_and_utc(Eastern, utc_real)
self.convert_between_tz_and_utc(Pacific, utc_real)
self.convert_between_tz_and_utc(Eastern, utc_fake)
self.convert_between_tz_and_utc(Pacific, utc_fake)
# The next is really dancing near the edge. It works because
# Pacific and Eastern are far enough apart that their "problem
# hours" don't overlap.
self.convert_between_tz_and_utc(Eastern, Pacific)
self.convert_between_tz_and_utc(Pacific, Eastern)
# OTOH, these fail! Don't enable them. The difficulty is that
# the edge case tests assume that every hour is representable in
# the "utc" class. This is always true for a fixed-offset tzinfo
# class (like utc_real and utc_fake), but not for Eastern or Central.
# For these adjacent DST-aware time zones, the range of time offsets
# tested ends up creating hours in the one that aren't representable
# in the other. For the same reason, we would see failures in the
# Eastern vs Pacific tests too if we added 3*HOUR to the list of
# offset deltas in convert_between_tz_and_utc().
#
# self.convert_between_tz_and_utc(Eastern, Central) # can't work
# self.convert_between_tz_and_utc(Central, Eastern) # can't work
def test_tricky(self):
# 22:00 on day before daylight starts.
fourback = self.dston - timedelta(hours=4)
ninewest = FixedOffset(-9*60, "-0900", 0)
fourback = fourback.replace(tzinfo=ninewest)
# 22:00-0900 is 7:00 UTC == 2:00 EST == 3:00 DST. Since it's "after
# 2", we should get the 3 spelling.
# If we plug 22:00 the day before into Eastern, it "looks like std
# time", so its offset is returned as -5, and -5 - -9 = 4. Adding 4
# to 22:00 lands on 2:00, which makes no sense in local time (the
# local clock jumps from 1 to 3). The point here is to make sure we
# get the 3 spelling.
expected = self.dston.replace(hour=3)
got = fourback.astimezone(Eastern).replace(tzinfo=None)
self.assertEqual(expected, got)
# Similar, but map to 6:00 UTC == 1:00 EST == 2:00 DST. In that
# case we want the 1:00 spelling.
sixutc = self.dston.replace(hour=6, tzinfo=utc_real)
# Now 6:00 "looks like daylight", so the offset wrt Eastern is -4,
# and adding -4-0 == -4 gives the 2:00 spelling. We want the 1:00 EST
# spelling.
expected = self.dston.replace(hour=1)
got = sixutc.astimezone(Eastern).replace(tzinfo=None)
self.assertEqual(expected, got)
# Now on the day DST ends, we want "repeat an hour" behavior.
# UTC 4:MM 5:MM 6:MM 7:MM checking these
# EST 23:MM 0:MM 1:MM 2:MM
# EDT 0:MM 1:MM 2:MM 3:MM
# wall 0:MM 1:MM 1:MM 2:MM against these
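# Worked example against the table above (utc_real with Eastern): 5:30 UTC
# converts to a wall reading of 1:30 (still EDT), and an hour later 6:30 UTC
# converts to the same wall reading of 1:30 (now EST) -- the repeated hour.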
for utc in utc_real, utc_fake:
for tz in Eastern, Pacific:
first_std_hour = self.dstoff - timedelta(hours=2) # 23:MM
# Convert that to UTC.
first_std_hour -= tz.utcoffset(None)
# Adjust for possibly fake UTC.
asutc = first_std_hour + utc.utcoffset(None)
# First UTC hour to convert; this is 4:00 when utc=utc_real &
# tz=Eastern.
asutcbase = asutc.replace(tzinfo=utc)
for tzhour in (0, 1, 1, 2):
expectedbase = self.dstoff.replace(hour=tzhour)
for minute in 0, 30, 59:
expected = expectedbase.replace(minute=minute)
asutc = asutcbase.replace(minute=minute)
astz = asutc.astimezone(tz)
self.assertEqual(astz.replace(tzinfo=None), expected)
asutcbase += HOUR
def test_bogus_dst(self):
class ok(tzinfo):
def utcoffset(self, dt): return HOUR
def dst(self, dt): return HOUR
now = self.theclass.now().replace(tzinfo=utc_real)
# Doesn't blow up.
now.astimezone(ok())
# Does blow up.
class notok(ok):
def dst(self, dt): return None
self.assertRaises(ValueError, now.astimezone, notok())
def test_fromutc(self):
self.assertRaises(TypeError, Eastern.fromutc) # not enough args
now = datetime.utcnow().replace(tzinfo=utc_real)
self.assertRaises(ValueError, Eastern.fromutc, now) # wrong tzinfo
now = now.replace(tzinfo=Eastern) # insert correct tzinfo
enow = Eastern.fromutc(now) # doesn't blow up
self.assertEqual(enow.tzinfo, Eastern) # has right tzinfo member
self.assertRaises(TypeError, Eastern.fromutc, now, now) # too many args
self.assertRaises(TypeError, Eastern.fromutc, date.today()) # wrong type
# Always converts UTC to standard time.
class FauxUSTimeZone(USTimeZone):
def fromutc(self, dt):
return dt + self.stdoffset
FEastern = FauxUSTimeZone(-5, "FEastern", "FEST", "FEDT")
# UTC 4:MM 5:MM 6:MM 7:MM 8:MM 9:MM
# EST 23:MM 0:MM 1:MM 2:MM 3:MM 4:MM
# EDT 0:MM 1:MM 2:MM 3:MM 4:MM 5:MM
# Check around DST start.
start = self.dston.replace(hour=4, tzinfo=Eastern)
fstart = start.replace(tzinfo=FEastern)
for wall in 23, 0, 1, 3, 4, 5:
expected = start.replace(hour=wall)
if wall == 23:
expected -= timedelta(days=1)
got = Eastern.fromutc(start)
self.assertEqual(expected, got)
expected = fstart + FEastern.stdoffset
got = FEastern.fromutc(fstart)
self.assertEqual(expected, got)
# Ensure astimezone() calls fromutc() too.
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
self.assertEqual(expected, got)
start += HOUR
fstart += HOUR
# Check around DST end.
start = self.dstoff.replace(hour=4, tzinfo=Eastern)
fstart = start.replace(tzinfo=FEastern)
for wall in 0, 1, 1, 2, 3, 4:
expected = start.replace(hour=wall)
got = Eastern.fromutc(start)
self.assertEqual(expected, got)
expected = fstart + FEastern.stdoffset
got = FEastern.fromutc(fstart)
self.assertEqual(expected, got)
# Ensure astimezone() calls fromutc() too.
got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
self.assertEqual(expected, got)
start += HOUR
fstart += HOUR
#############################################################################
# oddballs
class Oddballs(unittest.TestCase):
def test_bug_1028306(self):
# Trying to compare a date to a datetime should act like a mixed-
# type comparison, despite that datetime is a subclass of date.
as_date = date.today()
as_datetime = datetime.combine(as_date, time())
self.assert_(as_date != as_datetime)
self.assert_(as_datetime != as_date)
self.assert_(not as_date == as_datetime)
self.assert_(not as_datetime == as_date)
self.assertRaises(TypeError, lambda: as_date < as_datetime)
self.assertRaises(TypeError, lambda: as_datetime < as_date)
self.assertRaises(TypeError, lambda: as_date <= as_datetime)
self.assertRaises(TypeError, lambda: as_datetime <= as_date)
self.assertRaises(TypeError, lambda: as_date > as_datetime)
self.assertRaises(TypeError, lambda: as_datetime > as_date)
self.assertRaises(TypeError, lambda: as_date >= as_datetime)
self.assertRaises(TypeError, lambda: as_datetime >= as_date)
# Nevertheless, comparison should work with the base-class (date)
# projection if use of a date method is forced.
self.assert_(as_date.__eq__(as_datetime))
different_day = (as_date.day + 1) % 20 + 1
self.assert_(not as_date.__eq__(as_datetime.replace(day=
different_day)))
# And date should compare with other subclasses of date. If a
# subclass wants to stop this, it's up to the subclass to do so.
date_sc = SubclassDate(as_date.year, as_date.month, as_date.day)
self.assertEqual(as_date, date_sc)
self.assertEqual(date_sc, as_date)
# Ditto for datetimes.
datetime_sc = SubclassDatetime(as_datetime.year, as_datetime.month,
as_date.day, 0, 0, 0)
self.assertEqual(as_datetime, datetime_sc)
self.assertEqual(datetime_sc, as_datetime)
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| apache-2.0 |
sexroute/commandergenius | project/jni/python/src/Lib/encodings/iso2022_kr.py | 816 | 1053 | #
# iso2022_kr.py: Python Unicode Codec for ISO2022_KR
#
# Written by Hye-Shik Chang <[email protected]>
#
import _codecs_iso2022, codecs
import _multibytecodec as mbc
codec = _codecs_iso2022.getcodec('iso2022_kr')
class Codec(codecs.Codec):
encode = codec.encode
decode = codec.decode
class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
codecs.IncrementalEncoder):
codec = codec
class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
codecs.IncrementalDecoder):
codec = codec
class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
codec = codec
class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
codec = codec
def getregentry():
return codecs.CodecInfo(
name='iso2022_kr',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
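# Usage sketch (assumed sample text, not part of the codec module): once the
# encodings package registers this entry, the codec is reached through the
# normal lookup machinery, e.g.
#   data = u'\uc548\ub155'.encode('iso2022_kr')
#   assert data.decode('iso2022_kr') == u'\uc548\ub155'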
| lgpl-2.1 |
GustavoHennig/ansible | lib/ansible/modules/cloud/misc/xenserver_facts.py | 69 | 5402 | #!/usr/bin/python -tt
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: xenserver_facts
version_added: "2.0"
short_description: get facts reported on xenserver
description:
- Reads data out of XenAPI, can be used instead of multiple xe commands.
author:
- Andy Hill (@andyhky)
- Tim Rupp
options: {}
'''
EXAMPLES = '''
- name: Gather facts from xenserver
xenserver:
- name: Print running VMs
debug:
msg: "{{ item }}"
with_items: "{{ xs_vms.keys() }}"
when: xs_vms[item]['power_state'] == "Running"
# Which will print:
#
# TASK: [Print running VMs] ***********************************************************
# skipping: [10.13.0.22] => (item=CentOS 4.7 (32-bit))
# ok: [10.13.0.22] => (item=Control domain on host: 10.0.13.22) => {
# "item": "Control domain on host: 10.0.13.22",
# "msg": "Control domain on host: 10.0.13.22"
# }
'''
import platform
HAVE_XENAPI = False
try:
import XenAPI
HAVE_XENAPI = True
except ImportError:
pass
class XenServerFacts:
def __init__(self):
self.codes = {
'5.5.0': 'george',
'5.6.100': 'oxford',
'6.0.0': 'boston',
'6.1.0': 'tampa',
'6.2.0': 'clearwater'
}
@property
def version(self):
# Be aware! Deprecated in Python 2.6!
result = platform.dist()[1]
return result
@property
def codename(self):
if self.version in self.codes:
result = self.codes[self.version]
else:
result = None
return result
def get_xenapi_session():
session = XenAPI.xapi_local()
session.xenapi.login_with_password('', '')
return session
def get_networks(session):
recs = session.xenapi.network.get_all_records()
xs_networks = {}
networks = change_keys(recs, key='uuid')
for network in networks.values():
xs_networks[network['name_label']] = network
return xs_networks
def get_pifs(session):
recs = session.xenapi.PIF.get_all_records()
pifs = change_keys(recs, key='uuid')
xs_pifs = {}
devicenums = range(0, 7)
for pif in pifs.values():
for eth in devicenums:
interface_name = "eth%s" % (eth)
bond_name = interface_name.replace('eth', 'bond')
if pif['device'] == interface_name:
xs_pifs[interface_name] = pif
elif pif['device'] == bond_name:
xs_pifs[bond_name] = pif
return xs_pifs
def get_vlans(session):
recs = session.xenapi.VLAN.get_all_records()
return change_keys(recs, key='tag')
def change_keys(recs, key='uuid', filter_func=None):
"""
Take a xapi dict, and make the keys the value of recs[ref][key].
Preserves the ref in rec['ref']
"""
new_recs = {}
for ref, rec in recs.items():
if filter_func is not None and not filter_func(rec):
continue
new_recs[rec[key]] = rec
new_recs[rec[key]]['ref'] = ref
return new_recs
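# Illustrative sketch (assumed record shapes): given
#   recs = {'OpaqueRef:1': {'uuid': 'abc', 'name_label': 'net0'}}
# change_keys(recs, key='uuid') returns
#   {'abc': {'uuid': 'abc', 'name_label': 'net0', 'ref': 'OpaqueRef:1'}}
# i.e. records become addressable by uuid while keeping their original ref.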
def get_host(session):
"""Get the host"""
host_recs = session.xenapi.host.get_all()
# We only have one host, so just return its entry
return session.xenapi.host.get_record(host_recs[0])
def get_vms(session):
xs_vms = {}
recs = session.xenapi.VM.get_all()
if not recs:
return None
vms = change_keys(recs, key='uuid')
for vm in vms.values():
xs_vms[vm['name_label']] = vm
return xs_vms
def get_srs(session):
xs_srs = {}
recs = session.xenapi.SR.get_all()
if not recs:
return None
srs = change_keys(recs, key='uuid')
for sr in srs.values():
xs_srs[sr['name_label']] = sr
return xs_srs
def main():
module = AnsibleModule({})
if not HAVE_XENAPI:
module.fail_json(changed=False, msg="python xen api required for this module")
obj = XenServerFacts()
try:
session = get_xenapi_session()
except XenAPI.Failure as e:
module.fail_json(msg='%s' % e)
data = {
'xenserver_version': obj.version,
'xenserver_codename': obj.codename
}
xs_networks = get_networks(session)
xs_pifs = get_pifs(session)
xs_vlans = get_vlans(session)
xs_vms = get_vms(session)
xs_srs = get_srs(session)
if xs_vlans:
data['xs_vlans'] = xs_vlans
if xs_pifs:
data['xs_pifs'] = xs_pifs
if xs_networks:
data['xs_networks'] = xs_networks
if xs_vms:
data['xs_vms'] = xs_vms
if xs_srs:
data['xs_srs'] = xs_srs
module.exit_json(ansible=data)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
babyliynfg/cross | tools/project-creator/Python2.6.6/Lib/email/mime/message.py | 73 | 1320 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Class representing message/* MIME documents."""
__all__ = ['MIMEMessage']
from email import message
from email.mime.nonmultipart import MIMENonMultipart
class MIMEMessage(MIMENonMultipart):
"""Class representing message/* MIME documents."""
def __init__(self, _msg, _subtype='rfc822'):
"""Create a message/* type MIME document.
_msg is a message object and must be an instance of Message, or a
derived class of Message, otherwise a TypeError is raised.
Optional _subtype defines the subtype of the contained message. The
default is "rfc822" (this is defined by the MIME standard, even though
the term "rfc822" is technically outdated by RFC 2822).
"""
MIMENonMultipart.__init__(self, 'message', _subtype)
if not isinstance(_msg, message.Message):
raise TypeError('Argument is not an instance of Message')
# It's convenient to use this base class method. We need to do it
# this way or we'll get an exception
message.Message.attach(self, _msg)
# And be sure our default type is set correctly
self.set_default_type('message/rfc822')
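# Usage sketch (assumed message content, not part of the library module):
# wrapping an existing Message in a message/rfc822 container as described in
# the class docstring above.
if __name__ == '__main__':
    from email.message import Message
    inner = Message()
    inner['Subject'] = 'hello'
    inner.set_payload('body text')
    outer = MIMEMessage(inner)
    assert outer.get_content_type() == 'message/rfc822'
    assert outer.get_payload(0) is inner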
| mit |
yjmade/odoo | openerp/addons/base/res/res_partner.py | 22 | 39855 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
from lxml import etree
import math
import pytz
import urlparse
import openerp
from openerp import tools, api
from openerp.osv import osv, fields
from openerp.osv.expression import get_unaccent_wrapper
from openerp.tools.translate import _
ADDRESS_FORMAT_LAYOUTS = {
'%(city)s %(state_code)s\n%(zip)s': """
<div class="address_format">
<field name="city" placeholder="City" style="width: 50%%"/>
<field name="state_id" class="oe_no_button" placeholder="State" style="width: 47%%" options='{"no_open": true}'/>
<br/>
<field name="zip" placeholder="ZIP"/>
</div>
""",
'%(zip)s %(city)s': """
<div class="address_format">
<field name="zip" placeholder="ZIP" style="width: 40%%"/>
<field name="city" placeholder="City" style="width: 57%%"/>
<br/>
<field name="state_id" class="oe_no_button" placeholder="State" options='{"no_open": true}'/>
</div>
""",
'%(city)s\n%(state_name)s\n%(zip)s': """
<div class="address_format">
<field name="city" placeholder="City"/>
<field name="state_id" class="oe_no_button" placeholder="State" options='{"no_open": true}'/>
<field name="zip" placeholder="ZIP"/>
</div>
"""
}
class format_address(object):
@api.model
def fields_view_get_address(self, arch):
fmt = self.env.user.company_id.country_id.address_format or ''
for k, v in ADDRESS_FORMAT_LAYOUTS.items():
if k in fmt:
doc = etree.fromstring(arch)
for node in doc.xpath("//div[@class='address_format']"):
tree = etree.fromstring(v)
node.getparent().replace(node, tree)
arch = etree.tostring(doc)
break
return arch
@api.model
def _tz_get(self):
# put POSIX 'Etc/*' entries at the end to avoid confusing users - see bug 1086728
return [(tz,tz) for tz in sorted(pytz.all_timezones, key=lambda tz: tz if not tz.startswith('Etc/') else '_')]
class res_partner_category(osv.Model):
def name_get(self, cr, uid, ids, context=None):
""" Return the categories' display name, including their direct
parent by default.
If ``context['partner_category_display']`` is ``'short'``, the short
version of the category name (without the direct parent) is used.
The default is the long version.
"""
if not isinstance(ids, list):
ids = [ids]
if context is None:
context = {}
if context.get('partner_category_display') == 'short':
return super(res_partner_category, self).name_get(cr, uid, ids, context=context)
res = []
for category in self.browse(cr, uid, ids, context=context):
names = []
current = category
while current:
names.append(current.name)
current = current.parent_id
res.append((category.id, ' / '.join(reversed(names))))
return res
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
if name:
# Be sure name_search is symmetric to name_get
name = name.split(' / ')[-1]
args = [('name', operator, name)] + args
categories = self.search(args, limit=limit)
return categories.name_get()
@api.multi
def _name_get_fnc(self, field_name, arg):
return dict(self.name_get())
_description = 'Partner Tags'
_name = 'res.partner.category'
_columns = {
'name': fields.char('Category Name', required=True, translate=True),
'parent_id': fields.many2one('res.partner.category', 'Parent Category', select=True, ondelete='cascade'),
'complete_name': fields.function(_name_get_fnc, type="char", string='Full Name'),
'child_ids': fields.one2many('res.partner.category', 'parent_id', 'Child Categories'),
'active': fields.boolean('Active', help="The active field allows you to hide the category without removing it."),
'parent_left': fields.integer('Left parent', select=True),
'parent_right': fields.integer('Right parent', select=True),
'partner_ids': fields.many2many('res.partner', id1='category_id', id2='partner_id', string='Partners'),
}
_constraints = [
(osv.osv._check_recursion, 'Error ! You can not create recursive categories.', ['parent_id'])
]
_defaults = {
'active': 1,
}
_parent_store = True
_parent_order = 'name'
_order = 'parent_left'
class res_partner_title(osv.osv):
_name = 'res.partner.title'
_order = 'name'
_columns = {
'name': fields.char('Title', required=True, translate=True),
'shortcut': fields.char('Abbreviation', translate=True),
'domain': fields.selection([('partner', 'Partner'), ('contact', 'Contact')], 'Domain', required=True)
}
_defaults = {
'domain': 'contact',
}
@api.model
def _lang_get(self):
languages = self.env['res.lang'].search([])
return [(language.code, language.name) for language in languages]
# fields copy if 'use_parent_address' is checked
ADDRESS_FIELDS = ('street', 'street2', 'zip', 'city', 'state_id', 'country_id')
class res_partner(osv.Model, format_address):
_description = 'Partner'
_name = "res.partner"
def _address_display(self, cr, uid, ids, name, args, context=None):
res = {}
for partner in self.browse(cr, uid, ids, context=context):
res[partner.id] = self._display_address(cr, uid, partner, context=context)
return res
@api.multi
def _get_tz_offset(self, name, args):
return dict(
(p.id, datetime.datetime.now(pytz.timezone(p.tz or 'GMT')).strftime('%z'))
for p in self)
@api.multi
def _get_image(self, name, args):
return dict((p.id, tools.image_get_resized_images(p.image)) for p in self)
@api.one
def _set_image(self, name, value, args):
return self.write({'image': tools.image_resize_image_big(value)})
@api.multi
def _has_image(self, name, args):
return dict((p.id, bool(p.image)) for p in self)
def _commercial_partner_compute(self, cr, uid, ids, name, args, context=None):
""" Returns the partner that is considered the commercial
entity of this partner. The commercial entity holds the master data
for all commercial fields (see :py:meth:`~_commercial_fields`) """
result = dict.fromkeys(ids, False)
for partner in self.browse(cr, uid, ids, context=context):
current_partner = partner
while not current_partner.is_company and current_partner.parent_id:
current_partner = current_partner.parent_id
result[partner.id] = current_partner.id
return result
def _display_name_compute(self, cr, uid, ids, name, args, context=None):
context = dict(context or {})
context.pop('show_address', None)
context.pop('show_address_only', None)
context.pop('show_email', None)
return dict(self.name_get(cr, uid, ids, context=context))
# indirections to avoid passing a copy of the overridable method when declaring the function field
_commercial_partner_id = lambda self, *args, **kwargs: self._commercial_partner_compute(*args, **kwargs)
_display_name = lambda self, *args, **kwargs: self._display_name_compute(*args, **kwargs)
_commercial_partner_store_triggers = {
'res.partner': (lambda self,cr,uid,ids,context=None: self.search(cr, uid, [('id','child_of',ids)], context=dict(active_test=False)),
['parent_id', 'is_company'], 10)
}
_display_name_store_triggers = {
'res.partner': (lambda self,cr,uid,ids,context=None: self.search(cr, uid, [('id','child_of',ids)], context=dict(active_test=False)),
['parent_id', 'is_company', 'name'], 10)
}
_order = "display_name"
_columns = {
'name': fields.char('Name', required=True, select=True),
'display_name': fields.function(_display_name, type='char', string='Name', store=_display_name_store_triggers, select=True),
'date': fields.date('Date', select=1),
'title': fields.many2one('res.partner.title', 'Title'),
'parent_id': fields.many2one('res.partner', 'Related Company', select=True),
'child_ids': fields.one2many('res.partner', 'parent_id', 'Contacts', domain=[('active','=',True)]), # force "active_test" domain to bypass _search() override
'ref': fields.char('Contact Reference', select=1),
'lang': fields.selection(_lang_get, 'Language',
help="If the selected language is loaded in the system, all documents related to this contact will be printed in this language. If not, it will be English."),
'tz': fields.selection(_tz_get, 'Timezone', size=64,
help="The partner's timezone, used to output proper date and time values inside printed reports. "
"It is important to set a value for this field. You should use the same timezone "
"that is otherwise used to pick and render date and time values: your computer's timezone."),
'tz_offset': fields.function(_get_tz_offset, type='char', size=5, string='Timezone offset', invisible=True),
'user_id': fields.many2one('res.users', 'Salesperson', help='The internal user that is in charge of communicating with this contact if any.'),
'vat': fields.char('TIN', help="Tax Identification Number. Check the box if this contact is subject to taxes. Used by some of the legal statements."),
'bank_ids': fields.one2many('res.partner.bank', 'partner_id', 'Banks'),
'website': fields.char('Website', help="Website of Partner or Company"),
'comment': fields.text('Notes'),
'category_id': fields.many2many('res.partner.category', id1='partner_id', id2='category_id', string='Tags'),
'credit_limit': fields.float(string='Credit Limit'),
'ean13': fields.char('EAN13', size=13),
'active': fields.boolean('Active'),
'customer': fields.boolean('Customer', help="Check this box if this contact is a customer."),
'supplier': fields.boolean('Supplier', help="Check this box if this contact is a supplier. If it's not checked, purchase people will not see it when encoding a purchase order."),
'employee': fields.boolean('Employee', help="Check this box if this contact is an Employee."),
'function': fields.char('Job Position'),
'type': fields.selection([('default', 'Default'), ('invoice', 'Invoice'),
('delivery', 'Shipping'), ('contact', 'Contact'),
('other', 'Other')], 'Address Type',
help="Used to select automatically the right address according to the context in sales and purchases documents."),
'street': fields.char('Street'),
'street2': fields.char('Street2'),
'zip': fields.char('Zip', size=24, change_default=True),
'city': fields.char('City'),
'state_id': fields.many2one("res.country.state", 'State', ondelete='restrict'),
'country_id': fields.many2one('res.country', 'Country', ondelete='restrict'),
'email': fields.char('Email'),
'phone': fields.char('Phone'),
'fax': fields.char('Fax'),
'mobile': fields.char('Mobile'),
'birthdate': fields.char('Birthdate'),
'is_company': fields.boolean('Is a Company', help="Check if the contact is a company, otherwise it is a person"),
'use_parent_address': fields.boolean('Use Company Address', help="Select this if you want to set company's address information for this contact"),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Image",
help="This field holds the image used as avatar for this contact, limited to 1024x1024px"),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized image", type="binary", multi="_get_image",
store={
'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized image of this contact. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized image", type="binary", multi="_get_image",
store={
'res.partner': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized image of this contact. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'has_image': fields.function(_has_image, type="boolean"),
'company_id': fields.many2one('res.company', 'Company', select=1),
'color': fields.integer('Color Index'),
'user_ids': fields.one2many('res.users', 'partner_id', 'Users'),
'contact_address': fields.function(_address_display, type='char', string='Complete Address'),
# technical field used for managing commercial fields
'commercial_partner_id': fields.function(_commercial_partner_id, type='many2one', relation='res.partner', string='Commercial Entity', store=_commercial_partner_store_triggers)
}
@api.model
def _default_category(self):
category_id = self.env.context.get('category_id', False)
return [category_id] if category_id else False
@api.model
def _get_default_image(self, is_company, colorize=False):
img_path = openerp.modules.get_module_resource(
'base', 'static/src/img', 'company_image.png' if is_company else 'avatar.png')
with open(img_path, 'rb') as f:
image = f.read()
# colorize user avatars
if not is_company:
image = tools.image_colorize(image)
return tools.image_resize_image_big(image.encode('base64'))
def fields_view_get(self, cr, user, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if (not view_id) and (view_type=='form') and context and context.get('force_email', False):
view_id = self.pool['ir.model.data'].get_object_reference(cr, user, 'base', 'view_partner_simple_form')[1]
res = super(res_partner,self).fields_view_get(cr, user, view_id, view_type, context, toolbar=toolbar, submenu=submenu)
if view_type == 'form':
res['arch'] = self.fields_view_get_address(cr, user, res['arch'], context=context)
return res
@api.model
def _default_company(self):
return self.env['res.company']._company_default_get('res.partner')
_defaults = {
'active': True,
'lang': api.model(lambda self: self.env.lang),
'tz': api.model(lambda self: self.env.context.get('tz', False)),
'customer': True,
'category_id': _default_category,
'company_id': _default_company,
'color': 0,
'is_company': False,
'type': 'contact', # type 'default' is wildcard and thus inappropriate
'use_parent_address': False,
'image': False,
}
_constraints = [
(osv.osv._check_recursion, 'You cannot create recursive Partner hierarchies.', ['parent_id']),
]
@api.one
def copy(self, default=None):
default = dict(default or {})
default['name'] = _('%s (copy)') % self.name
return super(res_partner, self).copy(default)
@api.multi
def onchange_type(self, is_company):
value = {'title': False}
if is_company:
value['use_parent_address'] = False
domain = {'title': [('domain', '=', 'partner')]}
else:
domain = {'title': [('domain', '=', 'contact')]}
return {'value': value, 'domain': domain}
def onchange_address(self, cr, uid, ids, use_parent_address, parent_id, context=None):
def value_or_id(val):
""" return val or val.id if val is a browse record """
return val if isinstance(val, (bool, int, long, float, basestring)) else val.id
result = {}
if parent_id:
if ids:
partner = self.browse(cr, uid, ids[0], context=context)
if partner.parent_id and partner.parent_id.id != parent_id:
result['warning'] = {'title': _('Warning'),
'message': _('Changing the company of a contact should only be done if it '
'was never correctly set. If an existing contact starts working for a new '
'company then a new contact should be created under that new '
'company. You can use the "Discard" button to abandon this change.')}
if use_parent_address:
parent = self.browse(cr, uid, parent_id, context=context)
address_fields = self._address_fields(cr, uid, context=context)
result['value'] = dict((key, value_or_id(parent[key])) for key in address_fields)
else:
result['value'] = {'use_parent_address': False}
return result
@api.multi
def onchange_state(self, state_id):
if state_id:
state = self.env['res.country.state'].browse(state_id)
return {'value': {'country_id': state.country_id.id}}
return {}
def _check_ean_key(self, cr, uid, ids, context=None):
for partner_o in self.pool['res.partner'].read(cr, uid, ids, ['ean13',]):
thisean=partner_o['ean13']
if thisean and thisean!='':
if len(thisean)!=13:
return False
sum=0
for i in range(12):
if not (i % 2):
sum+=int(thisean[i])
else:
sum+=3*int(thisean[i])
if math.ceil(sum/10.0)*10-sum!=int(thisean[12]):
return False
return True
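# Worked example (assumed sample barcode 4006381333931): the weighted sum over
# the first twelve digits is 4+0+0+18+3+24+1+9+3+9+9+9 = 89, and
# math.ceil(89/10.0)*10 - 89 = 1, which equals the final digit, so the check
# passes; any other final digit would make _check_ean_key return False.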
# _constraints = [(_check_ean_key, 'Error: Invalid ean code', ['ean13'])]
def _update_fields_values(self, cr, uid, partner, fields, context=None):
""" Returns dict of write() values for synchronizing ``fields`` """
values = {}
for field in fields:
column = self._all_columns[field].column
if column._type == 'one2many':
raise AssertionError('One2Many fields cannot be synchronized as part of `commercial_fields` or `address fields`')
if column._type == 'many2one':
values[field] = partner[field].id if partner[field] else False
elif column._type == 'many2many':
values[field] = [(6,0,[r.id for r in partner[field] or []])]
else:
values[field] = partner[field]
return values
def _address_fields(self, cr, uid, context=None):
""" Returns the list of address fields that are synced from the parent
when the `use_parent_address` flag is set. """
return list(ADDRESS_FIELDS)
def update_address(self, cr, uid, ids, vals, context=None):
address_fields = self._address_fields(cr, uid, context=context)
addr_vals = dict((key, vals[key]) for key in address_fields if key in vals)
if addr_vals:
return super(res_partner, self).write(cr, uid, ids, addr_vals, context)
def _commercial_fields(self, cr, uid, context=None):
""" Returns the list of fields that are managed by the commercial entity
to which a partner belongs. These fields are meant to be hidden on
partners that aren't `commercial entities` themselves, and will be
delegated to the parent `commercial entity`. The list is meant to be
extended by inheriting classes. """
return ['vat']
def _commercial_sync_from_company(self, cr, uid, partner, context=None):
""" Handle sync of commercial fields when a new parent commercial entity is set,
as if they were related fields """
commercial_partner = partner.commercial_partner_id
if not commercial_partner:
# On child partner creation of a parent partner,
# the commercial_partner_id is not yet computed
commercial_partner_id = self._commercial_partner_compute(
cr, uid, [partner.id], 'commercial_partner_id', [], context=context)[partner.id]
commercial_partner = self.browse(cr, uid, commercial_partner_id, context=context)
if commercial_partner != partner:
commercial_fields = self._commercial_fields(cr, uid, context=context)
sync_vals = self._update_fields_values(cr, uid, commercial_partner,
commercial_fields, context=context)
partner.write(sync_vals)
def _commercial_sync_to_children(self, cr, uid, partner, context=None):
""" Handle sync of commercial fields to descendants """
commercial_fields = self._commercial_fields(cr, uid, context=context)
commercial_partner = partner.commercial_partner_id
if not commercial_partner:
# On child partner creation of a parent partner,
# the commercial_partner_id is not yet computed
commercial_partner_id = self._commercial_partner_compute(
cr, uid, [partner.id], 'commercial_partner_id', [], context=context)[partner.id]
commercial_partner = self.browse(cr, uid, commercial_partner_id, context=context)
sync_vals = self._update_fields_values(cr, uid, commercial_partner,
commercial_fields, context=context)
sync_children = [c for c in partner.child_ids if not c.is_company]
for child in sync_children:
self._commercial_sync_to_children(cr, uid, child, context=context)
return self.write(cr, uid, [c.id for c in sync_children], sync_vals, context=context)
def _fields_sync(self, cr, uid, partner, update_values, context=None):
""" Sync commercial fields and address fields from company and to children after create/update,
just as if those were all modeled as fields.related to the parent """
# 1. From UPSTREAM: sync from parent
if update_values.get('parent_id') or update_values.get('use_parent_address'):
# 1a. Commercial fields: sync if parent changed
if update_values.get('parent_id'):
self._commercial_sync_from_company(cr, uid, partner, context=context)
# 1b. Address fields: sync if parent or use_parent changed *and* both are now set
if partner.parent_id and partner.use_parent_address:
onchange_vals = self.onchange_address(cr, uid, [partner.id],
use_parent_address=partner.use_parent_address,
parent_id=partner.parent_id.id,
context=context).get('value', {})
partner.update_address(onchange_vals)
# 2. To DOWNSTREAM: sync children
if partner.child_ids:
# 2a. Commercial Fields: sync if commercial entity
if partner.commercial_partner_id == partner:
commercial_fields = self._commercial_fields(cr, uid,
context=context)
if any(field in update_values for field in commercial_fields):
self._commercial_sync_to_children(cr, uid, partner,
context=context)
# 2b. Address fields: sync if address changed
address_fields = self._address_fields(cr, uid, context=context)
if any(field in update_values for field in address_fields):
domain_children = [('parent_id', '=', partner.id), ('use_parent_address', '=', True)]
update_ids = self.search(cr, uid, domain_children, context=context)
self.update_address(cr, uid, update_ids, update_values, context=context)
def _handle_first_contact_creation(self, cr, uid, partner, context=None):
""" On creation of first contact for a company (or root) that has no address, assume contact address
was meant to be company address """
parent = partner.parent_id
address_fields = self._address_fields(cr, uid, context=context)
if parent and (parent.is_company or not parent.parent_id) and len(parent.child_ids) == 1 and \
any(partner[f] for f in address_fields) and not any(parent[f] for f in address_fields):
addr_vals = self._update_fields_values(cr, uid, partner, address_fields, context=context)
parent.update_address(addr_vals)
if not parent.is_company:
parent.write({'is_company': True})
def _clean_website(self, website):
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(website)
if not scheme:
if not netloc:
netloc, path = path, ''
website = urlparse.urlunparse(('http', netloc, path, params, query, fragment))
return website
@api.multi
def write(self, vals):
# res.partner must only allow to set the company_id of a partner if it
# is the same as the company of all users that inherit from this partner
# (this is to allow the code from res_users to write to the partner!) or
# if setting the company_id to False (this is compatible with any user
# company)
if vals.get('website'):
vals['website'] = self._clean_website(vals['website'])
if vals.get('company_id'):
company = self.env['res.company'].browse(vals['company_id'])
for partner in self:
if partner.user_ids:
companies = set(user.company_id for user in partner.user_ids)
if len(companies) > 1 or company not in companies:
raise osv.except_osv(_("Warning"),_("You can not change the company as the partner/user has multiple user linked with different companies."))
result = super(res_partner, self).write(vals)
for partner in self:
self._fields_sync(partner, vals)
return result
@api.model
def create(self, vals):
if vals.get('website'):
vals['website'] = self._clean_website(vals['website'])
partner = super(res_partner, self).create(vals)
self._fields_sync(partner, vals)
self._handle_first_contact_creation(partner)
return partner
def open_commercial_entity(self, cr, uid, ids, context=None):
""" Utility method used to add an "Open Company" button in partner views """
partner = self.browse(cr, uid, ids[0], context=context)
return {'type': 'ir.actions.act_window',
'res_model': 'res.partner',
'view_mode': 'form',
'res_id': partner.commercial_partner_id.id,
'target': 'new',
'flags': {'form': {'action_buttons': True}}}
def open_parent(self, cr, uid, ids, context=None):
""" Utility method used to add an "Open Parent" button in partner views """
partner = self.browse(cr, uid, ids[0], context=context)
return {'type': 'ir.actions.act_window',
'res_model': 'res.partner',
'view_mode': 'form',
'res_id': partner.parent_id.id,
'target': 'new',
'flags': {'form': {'action_buttons': True}}}
def name_get(self, cr, uid, ids, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
res = []
for record in self.browse(cr, uid, ids, context=context):
name = record.name
if record.parent_id and not record.is_company:
name = "%s, %s" % (record.parent_id.name, name)
if context.get('show_address_only'):
name = self._display_address(cr, uid, record, without_company=True, context=context)
if context.get('show_address'):
name = name + "\n" + self._display_address(cr, uid, record, without_company=True, context=context)
name = name.replace('\n\n','\n')
name = name.replace('\n\n','\n')
if context.get('show_email') and record.email:
name = "%s <%s>" % (name, record.email)
res.append((record.id, name))
return res
def _parse_partner_name(self, text, context=None):
""" Supported syntax:
- 'Raoul <[email protected]>': will find name and email address
- otherwise: default, everything is set as the name """
emails = tools.email_split(text.replace(' ',','))
if emails:
email = emails[0]
name = text[:text.index(email)].replace('"', '').replace('<', '').strip()
else:
name, email = text, ''
return name, email
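# Illustrative sketch (hypothetical input): given 'Jane Doe <jane@example.com>'
# this helper returns ('Jane Doe', 'jane@example.com'); given plain text that
# contains no e-mail address it returns (text, '').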
def name_create(self, cr, uid, name, context=None):
""" Override of orm's name_create method for partners. The purpose is
to handle some basic formats to create partners using the
name_create.
If only an email address is received and that the regex cannot find
a name, the name will have the email value.
If 'force_email' key in context: must find the email address. """
if context is None:
context = {}
name, email = self._parse_partner_name(name, context=context)
if context.get('force_email') and not email:
raise osv.except_osv(_('Warning'), _("Couldn't create contact without email address!"))
if not name and email:
name = email
rec_id = self.create(cr, uid, {self._rec_name: name or email, 'email': email or False}, context=context)
return self.name_get(cr, uid, [rec_id], context)[0]
def _search(self, cr, user, args, offset=0, limit=None, order=None, context=None, count=False, access_rights_uid=None):
""" Override search() to always show inactive children when searching via ``child_of`` operator. The ORM will
always call search() with a simple domain of the form [('parent_id', 'in', [ids])]. """
# a special ``domain`` is set on the ``child_ids`` o2m to bypass this logic, as it uses similar domain expressions
if len(args) == 1 and len(args[0]) == 3 and args[0][:2] == ('parent_id','in') \
and args[0][2] != [False]:
context = dict(context or {}, active_test=False)
return super(res_partner, self)._search(cr, user, args, offset=offset, limit=limit, order=order, context=context,
count=count, access_rights_uid=access_rights_uid)
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if name and operator in ('=', 'ilike', '=ilike', 'like', '=like'):
self.check_access_rights(cr, uid, 'read')
where_query = self._where_calc(cr, uid, args, context=context)
self._apply_ir_rules(cr, uid, where_query, 'read', context=context)
from_clause, where_clause, where_clause_params = where_query.get_sql()
where_str = where_clause and (" WHERE %s AND " % where_clause) or ' WHERE '
# search on the name of the contacts and of its company
search_name = name
if operator in ('ilike', 'like'):
search_name = '%%%s%%' % name
if operator in ('=ilike', '=like'):
operator = operator[1:]
unaccent = get_unaccent_wrapper(cr)
query = """SELECT id
FROM res_partner
{where} ({email} {operator} {percent}
OR {display_name} {operator} {percent})
ORDER BY {display_name}
""".format(where=where_str, operator=operator,
email=unaccent('email'),
display_name=unaccent('display_name'),
percent=unaccent('%s'))
where_clause_params += [search_name, search_name]
if limit:
query += ' limit %s'
where_clause_params.append(limit)
cr.execute(query, where_clause_params)
ids = map(lambda x: x[0], cr.fetchall())
if ids:
return self.name_get(cr, uid, ids, context)
else:
return []
return super(res_partner,self).name_search(cr, uid, name, args, operator=operator, context=context, limit=limit)
def find_or_create(self, cr, uid, email, context=None):
""" Find a partner with the given ``email`` or use :py:method:`~.name_create`
to create one
:param str email: email-like string, which should contain at least one email,
e.g. ``"Raoul Grosbedon <[email protected]>"``"""
assert email, 'an email is required for find_or_create to work'
emails = tools.email_split(email)
if emails:
email = emails[0]
ids = self.search(cr, uid, [('email','ilike',email)], context=context)
if not ids:
return self.name_create(cr, uid, email, context=context)[0]
return ids[0]
def _email_send(self, cr, uid, ids, email_from, subject, body, on_error=None):
partners = self.browse(cr, uid, ids)
for partner in partners:
if partner.email:
tools.email_send(email_from, [partner.email], subject, body, on_error)
return True
def email_send(self, cr, uid, ids, email_from, subject, body, on_error=''):
while len(ids):
self.pool['ir.cron'].create(cr, uid, {
'name': 'Send Partner Emails',
'user_id': uid,
'model': 'res.partner',
'function': '_email_send',
'args': repr([ids[:16], email_from, subject, body, on_error])
})
ids = ids[16:]
return True
def address_get(self, cr, uid, ids, adr_pref=None, context=None):
""" Find contacts/addresses of the right type(s) by doing a depth-first-search
through descendants within company boundaries (stop at entities flagged ``is_company``)
then continuing the search at the ancestors that are within the same company boundaries.
Defaults to partners of type ``'default'`` when the exact type is not found, or to the
provided partner itself if no type ``'default'`` is found either. """
adr_pref = set(adr_pref or [])
if 'default' not in adr_pref:
adr_pref.add('default')
result = {}
visited = set()
for partner in self.browse(cr, uid, filter(None, ids), context=context):
current_partner = partner
while current_partner:
to_scan = [current_partner]
# Scan descendants, DFS
while to_scan:
record = to_scan.pop(0)
visited.add(record)
if record.type in adr_pref and not result.get(record.type):
result[record.type] = record.id
if len(result) == len(adr_pref):
return result
to_scan = [c for c in record.child_ids
if c not in visited
if not c.is_company] + to_scan
# Continue scanning at ancestor if current_partner is not a commercial entity
if current_partner.is_company or not current_partner.parent_id:
break
current_partner = current_partner.parent_id
# default to type 'default' or the partner itself
default = result.get('default', partner.id)
for adr_type in adr_pref:
result[adr_type] = result.get(adr_type) or default
return result
def view_header_get(self, cr, uid, view_id, view_type, context):
res = super(res_partner, self).view_header_get(cr, uid, view_id, view_type, context)
if res: return res
if not context.get('category_id', False):
return False
return _('Partners: ')+self.pool['res.partner.category'].browse(cr, uid, context['category_id'], context).name
@api.model
@api.returns('self')
def main_partner(self):
''' Return the main partner '''
return self.env.ref('base.main_partner')
def _display_address(self, cr, uid, address, without_company=False, context=None):
'''
The purpose of this function is to build and return an address formatted accordingly to the
standards of the country where it belongs.
:param address: browse record of the res.partner to format
:returns: the address formatted in a display that fits its country's habits (or the default ones
if no country is specified)
:rtype: string
'''
# get the information that will be injected into the display format
# get the address format
address_format = address.country_id.address_format or \
"%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s"
args = {
'state_code': address.state_id.code or '',
'state_name': address.state_id.name or '',
'country_code': address.country_id.code or '',
'country_name': address.country_id.name or '',
'company_name': address.parent_id.name or '',
}
for field in self._address_fields(cr, uid, context=context):
args[field] = getattr(address, field) or ''
if without_company:
args['company_name'] = ''
elif address.parent_id:
address_format = '%(company_name)s\n' + address_format
return address_format % args
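# Illustrative expansion of the default format above (field values are made
# up for the example, not taken from any real record):
#
#   args = {'street': 'Main St 1', 'street2': '', 'city': 'Brussels',
#           'state_code': '', 'zip': '1000', 'country_name': 'Belgium',
#           'company_name': ''}
#   "%(street)s\n%(street2)s\n%(city)s %(state_code)s %(zip)s\n%(country_name)s" % args
#   # -> 'Main St 1\n\nBrussels  1000\nBelgium'
#
# With a parent company and without_company=False, '%(company_name)s\n' is
# prepended, so the company name appears on the first line.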
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bowlofstew/code-for-blog | 2009/plotting_data_monitor/com_monitor.py | 15 | 2988 | import Queue
import threading
import time
import serial
class ComMonitorThread(threading.Thread):
""" A thread for monitoring a COM port. The COM port is
opened when the thread is started.
data_q:
Queue for received data. Items in the queue are
(data, timestamp) pairs, where data is a binary
string representing the received data, and timestamp
is the time elapsed from the thread's start (in
seconds).
error_q:
Queue for error messages. In particular, if the
serial port fails to open for some reason, an error
is placed into this queue.
port_num:
The COM port to open. Must be recognized by the
system.
port_baud/stopbits/parity:
Serial communication parameters
port_timeout:
The timeout used for reading the COM port. If this
value is low, the thread will return data in
finer-grained chunks, with more accurate timestamps, but
it will also consume more CPU.
"""
def __init__( self,
data_q, error_q,
port_num,
port_baud,
port_stopbits=serial.STOPBITS_ONE,
port_parity=serial.PARITY_NONE,
port_timeout=0.01):
threading.Thread.__init__(self)
self.serial_port = None
self.serial_arg = dict( port=port_num,
baudrate=port_baud,
stopbits=port_stopbits,
parity=port_parity,
timeout=port_timeout)
self.data_q = data_q
self.error_q = error_q
self.alive = threading.Event()
self.alive.set()
def run(self):
try:
if self.serial_port:
self.serial_port.close()
self.serial_port = serial.Serial(**self.serial_arg)
except serial.SerialException, e:
self.error_q.put(e.message)
return
# Restart the clock
time.clock()
while self.alive.isSet():
# Reading 1 byte, followed by whatever is left in the
# read buffer, as suggested by the developer of
# PySerial.
#
data = self.serial_port.read(1)
data += self.serial_port.read(self.serial_port.inWaiting())
if len(data) > 0:
timestamp = time.clock()
self.data_q.put((data, timestamp))
# clean up
if self.serial_port:
self.serial_port.close()
def join(self, timeout=None):
self.alive.clear()
threading.Thread.join(self, timeout)
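# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# The port name and baud rate below are assumptions -- substitute whatever
# device and settings apply.  Items read from data_q are (bytes, seconds)
# pairs, as described in the class docstring.
if __name__ == '__main__':
    data_q = Queue.Queue()
    error_q = Queue.Queue()
    monitor = ComMonitorThread(data_q, error_q, '/dev/ttyUSB0', 38400)
    monitor.start()
    time.sleep(1.0)
    if not error_q.empty():
        print 'Serial error:', error_q.get()
    while not data_q.empty():
        data, timestamp = data_q.get()
        print '%.3fs: %r' % (timestamp, data)
    monitor.join(0.1)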
| unlicense |
dgoedkoop/QGIS | python/plugins/processing/algs/gdal/proximity.py | 2 | 8834 | # -*- coding: utf-8 -*-
"""
***************************************************************************
proximity.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsRasterFileWriter,
QgsProcessingException,
QgsProcessingParameterDefinition,
QgsProcessingParameterRasterLayer,
QgsProcessingParameterBand,
QgsProcessingParameterEnum,
QgsProcessingParameterString,
QgsProcessingParameterNumber,
QgsProcessingParameterRasterDestination)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
from processing.tools.system import isWindows
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class proximity(GdalAlgorithm):
INPUT = 'INPUT'
BAND = 'BAND'
VALUES = 'VALUES'
MAX_DISTANCE = 'MAX_DISTANCE'
REPLACE = 'REPLACE'
UNITS = 'UNITS'
NODATA = 'NODATA'
OPTIONS = 'OPTIONS'
DATA_TYPE = 'DATA_TYPE'
OUTPUT = 'OUTPUT'
TYPES = ['Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64']
def icon(self):
return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'proximity.png'))
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.distanceUnits = ((self.tr('Georeferenced coordinates'), 'GEO'),
(self.tr('Pixel coordinates'), 'PIXEL'))
self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT,
self.tr('Input layer')))
self.addParameter(QgsProcessingParameterBand(self.BAND,
self.tr('Band number'),
parentLayerParameterName=self.INPUT))
self.addParameter(QgsProcessingParameterString(self.VALUES,
self.tr('A list of pixel values in the source image to be considered target pixels'),
optional=True))
self.addParameter(QgsProcessingParameterEnum(self.UNITS,
self.tr('Distance units'),
options=[i[0] for i in self.distanceUnits],
allowMultiple=False,
defaultValue=1))
self.addParameter(QgsProcessingParameterNumber(self.MAX_DISTANCE,
self.tr('The maximum distance to be generated'),
type=QgsProcessingParameterNumber.Double,
minValue=0.0,
defaultValue=0.0,
optional=True))
self.addParameter(QgsProcessingParameterNumber(self.REPLACE,
self.tr('Value to be applied to all pixels that are within the -maxdist of target pixels'),
type=QgsProcessingParameterNumber.Double,
defaultValue=0.0,
optional=True))
self.addParameter(QgsProcessingParameterNumber(self.NODATA,
self.tr('Nodata value to use for the destination proximity raster'),
type=QgsProcessingParameterNumber.Double,
defaultValue=0.0,
optional=True))
options_param = QgsProcessingParameterString(self.OPTIONS,
self.tr('Additional creation parameters'),
defaultValue='',
optional=True)
options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced)
options_param.setMetadata({
'widget_wrapper': {
'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}})
self.addParameter(options_param)
self.addParameter(QgsProcessingParameterEnum(self.DATA_TYPE,
self.tr('Output data type'),
self.TYPES,
allowMultiple=False,
defaultValue=5))
self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT,
self.tr('Proximity map')))
def name(self):
return 'proximity'
def displayName(self):
return self.tr('Proximity (raster distance)')
def group(self):
return self.tr('Raster analysis')
def groupId(self):
return 'rasteranalysis'
def commandName(self):
return 'gdal_proximity'
def getConsoleCommands(self, parameters, context, feedback, executing=True):
inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context)
if inLayer is None:
raise QgsProcessingException(self.invalidRasterError(parameters, self.INPUT))
distance = self.parameterAsDouble(parameters, self.MAX_DISTANCE, context)
replaceValue = self.parameterAsDouble(parameters, self.REPLACE, context)
if self.NODATA in parameters and parameters[self.NODATA] is not None:
nodata = self.parameterAsDouble(parameters, self.NODATA, context)
else:
nodata = None
options = self.parameterAsString(parameters, self.OPTIONS, context)
out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
arguments = []
arguments.append('-srcband')
arguments.append(str(self.parameterAsInt(parameters, self.BAND, context)))
arguments.append('-distunits')
arguments.append(self.distanceUnits[self.parameterAsEnum(parameters, self.UNITS, context)][1])
values = self.parameterAsString(parameters, self.VALUES, context)
if values:
arguments.append('-values')
arguments.append(values)
if distance:
arguments.append('-maxdist')
arguments.append(str(distance))
if nodata is not None:
arguments.append('-nodata')
arguments.append(str(nodata))
if replaceValue:
arguments.append('-fixed-buf-val')
arguments.append(str(replaceValue))
arguments.append('-ot')
arguments.append(self.TYPES[self.parameterAsEnum(parameters, self.DATA_TYPE, context)])
arguments.append('-of')
arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1]))
if options:
arguments.extend(GdalUtils.parseCreationOptions(options))
arguments.append(inLayer.source())
arguments.append(out)
commands = []
if isWindows():
commands = ['cmd.exe', '/C ', self.commandName() + '.bat',
GdalUtils.escapeAndJoin(arguments)]
else:
commands = [self.commandName() + '.py',
GdalUtils.escapeAndJoin(arguments)]
return commands
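# Illustrative example (not from the upstream source): with the default
# parameter values the generated call on a non-Windows host resembles
#
#   gdal_proximity.py -srcband 1 -distunits PIXEL -ot Float32 -of GTiff
#       input.tif output.tif
#
# where input.tif / output.tif stand in for the resolved layer paths and the
# exact flags vary with the options supplied.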
| gpl-2.0 |
podhmo/boto | boto/route53/__init__.py | 145 | 3103 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# this is here for backward compatibility
# originally, the Route53Connection class was defined here
from boto.route53.connection import Route53Connection
from boto.regioninfo import RegionInfo, get_regions
class Route53RegionInfo(RegionInfo):
def connect(self, **kw_params):
"""
Connect to this Region's endpoint. Returns a connection
object pointing to the endpoint associated with this region.
You may pass any of the arguments accepted by the connection
class's constructor as keyword arguments and they will be
passed along to the connection object.
:rtype: Connection object
:return: The connection to this region's endpoint
"""
if self.connection_cls:
return self.connection_cls(host=self.endpoint, **kw_params)
def regions():
"""
Get all available regions for the Route53 service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo` instances
"""
regions = get_regions(
'route53',
region_cls=Route53RegionInfo,
connection_cls=Route53Connection
)
# For historical reasons, we had a "universal" endpoint as well.
regions.append(
Route53RegionInfo(
name='universal',
endpoint='route53.amazonaws.com',
connection_cls=Route53Connection
)
)
return regions
def connect_to_region(region_name, **kw_params):
"""
Given a valid region name, return a
:class:`boto.route53.connection.Route53Connection`.
:type: str
:param region_name: The name of the region to connect to.
:rtype: :class:`boto.route53.connection.Route53Connection` or ``None``
:return: A connection to the given region, or None if an invalid region
name is given
"""
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
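# Usage sketch (illustrative; credentials are assumed to come from the usual
# boto configuration or environment variables):
#
#   import boto.route53
#   conn = boto.route53.connect_to_region('universal')
#   if conn is not None:
#       zones = conn.get_all_hosted_zones()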
| mit |
JCBarahona/edX | lms/djangoapps/shoppingcart/migrations/0009_auto__del_coupons__add_courseregistrationcode__add_coupon__chg_field_c.py | 114 | 16498 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Coupons'
db.delete_table('shoppingcart_coupons')
# Adding model 'CourseRegistrationCode'
db.create_table('shoppingcart_courseregistrationcode', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('code', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
('transaction_group_name', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=255, null=True, blank=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='created_by_user', to=orm['auth.User'])),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 7, 1, 0, 0))),
('redeemed_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='redeemed_by_user', null=True, to=orm['auth.User'])),
('redeemed_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 7, 1, 0, 0), null=True)),
))
db.send_create_signal('shoppingcart', ['CourseRegistrationCode'])
# Adding model 'Coupon'
db.create_table('shoppingcart_coupon', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('code', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255)),
('percentage_discount', self.gf('django.db.models.fields.IntegerField')(default=0)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 7, 1, 0, 0))),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('shoppingcart', ['Coupon'])
# Changing field 'CouponRedemption.coupon'
db.alter_column('shoppingcart_couponredemption', 'coupon_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shoppingcart.Coupon']))
# Deleting field 'OrderItem.discount_price'
db.delete_column('shoppingcart_orderitem', 'discount_price')
# Adding field 'OrderItem.list_price'
db.add_column('shoppingcart_orderitem', 'list_price',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=30, decimal_places=2),
keep_default=False)
def backwards(self, orm):
# Adding model 'Coupons'
db.create_table('shoppingcart_coupons', (
('code', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('percentage_discount', self.gf('django.db.models.fields.IntegerField')(default=0)),
('description', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2014, 6, 24, 0, 0))),
('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal('shoppingcart', ['Coupons'])
# Deleting model 'CourseRegistrationCode'
db.delete_table('shoppingcart_courseregistrationcode')
# Deleting model 'Coupon'
db.delete_table('shoppingcart_coupon')
# Changing field 'CouponRedemption.coupon'
db.alter_column('shoppingcart_couponredemption', 'coupon_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['shoppingcart.Coupons']))
# Adding field 'OrderItem.discount_price'
db.add_column('shoppingcart_orderitem', 'discount_price',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=30, decimal_places=2),
keep_default=False)
# Deleting field 'OrderItem.list_price'
db.delete_column('shoppingcart_orderitem', 'list_price')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.coupon': {
'Meta': {'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 7, 1, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shoppingcart.couponredemption': {
'Meta': {'object_name': 'CouponRedemption'},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.courseregistrationcode': {
'Meta': {'object_name': 'CourseRegistrationCode'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 7, 1, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 7, 1, 0, 0)', 'null': 'True'}),
'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'redeemed_by_user'", 'null': 'True', 'to': "orm['auth.User']"}),
'transaction_group_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
| agpl-3.0 |
nhicher/ansible | test/runner/injector/injector.py | 7 | 7490 | #!/usr/bin/env python
"""Interpreter and code coverage injector for use with ansible-test.
The injector serves two main purposes:
1) Control the python interpreter used to run test tools and ansible code.
2) Provide optional code coverage analysis of ansible code.
The injector is executed one of two ways:
1) On the controller via a symbolic link such as ansible or pytest.
This is accomplished by prepending the injector directory to the PATH by ansible-test.
2) As the python interpreter when running ansible modules.
This is only supported when connecting to the local host.
Otherwise set the ANSIBLE_TEST_REMOTE_INTERPRETER environment variable.
It can be empty to auto-detect the python interpreter on the remote host.
If not empty, it will be used to set ansible_python_interpreter.
NOTE: Running ansible-test with the --tox option or inside a virtual environment
may prevent the injector from working for tests which use connection
types other than local, or which use become, due to lack of permissions
to access the interpreter for the virtual environment.
"""
from __future__ import absolute_import, print_function
import json
import os
import sys
import pipes
import logging
import getpass
import resource
logger = logging.getLogger('injector') # pylint: disable=locally-disabled, invalid-name
# pylint: disable=locally-disabled, invalid-name
config = None # type: InjectorConfig
class InjectorConfig(object):
"""Mandatory configuration."""
def __init__(self, config_path):
"""Initialize config."""
with open(config_path) as config_fd:
_config = json.load(config_fd)
self.python_interpreter = _config['python_interpreter']
self.coverage_file = _config['coverage_file']
# Read from the environment instead of config since it needs to be changed by integration test scripts.
# It also does not need to flow from the controller to the remote. It is only used on the controller.
self.remote_interpreter = os.environ.get('ANSIBLE_TEST_REMOTE_INTERPRETER', None)
self.arguments = [to_text(c) for c in sys.argv]
def to_text(value):
"""
:type value: str | None
:rtype: str | None
"""
if value is None:
return None
if isinstance(value, bytes):
return value.decode('utf-8')
return u'%s' % value
def main():
"""Main entry point."""
global config # pylint: disable=locally-disabled, global-statement
formatter = logging.Formatter('%(asctime)s %(process)d %(levelname)s %(message)s')
log_name = 'ansible-test-coverage.%s.log' % getpass.getuser()
self_dir = os.path.dirname(os.path.abspath(__file__))
handler = logging.FileHandler(os.path.join('/tmp', log_name))
handler.setFormatter(formatter)
logger.addHandler(handler)
handler = logging.FileHandler(os.path.abspath(os.path.join(self_dir, '..', 'logs', log_name)))
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
try:
logger.debug('Self: %s', __file__)
# to achieve a consistent nofile ulimit, set to 16k here, this can affect performance in subprocess.Popen when
# being called with close_fds=True on Python (8x the time on some environments)
nofile_limit = 16 * 1024
current_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
new_limit = (nofile_limit, nofile_limit)
if current_limit > new_limit:
logger.debug('RLIMIT_NOFILE: %s -> %s', current_limit, new_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (nofile_limit, nofile_limit))
else:
logger.debug('RLIMIT_NOFILE: %s', current_limit)
config_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'injector.json')
try:
config = InjectorConfig(config_path)
except IOError:
logger.exception('Error reading config: %s', config_path)
exit('No injector config found. Set ANSIBLE_TEST_REMOTE_INTERPRETER if the test is not connecting to the local host.')
logger.debug('Arguments: %s', ' '.join(pipes.quote(c) for c in config.arguments))
logger.debug('Python interpreter: %s', config.python_interpreter)
logger.debug('Remote interpreter: %s', config.remote_interpreter)
logger.debug('Coverage file: %s', config.coverage_file)
if os.path.basename(__file__) == 'injector.py':
args, env = runner() # code coverage collection is baked into the AnsiballZ wrapper when needed
elif os.path.basename(__file__) == 'python.py':
args, env = python() # run arbitrary python commands using the correct python and with optional code coverage
else:
args, env = injector()
logger.debug('Run command: %s', ' '.join(pipes.quote(c) for c in args))
for key in sorted(env.keys()):
logger.debug('%s=%s', key, env[key])
os.execvpe(args[0], args, env)
except Exception as ex:
logger.fatal(ex)
raise
def python():
"""
:rtype: list[str], dict[str, str]
"""
if config.coverage_file:
args, env = coverage_command()
else:
args, env = [config.python_interpreter], os.environ.copy()
args += config.arguments[1:]
return args, env
def injector():
"""
:rtype: list[str], dict[str, str]
"""
command = os.path.basename(__file__)
executable = find_executable(command)
if config.coverage_file:
args, env = coverage_command()
else:
args, env = [config.python_interpreter], os.environ.copy()
args += [executable]
if command in ('ansible', 'ansible-playbook', 'ansible-pull'):
if config.remote_interpreter is None:
interpreter = os.path.join(os.path.dirname(__file__), 'injector.py')
elif config.remote_interpreter == '':
interpreter = None
else:
interpreter = config.remote_interpreter
if interpreter:
args += ['--extra-vars', 'ansible_python_interpreter=' + interpreter]
args += config.arguments[1:]
return args, env
def runner():
"""
:rtype: list[str], dict[str, str]
"""
args, env = [config.python_interpreter], os.environ.copy()
args += config.arguments[1:]
return args, env
def coverage_command():
"""
:rtype: list[str], dict[str, str]
"""
self_dir = os.path.dirname(os.path.abspath(__file__))
args = [
config.python_interpreter,
'-m',
'coverage.__main__',
'run',
'--rcfile',
os.path.join(self_dir, '.coveragerc'),
]
env = os.environ.copy()
env['COVERAGE_FILE'] = config.coverage_file
return args, env
def find_executable(executable):
"""
:type executable: str
:rtype: str
"""
self = os.path.abspath(__file__)
path = os.environ.get('PATH', os.path.defpath)
seen_dirs = set()
for path_dir in path.split(os.path.pathsep):
if path_dir in seen_dirs:
continue
seen_dirs.add(path_dir)
candidate = os.path.abspath(os.path.join(path_dir, executable))
if candidate == self:
continue
if os.path.exists(candidate) and os.access(candidate, os.F_OK | os.X_OK):
return candidate
raise Exception('Executable "%s" not found in path: %s' % (executable, path))
if __name__ == '__main__':
main()
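# Illustrative injector.json contents (an assumption inferred from the keys
# read by InjectorConfig above, not a file copied from ansible-test):
#
#   {
#       "python_interpreter": "/usr/bin/python3.6",
#       "coverage_file": ""
#   }
#
# An empty coverage_file disables the coverage wrapper, so python()/injector()
# above fall back to running the plain interpreter.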
| gpl-3.0 |
DonHilborn/DataGenerator | faker/providers/internet.py | 1 | 4491 | # coding=utf-8
from __future__ import unicode_literals
from . import BaseProvider
import random
from faker.providers.lorem import Provider as Lorem
from faker.utils.decorators import slugify, slugify_domain
class Provider(BaseProvider):
safe_email_tlds = ('org', 'com', 'net')
free_email_domains = ('gmail.com', 'yahoo.com', 'hotmail.com')
tlds = ('com', 'com', 'com', 'com', 'com', 'com', 'biz', 'info', 'net', 'org')
uri_pages = (
'index', 'home', 'search', 'main', 'post', 'homepage', 'category', 'register', 'login', 'faq', 'about', 'terms',
'privacy', 'author')
uri_paths = (
'app', 'main', 'wp-content', 'search', 'category', 'tag', 'categories', 'tags', 'blog', 'posts', 'list', 'explore')
uri_extensions = ('.html', '.html', '.html', '.htm', '.htm', '.php', '.php', '.jsp', '.asp')
user_name_formats = (
'{{last_name}}.{{first_name}}',
'{{first_name}}.{{last_name}}',
'{{first_name}}##',
'?{{last_name}}',
)
email_formats = (
'{{user_name}}@{{domain_name}}',
'{{user_name}}@{{free_email_domain}}',
)
url_formats = (
'http://www.{{domain_name}}/',
'http://{{domain_name}}/',
)
uri_formats = (
'{{url}}',
'{{url}}{{uri_page}}/',
'{{url}}{{uri_page}}{{uri_extension}}',
'{{url}}{{uri_path}}/{{uri_page}}/',
'{{url}}{{uri_path}}/{{uri_page}}{{uri_extension}}',
)
image_placeholder_services = (
'http://placekitten.com/{width}/{height}',
'http://placehold.it/{width}x{height}',
'http://www.lorempixum.com/{width}/{height}',
'http://dummyimage.com/{width}x{height}',
)
def email(self):
pattern = self.random_element(self.email_formats)
return "".join(self.generator.parse(pattern).split(" "))
def safe_email(self):
return self.user_name() + '@example.' + self.random_element(self.safe_email_tlds)
def free_email(self):
return self.user_name() + '@' + self.free_email_domain()
def company_email(self):
return self.user_name() + '@' + self.domain_name()
@classmethod
def free_email_domain(cls):
return cls.random_element(cls.free_email_domains)
@slugify_domain
def user_name(self):
pattern = self.random_element(self.user_name_formats)
return self.bothify(self.generator.parse(pattern))
def domain_name(self):
return self.domain_word() + '.' + self.tld()
@slugify
def domain_word(self):
company = self.generator.format('company')
company_elements = company.split(' ')
company = company_elements.pop(0)
return company
def tld(self):
return self.random_element(self.tlds)
def url(self):
pattern = self.random_element(self.url_formats)
return self.generator.parse(pattern)
def ipv4(self):
"""
Convert 32-bit integer to dotted IPv4 address.
"""
return ".".join(map(lambda n: str(random.randint(-2147483648, 2147483647) >> n & 0xFF), [24, 16, 8, 0]))
def ipv6(self):
res = [hex(random.randint(0, 65535))[2:].zfill(4) for i in range(0, 8)]
return ":".join(res)
def mac_address(self):
mac = [random.randint(0x00, 0xff) for i in range(0, 6)]
return ":".join(map(lambda x: "%02x" % x, mac))
@classmethod
def uri_page(cls):
return cls.random_element(cls.uri_pages)
@classmethod
def uri_path(cls, deep=None):
deep = deep if deep else random.randint(1, 3)
return "/".join([cls.random_element(cls.uri_paths) for _ in range(0, deep)])
@classmethod
def uri_extension(cls):
return cls.random_element(cls.uri_extensions)
def uri(self):
pattern = self.random_element(self.uri_formats)
return self.generator.parse(pattern)
@classmethod
@slugify
def slug(cls, value=None):
"""
Django algorithm
"""
if value is None:
value = Lorem.text(20)
return value
@classmethod
def image_url(cls, width=None, height=None):
"""
Returns URL to placeholder image
Example: http://placehold.it/640x480
"""
width_ = width or cls.random_int(max=1024)
height_ = height or cls.random_int(max=1024)
placeholder_url = cls.random_element(cls.image_placeholder_services)
return placeholder_url.format(width=width_, height=height_)
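# Usage sketch (illustrative; the provider is normally reached through a
# generator built by the faker Factory rather than instantiated directly):
#
#   from faker import Factory
#   fake = Factory.create()
#   fake.user_name()          # random user name
#   fake.ipv4()               # dotted-quad string
#   fake.image_url(640, 480)  # e.g. 'http://placehold.it/640x480'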
| mit |
drewmiller/tornado | docs/conf.py | 9 | 2900 | # Ensure we get the local copy of tornado instead of what's on the standard path
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
import tornado
master_doc = "index"
project = "Tornado"
copyright = "2011, Facebook"
version = release = tornado.version
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.viewcode",
]
primary_domain = 'py'
default_role = 'py:obj'
autodoc_member_order = "bysource"
autoclass_content = "both"
# Without this line sphinx includes a copy of object.__init__'s docstring
# on any class that doesn't define __init__.
# https://bitbucket.org/birkenfeld/sphinx/issue/1337/autoclass_content-both-uses-object__init__
autodoc_docstring_signature = False
coverage_skip_undoc_in_source = True
coverage_ignore_modules = [
"tornado.platform.asyncio",
"tornado.platform.caresresolver",
"tornado.platform.twisted",
]
# I wish this could go in a per-module file...
coverage_ignore_classes = [
# tornado.concurrent
"TracebackFuture",
# tornado.gen
"Runner",
# tornado.ioloop
"PollIOLoop",
# tornado.web
"ChunkedTransferEncoding",
"GZipContentEncoding",
"OutputTransform",
"TemplateModule",
"url",
# tornado.websocket
"WebSocketProtocol",
"WebSocketProtocol13",
"WebSocketProtocol76",
]
coverage_ignore_functions = [
# various modules
"doctests",
"main",
# tornado.escape
# parse_qs_bytes should probably be documented but it's complicated by
# having different implementations between py2 and py3.
"parse_qs_bytes",
]
html_favicon = 'favicon.ico'
latex_documents = [
('documentation', 'tornado.tex', 'Tornado Documentation', 'Facebook', 'manual', False),
]
# HACK: sphinx has limited support for substitutions with the |version|
# variable, but there doesn't appear to be any way to use this in a link
# target.
# http://stackoverflow.com/questions/1227037/substitutions-inside-links-in-rest-sphinx
# The extlink extension can be used to do link substitutions, but it requires a
# portion of the url to be literally contained in the document. Therefore,
# this link must be referenced as :current_tarball:`z`
extlinks = {
'current_tarball': (
'https://pypi.python.org/packages/source/t/tornado/tornado-%s.tar.g%%s' % version,
'tornado-%s.tar.g' % version),
}
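# Illustrative use of the extlink defined above in a .rst source file:
#
#   Download :current_tarball:`z`
#
# which expands to a link to tornado-<version>.tar.gz at the PyPI URL above.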
intersphinx_mapping = {
'python': ('https://docs.python.org/3.4/', None),
}
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# On RTD we can't import sphinx_rtd_theme, but it will be applied by
# default anyway. This block will use the same theme when building locally
# as on RTD.
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
| apache-2.0 |
gfyoung/pandas | pandas/tests/indexes/datetimes/test_scalar_compat.py | 2 | 12213 | """
Tests for DatetimeIndex methods behaving like their Timestamp counterparts
"""
from datetime import datetime
import numpy as np
import pytest
from pandas._libs.tslibs import OutOfBoundsDatetime, to_offset
from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG
import pandas as pd
from pandas import DatetimeIndex, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndexOps:
def test_dti_time(self):
rng = date_range("1/1/2000", freq="12min", periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
assert (result == expected).all()
def test_dti_date(self):
rng = date_range("1/1/2000", freq="12H", periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
assert (result == expected).all()
@pytest.mark.parametrize("data", [["1400-01-01"], [datetime(1400, 1, 1)]])
def test_dti_date_out_of_range(self, data):
# GH#1475
msg = "Out of bounds nanosecond timestamp: 1400-01-01 00:00:00"
with pytest.raises(OutOfBoundsDatetime, match=msg):
DatetimeIndex(data)
@pytest.mark.parametrize(
"field",
[
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
],
)
def test_dti_timestamp_fields(self, field):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
expected = getattr(idx, field)[-1]
result = getattr(Timestamp(idx[-1]), field)
assert result == expected
def test_dti_timestamp_isocalendar_fields(self):
idx = tm.makeDateIndex(100)
expected = tuple(idx.isocalendar().iloc[-1].to_list())
result = idx[-1].isocalendar()
assert result == expected
def test_dti_timestamp_freq_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
assert idx.freq == Timestamp(idx[-1], idx.freq).freq
assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
# ----------------------------------------------------------------
# DatetimeIndex.round
def test_round_daily(self):
dti = date_range("20130101 09:10:11", periods=5)
result = dti.round("D")
expected = date_range("20130101", periods=5)
tm.assert_index_equal(result, expected)
dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
result = dti.round("D")
expected = date_range("20130101", periods=5).tz_localize("US/Eastern")
tm.assert_index_equal(result, expected)
result = dti.round("s")
tm.assert_index_equal(result, dti)
@pytest.mark.parametrize(
"freq, error_msg",
[
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
],
)
def test_round_invalid(self, freq, error_msg):
dti = date_range("20130101 09:10:11", periods=5)
dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
with pytest.raises(ValueError, match=error_msg):
dti.round(freq)
def test_round(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range(start="2016-01-01", periods=5, freq="30Min", tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 01:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 02:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 02:00:00", tz=tz, freq="30T"),
]
)
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq="H"), expected_rng)
assert elt.round(freq="H") == expected_elt
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
rng.round(freq="foo")
with pytest.raises(ValueError, match=msg):
elt.round(freq="foo")
msg = "<MonthEnd> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
rng.round(freq="M")
with pytest.raises(ValueError, match=msg):
elt.round(freq="M")
# GH#14440 & GH#15578
index = DatetimeIndex(["2016-10-17 12:00:00.0015"], tz=tz)
result = index.round("ms")
expected = DatetimeIndex(["2016-10-17 12:00:00.002000"], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ["us", "ns"]:
tm.assert_index_equal(index, index.round(freq))
index = DatetimeIndex(["2016-10-17 12:00:00.00149"], tz=tz)
result = index.round("ms")
expected = DatetimeIndex(["2016-10-17 12:00:00.001000"], tz=tz)
tm.assert_index_equal(result, expected)
index = DatetimeIndex(["2016-10-17 12:00:00.001501031"])
result = index.round("10ns")
expected = DatetimeIndex(["2016-10-17 12:00:00.001501030"])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(False):
ts = "2016-10-17 12:00:00.001501031"
DatetimeIndex([ts]).round("1010ns")
def test_no_rounding_occurs(self, tz_naive_fixture):
# GH 21262
tz = tz_naive_fixture
rng = date_range(start="2016-01-01", periods=5, freq="2Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:02:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:04:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:06:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:08:00", tz=tz, freq="2T"),
]
)
tm.assert_index_equal(rng.round(freq="2T"), expected_rng)
@pytest.mark.parametrize(
"test_input, rounder, freq, expected",
[
(["2117-01-01 00:00:45"], "floor", "15s", ["2117-01-01 00:00:45"]),
(["2117-01-01 00:00:45"], "ceil", "15s", ["2117-01-01 00:00:45"]),
(
["2117-01-01 00:00:45.000000012"],
"floor",
"10ns",
["2117-01-01 00:00:45.000000010"],
),
(
["1823-01-01 00:00:01.000000012"],
"ceil",
"10ns",
["1823-01-01 00:00:01.000000020"],
),
(["1823-01-01 00:00:01"], "floor", "1s", ["1823-01-01 00:00:01"]),
(["1823-01-01 00:00:01"], "ceil", "1s", ["1823-01-01 00:00:01"]),
(["2018-01-01 00:15:00"], "ceil", "15T", ["2018-01-01 00:15:00"]),
(["2018-01-01 00:15:00"], "floor", "15T", ["2018-01-01 00:15:00"]),
(["1823-01-01 03:00:00"], "ceil", "3H", ["1823-01-01 03:00:00"]),
(["1823-01-01 03:00:00"], "floor", "3H", ["1823-01-01 03:00:00"]),
(
("NaT", "1823-01-01 00:00:01"),
"floor",
"1s",
("NaT", "1823-01-01 00:00:01"),
),
(
("NaT", "1823-01-01 00:00:01"),
"ceil",
"1s",
("NaT", "1823-01-01 00:00:01"),
),
],
)
def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
dt = DatetimeIndex(list(test_input))
func = getattr(dt, rounder)
result = func(freq)
expected = DatetimeIndex(list(expected))
assert expected.equals(result)
@pytest.mark.parametrize(
"start, index_freq, periods",
[("2018-01-01", "12H", 25), ("2018-01-01 0:0:0.124999", "1ns", 1000)],
)
@pytest.mark.parametrize(
"round_freq",
[
"2ns",
"3ns",
"4ns",
"5ns",
"6ns",
"7ns",
"250ns",
"500ns",
"750ns",
"1us",
"19us",
"250us",
"500us",
"750us",
"1s",
"2s",
"3s",
"12H",
"1D",
],
)
def test_round_int64(self, start, index_freq, periods, round_freq):
dt = date_range(start=start, freq=index_freq, periods=periods)
unit = to_offset(round_freq).nanos
# test floor
result = dt.floor(round_freq)
diff = dt.asi8 - result.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), f"floor not a {round_freq} multiple"
assert (0 <= diff).all() and (diff < unit).all(), "floor error"
# test ceil
result = dt.ceil(round_freq)
diff = result.asi8 - dt.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), f"ceil not a {round_freq} multiple"
assert (0 <= diff).all() and (diff < unit).all(), "ceil error"
# test round
result = dt.round(round_freq)
diff = abs(result.asi8 - dt.asi8)
mod = result.asi8 % unit
assert (mod == 0).all(), f"round not a {round_freq} multiple"
assert (diff <= unit // 2).all(), "round error"
if unit % 2 == 0:
assert (
result.asi8[diff == unit // 2] % 2 == 0
).all(), "round half to even error"
# ----------------------------------------------------------------
# DatetimeIndex.normalize
def test_normalize(self):
rng = date_range("1/1/2000 9:30", periods=10, freq="D")
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D")
tm.assert_index_equal(result, expected)
arr_ns = np.array([1380585623454345752, 1380585612343234312]).astype(
"datetime64[ns]"
)
rng_ns = DatetimeIndex(arr_ns)
rng_ns_normalized = rng_ns.normalize()
arr_ns = np.array([1380585600000000000, 1380585600000000000]).astype(
"datetime64[ns]"
)
expected = DatetimeIndex(arr_ns)
tm.assert_index_equal(rng_ns_normalized, expected)
assert result.is_normalized
assert not rng.is_normalized
def test_normalize_nat(self):
dti = DatetimeIndex([pd.NaT, Timestamp("2018-01-01 01:00:00")])
result = dti.normalize()
expected = DatetimeIndex([pd.NaT, Timestamp("2018-01-01")])
tm.assert_index_equal(result, expected)
class TestDateTimeIndexToJulianDate:
def test_1700(self):
dr = date_range(start=Timestamp("1710-10-01"), periods=5, freq="D")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="D")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="H")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_minute(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="T")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_second(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="S")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
| bsd-3-clause |
meredith-digops/ansible | docs/bin/dump_keywords.py | 33 | 2403 | #!/usr/bin/env python
import optparse
import yaml
from jinja2 import Environment, FileSystemLoader
from ansible.playbook import Play
from ansible.playbook.block import Block
from ansible.playbook.role import Role
from ansible.playbook.task import Task
template_file = 'playbooks_keywords.rst.j2'
oblist = {}
clist = []
class_list = [ Play, Role, Block, Task ]
p = optparse.OptionParser(
version='%prog 1.0',
usage='usage: %prog [options]',
description='Generate module documentation from metadata',
)
p.add_option("-T", "--template-dir", action="store", dest="template_dir", default="../templates", help="directory containing Jinja2 templates")
p.add_option("-o", "--output-dir", action="store", dest="output_dir", default='/tmp/', help="Output directory for rst files")
p.add_option("-d", "--docs-source", action="store", dest="docs", default=None, help="Source for attribute docs")
(options, args) = p.parse_args()
for aclass in class_list:
aobj = aclass()
name = type(aobj).__name__
if options.docs:
with open(options.docs) as f:
docs = yaml.safe_load(f)
else:
docs = {}
# build ordered list to loop over and dict with attributes
clist.append(name)
oblist[name] = dict((x, aobj.__dict__['_attributes'][x]) for x in aobj.__dict__['_attributes'] if 'private' not in x or not x.private)
# pick up docs if they exist
for a in oblist[name]:
if a in docs:
oblist[name][a] = docs[a]
else:
oblist[name][a] = ' UNDOCUMENTED!! '
# loop is really with_ for users
if name == 'Task':
oblist[name]['with_<lookup_plugin>'] = 'with_ is how loops are defined, it can use any available lookup plugin to generate the item list'
# local_action is implicit with action
if 'action' in oblist[name]:
oblist[name]['local_action'] = 'Same as action but also implies ``delegate_to: localhost``'
# remove unusable (used to be private?)
for nouse in ('loop', 'loop_args'):
if nouse in oblist[name]:
del oblist[name][nouse]
env = Environment(loader=FileSystemLoader(options.template_dir), trim_blocks=True,)
template = env.get_template(template_file)
outputname = options.output_dir + template_file.replace('.j2','')
tempvars = { 'oblist': oblist, 'clist': clist }
with open( outputname, 'w') as f:
f.write(template.render(tempvars))
| gpl-3.0 |
xiaohaidao007/pandoraBox-SDK-mt7620 | staging_dir/host/lib/scons-2.5.0/SCons/Job.py | 3 | 16096 | """SCons.Job
This module defines the Serial and Parallel classes that execute tasks to
complete a build. The Jobs class provides a higher level interface to start,
stop, and wait on jobs.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Job.py rel_2.5.0:3543:937e55cd78f7 2016/04/09 11:29:54 bdbaddog"
import SCons.compat
import os
import signal
import SCons.Errors
# The default stack size (in kilobytes) of the threads used to execute
# jobs in parallel.
#
# We use a stack size of 256 kilobytes. The default on some platforms
# is too large and prevents us from creating enough threads to fully
# parallelize the build. For example, the default stack size on linux
# is 8 MBytes.
explicit_stack_size = None
default_stack_size = 256
interrupt_msg = 'Build interrupted.'
class InterruptState(object):
def __init__(self):
self.interrupted = False
def set(self):
self.interrupted = True
def __call__(self):
return self.interrupted
class Jobs(object):
"""An instance of this class initializes N jobs, and provides
methods for starting, stopping, and waiting on all N jobs.
"""
def __init__(self, num, taskmaster):
"""
Create 'num' jobs using the given taskmaster.
If 'num' is 1 or less, then a serial job will be used,
otherwise a parallel job with 'num' worker threads will
be used.
The 'num_jobs' attribute will be set to the actual number of jobs
allocated. If more than one job is requested but the Parallel
class can't do it, it gets reset to 1. Wrapping interfaces that
care should check the value of 'num_jobs' after initialization.
"""
self.job = None
if num > 1:
stack_size = explicit_stack_size
if stack_size is None:
stack_size = default_stack_size
try:
self.job = Parallel(taskmaster, num, stack_size)
self.num_jobs = num
except NameError:
pass
if self.job is None:
self.job = Serial(taskmaster)
self.num_jobs = 1
def run(self, postfunc=lambda: None):
"""Run the jobs.
postfunc() will be invoked after the jobs has run. It will be
invoked even if the jobs are interrupted by a keyboard
interrupt (well, in fact by a signal such as either SIGINT,
SIGTERM or SIGHUP). The execution of postfunc() is protected
against keyboard interrupts and is guaranteed to run to
completion."""
self._setup_sig_handler()
try:
self.job.start()
finally:
postfunc()
self._reset_sig_handler()
def were_interrupted(self):
"""Returns whether the jobs were interrupted by a signal."""
return self.job.interrupted()
def _setup_sig_handler(self):
"""Setup an interrupt handler so that SCons can shutdown cleanly in
various conditions:
a) SIGINT: Keyboard interrupt
b) SIGTERM: kill or system shutdown
c) SIGHUP: Controlling shell exiting
We handle all of these cases by stopping the taskmaster. It
turns out that it's very difficult to stop the build process
by throwing asynchronously an exception such as
KeyboardInterrupt. For example, the python Condition
variables (threading.Condition) and queues do not seem to be
asynchronous-exception-safe. It would require adding a whole
bunch of try/finally block and except KeyboardInterrupt all
over the place.
Note also that we have to be careful to handle the case when
SCons forks before executing another process. In that case, we
want the child to exit immediately.
"""
def handler(signum, stack, self=self, parentpid=os.getpid()):
if os.getpid() == parentpid:
self.job.taskmaster.stop()
self.job.interrupted.set()
else:
os._exit(2)
self.old_sigint = signal.signal(signal.SIGINT, handler)
self.old_sigterm = signal.signal(signal.SIGTERM, handler)
try:
self.old_sighup = signal.signal(signal.SIGHUP, handler)
except AttributeError:
pass
def _reset_sig_handler(self):
"""Restore the signal handlers to their previous state (before the
call to _setup_sig_handler())."""
signal.signal(signal.SIGINT, self.old_sigint)
signal.signal(signal.SIGTERM, self.old_sigterm)
try:
signal.signal(signal.SIGHUP, self.old_sighup)
except AttributeError:
pass
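# Usage sketch (illustrative; 'taskmaster' stands in for an already
# configured SCons.Taskmaster.Taskmaster instance, which is assumed here):
#
#   jobs = Jobs(num=4, taskmaster=taskmaster)
#   jobs.run()
#   if jobs.were_interrupted():
#       print(interrupt_msg)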
class Serial(object):
"""This class is used to execute tasks in series, and is more efficient
than Parallel, but is only appropriate for non-parallel builds. Only
one instance of this class should be in existence at a time.
This class is not thread safe.
"""
def __init__(self, taskmaster):
"""Create a new serial job given a taskmaster.
The taskmaster's next_task() method should return the next task
that needs to be executed, or None if there are no more tasks. The
taskmaster's executed() method will be called for each task when it
is successfully executed, or failed() will be called if it failed to
execute (e.g. execute() raised an exception)."""
self.taskmaster = taskmaster
self.interrupted = InterruptState()
def start(self):
"""Start the job. This will begin pulling tasks from the taskmaster
and executing them, and return when there are no more tasks. If a task
fails to execute (i.e. execute() raises an exception), then the job will
stop."""
while True:
task = self.taskmaster.next_task()
if task is None:
break
try:
task.prepare()
if task.needs_execute():
task.execute()
except:
if self.interrupted():
try:
raise SCons.Errors.BuildError(
task.targets[0], errstr=interrupt_msg)
except:
task.exception_set()
else:
task.exception_set()
# Let the failed() callback function arrange for the
# build to stop if that's appropriate.
task.failed()
else:
task.executed()
task.postprocess()
self.taskmaster.cleanup()
# Trap import failure so that everything in the Job module but the
# Parallel class (and its dependent classes) will work if the interpreter
# doesn't support threads.
try:
import queue
import threading
except ImportError:
pass
else:
class Worker(threading.Thread):
"""A worker thread waits on a task to be posted to its request queue,
dequeues the task, executes it, and posts a tuple including the task
and a boolean indicating whether the task executed successfully. """
def __init__(self, requestQueue, resultsQueue, interrupted):
threading.Thread.__init__(self)
self.setDaemon(1)
self.requestQueue = requestQueue
self.resultsQueue = resultsQueue
self.interrupted = interrupted
self.start()
def run(self):
while True:
task = self.requestQueue.get()
if task is None:
# The "None" value is used as a sentinel by
# ThreadPool.cleanup(). This indicates that there
# are no more tasks, so we should quit.
break
try:
if self.interrupted():
raise SCons.Errors.BuildError(
task.targets[0], errstr=interrupt_msg)
task.execute()
except:
task.exception_set()
ok = False
else:
ok = True
self.resultsQueue.put((task, ok))
class ThreadPool(object):
"""This class is responsible for spawning and managing worker threads."""
def __init__(self, num, stack_size, interrupted):
"""Create the request and reply queues, and 'num' worker threads.
One must specify the stack size of the worker threads. The
stack size is specified in kilobytes.
"""
self.requestQueue = queue.Queue(0)
self.resultsQueue = queue.Queue(0)
try:
prev_size = threading.stack_size(stack_size*1024)
            except AttributeError as e:
# Only print a warning if the stack size has been
# explicitly set.
if not explicit_stack_size is None:
msg = "Setting stack size is unsupported by this version of Python:\n " + \
e.args[0]
SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg)
            except ValueError as e:
msg = "Setting stack size failed:\n " + str(e)
SCons.Warnings.warn(SCons.Warnings.StackSizeWarning, msg)
# Create worker threads
self.workers = []
for _ in range(num):
worker = Worker(self.requestQueue, self.resultsQueue, interrupted)
self.workers.append(worker)
if 'prev_size' in locals():
threading.stack_size(prev_size)
def put(self, task):
"""Put task into request queue."""
self.requestQueue.put(task)
def get(self):
"""Remove and return a result tuple from the results queue."""
return self.resultsQueue.get()
def preparation_failed(self, task):
self.resultsQueue.put((task, False))
def cleanup(self):
"""
Shuts down the thread pool, giving each worker thread a
chance to shut down gracefully.
"""
# For each worker thread, put a sentinel "None" value
# on the requestQueue (indicating that there's no work
# to be done) so that each worker thread will get one and
# terminate gracefully.
for _ in self.workers:
self.requestQueue.put(None)
# Wait for all of the workers to terminate.
#
# If we don't do this, later Python versions (2.4, 2.5) often
# seem to raise exceptions during shutdown. This happens
# in requestQueue.get(), as an assertion failure that
# requestQueue.not_full is notified while not acquired,
# seemingly because the main thread has shut down (or is
# in the process of doing so) while the workers are still
# trying to pull sentinels off the requestQueue.
#
# Normally these terminations should happen fairly quickly,
# but we'll stick a one-second timeout on here just in case
# someone gets hung.
for worker in self.workers:
worker.join(1.0)
self.workers = []
class Parallel(object):
"""This class is used to execute tasks in parallel, and is somewhat
less efficient than Serial, but is appropriate for parallel builds.
This class is thread safe.
"""
def __init__(self, taskmaster, num, stack_size):
"""Create a new parallel job given a taskmaster.
The taskmaster's next_task() method should return the next
task that needs to be executed, or None if there are no more
tasks. The taskmaster's executed() method will be called
for each task when it is successfully executed, or failed()
will be called if the task failed to execute (i.e. execute()
raised an exception).
Note: calls to taskmaster are serialized, but calls to
execute() on distinct tasks are not serialized, because
that is the whole point of parallel jobs: they can execute
multiple tasks simultaneously. """
self.taskmaster = taskmaster
self.interrupted = InterruptState()
self.tp = ThreadPool(num, stack_size, self.interrupted)
self.maxjobs = num
def start(self):
"""Start the job. This will begin pulling tasks from the
taskmaster and executing them, and return when there are no
more tasks. If a task fails to execute (i.e. execute() raises
an exception), then the job will stop."""
jobs = 0
while True:
# Start up as many available tasks as we're
# allowed to.
while jobs < self.maxjobs:
task = self.taskmaster.next_task()
if task is None:
break
try:
# prepare task for execution
task.prepare()
except:
task.exception_set()
task.failed()
task.postprocess()
else:
if task.needs_execute():
# dispatch task
self.tp.put(task)
jobs = jobs + 1
else:
task.executed()
task.postprocess()
if not task and not jobs: break
# Let any/all completed tasks finish up before we go
# back and put the next batch of tasks on the queue.
while True:
task, ok = self.tp.get()
jobs = jobs - 1
if ok:
task.executed()
else:
if self.interrupted():
try:
raise SCons.Errors.BuildError(
task.targets[0], errstr=interrupt_msg)
except:
task.exception_set()
# Let the failed() callback function arrange
# for the build to stop if that's appropriate.
task.failed()
task.postprocess()
if self.tp.resultsQueue.empty():
break
self.tp.cleanup()
self.taskmaster.cleanup()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
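The job engine above only assumes a small protocol from the taskmaster (next_task, stop, cleanup) and from each task (prepare, needs_execute, execute, executed, failed, postprocess, exception_set, targets). A minimal sketch of driving it with stub objects follows; the Stub* classes are hypothetical, and the Jobs entry point is assumed to be importable from the module excerpted above (historically SCons.Job).

class StubTask(object):
    targets = ['stub-target']
    def prepare(self): pass
    def needs_execute(self): return True
    def execute(self): print('building %s' % self.targets[0])
    def executed(self): pass
    def failed(self): pass
    def postprocess(self): pass
    def exception_set(self): pass

class StubTaskmaster(object):
    def __init__(self, tasks): self.tasks = list(tasks)
    def next_task(self): return self.tasks.pop(0) if self.tasks else None
    def stop(self): self.tasks = []
    def cleanup(self): pass

jobs = Jobs(2, StubTaskmaster([StubTask() for _ in range(4)]))  # num > 1 tries Parallel, else falls back to Serial
jobs.run()
print('interrupted: %s' % jobs.were_interrupted())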
SwainLi/TeamTalk | win-client/3rdParty/src/json/amalgamate.py | 127 | 6807 | """Amalgamate json-cpp library sources into a single source and header file.
Requires Python 2.6
Example of invocation (must be invoked from json-cpp top directory):
python amalgamate.py
"""
import os
import os.path
import sys
class AmalgamationFile:
def __init__( self, top_dir ):
self.top_dir = top_dir
self.blocks = []
def add_text( self, text ):
if not text.endswith( '\n' ):
text += '\n'
self.blocks.append( text )
def add_file( self, relative_input_path, wrap_in_comment=False ):
def add_marker( prefix ):
self.add_text( '' )
self.add_text( '// ' + '/'*70 )
self.add_text( '// %s of content of file: %s' % (prefix, relative_input_path.replace('\\','/')) )
self.add_text( '// ' + '/'*70 )
self.add_text( '' )
add_marker( 'Beginning' )
f = open( os.path.join( self.top_dir, relative_input_path ), 'rt' )
content = f.read()
if wrap_in_comment:
content = '/*\n' + content + '\n*/'
self.add_text( content )
f.close()
add_marker( 'End' )
self.add_text( '\n\n\n\n' )
def get_value( self ):
return ''.join( self.blocks ).replace('\r\n','\n')
def write_to( self, output_path ):
output_dir = os.path.dirname( output_path )
if output_dir and not os.path.isdir( output_dir ):
os.makedirs( output_dir )
f = open( output_path, 'wb' )
f.write( self.get_value() )
f.close()
def amalgamate_source( source_top_dir=None,
target_source_path=None,
header_include_path=None ):
"""Produces amalgated source.
Parameters:
source_top_dir: top-directory
target_source_path: output .cpp path
header_include_path: generated header path relative to target_source_path.
"""
print 'Amalgating header...'
header = AmalgamationFile( source_top_dir )
header.add_text( '/// Json-cpp amalgated header (http://jsoncpp.sourceforge.net/).' )
header.add_text( '/// It is intented to be used with #include <%s>' % header_include_path )
header.add_file( 'LICENSE', wrap_in_comment=True )
header.add_text( '#ifndef JSON_AMALGATED_H_INCLUDED' )
header.add_text( '# define JSON_AMALGATED_H_INCLUDED' )
header.add_text( '/// If defined, indicates that the source file is amalgated' )
header.add_text( '/// to prevent private header inclusion.' )
header.add_text( '#define JSON_IS_AMALGAMATION' )
#header.add_file( 'include/json/version.h' )
header.add_file( 'include/json/config.h' )
header.add_file( 'include/json/forwards.h' )
header.add_file( 'include/json/features.h' )
header.add_file( 'include/json/value.h' )
header.add_file( 'include/json/reader.h' )
header.add_file( 'include/json/writer.h' )
header.add_file( 'include/json/assertions.h' )
header.add_text( '#endif //ifndef JSON_AMALGATED_H_INCLUDED' )
target_header_path = os.path.join( os.path.dirname(target_source_path), header_include_path )
print 'Writing amalgated header to %r' % target_header_path
header.write_to( target_header_path )
base, ext = os.path.splitext( header_include_path )
forward_header_include_path = base + '-forwards' + ext
print 'Amalgating forward header...'
header = AmalgamationFile( source_top_dir )
header.add_text( '/// Json-cpp amalgated forward header (http://jsoncpp.sourceforge.net/).' )
header.add_text( '/// It is intented to be used with #include <%s>' % forward_header_include_path )
header.add_text( '/// This header provides forward declaration for all JsonCpp types.' )
header.add_file( 'LICENSE', wrap_in_comment=True )
header.add_text( '#ifndef JSON_FORWARD_AMALGATED_H_INCLUDED' )
header.add_text( '# define JSON_FORWARD_AMALGATED_H_INCLUDED' )
header.add_text( '/// If defined, indicates that the source file is amalgated' )
header.add_text( '/// to prevent private header inclusion.' )
header.add_text( '#define JSON_IS_AMALGAMATION' )
header.add_file( 'include/json/config.h' )
header.add_file( 'include/json/forwards.h' )
header.add_text( '#endif //ifndef JSON_FORWARD_AMALGATED_H_INCLUDED' )
target_forward_header_path = os.path.join( os.path.dirname(target_source_path),
forward_header_include_path )
print 'Writing amalgated forward header to %r' % target_forward_header_path
header.write_to( target_forward_header_path )
print 'Amalgating source...'
source = AmalgamationFile( source_top_dir )
source.add_text( '/// Json-cpp amalgated source (http://jsoncpp.sourceforge.net/).' )
source.add_text( '/// It is intented to be used with #include "%s"' % target_source_path )
source.add_file( 'LICENSE', wrap_in_comment=True )
source.add_text( '' )
source.add_text( '#include "%s"' % header_include_path )
source.add_text( '' )
lib_json = 'src/lib_json'
source.add_file( os.path.join(lib_json, 'json_tool.h') )
source.add_file( os.path.join(lib_json, 'json_reader.cpp') )
source.add_file( os.path.join(lib_json, 'json_batchallocator.h') )
source.add_file( os.path.join(lib_json, 'json_valueiterator.inl') )
source.add_file( os.path.join(lib_json, 'json_value.cpp') )
source.add_file( os.path.join(lib_json, 'json_writer.cpp') )
print 'Writing amalgated source to %r' % target_source_path
source.write_to( target_source_path )
def main():
usage = """%prog [options]
Generate a single amalgated source and header file from the sources.
"""
from optparse import OptionParser
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('-s', '--source', dest="target_source_path", action='store', default='dist/jsoncpp.cpp',
help="""Output .cpp source path. [Default: %default]""")
parser.add_option('-i', '--include', dest="header_include_path", action='store', default='json/json.h',
help="""Header include path. Used to include the header from the amalgated source file. [Default: %default]""")
parser.add_option('-t', '--top-dir', dest="top_dir", action='store', default=os.getcwd(),
help="""Source top-directory. [Default: %default]""")
parser.enable_interspersed_args()
options, args = parser.parse_args()
msg = amalgamate_source( source_top_dir=options.top_dir,
target_source_path=options.target_source_path,
header_include_path=options.header_include_path )
if msg:
sys.stderr.write( msg + '\n' )
sys.exit( 1 )
else:
        print 'Source successfully amalgamated'
if __name__ == '__main__':
main()
| apache-2.0 |
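A short sketch of calling the amalgamation routine above directly from Python 2 rather than through the command line; the output paths are simply the script's own defaults.

import os
from amalgamate import amalgamate_source

msg = amalgamate_source(source_top_dir=os.getcwd(),
                        target_source_path='dist/jsoncpp.cpp',
                        header_include_path='json/json.h')
if msg:
    raise SystemExit(msg)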
eesatfan/openpli-enigma2 | lib/python/Tools/NumericalTextInput.py | 16 | 3334 | # -*- coding: UTF-8 -*-
from enigma import eTimer
from Components.Language import language
# Dict languageCode -> array of strings
MAP_SEARCH = (
u"%_0",
u" 1",
u"abc2",
u"def3",
u"ghi4",
u"jkl5",
u"mno6",
u"pqrs7",
u"tuv8",
u"wxyz9",
)
MAP_SEARCH_UPCASE = (
U"0%_",
U"1 ",
U"ABC2",
U"DEF3",
U"GHI4",
U"JKL5",
U"MNO6",
U"PQRS7",
U"TUV8",
U"WXYZ9",
)
MAP_DEFAULT = (
u"0,?!&@=*'+\"()$~%",
u" 1.:;/-_",
u"abc2ABC",
u"def3DEF",
u"ghi4GHI",
u"jkl5JKL",
u"mno6MNO",
u"pqrs7PQRS",
u"tuv8TUV",
u"wxyz9WXYZ",
)
MAP_DE = (
u"0,?!&@=*'+\"()$~%",
u" 1.:;/-_",
u"abcä2ABCÄ",
u"def3DEF",
u"ghi4GHI",
u"jkl5JKL",
u"mnoö6MNOÖ",
u"pqrsß7PQRSß",
u"tuvü8TUVÜ",
u"wxyz9WXYZ",
)
MAP_ES = (
u"0,?!&@=*'+\"()$~%",
u" 1.:;/-_",
u"abcáà2ABCÁÀ",
u"deéèf3DEFÉÈ",
u"ghiíì4GHIÍÌ",
u"jkl5JKL",
u"mnñoóò6MNÑOÓÒ",
u"pqrs7PQRS",
u"tuvúù8TUVÚÙ",
u"wxyz9WXYZ",
)
MAP_SE = (
u"0,?!&@=*'+\"()$~%",
u" 1.:;/-_",
u"abcåä2ABCÅÄ",
u"defé3DEFÉ",
u"ghi4GHI",
u"jkl5JKL",
u"mnoö6MNOÖ",
u"pqrs7PQRS",
u"tuv8TUV",
u"wxyz9WXYZ",
)
MAP_CZ = (
u"0,?'+\"()@$!=&*%",
u" 1.:;/-_",
u"abc2áäčABCÁÄČ",
u"def3ďéěDEFĎÉĚ",
u"ghi4íGHIÍ",
u"jkl5ľĺJKLĽĹ",
u"mno6ňóöôMNOŇÓÖÔ",
u"pqrs7řŕšPQRSŘŔŠ",
u"tuv8ťúůüTUVŤÚŮÜ",
u"wxyz9ýžWXYZÝŽ",
)
MAP_PL = (
u"0,?'+\"()@$!=&*%",
u" 1.:;/-_",
u"abcąć2ABCĄĆ",
u"defę3DEFĘ",
u"ghi4GHI",
u"jklł5JKLŁ",
u"mnońó6MNOŃÓ",
u"pqrsś7PQRSŚ",
u"tuv8TUV",
u"wxyzźż9WXYZŹŻ",
)
MAP_RU = (
u"0,?'+\"()@$!=&*%",
u" 1.:;/-_",
u"abcабвг2ABCАБВГ",
u"defдежз3DEFДЕЖЗ",
u"ghiийкл4GHIИЙКЛ",
u"jklмноп5JKLМНОП",
u"mnoрсту6MNOРСТУ",
u"pqrsфхцч7PQRSФХЦЧ",
u"tuvшщьы8TUVШЩЬЫ",
u"wxyzъэюя9WXYZЪЭЮЯ",
)
MAPPINGS = {
'de_DE': MAP_DE,
'es_ES': MAP_ES,
'sv_SE': MAP_SE,
'fi_FI': MAP_SE,
'cs_CZ': MAP_CZ,
'sk_SK': MAP_CZ,
'pl_PL': MAP_PL,
'ru_RU': MAP_RU,
}
class NumericalTextInput:
def __init__(self, nextFunc=None, handleTimeout = True, search = False, mapping = None):
self.useableChars=None
self.nextFunction=nextFunc
if handleTimeout:
self.timer = eTimer()
self.timer.callback.append(self.timeout)
else:
self.timer = None
self.lastKey = -1
self.pos = -1
if mapping is not None:
self.mapping = mapping
elif search:
self.mapping = MAP_SEARCH
else:
self.mapping = MAPPINGS.get(language.getLanguage(), MAP_DEFAULT)
def setUseableChars(self, useable):
self.useableChars = unicode(useable)
def getKey(self, num):
cnt=0
if self.lastKey != num:
if self.lastKey != -1:
self.nextChar()
self.lastKey = num
self.pos = -1
if self.timer is not None:
self.timer.start(1000, True)
while True:
self.pos += 1
if len(self.mapping[num]) <= self.pos:
self.pos = 0
if self.useableChars:
pos = self.useableChars.find(self.mapping[num][self.pos])
if pos == -1:
cnt += 1
if cnt < len(self.mapping[num]):
continue
else:
return None
break
return self.mapping[num][self.pos]
def nextKey(self):
if self.timer is not None:
self.timer.stop()
self.lastKey = -1
def nextChar(self):
self.nextKey()
if self.nextFunction:
self.nextFunction()
def timeout(self):
if self.lastKey != -1:
self.nextChar()
| gpl-2.0 |
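A rough illustration of the multi-tap behaviour implemented above: repeated presses of one digit cycle through that digit's character group, and moving to another key (or letting the timer fire) commits the character. The snippet passes an explicit mapping and disables the timer so it does not depend on language detection, although the module's enigma/Components imports still require the Enigma2 runtime.

t = NumericalTextInput(handleTimeout=False, mapping=MAP_DEFAULT)
print(t.getKey(2))   # 'a' - first press of key 2
print(t.getKey(2))   # 'b' - same key again advances within "abc2ABC"
t.nextKey()          # commit the current character
print(t.getKey(3))   # 'd' - a different key starts a new group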
glove747/liberty-neutron | neutron/tests/unit/extensions/test_external_net.py | 6 | 7741 | # Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_log import log as logging
from oslo_utils import uuidutils
import testtools
from webob import exc
from neutron import context
from neutron.db import models_v2
from neutron.extensions import external_net as external_net
from neutron import manager
from neutron.tests.unit.api.v2 import test_base
from neutron.tests.unit.db import test_db_base_plugin_v2
LOG = logging.getLogger(__name__)
_uuid = uuidutils.generate_uuid
_get_path = test_base._get_path
class ExtNetTestExtensionManager(object):
def get_resources(self):
return []
def get_actions(self):
return []
def get_request_extensions(self):
return []
class ExtNetDBTestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
def _create_network(self, fmt, name, admin_state_up, **kwargs):
"""Override the routine for allowing the router:external attribute."""
# attributes containing a colon should be passed with
# a double underscore
new_args = dict(zip(map(lambda x: x.replace('__', ':'), kwargs),
kwargs.values()))
arg_list = new_args.pop('arg_list', ()) + (external_net.EXTERNAL,)
return super(ExtNetDBTestCase, self)._create_network(
fmt, name, admin_state_up, arg_list=arg_list, **new_args)
def setUp(self):
plugin = 'neutron.tests.unit.extensions.test_l3.TestNoL3NatPlugin'
ext_mgr = ExtNetTestExtensionManager()
super(ExtNetDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
def _set_net_external(self, net_id):
self._update('networks', net_id,
{'network': {external_net.EXTERNAL: True}})
def test_list_nets_external(self):
with self.network() as n1:
self._set_net_external(n1['network']['id'])
with self.network():
body = self._list('networks')
self.assertEqual(len(body['networks']), 2)
body = self._list('networks',
query_params="%s=True" %
external_net.EXTERNAL)
self.assertEqual(len(body['networks']), 1)
body = self._list('networks',
query_params="%s=False" %
external_net.EXTERNAL)
self.assertEqual(len(body['networks']), 1)
def test_list_nets_external_pagination(self):
if self._skip_native_pagination:
self.skipTest("Skip test for not implemented pagination feature")
with self.network(name='net1') as n1, self.network(name='net3') as n3:
self._set_net_external(n1['network']['id'])
self._set_net_external(n3['network']['id'])
with self.network(name='net2') as n2:
self._test_list_with_pagination(
'network', (n1, n3), ('name', 'asc'), 1, 3,
query_params='router:external=True')
self._test_list_with_pagination(
'network', (n2, ), ('name', 'asc'), 1, 2,
query_params='router:external=False')
def test_get_network_succeeds_without_filter(self):
plugin = manager.NeutronManager.get_plugin()
ctx = context.Context(None, None, is_admin=True)
result = plugin.get_networks(ctx, filters=None)
self.assertEqual(result, [])
def test_update_network_set_external_non_admin_fails(self):
# Assert that a non-admin user cannot update the
# router:external attribute
with self.network(tenant_id='noadmin') as network:
data = {'network': {'router:external': True}}
req = self.new_update_request('networks',
data,
network['network']['id'])
req.environ['neutron.context'] = context.Context('', 'noadmin')
res = req.get_response(self.api)
self.assertEqual(exc.HTTPForbidden.code, res.status_int)
def test_network_filter_hook_admin_context(self):
plugin = manager.NeutronManager.get_plugin()
ctx = context.Context(None, None, is_admin=True)
model = models_v2.Network
conditions = plugin._network_filter_hook(ctx, model, [])
self.assertEqual(conditions, [])
def test_network_filter_hook_nonadmin_context(self):
plugin = manager.NeutronManager.get_plugin()
ctx = context.Context('edinson', 'cavani')
model = models_v2.Network
txt = "externalnetworks.network_id IS NOT NULL"
conditions = plugin._network_filter_hook(ctx, model, [])
self.assertEqual(conditions.__str__(), txt)
# Try to concatenate conditions
conditions = plugin._network_filter_hook(ctx, model, conditions)
self.assertEqual(conditions.__str__(), "%s OR %s" % (txt, txt))
def test_create_port_external_network_non_admin_fails(self):
with self.network(router__external=True) as ext_net:
with self.subnet(network=ext_net) as ext_subnet:
with testtools.ExpectedException(
exc.HTTPClientError) as ctx_manager:
with self.port(subnet=ext_subnet,
set_context='True',
tenant_id='noadmin'):
pass
self.assertEqual(ctx_manager.exception.code, 403)
def test_create_port_external_network_admin_succeeds(self):
with self.network(router__external=True) as ext_net:
with self.subnet(network=ext_net) as ext_subnet:
with self.port(subnet=ext_subnet) as port:
self.assertEqual(port['port']['network_id'],
ext_net['network']['id'])
def test_create_external_network_non_admin_fails(self):
with testtools.ExpectedException(exc.HTTPClientError) as ctx_manager:
with self.network(router__external=True,
set_context='True',
tenant_id='noadmin'):
pass
self.assertEqual(ctx_manager.exception.code, 403)
def test_create_external_network_admin_succeeds(self):
with self.network(router__external=True) as ext_net:
self.assertTrue(ext_net['network'][external_net.EXTERNAL])
def test_delete_network_check_disassociated_floatingips(self):
with mock.patch.object(manager.NeutronManager,
'get_service_plugins') as srv_plugins:
l3_mock = mock.Mock()
srv_plugins.return_value = {'L3_ROUTER_NAT': l3_mock}
with self.network() as net:
req = self.new_delete_request('networks', net['network']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, exc.HTTPNoContent.code)
(l3_mock.delete_disassociated_floatingips
.assert_called_once_with(mock.ANY, net['network']['id']))
| apache-2.0 |
lgarren/spack | lib/spack/external/_pytest/recwarn.py | 10 | 7361 | """ recording warnings during test function execution. """
import inspect
import _pytest._code
import py
import sys
import warnings
import pytest
@pytest.yield_fixture
def recwarn(request):
"""Return a WarningsRecorder instance that provides these methods:
* ``pop(category=None)``: return last warning matching the category.
* ``clear()``: clear list of warnings
See http://docs.python.org/library/warnings.html for information
on warning categories.
"""
wrec = WarningsRecorder()
with wrec:
warnings.simplefilter('default')
yield wrec
def pytest_namespace():
return {'deprecated_call': deprecated_call,
'warns': warns}
def deprecated_call(func=None, *args, **kwargs):
""" assert that calling ``func(*args, **kwargs)`` triggers a
``DeprecationWarning`` or ``PendingDeprecationWarning``.
This function can be used as a context manager::
>>> import warnings
>>> def api_call_v2():
... warnings.warn('use v3 of this api', DeprecationWarning)
... return 200
>>> with deprecated_call():
... assert api_call_v2() == 200
Note: we cannot use WarningsRecorder here because it is still subject
to the mechanism that prevents warnings of the same type from being
triggered twice for the same module. See #1190.
"""
if not func:
return WarningsChecker(expected_warning=DeprecationWarning)
categories = []
def warn_explicit(message, category, *args, **kwargs):
categories.append(category)
old_warn_explicit(message, category, *args, **kwargs)
def warn(message, category=None, *args, **kwargs):
if isinstance(message, Warning):
categories.append(message.__class__)
else:
categories.append(category)
old_warn(message, category, *args, **kwargs)
old_warn = warnings.warn
old_warn_explicit = warnings.warn_explicit
warnings.warn_explicit = warn_explicit
warnings.warn = warn
try:
ret = func(*args, **kwargs)
finally:
warnings.warn_explicit = old_warn_explicit
warnings.warn = old_warn
deprecation_categories = (DeprecationWarning, PendingDeprecationWarning)
if not any(issubclass(c, deprecation_categories) for c in categories):
__tracebackhide__ = True
raise AssertionError("%r did not produce DeprecationWarning" % (func,))
return ret
def warns(expected_warning, *args, **kwargs):
"""Assert that code raises a particular class of warning.
Specifically, the input @expected_warning can be a warning class or
tuple of warning classes, and the code must return that warning
(if a single class) or one of those warnings (if a tuple).
This helper produces a list of ``warnings.WarningMessage`` objects,
one for each warning raised.
This function can be used as a context manager, or any of the other ways
``pytest.raises`` can be used::
>>> with warns(RuntimeWarning):
... warnings.warn("my warning", RuntimeWarning)
"""
wcheck = WarningsChecker(expected_warning)
if not args:
return wcheck
elif isinstance(args[0], str):
code, = args
assert isinstance(code, str)
frame = sys._getframe(1)
loc = frame.f_locals.copy()
loc.update(kwargs)
with wcheck:
code = _pytest._code.Source(code).compile()
py.builtin.exec_(code, frame.f_globals, loc)
else:
func = args[0]
with wcheck:
return func(*args[1:], **kwargs)
class RecordedWarning(object):
def __init__(self, message, category, filename, lineno, file, line):
self.message = message
self.category = category
self.filename = filename
self.lineno = lineno
self.file = file
self.line = line
class WarningsRecorder(object):
"""A context manager to record raised warnings.
Adapted from `warnings.catch_warnings`.
"""
def __init__(self, module=None):
self._module = sys.modules['warnings'] if module is None else module
self._entered = False
self._list = []
@property
def list(self):
"""The list of recorded warnings."""
return self._list
def __getitem__(self, i):
"""Get a recorded warning by index."""
return self._list[i]
def __iter__(self):
"""Iterate through the recorded warnings."""
return iter(self._list)
def __len__(self):
"""The number of recorded warnings."""
return len(self._list)
def pop(self, cls=Warning):
"""Pop the first recorded warning, raise exception if not exists."""
for i, w in enumerate(self._list):
if issubclass(w.category, cls):
return self._list.pop(i)
__tracebackhide__ = True
raise AssertionError("%r not found in warning list" % cls)
def clear(self):
"""Clear the list of recorded warnings."""
self._list[:] = []
def __enter__(self):
if self._entered:
__tracebackhide__ = True
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
def showwarning(message, category, filename, lineno,
file=None, line=None):
self._list.append(RecordedWarning(
message, category, filename, lineno, file, line))
# still perform old showwarning functionality
self._showwarning(
message, category, filename, lineno, file=file, line=line)
self._module.showwarning = showwarning
# allow the same warning to be raised more than once
self._module.simplefilter('always')
return self
def __exit__(self, *exc_info):
if not self._entered:
__tracebackhide__ = True
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
class WarningsChecker(WarningsRecorder):
def __init__(self, expected_warning=None, module=None):
super(WarningsChecker, self).__init__(module=module)
msg = ("exceptions must be old-style classes or "
"derived from Warning, not %s")
if isinstance(expected_warning, tuple):
for exc in expected_warning:
if not inspect.isclass(exc):
raise TypeError(msg % type(exc))
elif inspect.isclass(expected_warning):
expected_warning = (expected_warning,)
elif expected_warning is not None:
raise TypeError(msg % type(expected_warning))
self.expected_warning = expected_warning
def __exit__(self, *exc_info):
super(WarningsChecker, self).__exit__(*exc_info)
# only check if we're not currently handling an exception
if all(a is None for a in exc_info):
if self.expected_warning is not None:
if not any(r.category in self.expected_warning for r in self):
__tracebackhide__ = True
pytest.fail("DID NOT WARN")
| lgpl-2.1 |
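The recording helpers above are normally consumed from test code through the recwarn fixture and pytest.warns; a minimal sketch, assuming only that pytest is installed:

import warnings
import pytest

def test_warns_context():
    with pytest.warns(RuntimeWarning):
        warnings.warn("my warning", RuntimeWarning)

def test_recwarn_fixture(recwarn):
    warnings.warn("deprecated", DeprecationWarning)
    w = recwarn.pop(DeprecationWarning)   # WarningsRecorder.pop shown above
    assert "deprecated" in str(w.message)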
jjanssen/django-cms-patches | cms/admin/useradmin.py | 2 | 2246 | from django.conf import settings
from cms.admin.forms import PageUserForm, PageUserGroupForm
from cms.admin.permissionadmin import GenericCmsPermissionAdmin
from cms.exceptions import NoPermissionsException
from cms.models import PageUser, PageUserGroup
from cms.utils.permissions import get_subordinate_users
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.utils.translation import ugettext as _
class PageUserAdmin(UserAdmin, GenericCmsPermissionAdmin):
form = PageUserForm
model = PageUser
list_display = ('username', 'email', 'first_name', 'last_name', 'created_by')
# get_fieldsets method may add fieldsets depending on user
fieldsets = [
(None, {'fields': ('username', ('password1', 'password2'), 'notify_user')}),
(_('User details'), {'fields': (('first_name', 'last_name'), 'email')}),
(_('Groups'), {'fields': ('groups',)}),
]
def get_fieldsets(self, request, obj=None):
fieldsets = self.update_permission_fieldsets(request, obj)
if not '/add' in request.path:
fieldsets[0] = (None, {'fields': ('username', 'notify_user')})
fieldsets.append((_('Password'), {'fields': ('password1', 'password2'), 'classes': ('collapse',)}))
return fieldsets
def queryset(self, request):
qs = super(PageUserAdmin, self).queryset(request)
try:
user_id_set = get_subordinate_users(request.user).values_list('id', flat=True)
return qs.filter(pk__in=user_id_set)
except NoPermissionsException:
return self.model.objects.get_empty_query_set()
def add_view(self, request):
return super(UserAdmin, self).add_view(request)
class PageUserGroupAdmin(admin.ModelAdmin, GenericCmsPermissionAdmin):
form = PageUserGroupForm
list_display = ('name', 'created_by')
fieldsets = [
(None, {'fields': ('name',)}),
]
def get_fieldsets(self, request, obj=None):
return self.update_permission_fieldsets(request, obj)
if settings.CMS_PERMISSION:
admin.site.register(PageUser, PageUserAdmin)
admin.site.register(PageUserGroup, PageUserGroupAdmin) | bsd-3-clause |
UniMOOC/AAClassroom | modules/dashboard/course_settings.py | 5 | 15703 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes supporting updates to basic course settings."""
__author__ = 'Abhinav Khandelwal ([email protected])'
import cgi
import urllib
from common import schema_fields
from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import models
from models import roles
from models import transforms
from modules.dashboard import filer
from modules.dashboard import messages
from modules.oeditor import oeditor
class CourseSettingsRights(object):
"""Manages view/edit rights for files."""
@classmethod
def can_view(cls, handler):
return roles.Roles.is_course_admin(handler.app_context)
@classmethod
def can_edit(cls, handler):
return roles.Roles.is_course_admin(handler.app_context)
@classmethod
def can_delete(cls, handler):
return cls.can_edit(handler)
@classmethod
def can_add(cls, handler):
return cls.can_edit(handler)
class CourseSettingsHandler(ApplicationHandler):
"""Course settings handler."""
EXTRA_CSS_FILES = []
EXTRA_JS_FILES = []
ADDITIONAL_DIRS = []
def post_course_availability(self):
course = self.get_course()
settings = course.get_environ(self.app_context)
availability = self.request.get('availability') == 'True'
settings['course']['now_available'] = availability
course.save_settings(settings)
self.redirect('/dashboard')
def post_course_browsability(self):
course = self.get_course()
settings = course.get_environ(self.app_context)
browsability = self.request.get('browsability') == 'True'
settings['course']['browsable'] = browsability
course.save_settings(settings)
self.redirect('/dashboard')
def post_edit_course_settings(self):
"""Handles editing of course.yaml."""
filer.create_course_file_if_not_exists(self)
extra_args = {}
for name in ('section_names', 'tab', 'tab_title'):
value = self.request.get(name)
if value:
extra_args[name] = value
self.redirect(self.get_action_url(
'edit_basic_settings', key='/course.yaml', extra_args=extra_args))
def get_edit_basic_settings(self):
"""Shows editor for course.yaml."""
key = self.request.get('key')
# The editor for all course settings is getting rather large. Here,
# prune out all sections except the one named. Names can name either
# entire sub-registries, or a single item. E.g., "course" selects all
# items under the 'course' sub-registry, while
# "base:before_head_tag_ends" selects just that one field.
registry = self.get_course().create_settings_schema()
section_names = urllib.unquote(self.request.get('section_names'))
if section_names:
registry = registry.clone_only_items_named(section_names.split(','))
tab = self.request.get('tab')
exit_url = self.canonicalize_url('/dashboard?action=settings&tab=%s' %
tab)
rest_url = self.canonicalize_url(CourseSettingsRESTHandler.URI)
form_html = oeditor.ObjectEditor.get_html_for(
self, registry.get_json_schema(), registry.get_schema_dict(),
key, rest_url, exit_url, extra_css_files=self.EXTRA_CSS_FILES,
extra_js_files=self.EXTRA_JS_FILES,
additional_dirs=self.ADDITIONAL_DIRS,
required_modules=CourseSettingsRESTHandler.REQUIRED_MODULES)
template_values = {
'page_title': self.format_title(
'Settings > %s' %
urllib.unquote(self.request.get('tab_title'))),
'page_description': messages.EDIT_SETTINGS_DESCRIPTION,
'main_content': form_html,
}
self.render_page(template_values, in_action='settings')
class CourseYamlRESTHandler(BaseRESTHandler):
"""Common base for REST handlers in this file."""
def get_course_dict(self):
return self.get_course().get_environ(self.app_context)
def get(self):
"""Handles REST GET verb and returns an object as JSON payload."""
assert self.app_context.is_editable_fs()
key = self.request.get('key')
if not CourseSettingsRights.can_view(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
# Load data if possible.
fs = self.app_context.fs.impl
filename = fs.physical_to_logical('/course.yaml')
try:
stream = fs.get(filename)
except: # pylint: disable=bare-except
stream = None
if not stream:
transforms.send_json_response(
self, 404, 'Object not found.', {'key': key})
return
# Prepare data.
json_payload = self.process_get()
transforms.send_json_response(
self, 200, 'Success.',
payload_dict=json_payload,
xsrf_token=XsrfTokenManager.create_xsrf_token(self.XSRF_ACTION))
def put(self):
"""Handles REST PUT verb with JSON payload."""
assert self.app_context.is_editable_fs()
request_param = self.request.get('request')
if not request_param:
transforms.send_json_response(
self, 400, 'Missing "request" parameter.')
return
try:
request = transforms.loads(request_param)
except ValueError:
transforms.send_json_response(
self, 400, 'Malformed "request" parameter.')
return
key = request.get('key')
if not key:
transforms.send_json_response(
self, 400, 'Request missing "key" parameter.')
return
payload_param = request.get('payload')
if not payload_param:
transforms.send_json_response(
self, 400, 'Request missing "payload" parameter.')
return
try:
payload = transforms.loads(payload_param)
except ValueError:
transforms.send_json_response(
self, 400, 'Malformed "payload" parameter.')
return
if not self.assert_xsrf_token_or_fail(
request, self.XSRF_ACTION, {'key': key}):
return
if not CourseSettingsRights.can_edit(self):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
request_data = self.process_put(request, payload)
if request_data:
course_settings = courses.deep_dict_merge(
request_data, self.get_course_dict())
if not self.get_course().save_settings(course_settings):
transforms.send_json_response(self, 412, 'Validation error.')
transforms.send_json_response(self, 200, 'Saved.')
def delete(self):
"""Handles REST DELETE verb with JSON payload."""
key = self.request.get('key')
if not self.assert_xsrf_token_or_fail(
self.request, self.XSRF_ACTION, {'key': key}):
return
if (not CourseSettingsRights.can_delete(self) or
not self.is_deletion_allowed()):
transforms.send_json_response(
self, 401, 'Access denied.', {'key': key})
return
entity = self.process_delete()
if self.get_course().save_settings(entity):
transforms.send_json_response(self, 200, 'Deleted.')
class CourseSettingsRESTHandler(CourseYamlRESTHandler):
"""Provides REST API for a file."""
REQUIRED_MODULES = [
'inputex-date', 'inputex-string', 'inputex-textarea', 'inputex-url',
'inputex-checkbox', 'inputex-select', 'inputex-uneditable', 'gcb-rte']
URI = '/rest/course/settings'
XSRF_ACTION = 'basic-course-settings-put'
def get_group_id(self, email):
if not email or '@googlegroups.com' not in email:
return None
return email.split('@')[0]
def get_groups_web_url(self, email):
group_id = self.get_group_id(email)
if not group_id:
return None
return 'https://groups.google.com/group/' + group_id
def get_groups_embed_url(self, email):
group_id = self.get_group_id(email)
if not group_id:
return None
return 'https://groups.google.com/forum/embed/?place=forum/' + group_id
def process_get(self):
entity = {}
schema = self.get_course().create_settings_schema()
schema.convert_entity_to_json_entity(
self.get_course_dict(), entity)
json_payload = transforms.dict_to_json(
entity, schema.get_json_schema_dict())
return json_payload
def _process_course_data(self, course_data):
if 'forum_email' in course_data:
forum_email = course_data['forum_email']
forum_web_url = self.get_groups_web_url(forum_email)
if forum_web_url:
course_data['forum_url'] = forum_web_url
forum_web_url = self.get_groups_embed_url(forum_email)
if forum_web_url:
course_data['forum_embed_url'] = forum_web_url
if 'announcement_list_email' in course_data:
announcement_email = course_data['announcement_list_email']
announcement_web_url = self.get_groups_web_url(
announcement_email)
if announcement_web_url:
course_data['announcement_list_url'] = announcement_web_url
def _process_extra_locales(self, extra_locales):
"""Make sure each locale has a label to go along."""
existing = set([
label.title for label in models.LabelDAO.get_all_of_type(
models.LabelDTO.LABEL_TYPE_LOCALE)])
course_locale = self.app_context.default_locale
for extra_locale in extra_locales + [{'locale': course_locale}]:
locale = extra_locale['locale']
if locale in existing:
continue
models.LabelDAO.save(models.LabelDTO(
None, {'title': locale,
'version': '1.0',
'description': '[%s] locale' % locale,
'type': models.LabelDTO.LABEL_TYPE_LOCALE}))
def process_put(self, request, payload):
errors = []
request_data = {}
schema = self.get_course().create_settings_schema()
schema.convert_json_to_entity(payload, request_data)
schema.validate(request_data, errors)
if errors:
transforms.send_json_response(
self, 400, 'Invalid data: \n' + '\n'.join(errors))
return
if 'extra_locales' in request_data:
self._process_extra_locales(request_data['extra_locales'])
if 'course' in request_data:
self._process_course_data(request_data['course'])
return request_data
def is_deletion_allowed(self):
return False
class HtmlHookHandler(ApplicationHandler):
"""Set up for OEditor manipulation of HTML hook contents.
A separate handler and REST handler is required for hook contents,
since the set of hooks is not statically known. Users are free to add
whatever hooks they want where-ever they want with fairly arbitrary
names. This class and its companion REST class deal with persisting the
hook values into the course.yaml settings.
"""
def post_edit_html_hook(self):
filer.create_course_file_if_not_exists(self)
self.redirect(self.get_action_url(
'edit_html_hook', key=self.request.get('html_hook')))
def get_edit_html_hook(self):
key = self.request.get('key')
registry = HtmlHookRESTHandler.REGISTRY
exit_url = self.canonicalize_url(self.request.referer)
rest_url = self.canonicalize_url(HtmlHookRESTHandler.URI)
delete_url = '%s?%s' % (
self.canonicalize_url(HtmlHookRESTHandler.URI),
urllib.urlencode({
'key': key,
'xsrf_token': cgi.escape(
self.create_xsrf_token(HtmlHookRESTHandler.XSRF_ACTION))
}))
form_html = oeditor.ObjectEditor.get_html_for(
self, registry.get_json_schema(), registry.get_schema_dict(),
key, rest_url, exit_url,
delete_url=delete_url, delete_method='delete',
required_modules=HtmlHookRESTHandler.REQUIRED_MODULES)
template_values = {}
template_values['page_title'] = self.format_title('Edit Hook HTML')
template_values['page_description'] = (
messages.EDIT_HTML_HOOK_DESCRIPTION)
template_values['main_content'] = form_html
self.render_page(template_values)
def _create_hook_registry():
reg = schema_fields.FieldRegistry('Html Hook', description='Html Hook')
reg.add_property(schema_fields.SchemaField(
'hook_content', 'HTML Hook Content', 'html',
optional=True))
return reg
class HtmlHookRESTHandler(CourseYamlRESTHandler):
"""REST API for individual HTML hook entries in course.yaml."""
REGISTRY = _create_hook_registry()
REQUIRED_MODULES = [
'inputex-textarea', 'inputex-uneditable', 'gcb-rte', 'inputex-hidden']
URI = '/rest/course/html_hook'
XSRF_ACTION = 'html-hook-put'
def process_get(self):
course_dict = self.get_course_dict()
html_hook = self.request.get('key')
path = html_hook.split(':')
for element in path:
item = course_dict.get(element)
if type(item) == dict:
course_dict = item
return {'hook_content': item}
def process_put(self, request, payload):
request_data = {}
HtmlHookRESTHandler.REGISTRY.convert_json_to_entity(
payload, request_data)
if 'hook_content' not in request_data:
transforms.send_json_response(
self, 400, 'Payload missing "hook_content" parameter.')
return None
# Walk from bottom to top of hook element name building up
# dict-in-dict until we are at outermost level, which is
# the course_dict we will return.
course_dict = request_data['hook_content']
for element in reversed(request['key'].split(':')):
course_dict = {element: course_dict}
return course_dict
def is_deletion_allowed(self):
return True
def process_delete(self):
html_hook = self.request.get('key')
course_dict = self.get_course_dict()
pruned_dict = course_dict
for element in html_hook.split(':'):
if element in pruned_dict:
if type(pruned_dict[element]) == dict:
pruned_dict = pruned_dict[element]
else:
del pruned_dict[element]
return course_dict
| apache-2.0 |
rossburton/yocto-autobuilder | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/manhole/telnet.py | 37 | 3494 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""Telnet-based shell."""
# twisted imports
from twisted.protocols import telnet
from twisted.internet import protocol
from twisted.python import log, failure
# system imports
import string, copy, sys
from cStringIO import StringIO
class Shell(telnet.Telnet):
"""A Python command-line shell."""
def connectionMade(self):
telnet.Telnet.connectionMade(self)
self.lineBuffer = []
def loggedIn(self):
self.transport.write(">>> ")
def checkUserAndPass(self, username, password):
return ((self.factory.username == username) and (password == self.factory.password))
def write(self, data):
"""Write some data to the transport.
"""
self.transport.write(data)
def telnet_Command(self, cmd):
if self.lineBuffer:
if not cmd:
cmd = string.join(self.lineBuffer, '\n') + '\n\n\n'
self.doCommand(cmd)
self.lineBuffer = []
return "Command"
else:
self.lineBuffer.append(cmd)
self.transport.write("... ")
return "Command"
else:
self.doCommand(cmd)
return "Command"
def doCommand(self, cmd):
# TODO -- refactor this, Reality.author.Author, and the manhole shell
#to use common functionality (perhaps a twisted.python.code module?)
fn = '$telnet$'
result = None
try:
out = sys.stdout
sys.stdout = self
try:
code = compile(cmd,fn,'eval')
result = eval(code, self.factory.namespace)
except:
try:
code = compile(cmd, fn, 'exec')
exec code in self.factory.namespace
except SyntaxError, e:
if not self.lineBuffer and str(e)[:14] == "unexpected EOF":
self.lineBuffer.append(cmd)
self.transport.write("... ")
return
else:
failure.Failure().printTraceback(file=self)
log.deferr()
self.write('\r\n>>> ')
return
except:
io = StringIO()
failure.Failure().printTraceback(file=self)
log.deferr()
self.write('\r\n>>> ')
return
finally:
sys.stdout = out
self.factory.namespace['_'] = result
if result is not None:
self.transport.write(repr(result))
self.transport.write('\r\n')
self.transport.write(">>> ")
class ShellFactory(protocol.Factory):
username = "admin"
password = "admin"
protocol = Shell
service = None
def __init__(self):
self.namespace = {
'factory': self,
'service': None,
'_': None
}
def setService(self, service):
self.namespace['service'] = self.service = service
def __getstate__(self):
"""This returns the persistent state of this shell factory.
"""
dict = self.__dict__
ns = copy.copy(dict['namespace'])
dict['namespace'] = ns
if ns.has_key('__builtins__'):
del ns['__builtins__']
return dict
| gpl-2.0 |
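A minimal sketch of exposing the shell above on a TCP port with the Twisted reactor; the port, credentials and the extra namespace entry are illustrative only.

from twisted.internet import reactor
from twisted.manhole.telnet import ShellFactory

factory = ShellFactory()
factory.username = 'admin'
factory.password = 'change-me'
factory.namespace['app'] = object()   # anything placed here is reachable from the telnet session
reactor.listenTCP(4040, factory)
reactor.run()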
TeamEOS/external_chromium_org | tools/cr/cr/commands/info.py | 44 | 1236 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the info implementation of Command."""
import cr
class InfoCommand(cr.Command):
"""The cr info command implementation."""
def __init__(self):
super(InfoCommand, self).__init__()
self.help = 'Print information about the cr environment'
def AddArguments(self, subparsers):
parser = super(InfoCommand, self).AddArguments(subparsers)
parser.add_argument(
'-s', '--short', dest='_short',
action='store_true', default=False,
help='Short form results, useful for scripting.'
)
self.ConsumeArgs(parser, 'the environment')
return parser
def EarlyArgProcessing(self):
if getattr(cr.context.args, '_short', False):
self.requires_build_dir = False
def Run(self):
if cr.context.remains:
for var in cr.context.remains:
if getattr(cr.context.args, '_short', False):
val = cr.context.Find(var)
if val is None:
val = ''
print val
else:
print var, '=', cr.context.Find(var)
else:
cr.base.client.PrintInfo()
| bsd-3-clause |
alexandregz/simian | src/simian/mac/admin/packages.py | 1 | 7391 | #!/usr/bin/env python
#
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
"""Packages admin handler."""
import datetime
from simian.mac import admin
from simian.mac import common
from simian.mac import models
from simian.mac.common import auth
DEFAULT_PACKAGE_LOG_FETCH_LIMIT = 25
class Packages(admin.AdminHandler):
"""Handler for /admin/packages."""
XSRF_PROTECT = True
DATASTORE_MODEL = models.PackageInfo
LOGGING_MODEL = models.AdminPackageLog
TEMPLATE = 'packages.html'
REPORT_TYPE = 'packages'
LOG_REPORT_TYPE = 'package_logs'
def get(self, report=None):
"""GET handler."""
auth.DoUserAuth()
if report == 'logs':
self._DisplayLogs()
else:
historical = self.request.get('historical') == '1'
applesus = self.request.get('applesus') == '1'
if historical or applesus:
self._DisplayPackagesListFromCache(applesus=applesus)
else:
self._DisplayPackagesList()
def _GetPackageQuery(self):
"""Build query."""
all_packages = self.request.get('all_packages') == '1'
query = self.DATASTORE_MODEL.all()
if self.REPORT_TYPE == 'packages' and not all_packages:
query.filter('catalogs IN', common.TRACKS)
return query
def _DisplayPackagesList(self):
"""Displays list of all installs/removals/etc."""
installs, counts_mtime = models.ReportsCache.GetInstallCounts()
pending, pending_mtime = models.ReportsCache.GetPendingCounts()
packages = []
all_packages = self.request.get('all_packages') == '1'
query = self._GetPackageQuery()
for p in query:
if not p.plist:
self.error(403)
self.response.out.write('Package %s has a broken plist!' % p.filename)
return
pkg = {}
pkg['count'] = installs.get(p.munki_name, {}).get('install_count', 'N/A')
pkg['fail_count'] = installs.get(p.munki_name, {}).get(
'install_fail_count', 'N/A')
pkg['pending_count'] = pending.get(p.munki_name, 'N/A')
pkg['duration_seconds_avg'] = installs.get(p.munki_name, {}).get(
'duration_seconds_avg', None) or 'N/A'
pkg['unattended'] = p.plist.get('unattended_install', False)
force_install_after_date = p.plist.get('force_install_after_date', None)
if force_install_after_date:
pkg['force_install_after_date'] = force_install_after_date
pkg['catalogs'] = p.catalog_matrix
pkg['manifests'] = p.manifest_matrix
pkg['munki_name'] = p.munki_name or p.plist.GetMunkiName()
pkg['filename'] = p.filename
pkg['file_size'] = p.plist.get('installer_item_size', 0) * 1024
pkg['install_types'] = p.install_types
pkg['manifest_mod_access'] = p.manifest_mod_access
pkg['description'] = p.description
packages.append(pkg)
packages.sort(key=lambda pkg: pkg['munki_name'].lower())
self.Render(self.TEMPLATE,
{'packages': packages, 'counts_mtime': counts_mtime,
'pending_mtime': pending_mtime,
'report_type': self.REPORT_TYPE,
'active_pkg': self.request.GET.get('activepkg'),
'is_support_user': auth.IsSupportUser(),
'can_upload': auth.HasPermission(auth.UPLOAD),
'is_admin': auth.IsAdminUser(),
'all_packages': all_packages,})
def _DisplayPackagesListFromCache(self, applesus=False):
installs, counts_mtime = models.ReportsCache.GetInstallCounts()
pkgs = []
names = installs.keys()
names.sort()
for name in names:
install = installs[name]
if applesus and install.get('applesus', False):
d = {'name': name,
'count': install.get('install_count', 'N/A'),
'fail_count': install.get('install_fail_count', 'N/A'),
'duration_seconds_avg': install.get('duration_seconds_avg', 'N/A')
}
pkgs.append(d)
elif not applesus and not install['applesus']:
d = {'name': name,
'count': install.get('install_count', 'N/A'),
'fail_count': install.get('install_fail_count', 'N/A'),
'duration_seconds_avg': install.get('duration_seconds_avg', 'N/A')
}
pkgs.append(d)
if applesus:
report_type = 'apple_historical'
else:
report_type = 'packages_historical'
self.Render(self.TEMPLATE,
{'packages': pkgs, 'counts_mtime': counts_mtime,
'applesus': applesus, 'cached_pkgs_list': True,
'report_type': report_type})
def _DisplayLogs(self):
"""Displays all models.AdminPackageLog entities."""
key_id = self.request.get('plist')
if key_id:
try:
key_id = int(key_id)
except ValueError:
self.error(404)
return
log = self.LOGGING_MODEL.get_by_id(key_id)
if self.request.get('format') == 'xml':
self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
self.response.out.write(log.plist)
else:
time = datetime.datetime.strftime(log.mtime, '%Y-%m-%d %H:%M:%S')
title = 'plist for Package Log <b>%s - %s</b>' % (log.filename, time)
raw_xml = '/admin/packages/logs?plist=%d&format=xml' % key_id
self.Render('plist.html',
{'plist_type': 'package_log',
'xml': admin.XmlToHtml(log.plist.GetXml()),
'title': title,
'raw_xml_link': raw_xml,
})
else:
filename = self.request.get('filename')
query = self.LOGGING_MODEL.all()
if filename:
query.filter('filename =', filename)
query.order('-mtime')
logs = self.Paginate(query, DEFAULT_PACKAGE_LOG_FETCH_LIMIT)
formatted_logs = []
for log in logs:
formatted_log = {}
formatted_log['data'] = log
if (hasattr(log, 'proposed_catalogs')
and hasattr(log, 'proposed_manifest')):
formatted_log['catalogs'] = common.util.MakeTrackMatrix(
log.catalogs, log.proposed_catalogs)
formatted_log['manifests'] = common.util.MakeTrackMatrix(
log.manifests, log.proposed_manifests)
else:
formatted_log['catalogs'] = common.util.MakeTrackMatrix(log.catalogs)
formatted_log['manifests'] = common.util.MakeTrackMatrix(
log.manifests)
formatted_logs.append(formatted_log)
self.Render(
'package_logs.html',
{'logs': formatted_logs,
'report_type': self.LOG_REPORT_TYPE,
'filename': filename})
class PackageProposals(Packages):
"""Handler for /admin/proposals."""
DATASTORE_MODEL = models.PackageInfoProposal
LOGGING_MODEL = models.AdminPackageProposalLog
TEMPLATE = 'packages.html'
LOG_REPORT_TYPE = 'proposal_logs'
REPORT_TYPE = 'proposals'
def _GetPackageQuery(self):
return self.DATASTORE_MODEL.all()
| apache-2.0 |
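For orientation, a hypothetical WSGI route table for the two handlers above; Simian's real URL map lives elsewhere, so the paths and the webapp2 wiring shown here are assumptions only.

import webapp2
from simian.mac.admin import packages

app = webapp2.WSGIApplication([
    (r'/admin/packages/?(.*)', packages.Packages),        # optional trailing 'logs' report
    (r'/admin/proposals/?(.*)', packages.PackageProposals),
])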
DwangoMediaVillage/pqkmeans | test/encoder/test_pq_encoder.py | 2 | 1590 | import unittest
import pqkmeans
import numpy
import pipe
class TestPQEncoder(unittest.TestCase):
def data_source(self, n: int):
for i in range(n):
for _ in range(3):
yield [i * 100] * 6
def setUp(self):
self.encoder = pqkmeans.encoder.PQEncoder(num_subdim=2)
def test_just_train_array(self):
input_array = numpy.random.random((300, 10))
self.encoder.fit(numpy.array(input_array))
encoded = list(self.encoder.transform(numpy.array(input_array)))
self.assertEqual(len(input_array), len(encoded))
def test_fit_and_transform_generator(self):
self.encoder.fit(numpy.array(list(self.data_source(300))))
# infinite list
encoded = self.encoder.transform_generator(self.data_source(100000000)) | pipe.take(60) | pipe.as_list
for i in range(0, len(encoded), 3):
numpy.testing.assert_array_almost_equal(encoded[i], encoded[i + 1])
numpy.testing.assert_array_almost_equal(encoded[i], encoded[i + 2])
def test_transform_and_inverse_transform(self):
input_array = numpy.random.random((300, 10))
self.encoder.fit(numpy.array(input_array))
encoded = self.encoder.transform(numpy.array(input_array))
decoded = self.encoder.inverse_transform(encoded)
N1, M = encoded.shape
N2, D = decoded.shape
self.assertEqual(N1, N2)
self.assertEqual(M, self.encoder.M)
self.assertEqual(D, self.encoder.Ds * self.encoder.M)
self.assertEqual(encoded.dtype, self.encoder.code_dtype)
| mit |
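The same encoder API outside the test harness, mirroring the fit / transform / inverse_transform calls exercised above:

import numpy
import pqkmeans

data = numpy.random.random((300, 10))
encoder = pqkmeans.encoder.PQEncoder(num_subdim=2)
encoder.fit(data)
codes = encoder.transform(data)                  # one small integer code per sub-space
reconstructed = encoder.inverse_transform(codes)
print(codes.shape, reconstructed.shape)          # (300, 2) and (300, 10)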
rs2/pandas | pandas/tests/indexes/test_base.py | 1 | 93051 | from collections import defaultdict
from datetime import datetime, timedelta
from io import StringIO
import math
import operator
import re
import numpy as np
import pytest
import pandas._config.config as cf
from pandas._libs.tslib import Timestamp
from pandas.compat.numpy import np_datetime64_compat
from pandas.util._test_decorators import async_mark
from pandas.core.dtypes.generic import ABCIndex
import pandas as pd
from pandas import (
CategoricalIndex,
DataFrame,
DatetimeIndex,
Float64Index,
Int64Index,
PeriodIndex,
RangeIndex,
Series,
TimedeltaIndex,
UInt64Index,
date_range,
isna,
period_range,
)
import pandas._testing as tm
from pandas.core.indexes.api import (
Index,
MultiIndex,
_get_combined_index,
ensure_index,
ensure_index_from_sequences,
)
from pandas.tests.indexes.common import Base
class TestIndex(Base):
_holder = Index
def create_index(self) -> Index:
return Index(list("abcde"))
def test_can_hold_identifiers(self):
index = self.create_index()
key = index[0]
assert index._can_hold_identifiers_and_holds_name(key) is True
@pytest.mark.parametrize("index", ["datetime"], indirect=True)
def test_new_axis(self, index):
with tm.assert_produces_warning(FutureWarning):
# GH#30588 multi-dimensional indexing deprecated
new_index = index[None, :]
assert new_index.ndim == 2
assert isinstance(new_index, np.ndarray)
def test_constructor_regular(self, index):
tm.assert_contains_all(index, index)
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_constructor_casting(self, index):
# casting
arr = np.array(index)
new_index = Index(arr)
tm.assert_contains_all(arr, new_index)
tm.assert_index_equal(index, new_index)
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_constructor_copy(self, index):
# copy
# index = self.create_index()
arr = np.array(index)
new_index = Index(arr, copy=True, name="name")
assert isinstance(new_index, Index)
assert new_index.name == "name"
tm.assert_numpy_array_equal(arr, new_index.values)
arr[0] = "SOMEBIGLONGSTRING"
assert new_index[0] != "SOMEBIGLONGSTRING"
# FIXME: dont leave commented-out
# what to do here?
# arr = np.array(5.)
# pytest.raises(Exception, arr.view, Index)
@pytest.mark.parametrize("cast_as_obj", [True, False])
@pytest.mark.parametrize(
"index",
[
pd.date_range(
"2015-01-01 10:00",
freq="D",
periods=3,
tz="US/Eastern",
name="Green Eggs & Ham",
), # DTI with tz
pd.date_range("2015-01-01 10:00", freq="D", periods=3), # DTI no tz
pd.timedelta_range("1 days", freq="D", periods=3), # td
pd.period_range("2015-01-01", freq="D", periods=3), # period
],
)
def test_constructor_from_index_dtlike(self, cast_as_obj, index):
if cast_as_obj:
result = pd.Index(index.astype(object))
else:
result = pd.Index(index)
tm.assert_index_equal(result, index)
if isinstance(index, pd.DatetimeIndex):
assert result.tz == index.tz
if cast_as_obj:
# GH#23524 check that Index(dti, dtype=object) does not
# incorrectly raise ValueError, and that nanoseconds are not
# dropped
index += pd.Timedelta(nanoseconds=50)
result = pd.Index(index, dtype=object)
assert result.dtype == np.object_
assert list(result) == list(index)
@pytest.mark.parametrize(
"index,has_tz",
[
(
pd.date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"),
True,
), # datetimetz
(pd.timedelta_range("1 days", freq="D", periods=3), False), # td
(pd.period_range("2015-01-01", freq="D", periods=3), False), # period
],
)
def test_constructor_from_series_dtlike(self, index, has_tz):
result = pd.Index(pd.Series(index))
tm.assert_index_equal(result, index)
if has_tz:
assert result.tz == index.tz
def test_constructor_from_series_freq(self):
# GH 6273
# create from a series, passing a freq
dts = ["1-1-1990", "2-1-1990", "3-1-1990", "4-1-1990", "5-1-1990"]
expected = DatetimeIndex(dts, freq="MS")
s = Series(pd.to_datetime(dts))
result = DatetimeIndex(s, freq="MS")
tm.assert_index_equal(result, expected)
def test_constructor_from_frame_series_freq(self):
# GH 6273
# create from a series, passing a freq
dts = ["1-1-1990", "2-1-1990", "3-1-1990", "4-1-1990", "5-1-1990"]
expected = DatetimeIndex(dts, freq="MS")
df = pd.DataFrame(np.random.rand(5, 3))
df["date"] = dts
result = DatetimeIndex(df["date"], freq="MS")
assert df["date"].dtype == object
expected.name = "date"
tm.assert_index_equal(result, expected)
expected = pd.Series(dts, name="date")
tm.assert_series_equal(df["date"], expected)
# GH 6274
# infer freq of same
freq = pd.infer_freq(df["date"])
assert freq == "MS"
@pytest.mark.parametrize(
"array",
[
np.arange(5),
np.array(["a", "b", "c"]),
date_range("2000-01-01", periods=3).values,
],
)
def test_constructor_ndarray_like(self, array):
# GH 5460#issuecomment-44474502
# it should be possible to convert any object that satisfies the numpy
# ndarray interface directly into an Index
class ArrayLike:
def __init__(self, array):
self.array = array
def __array__(self, dtype=None) -> np.ndarray:
return self.array
expected = pd.Index(array)
result = pd.Index(ArrayLike(array))
tm.assert_index_equal(result, expected)
def test_constructor_int_dtype_nan(self):
# see gh-15187
data = [np.nan]
expected = Float64Index(data)
result = Index(data, dtype="float")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("dtype", ["int64", "uint64"])
def test_constructor_int_dtype_nan_raises(self, dtype):
# see gh-15187
data = [np.nan]
msg = "cannot convert"
with pytest.raises(ValueError, match=msg):
Index(data, dtype=dtype)
def test_constructor_no_pandas_array(self):
ser = pd.Series([1, 2, 3])
result = pd.Index(ser.array)
expected = pd.Index([1, 2, 3])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"klass,dtype,na_val",
[
(pd.Float64Index, np.float64, np.nan),
(pd.DatetimeIndex, "datetime64[ns]", pd.NaT),
],
)
def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val):
# GH 13467
na_list = [na_val, na_val]
expected = klass(na_list)
assert expected.dtype == dtype
result = Index(na_list)
tm.assert_index_equal(result, expected)
result = Index(np.array(na_list))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"vals,dtype",
[
([1, 2, 3, 4, 5], "int"),
([1.1, np.nan, 2.2, 3.0], "float"),
(["A", "B", "C", np.nan], "obj"),
],
)
def test_constructor_simple_new(self, vals, dtype):
index = Index(vals, name=dtype)
result = index._simple_new(index.values, dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize(
"vals",
[
[1, 2, 3],
np.array([1, 2, 3]),
np.array([1, 2, 3], dtype=int),
# below should coerce
[1.0, 2.0, 3.0],
np.array([1.0, 2.0, 3.0], dtype=float),
],
)
def test_constructor_dtypes_to_int64(self, vals):
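        # an explicit dtype=int coerces the parametrized float inputs to an Int64Index as well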
index = Index(vals, dtype=int)
assert isinstance(index, Int64Index)
@pytest.mark.parametrize(
"vals",
[
[1, 2, 3],
[1.0, 2.0, 3.0],
np.array([1.0, 2.0, 3.0]),
np.array([1, 2, 3], dtype=int),
np.array([1.0, 2.0, 3.0], dtype=float),
],
)
def test_constructor_dtypes_to_float64(self, vals):
index = Index(vals, dtype=float)
assert isinstance(index, Float64Index)
@pytest.mark.parametrize(
"vals",
[
[1, 2, 3],
np.array([1, 2, 3], dtype=int),
np.array(
[np_datetime64_compat("2011-01-01"), np_datetime64_compat("2011-01-02")]
),
[datetime(2011, 1, 1), datetime(2011, 1, 2)],
],
)
def test_constructor_dtypes_to_categorical(self, vals):
index = Index(vals, dtype="category")
assert isinstance(index, CategoricalIndex)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize(
"vals",
[
Index(
np.array(
[
np_datetime64_compat("2011-01-01"),
np_datetime64_compat("2011-01-02"),
]
)
),
Index([datetime(2011, 1, 1), datetime(2011, 1, 2)]),
],
)
def test_constructor_dtypes_to_datetime(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=object)
assert isinstance(index, Index)
assert index.dtype == object
else:
index = Index(vals)
assert isinstance(index, DatetimeIndex)
@pytest.mark.parametrize("cast_index", [True, False])
@pytest.mark.parametrize(
"vals",
[
np.array([np.timedelta64(1, "D"), np.timedelta64(1, "D")]),
[timedelta(1), timedelta(1)],
],
)
def test_constructor_dtypes_to_timedelta(self, cast_index, vals):
if cast_index:
index = Index(vals, dtype=object)
assert isinstance(index, Index)
assert index.dtype == object
else:
index = Index(vals)
assert isinstance(index, TimedeltaIndex)
@pytest.mark.parametrize("attr", ["values", "asi8"])
@pytest.mark.parametrize("klass", [pd.Index, pd.DatetimeIndex])
def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, klass):
# Test constructing with a datetimetz dtype
# .values produces numpy datetimes, so these are considered naive
# .asi8 produces integers, so these are considered epoch timestamps
# ^the above will be true in a later version. Right now we `.view`
# the i8 values as NS_DTYPE, effectively treating them as wall times.
index = pd.date_range("2011-01-01", periods=5)
arg = getattr(index, attr)
index = index.tz_localize(tz_naive_fixture)
dtype = index.dtype
if attr == "asi8":
result = pd.DatetimeIndex(arg).tz_localize(tz_naive_fixture)
else:
result = klass(arg, tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
if attr == "asi8":
result = pd.DatetimeIndex(arg).astype(dtype)
else:
result = klass(arg, dtype=dtype)
tm.assert_index_equal(result, index)
if attr == "asi8":
result = pd.DatetimeIndex(list(arg)).tz_localize(tz_naive_fixture)
else:
result = klass(list(arg), tz=tz_naive_fixture)
tm.assert_index_equal(result, index)
if attr == "asi8":
result = pd.DatetimeIndex(list(arg)).astype(dtype)
else:
result = klass(list(arg), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("attr", ["values", "asi8"])
@pytest.mark.parametrize("klass", [pd.Index, pd.TimedeltaIndex])
def test_constructor_dtypes_timedelta(self, attr, klass):
index = pd.timedelta_range("1 days", periods=5)
        index = index._with_freq(None)  # won't be preserved by constructors
dtype = index.dtype
values = getattr(index, attr)
result = klass(values, dtype=dtype)
tm.assert_index_equal(result, index)
result = klass(list(values), dtype=dtype)
tm.assert_index_equal(result, index)
@pytest.mark.parametrize("value", [[], iter([]), (_ for _ in [])])
@pytest.mark.parametrize(
"klass",
[
Index,
Float64Index,
Int64Index,
UInt64Index,
CategoricalIndex,
DatetimeIndex,
TimedeltaIndex,
],
)
def test_constructor_empty(self, value, klass):
empty = klass(value)
assert isinstance(empty, klass)
assert not len(empty)
@pytest.mark.parametrize(
"empty,klass",
[
(PeriodIndex([], freq="B"), PeriodIndex),
(PeriodIndex(iter([]), freq="B"), PeriodIndex),
(PeriodIndex((_ for _ in []), freq="B"), PeriodIndex),
(RangeIndex(step=1), pd.RangeIndex),
(MultiIndex(levels=[[1, 2], ["blue", "red"]], codes=[[], []]), MultiIndex),
],
)
def test_constructor_empty_special(self, empty, klass):
assert isinstance(empty, klass)
assert not len(empty)
def test_constructor_overflow_int64(self):
# see gh-15832
msg = (
"The elements provided in the data cannot "
"all be casted to the dtype int64"
)
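        # uint64 max - 1 does not fit into int64, so requesting dtype="int64"
        # must raise instead of silently wrapping around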
with pytest.raises(OverflowError, match=msg):
Index([np.iinfo(np.uint64).max - 1], dtype="int64")
@pytest.mark.parametrize(
"index",
[
"datetime",
"float",
"int",
"period",
"range",
"repeats",
"timedelta",
"tuples",
"uint",
],
indirect=True,
)
def test_view_with_args(self, index):
index.view("i8")
@pytest.mark.parametrize(
"index",
[
"unicode",
"string",
pytest.param("categorical", marks=pytest.mark.xfail(reason="gh-25464")),
"bool",
"empty",
],
indirect=True,
)
def test_view_with_args_object_array_raises(self, index):
msg = "Cannot change data-type for object array"
with pytest.raises(TypeError, match=msg):
index.view("i8")
@pytest.mark.parametrize("index", ["int", "range"], indirect=True)
def test_astype(self, index):
casted = index.astype("i8")
# it works!
casted.get_loc(5)
# pass on name
index.name = "foobar"
casted = index.astype("i8")
assert casted.name == "foobar"
def test_equals_object(self):
# same
assert Index(["a", "b", "c"]).equals(Index(["a", "b", "c"]))
@pytest.mark.parametrize(
"comp", [Index(["a", "b"]), Index(["a", "b", "d"]), ["a", "b", "c"]]
)
def test_not_equals_object(self, comp):
assert not Index(["a", "b", "c"]).equals(comp)
def test_insert_missing(self, nulls_fixture):
# GH 22295
# test there is no mangling of NA values
expected = Index(["a", nulls_fixture, "b", "c"])
result = Index(list("abc")).insert(1, nulls_fixture)
tm.assert_index_equal(result, expected)
def test_delete_raises(self):
index = Index(["a", "b", "c", "d"], name="index")
msg = "index 5 is out of bounds for axis 0 with size 4"
with pytest.raises(IndexError, match=msg):
index.delete(5)
def test_identical(self):
# index
i1 = Index(["a", "b", "c"])
i2 = Index(["a", "b", "c"])
assert i1.identical(i2)
i1 = i1.rename("foo")
assert i1.equals(i2)
assert not i1.identical(i2)
i2 = i2.rename("foo")
assert i1.identical(i2)
i3 = Index([("a", "a"), ("a", "b"), ("b", "a")])
i4 = Index([("a", "a"), ("a", "b"), ("b", "a")], tupleize_cols=False)
assert not i3.identical(i4)
def test_is_(self):
ind = Index(range(10))
assert ind.is_(ind)
assert ind.is_(ind.view().view().view().view())
assert not ind.is_(Index(range(10)))
assert not ind.is_(ind.copy())
assert not ind.is_(ind.copy(deep=False))
assert not ind.is_(ind[:])
assert not ind.is_(np.array(range(10)))
# quasi-implementation dependent
assert ind.is_(ind.view())
ind2 = ind.view()
ind2.name = "bob"
assert ind.is_(ind2)
assert ind2.is_(ind)
        # it doesn't matter whether Indices are *actually* views of the underlying data
assert not ind.is_(Index(ind.values))
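        # two separately constructed Indexes stay distinct under is_ even when
        # they are built from, and share, the same ndarray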
arr = np.array(range(1, 11))
ind1 = Index(arr, copy=False)
ind2 = Index(arr, copy=False)
assert not ind1.is_(ind2)
@pytest.mark.parametrize("index", ["datetime"], indirect=True)
def test_asof(self, index):
d = index[0]
assert index.asof(d) == d
assert isna(index.asof(d - timedelta(1)))
d = index[-1]
assert index.asof(d + timedelta(1)) == d
d = index[0].to_pydatetime()
assert isinstance(index.asof(d), Timestamp)
def test_asof_datetime_partial(self):
index = pd.date_range("2010-01-01", periods=2, freq="m")
expected = Timestamp("2010-02-28")
result = index.asof("2010-02")
assert result == expected
assert not isinstance(result, Index)
def test_nanosecond_index_access(self):
s = Series([Timestamp("20130101")]).values.view("i8")[0]
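        # `s` is the Timestamp as int64 nanoseconds since the epoch, so the
        # index below is spaced at single-nanosecond resolution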
r = DatetimeIndex([s + 50 + i for i in range(100)])
x = Series(np.random.randn(100), index=r)
first_value = x.asof(x.index[0])
# this does not yet work, as parsing strings is done via dateutil
# assert first_value == x['2013-01-01 00:00:00.000000050+0000']
expected_ts = np_datetime64_compat("2013-01-01 00:00:00.000000050+0000", "ns")
assert first_value == x[Timestamp(expected_ts)]
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_booleanindex(self, index):
bool_index = np.ones(len(index), dtype=bool)
bool_index[5:30:2] = False
sub_index = index[bool_index]
for i, val in enumerate(sub_index):
assert sub_index.get_loc(val) == i
sub_index = index[list(bool_index)]
for i, val in enumerate(sub_index):
assert sub_index.get_loc(val) == i
def test_fancy(self):
index = self.create_index()
sl = index[[1, 2, 3]]
for i in sl:
assert i == sl[sl.get_loc(i)]
@pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
@pytest.mark.parametrize("dtype", [np.int_, np.bool_])
def test_empty_fancy(self, index, dtype):
empty_arr = np.array([], dtype=dtype)
empty_index = type(index)([])
assert index[[]].identical(empty_index)
assert index[empty_arr].identical(empty_index)
@pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
def test_empty_fancy_raises(self, index):
# pd.DatetimeIndex is excluded, because it overrides getitem and should
# be tested separately.
empty_farr = np.array([], dtype=np.float_)
empty_index = type(index)([])
assert index[[]].identical(empty_index)
# np.ndarray only accepts ndarray of int & bool dtypes, so should Index
msg = r"arrays used as indices must be of integer \(or boolean\) type"
with pytest.raises(IndexError, match=msg):
index[empty_farr]
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_intersection(self, index, sort):
first = index[:20]
second = index[:10]
intersect = first.intersection(second, sort=sort)
if sort is None:
tm.assert_index_equal(intersect, second.sort_values())
assert tm.equalContents(intersect, second)
# Corner cases
inter = first.intersection(first, sort=sort)
assert inter is first
@pytest.mark.parametrize(
"index2,keeps_name",
[
(Index([3, 4, 5, 6, 7], name="index"), True), # preserve same name
(Index([3, 4, 5, 6, 7], name="other"), False), # drop diff names
(Index([3, 4, 5, 6, 7]), False),
],
)
def test_intersection_name_preservation(self, index2, keeps_name, sort):
index1 = Index([1, 2, 3, 4, 5], name="index")
expected = Index([3, 4, 5])
result = index1.intersection(index2, sort)
if keeps_name:
expected.name = "index"
assert result.name == expected.name
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index", ["string"], indirect=True)
@pytest.mark.parametrize(
"first_name,second_name,expected_name",
[("A", "A", "A"), ("A", "B", None), (None, "B", None)],
)
def test_intersection_name_preservation2(
self, index, first_name, second_name, expected_name, sort
):
first = index[5:20]
second = index[:10]
first.name = first_name
second.name = second_name
intersect = first.intersection(second, sort=sort)
assert intersect.name == expected_name
@pytest.mark.parametrize(
"index2,keeps_name",
[
(Index([4, 7, 6, 5, 3], name="index"), True),
(Index([4, 7, 6, 5, 3], name="other"), False),
],
)
def test_intersection_monotonic(self, index2, keeps_name, sort):
index1 = Index([5, 3, 2, 4, 1], name="index")
expected = Index([5, 3, 4])
if keeps_name:
expected.name = "index"
result = index1.intersection(index2, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"index2,expected_arr",
[(Index(["B", "D"]), ["B"]), (Index(["B", "D", "A"]), ["A", "B", "A"])],
)
def test_intersection_non_monotonic_non_unique(self, index2, expected_arr, sort):
# non-monotonic non-unique
index1 = Index(["A", "B", "A", "C"])
expected = Index(expected_arr, dtype="object")
result = index1.intersection(index2, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
def test_intersect_str_dates(self, sort):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
i1 = Index(dt_dates, dtype=object)
i2 = Index(["aa"], dtype=object)
result = i2.intersection(i1, sort=sort)
assert len(result) == 0
@pytest.mark.xfail(reason="Not implemented")
def test_intersection_equal_sort_true(self):
# TODO decide on True behaviour
idx = pd.Index(["c", "a", "b"])
sorted_ = pd.Index(["a", "b", "c"])
tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_)
def test_chained_union(self, sort):
# Chained unions handles names correctly
i1 = Index([1, 2], name="i1")
i2 = Index([5, 6], name="i2")
i3 = Index([3, 4], name="i3")
union = i1.union(i2.union(i3, sort=sort), sort=sort)
expected = i1.union(i2, sort=sort).union(i3, sort=sort)
tm.assert_index_equal(union, expected)
j1 = Index([1, 2], name="j1")
j2 = Index([], name="j2")
j3 = Index([], name="j3")
union = j1.union(j2.union(j3, sort=sort), sort=sort)
expected = j1.union(j2, sort=sort).union(j3, sort=sort)
tm.assert_index_equal(union, expected)
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_union(self, index, sort):
first = index[5:20]
second = index[:10]
everything = index[:20]
union = first.union(second, sort=sort)
if sort is None:
tm.assert_index_equal(union, everything.sort_values())
assert tm.equalContents(union, everything)
@pytest.mark.parametrize("slice_", [slice(None), slice(0)])
def test_union_sort_other_special(self, slice_):
# https://github.com/pandas-dev/pandas/issues/24959
idx = pd.Index([1, 0, 2])
# default, sort=None
other = idx[slice_]
tm.assert_index_equal(idx.union(other), idx)
tm.assert_index_equal(other.union(idx), idx)
# sort=False
tm.assert_index_equal(idx.union(other, sort=False), idx)
@pytest.mark.xfail(reason="Not implemented")
@pytest.mark.parametrize("slice_", [slice(None), slice(0)])
def test_union_sort_special_true(self, slice_):
# TODO decide on True behaviour
# sort=True
idx = pd.Index([1, 0, 2])
# default, sort=None
other = idx[slice_]
result = idx.union(other, sort=True)
expected = pd.Index([0, 1, 2])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("klass", [np.array, Series, list])
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_union_from_iterables(self, index, klass, sort):
# GH 10149
first = index[5:20]
second = index[:10]
everything = index[:20]
case = klass(second.values)
result = first.union(case, sort=sort)
if sort is None:
tm.assert_index_equal(result, everything.sort_values())
assert tm.equalContents(result, everything)
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_union_identity(self, index, sort):
first = index[5:20]
union = first.union(first, sort=sort)
# i.e. identity is not preserved when sort is True
assert (union is first) is (not sort)
# This should no longer be the same object, since [] is not consistent,
# both objects will be recast to dtype('O')
union = first.union([], sort=sort)
assert (union is first) is (not sort)
union = Index([]).union(first, sort=sort)
assert (union is first) is (not sort)
@pytest.mark.parametrize("first_list", [list("ba"), list()])
@pytest.mark.parametrize("second_list", [list("ab"), list()])
@pytest.mark.parametrize(
"first_name, second_name, expected_name",
[("A", "B", None), (None, "B", None), ("A", None, None)],
)
def test_union_name_preservation(
self, first_list, second_list, first_name, second_name, expected_name, sort
):
first = Index(first_list, name=first_name)
second = Index(second_list, name=second_name)
union = first.union(second, sort=sort)
vals = set(first_list).union(second_list)
if sort is None and len(first_list) > 0 and len(second_list) > 0:
expected = Index(sorted(vals), name=expected_name)
tm.assert_index_equal(union, expected)
else:
expected = Index(vals, name=expected_name)
assert tm.equalContents(union, expected)
def test_union_dt_as_obj(self, sort):
        # TODO: Replace with fixture
index = self.create_index()
date_index = pd.date_range("2019-01-01", periods=10)
first_cat = index.union(date_index)
second_cat = index.union(index)
if date_index.dtype == np.object_:
appended = np.append(index, date_index)
else:
appended = np.append(index, date_index.astype("O"))
assert tm.equalContents(first_cat, appended)
assert tm.equalContents(second_cat, index)
tm.assert_contains_all(index, first_cat)
tm.assert_contains_all(index, second_cat)
tm.assert_contains_all(date_index, first_cat)
def test_map_identity_mapping(self, index):
# GH 12766
tm.assert_index_equal(index, index.map(lambda x: x))
def test_map_with_tuples(self):
# GH 12766
# Test that returning a single tuple from an Index
# returns an Index.
index = tm.makeIntIndex(3)
result = tm.makeIntIndex(3).map(lambda x: (x,))
expected = Index([(i,) for i in index])
tm.assert_index_equal(result, expected)
# Test that returning a tuple from a map of a single index
# returns a MultiIndex object.
result = index.map(lambda x: (x, x == 1))
expected = MultiIndex.from_tuples([(i, i == 1) for i in index])
tm.assert_index_equal(result, expected)
def test_map_with_tuples_mi(self):
# Test that returning a single object from a MultiIndex
# returns an Index.
first_level = ["foo", "bar", "baz"]
multi_index = MultiIndex.from_tuples(zip(first_level, [1, 2, 3]))
reduced_index = multi_index.map(lambda x: x[0])
tm.assert_index_equal(reduced_index, Index(first_level))
@pytest.mark.parametrize(
"attr", ["makeDateIndex", "makePeriodIndex", "makeTimedeltaIndex"]
)
def test_map_tseries_indices_return_index(self, attr):
index = getattr(tm, attr)(10)
expected = Index([1] * 10)
result = index.map(lambda x: 1)
tm.assert_index_equal(expected, result)
def test_map_tseries_indices_accsr_return_index(self):
date_index = tm.makeDateIndex(24, freq="h", name="hourly")
expected = Index(range(24), name="hourly")
tm.assert_index_equal(expected, date_index.map(lambda x: x.hour))
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: pd.Series(values, index),
],
)
def test_map_dictlike_simple(self, mapper):
# GH 12756
expected = Index(["foo", "bar", "baz"])
index = tm.makeIntIndex(3)
result = index.map(mapper(expected.values, index))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
[
lambda values, index: {i: e for e, i in zip(values, index)},
lambda values, index: pd.Series(values, index),
],
)
def test_map_dictlike(self, index, mapper):
# GH 12756
if isinstance(index, CategoricalIndex):
# Tested in test_categorical
return
elif not index.is_unique:
# Cannot map duplicated index
return
if index.empty:
# to match proper result coercion for uints
expected = Index([])
else:
expected = Index(np.arange(len(index), 0, -1))
result = index.map(mapper(expected, index))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"mapper",
[Series(["foo", 2.0, "baz"], index=[0, 2, -1]), {0: "foo", 2: 2.0, -1: "baz"}],
)
def test_map_with_non_function_missing_values(self, mapper):
# GH 12756
expected = Index([2.0, np.nan, "foo"])
result = Index([2, 1, 0]).map(mapper)
tm.assert_index_equal(expected, result)
def test_map_na_exclusion(self):
index = Index([1.5, np.nan, 3, np.nan, 5])
result = index.map(lambda x: x * 2, na_action="ignore")
expected = index * 2
tm.assert_index_equal(result, expected)
def test_map_defaultdict(self):
index = Index([1, 2, 3])
default_dict = defaultdict(lambda: "blank")
default_dict[1] = "stuff"
result = index.map(default_dict)
expected = Index(["stuff", "blank", "blank"])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("name,expected", [("foo", "foo"), ("bar", None)])
def test_append_empty_preserve_name(self, name, expected):
left = Index([], name="foo")
right = Index([1, 2, 3], name=name)
result = left.append(right)
assert result.name == expected
@pytest.mark.parametrize("index", ["string"], indirect=True)
@pytest.mark.parametrize("second_name,expected", [(None, None), ("name", "name")])
def test_difference_name_preservation(self, index, second_name, expected, sort):
first = index[5:20]
second = index[:10]
answer = index[10:20]
first.name = "name"
second.name = second_name
result = first.difference(second, sort=sort)
assert tm.equalContents(result, answer)
if expected is None:
assert result.name is None
else:
assert result.name == expected
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_difference_empty_arg(self, index, sort):
first = index[5:20]
first.name = "name"
result = first.difference([], sort)
assert tm.equalContents(result, first)
assert result.name == first.name
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_difference_identity(self, index, sort):
first = index[5:20]
first.name = "name"
result = first.difference(first, sort)
assert len(result) == 0
assert result.name == first.name
@pytest.mark.parametrize("index", ["string"], indirect=True)
def test_difference_sort(self, index, sort):
first = index[5:20]
second = index[:10]
result = first.difference(second, sort)
expected = index[10:20]
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
def test_symmetric_difference(self, sort):
# smoke
index1 = Index([5, 2, 3, 4], name="index1")
index2 = Index([2, 3, 4, 1])
result = index1.symmetric_difference(index2, sort=sort)
expected = Index([5, 1])
assert tm.equalContents(result, expected)
assert result.name is None
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
# __xor__ syntax
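        # for Index objects, ^ is equivalent to symmetric_difference with default sorting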
expected = index1 ^ index2
assert tm.equalContents(result, expected)
assert result.name is None
@pytest.mark.parametrize("opname", ["difference", "symmetric_difference"])
def test_difference_incomparable(self, opname):
a = pd.Index([3, pd.Timestamp("2000"), 1])
b = pd.Index([2, pd.Timestamp("1999"), 1])
op = operator.methodcaller(opname, b)
# sort=None, the default
result = op(a)
expected = pd.Index([3, pd.Timestamp("2000"), 2, pd.Timestamp("1999")])
if opname == "difference":
expected = expected[:2]
tm.assert_index_equal(result, expected)
# sort=False
op = operator.methodcaller(opname, b, sort=False)
result = op(a)
tm.assert_index_equal(result, expected)
@pytest.mark.xfail(reason="Not implemented")
@pytest.mark.parametrize("opname", ["difference", "symmetric_difference"])
def test_difference_incomparable_true(self, opname):
# TODO decide on True behaviour
# # sort=True, raises
a = pd.Index([3, pd.Timestamp("2000"), 1])
b = pd.Index([2, pd.Timestamp("1999"), 1])
op = operator.methodcaller(opname, b, sort=True)
with pytest.raises(TypeError, match="Cannot compare"):
op(a)
def test_symmetric_difference_mi(self, sort):
index1 = MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3]))
index2 = MultiIndex.from_tuples([("foo", 1), ("bar", 3)])
result = index1.symmetric_difference(index2, sort=sort)
expected = MultiIndex.from_tuples([("bar", 2), ("baz", 3), ("bar", 3)])
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
assert tm.equalContents(result, expected)
@pytest.mark.parametrize(
"index2,expected",
[
(Index([0, 1, np.nan]), Index([2.0, 3.0, 0.0])),
(Index([0, 1]), Index([np.nan, 2.0, 3.0, 0.0])),
],
)
def test_symmetric_difference_missing(self, index2, expected, sort):
# GH 13514 change: {nan} - {nan} == {}
# (GH 6444, sorting of nans, is no longer an issue)
index1 = Index([1, np.nan, 2, 3])
result = index1.symmetric_difference(index2, sort=sort)
if sort is None:
expected = expected.sort_values()
tm.assert_index_equal(result, expected)
def test_symmetric_difference_non_index(self, sort):
index1 = Index([1, 2, 3, 4], name="index1")
index2 = np.array([2, 3, 4, 5])
expected = Index([1, 5])
result = index1.symmetric_difference(index2, sort=sort)
assert tm.equalContents(result, expected)
assert result.name == "index1"
result = index1.symmetric_difference(index2, result_name="new_name", sort=sort)
assert tm.equalContents(result, expected)
assert result.name == "new_name"
def test_difference_type(self, index, sort):
# GH 20040
# If taking difference of a set and itself, it
# needs to preserve the type of the index
if not index.is_unique:
return
result = index.difference(index, sort=sort)
expected = index.drop(index)
tm.assert_index_equal(result, expected)
def test_intersection_difference(self, index, sort):
# GH 20040
# Test that the intersection of an index with an
# empty index produces the same index as the difference
# of an index with itself. Test for all types
if not index.is_unique:
return
inter = index.intersection(index.drop(index))
diff = index.difference(index, sort=sort)
tm.assert_index_equal(inter, diff)
def test_is_mixed_deprecated(self):
# GH#32922
index = self.create_index()
with tm.assert_produces_warning(FutureWarning):
index.is_mixed()
@pytest.mark.parametrize(
"index, expected",
[
("string", False),
("bool", False),
("categorical", False),
("int", True),
("datetime", False),
("float", True),
],
indirect=["index"],
)
def test_is_numeric(self, index, expected):
assert index.is_numeric() is expected
@pytest.mark.parametrize(
"index, expected",
[
("string", True),
("bool", True),
("categorical", False),
("int", False),
("datetime", False),
("float", False),
],
indirect=["index"],
)
def test_is_object(self, index, expected):
assert index.is_object() is expected
@pytest.mark.parametrize(
"index, expected",
[
("string", False),
("bool", False),
("categorical", False),
("int", False),
("datetime", True),
("float", False),
],
indirect=["index"],
)
def test_is_all_dates(self, index, expected):
assert index.is_all_dates is expected
def test_summary(self, index):
self._check_method_works(Index._summary, index)
def test_summary_bug(self):
        # GH 3869
ind = Index(["{other}%s", "~:{range}:0"], name="A")
result = ind._summary()
# shouldn't be formatted accidentally.
assert "~:{range}:0" in result
assert "{other}%s" in result
def test_format_different_scalar_lengths(self):
# GH35439
idx = Index(["aaaaaaaaa", "b"])
expected = ["aaaaaaaaa", "b"]
assert idx.format() == expected
def test_format_bug(self):
# GH 14626
        # Windows has lower precision on datetime.datetime.now (it does not
        # include microseconds); the default Timestamp repr shows them but Index
        # formatting does not, so that case is skipped below
now = datetime.now()
if not str(now).endswith("000"):
index = Index([now])
formatted = index.format()
expected = [str(index[0])]
assert formatted == expected
Index([]).format()
@pytest.mark.parametrize("vals", [[1, 2.0 + 3.0j, 4.0], ["a", "b", "c"]])
def test_format_missing(self, vals, nulls_fixture):
# 2845
vals = list(vals) # Copy for each iteration
vals.append(nulls_fixture)
index = Index(vals)
formatted = index.format()
expected = [str(index[0]), str(index[1]), str(index[2]), "NaN"]
assert formatted == expected
assert index[3] is nulls_fixture
def test_format_with_name_time_info(self):
# bug I fixed 12/20/2011
dates = date_range("2011-01-01 04:00:00", periods=10, name="something")
formatted = dates.format(name=True)
assert formatted[0] == "something"
def test_format_datetime_with_time(self):
t = Index([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])
result = t.format()
expected = ["2012-02-07 00:00:00", "2012-02-07 23:00:00"]
assert len(result) == 2
assert result == expected
@pytest.mark.parametrize("op", ["any", "all"])
def test_logical_compat(self, op):
index = self.create_index()
assert getattr(index, op)() == getattr(index.values, op)()
def _check_method_works(self, method, index):
method(index)
def test_get_indexer(self):
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
r1 = index1.get_indexer(index2)
e1 = np.array([1, 3, -1], dtype=np.intp)
tm.assert_almost_equal(r1, e1)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize(
"expected,method",
[
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), "pad"),
(np.array([-1, 0, 0, 1, 1], dtype=np.intp), "ffill"),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), "backfill"),
(np.array([0, 0, 1, 1, 2], dtype=np.intp), "bfill"),
],
)
def test_get_indexer_methods(self, reverse, expected, method):
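        # "pad"/"ffill" and "backfill"/"bfill" are aliases for the same fill
        # behaviour, hence the duplicated expected arrays in the parametrization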
index1 = Index([1, 2, 3, 4, 5])
index2 = Index([2, 4, 6])
if reverse:
index1 = index1[::-1]
expected = expected[::-1]
result = index2.get_indexer(index1, method=method)
tm.assert_almost_equal(result, expected)
def test_get_indexer_invalid(self):
# GH10411
index = Index(np.arange(10))
with pytest.raises(ValueError, match="tolerance argument"):
index.get_indexer([1, 0], tolerance=1)
with pytest.raises(ValueError, match="limit argument"):
index.get_indexer([1, 0], limit=1)
@pytest.mark.parametrize(
"method, tolerance, indexer, expected",
[
("pad", None, [0, 5, 9], [0, 5, 9]),
("backfill", None, [0, 5, 9], [0, 5, 9]),
("nearest", None, [0, 5, 9], [0, 5, 9]),
("pad", 0, [0, 5, 9], [0, 5, 9]),
("backfill", 0, [0, 5, 9], [0, 5, 9]),
("nearest", 0, [0, 5, 9], [0, 5, 9]),
("pad", None, [0.2, 1.8, 8.5], [0, 1, 8]),
("backfill", None, [0.2, 1.8, 8.5], [1, 2, 9]),
("nearest", None, [0.2, 1.8, 8.5], [0, 2, 9]),
("pad", 1, [0.2, 1.8, 8.5], [0, 1, 8]),
("backfill", 1, [0.2, 1.8, 8.5], [1, 2, 9]),
("nearest", 1, [0.2, 1.8, 8.5], [0, 2, 9]),
("pad", 0.2, [0.2, 1.8, 8.5], [0, -1, -1]),
("backfill", 0.2, [0.2, 1.8, 8.5], [-1, 2, -1]),
("nearest", 0.2, [0.2, 1.8, 8.5], [0, 2, -1]),
],
)
def test_get_indexer_nearest(self, method, tolerance, indexer, expected):
index = Index(np.arange(10))
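        # targets farther than ``tolerance`` from every label resolve to -1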
actual = index.get_indexer(indexer, method=method, tolerance=tolerance)
tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
@pytest.mark.parametrize("listtype", [list, tuple, Series, np.array])
@pytest.mark.parametrize(
"tolerance, expected",
list(
zip(
[[0.3, 0.3, 0.1], [0.2, 0.1, 0.1], [0.1, 0.5, 0.5]],
[[0, 2, -1], [0, -1, -1], [-1, 2, 9]],
)
),
)
def test_get_indexer_nearest_listlike_tolerance(
self, tolerance, expected, listtype
):
index = Index(np.arange(10))
actual = index.get_indexer(
[0.2, 1.8, 8.5], method="nearest", tolerance=listtype(tolerance)
)
tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
def test_get_indexer_nearest_error(self):
index = Index(np.arange(10))
with pytest.raises(ValueError, match="limit argument"):
index.get_indexer([1, 0], method="nearest", limit=1)
with pytest.raises(ValueError, match="tolerance size must match"):
index.get_indexer([1, 0], method="nearest", tolerance=[1, 2, 3])
@pytest.mark.parametrize(
"method,expected",
[("pad", [8, 7, 0]), ("backfill", [9, 8, 1]), ("nearest", [9, 7, 0])],
)
def test_get_indexer_nearest_decreasing(self, method, expected):
index = Index(np.arange(10))[::-1]
actual = index.get_indexer([0, 5, 9], method=method)
tm.assert_numpy_array_equal(actual, np.array([9, 4, 0], dtype=np.intp))
actual = index.get_indexer([0.2, 1.8, 8.5], method=method)
tm.assert_numpy_array_equal(actual, np.array(expected, dtype=np.intp))
@pytest.mark.parametrize(
"method,expected",
[
("pad", np.array([-1, 0, 1, 1], dtype=np.intp)),
("backfill", np.array([0, 0, 1, -1], dtype=np.intp)),
],
)
def test_get_indexer_strings(self, method, expected):
index = pd.Index(["b", "c"])
actual = index.get_indexer(["a", "b", "c", "d"], method=method)
tm.assert_numpy_array_equal(actual, expected)
def test_get_indexer_strings_raises(self):
index = pd.Index(["b", "c"])
msg = r"unsupported operand type\(s\) for -: 'str' and 'str'"
with pytest.raises(TypeError, match=msg):
index.get_indexer(["a", "b", "c", "d"], method="nearest")
with pytest.raises(TypeError, match=msg):
index.get_indexer(["a", "b", "c", "d"], method="pad", tolerance=2)
with pytest.raises(TypeError, match=msg):
index.get_indexer(
["a", "b", "c", "d"], method="pad", tolerance=[2, 2, 2, 2]
)
@pytest.mark.parametrize("idx_class", [Int64Index, RangeIndex, Float64Index])
def test_get_indexer_numeric_index_boolean_target(self, idx_class):
# GH 16877
numeric_index = idx_class(RangeIndex(4))
result = numeric_index.get_indexer([True, False, True])
expected = np.array([-1, -1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_with_NA_values(
self, unique_nulls_fixture, unique_nulls_fixture2
):
# GH 22332
# check pairwise, that no pair of na values
# is mangled
if unique_nulls_fixture is unique_nulls_fixture2:
return # skip it, values are not unique
arr = np.array([unique_nulls_fixture, unique_nulls_fixture2], dtype=object)
index = pd.Index(arr, dtype=object)
result = index.get_indexer(
[unique_nulls_fixture, unique_nulls_fixture2, "Unknown"]
)
expected = np.array([0, 1, -1], dtype=np.intp)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("method", [None, "pad", "backfill", "nearest"])
def test_get_loc(self, method):
index = pd.Index([0, 1, 2])
assert index.get_loc(1, method=method) == 1
if method:
assert index.get_loc(1, method=method, tolerance=0) == 1
@pytest.mark.parametrize("method", [None, "pad", "backfill", "nearest"])
def test_get_loc_raises_bad_label(self, method):
index = pd.Index([0, 1, 2])
if method:
msg = "not supported between"
else:
msg = "invalid key"
with pytest.raises(TypeError, match=msg):
index.get_loc([1, 2], method=method)
@pytest.mark.parametrize(
"method,loc", [("pad", 1), ("backfill", 2), ("nearest", 1)]
)
def test_get_loc_tolerance(self, method, loc):
index = pd.Index([0, 1, 2])
assert index.get_loc(1.1, method) == loc
assert index.get_loc(1.1, method, tolerance=1) == loc
@pytest.mark.parametrize("method", ["pad", "backfill", "nearest"])
def test_get_loc_outside_tolerance_raises(self, method):
index = pd.Index([0, 1, 2])
with pytest.raises(KeyError, match="1.1"):
index.get_loc(1.1, method, tolerance=0.05)
def test_get_loc_bad_tolerance_raises(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match="must be numeric"):
index.get_loc(1.1, "nearest", tolerance="invalid")
def test_get_loc_tolerance_no_method_raises(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match="tolerance .* valid if"):
index.get_loc(1.1, tolerance=1)
def test_get_loc_raises_missized_tolerance(self):
index = pd.Index([0, 1, 2])
with pytest.raises(ValueError, match="tolerance size must match"):
index.get_loc(1.1, "nearest", tolerance=[1, 1])
def test_get_loc_raises_object_nearest(self):
index = pd.Index(["a", "c"])
with pytest.raises(TypeError, match="unsupported operand type"):
index.get_loc("a", method="nearest")
def test_get_loc_raises_object_tolerance(self):
index = pd.Index(["a", "c"])
with pytest.raises(TypeError, match="unsupported operand type"):
index.get_loc("a", method="pad", tolerance="invalid")
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs(self, dtype):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(index)
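        # slice_locs maps label-based (start, end) bounds onto positional
        # (start, stop) integers suitable for ordinary slicing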
assert index.slice_locs(start=2) == (2, n)
assert index.slice_locs(start=3) == (3, n)
assert index.slice_locs(3, 8) == (3, 6)
assert index.slice_locs(5, 10) == (3, n)
assert index.slice_locs(end=8) == (0, 6)
assert index.slice_locs(end=9) == (0, 7)
# reversed
index2 = index[::-1]
assert index2.slice_locs(8, 2) == (2, 6)
assert index2.slice_locs(7, 3) == (2, 5)
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_float_locs(self, dtype):
index = Index(np.array([0, 1, 2, 5, 6, 7, 9, 10], dtype=dtype))
n = len(index)
assert index.slice_locs(5.0, 10.0) == (3, n)
assert index.slice_locs(4.5, 10.5) == (3, 8)
index2 = index[::-1]
assert index2.slice_locs(8.5, 1.5) == (2, 6)
assert index2.slice_locs(10.5, -1) == (0, n)
def test_slice_locs_dup(self):
index = Index(["a", "a", "b", "c", "d", "d"])
assert index.slice_locs("a", "d") == (0, 6)
assert index.slice_locs(end="d") == (0, 6)
assert index.slice_locs("a", "c") == (0, 4)
assert index.slice_locs("b", "d") == (2, 6)
index2 = index[::-1]
assert index2.slice_locs("d", "a") == (0, 6)
assert index2.slice_locs(end="a") == (0, 6)
assert index2.slice_locs("d", "b") == (0, 4)
assert index2.slice_locs("c", "a") == (2, 6)
@pytest.mark.parametrize("dtype", [int, float])
def test_slice_locs_dup_numeric(self, dtype):
index = Index(np.array([10, 12, 12, 14], dtype=dtype))
assert index.slice_locs(12, 12) == (1, 3)
assert index.slice_locs(11, 13) == (1, 3)
index2 = index[::-1]
assert index2.slice_locs(12, 12) == (1, 3)
assert index2.slice_locs(13, 11) == (1, 3)
def test_slice_locs_na(self):
index = Index([np.nan, 1, 2])
assert index.slice_locs(1) == (1, 3)
assert index.slice_locs(np.nan) == (0, 3)
index = Index([0, np.nan, np.nan, 1, 2])
assert index.slice_locs(np.nan) == (1, 5)
def test_slice_locs_na_raises(self):
index = Index([np.nan, 1, 2])
with pytest.raises(KeyError, match=""):
index.slice_locs(start=1.5)
with pytest.raises(KeyError, match=""):
index.slice_locs(end=1.5)
@pytest.mark.parametrize(
"in_slice,expected",
[
# error: Slice index must be an integer or None
(pd.IndexSlice[::-1], "yxdcb"),
(pd.IndexSlice["b":"y":-1], ""), # type: ignore[misc]
(pd.IndexSlice["b"::-1], "b"), # type: ignore[misc]
(pd.IndexSlice[:"b":-1], "yxdcb"), # type: ignore[misc]
(pd.IndexSlice[:"y":-1], "y"), # type: ignore[misc]
(pd.IndexSlice["y"::-1], "yxdcb"), # type: ignore[misc]
(pd.IndexSlice["y"::-4], "yb"), # type: ignore[misc]
# absent labels
(pd.IndexSlice[:"a":-1], "yxdcb"), # type: ignore[misc]
(pd.IndexSlice[:"a":-2], "ydb"), # type: ignore[misc]
(pd.IndexSlice["z"::-1], "yxdcb"), # type: ignore[misc]
(pd.IndexSlice["z"::-3], "yc"), # type: ignore[misc]
(pd.IndexSlice["m"::-1], "dcb"), # type: ignore[misc]
(pd.IndexSlice[:"m":-1], "yx"), # type: ignore[misc]
(pd.IndexSlice["a":"a":-1], ""), # type: ignore[misc]
(pd.IndexSlice["z":"z":-1], ""), # type: ignore[misc]
(pd.IndexSlice["m":"m":-1], ""), # type: ignore[misc]
],
)
def test_slice_locs_negative_step(self, in_slice, expected):
index = Index(list("bcdxy"))
s_start, s_stop = index.slice_locs(in_slice.start, in_slice.stop, in_slice.step)
result = index[s_start : s_stop : in_slice.step]
expected = pd.Index(list(expected))
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
def test_drop_by_str_label(self, index):
n = len(index)
drop = index[list(range(5, 10))]
dropped = index.drop(drop)
expected = index[list(range(5)) + list(range(10, n))]
tm.assert_index_equal(dropped, expected)
dropped = index.drop(index[0])
expected = index[1:]
tm.assert_index_equal(dropped, expected)
@pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
@pytest.mark.parametrize("keys", [["foo", "bar"], ["1", "bar"]])
def test_drop_by_str_label_raises_missing_keys(self, index, keys):
with pytest.raises(KeyError, match=""):
index.drop(keys)
@pytest.mark.parametrize("index", ["string", "int", "float"], indirect=True)
def test_drop_by_str_label_errors_ignore(self, index):
n = len(index)
drop = index[list(range(5, 10))]
mixed = drop.tolist() + ["foo"]
dropped = index.drop(mixed, errors="ignore")
expected = index[list(range(5)) + list(range(10, n))]
tm.assert_index_equal(dropped, expected)
dropped = index.drop(["foo", "bar"], errors="ignore")
expected = index[list(range(n))]
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_loc(self):
# TODO: Parametrize numeric and str tests after self.strIndex fixture
index = Index([1, 2, 3])
dropped = index.drop(1)
expected = Index([2, 3])
tm.assert_index_equal(dropped, expected)
def test_drop_by_numeric_label_raises_missing_keys(self):
index = Index([1, 2, 3])
with pytest.raises(KeyError, match=""):
index.drop([3, 4])
@pytest.mark.parametrize(
"key,expected", [(4, Index([1, 2, 3])), ([3, 4, 5], Index([1, 2]))]
)
def test_drop_by_numeric_label_errors_ignore(self, key, expected):
index = Index([1, 2, 3])
dropped = index.drop(key, errors="ignore")
tm.assert_index_equal(dropped, expected)
@pytest.mark.parametrize(
"values",
[["a", "b", ("c", "d")], ["a", ("c", "d"), "b"], [("c", "d"), "a", "b"]],
)
@pytest.mark.parametrize("to_drop", [[("c", "d"), "a"], ["a", ("c", "d")]])
def test_drop_tuple(self, values, to_drop):
# GH 18304
index = pd.Index(values)
expected = pd.Index(["b"])
result = index.drop(to_drop)
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[0])
for drop_me in to_drop[1], [to_drop[1]]:
result = removed.drop(drop_me)
tm.assert_index_equal(result, expected)
removed = index.drop(to_drop[1])
msg = fr"\"\[{re.escape(to_drop[1].__repr__())}\] not found in axis\""
for drop_me in to_drop[1], [to_drop[1]]:
with pytest.raises(KeyError, match=msg):
removed.drop(drop_me)
@pytest.mark.parametrize(
"method,expected,sort",
[
(
"intersection",
np.array(
[(1, "A"), (2, "A"), (1, "B"), (2, "B")],
dtype=[("num", int), ("let", "a1")],
),
False,
),
(
"intersection",
np.array(
[(1, "A"), (1, "B"), (2, "A"), (2, "B")],
dtype=[("num", int), ("let", "a1")],
),
None,
),
(
"union",
np.array(
[(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")],
dtype=[("num", int), ("let", "a1")],
),
None,
),
],
)
def test_tuple_union_bug(self, method, expected, sort):
index1 = Index(
np.array(
[(1, "A"), (2, "A"), (1, "B"), (2, "B")],
dtype=[("num", int), ("let", "a1")],
)
)
index2 = Index(
np.array(
[(1, "A"), (2, "A"), (1, "B"), (2, "B"), (1, "C"), (2, "C")],
dtype=[("num", int), ("let", "a1")],
)
)
result = getattr(index1, method)(index2, sort=sort)
assert result.ndim == 1
expected = Index(expected)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"attr",
[
"is_monotonic_increasing",
"is_monotonic_decreasing",
"_is_strictly_monotonic_increasing",
"_is_strictly_monotonic_decreasing",
],
)
def test_is_monotonic_incomparable(self, attr):
index = Index([5, datetime.now(), 7])
assert not getattr(index, attr)
def test_set_value_deprecated(self):
# GH 28621
idx = self.create_index()
arr = np.array([1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
idx.set_value(arr, idx[1], 80)
assert arr[1] == 80
@pytest.mark.parametrize(
"index", ["string", "int", "datetime", "timedelta"], indirect=True
)
def test_get_value(self, index):
# TODO: Remove function? GH 19728
values = np.random.randn(100)
value = index[67]
with pytest.raises(AttributeError, match="has no attribute '_values'"):
# Index.get_value requires a Series, not an ndarray
with tm.assert_produces_warning(FutureWarning):
index.get_value(values, value)
with tm.assert_produces_warning(FutureWarning):
result = index.get_value(Series(values, index=values), value)
tm.assert_almost_equal(result, values[67])
@pytest.mark.parametrize("values", [["foo", "bar", "quux"], {"foo", "bar", "quux"}])
@pytest.mark.parametrize(
"index,expected",
[
(Index(["qux", "baz", "foo", "bar"]), np.array([False, False, True, True])),
(Index([]), np.array([], dtype=bool)), # empty
],
)
def test_isin(self, values, index, expected):
result = index.isin(values)
tm.assert_numpy_array_equal(result, expected)
def test_isin_nan_common_object(self, nulls_fixture, nulls_fixture2):
# Test cartesian product of null fixtures and ensure that we don't
# mangle the various types (save a corner case with PyPy)
# all nans are the same
if (
isinstance(nulls_fixture, float)
and isinstance(nulls_fixture2, float)
and math.isnan(nulls_fixture)
and math.isnan(nulls_fixture2)
):
tm.assert_numpy_array_equal(
Index(["a", nulls_fixture]).isin([nulls_fixture2]),
np.array([False, True]),
)
elif nulls_fixture is nulls_fixture2: # should preserve NA type
tm.assert_numpy_array_equal(
Index(["a", nulls_fixture]).isin([nulls_fixture2]),
np.array([False, True]),
)
else:
tm.assert_numpy_array_equal(
Index(["a", nulls_fixture]).isin([nulls_fixture2]),
np.array([False, False]),
)
def test_isin_nan_common_float64(self, nulls_fixture):
if nulls_fixture is pd.NaT:
pytest.skip("pd.NaT not compatible with Float64Index")
# Float64Index overrides isin, so must be checked separately
if nulls_fixture is pd.NA:
pytest.xfail("Float64Index cannot contain pd.NA")
tm.assert_numpy_array_equal(
Float64Index([1.0, nulls_fixture]).isin([np.nan]), np.array([False, True])
)
# we cannot compare NaT with NaN
tm.assert_numpy_array_equal(
Float64Index([1.0, nulls_fixture]).isin([pd.NaT]), np.array([False, False])
)
@pytest.mark.parametrize("level", [0, -1])
@pytest.mark.parametrize(
"index",
[
Index(["qux", "baz", "foo", "bar"]),
# Float64Index overrides isin, so must be checked separately
Float64Index([1.0, 2.0, 3.0, 4.0]),
],
)
def test_isin_level_kwarg(self, level, index):
values = index.tolist()[-2:] + ["nonexisting"]
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(expected, index.isin(values, level=level))
index.name = "foobar"
tm.assert_numpy_array_equal(expected, index.isin(values, level="foobar"))
def test_isin_level_kwarg_bad_level_raises(self, index):
for level in [10, index.nlevels, -(index.nlevels + 1)]:
with pytest.raises(IndexError, match="Too many levels"):
index.isin([], level=level)
@pytest.mark.parametrize("label", [1.0, "foobar", "xyzzy", np.nan])
def test_isin_level_kwarg_bad_label_raises(self, label, index):
if isinstance(index, MultiIndex):
index = index.rename(["foo", "bar"] + index.names[2:])
msg = f"'Level {label} not found'"
else:
index = index.rename("foo")
msg = fr"Requested level \({label}\) does not match index name \(foo\)"
with pytest.raises(KeyError, match=msg):
index.isin([], level=label)
@pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])
def test_isin_empty(self, empty):
# see gh-16991
index = Index(["a", "b"])
expected = np.array([False, False])
result = index.isin(empty)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize(
"values",
[
[1, 2, 3, 4],
[1.0, 2.0, 3.0, 4.0],
[True, True, True, True],
["foo", "bar", "baz", "qux"],
pd.date_range("2018-01-01", freq="D", periods=4),
],
)
def test_boolean_cmp(self, values):
index = Index(values)
result = index == values
expected = np.array([True, True, True, True], dtype=bool)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("index", ["string"], indirect=True)
@pytest.mark.parametrize("name,level", [(None, 0), ("a", "a")])
def test_get_level_values(self, index, name, level):
expected = index.copy()
if name:
expected.name = name
result = expected.get_level_values(level)
tm.assert_index_equal(result, expected)
def test_slice_keep_name(self):
index = Index(["a", "b"], name="asdf")
assert index.name == index[1:].name
@pytest.mark.parametrize(
"index",
["unicode", "string", "datetime", "int", "uint", "float"],
indirect=True,
)
def test_join_self(self, index, join_type):
joined = index.join(index, how=join_type)
assert index is joined
@pytest.mark.parametrize("method", ["strip", "rstrip", "lstrip"])
def test_str_attribute(self, method):
# GH9068
index = Index([" jack", "jill ", " jesse ", "frank"])
expected = Index([getattr(str, method)(x) for x in index.values])
result = getattr(index.str, method)()
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"index",
[
Index(range(5)),
tm.makeDateIndex(10),
MultiIndex.from_tuples([("foo", "1"), ("bar", "3")]),
period_range(start="2000", end="2010", freq="A"),
],
)
def test_str_attribute_raises(self, index):
with pytest.raises(AttributeError, match="only use .str accessor"):
index.str.repeat(2)
@pytest.mark.parametrize(
"expand,expected",
[
(None, Index([["a", "b", "c"], ["d", "e"], ["f"]])),
(False, Index([["a", "b", "c"], ["d", "e"], ["f"]])),
(
True,
MultiIndex.from_tuples(
[("a", "b", "c"), ("d", "e", np.nan), ("f", np.nan, np.nan)]
),
),
],
)
def test_str_split(self, expand, expected):
index = Index(["a b c", "d e", "f"])
if expand is not None:
result = index.str.split(expand=expand)
else:
result = index.str.split()
tm.assert_index_equal(result, expected)
def test_str_bool_return(self):
# test boolean case, should return np.array instead of boolean Index
index = Index(["a1", "a2", "b1", "b2"])
result = index.str.startswith("a")
expected = np.array([True, True, False, False])
tm.assert_numpy_array_equal(result, expected)
assert isinstance(result, np.ndarray)
def test_str_bool_series_indexing(self):
index = Index(["a1", "a2", "b1", "b2"])
s = Series(range(4), index=index)
result = s[s.index.str.startswith("a")]
expected = Series(range(2), index=["a1", "a2"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"index,expected", [(Index(list("abcd")), True), (Index(range(4)), False)]
)
def test_tab_completion(self, index, expected):
# GH 9910
result = "str" in dir(index)
assert result == expected
def test_indexing_doesnt_change_class(self):
index = Index([1, 2, 3, "a", "b", "c"])
assert index[1:3].identical(pd.Index([2, 3], dtype=np.object_))
assert index[[0, 1]].identical(pd.Index([1, 2], dtype=np.object_))
def test_outer_join_sort(self):
left_index = Index(np.random.permutation(15))
right_index = tm.makeDateIndex(10)
with tm.assert_produces_warning(RuntimeWarning):
result = left_index.join(right_index, how="outer")
# right_index in this case because DatetimeIndex has join precedence
# over Int64Index
with tm.assert_produces_warning(RuntimeWarning):
expected = right_index.astype(object).union(left_index.astype(object))
tm.assert_index_equal(result, expected)
def test_nan_first_take_datetime(self):
index = Index([pd.NaT, Timestamp("20130101"), Timestamp("20130102")])
result = index.take([-1, 0, 1])
expected = Index([index[-1], index[0], index[1]])
tm.assert_index_equal(result, expected)
def test_take_fill_value(self):
# GH 12631
index = pd.Index(list("ABC"), name="xxx")
result = index.take(np.array([1, 0, -1]))
expected = pd.Index(list("BAC"), name="xxx")
tm.assert_index_equal(result, expected)
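        # with allow_fill=True and a fill_value, -1 marks a missing slot filled
        # with NaN; with allow_fill=False, -1 just means "take the last element"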
# fill_value
result = index.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.Index(["B", "A", np.nan], name="xxx")
tm.assert_index_equal(result, expected)
# allow_fill=False
result = index.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.Index(["B", "A", "C"], name="xxx")
tm.assert_index_equal(result, expected)
def test_take_fill_value_none_raises(self):
index = pd.Index(list("ABC"), name="xxx")
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
index.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
index.take(np.array([1, 0, -5]), fill_value=True)
def test_take_bad_bounds_raises(self):
index = pd.Index(list("ABC"), name="xxx")
with pytest.raises(IndexError, match="out of bounds"):
index.take(np.array([1, -5]))
@pytest.mark.parametrize("name", [None, "foobar"])
@pytest.mark.parametrize(
"labels",
[
[],
np.array([]),
["A", "B", "C"],
["C", "B", "A"],
np.array(["A", "B", "C"]),
np.array(["C", "B", "A"]),
# Must preserve name even if dtype changes
pd.date_range("20130101", periods=3).values,
pd.date_range("20130101", periods=3).tolist(),
],
)
def test_reindex_preserves_name_if_target_is_list_or_ndarray(self, name, labels):
# GH6552
index = pd.Index([0, 1, 2])
index.name = name
assert index.reindex(labels)[0].name == name
@pytest.mark.parametrize("labels", [[], np.array([]), np.array([], dtype=np.int64)])
def test_reindex_preserves_type_if_target_is_empty_list_or_array(self, labels):
# GH7774
index = pd.Index(list("abc"))
assert index.reindex(labels)[0].dtype.type == np.object_
@pytest.mark.parametrize(
"labels,dtype",
[
(pd.Int64Index([]), np.int64),
(pd.Float64Index([]), np.float64),
(pd.DatetimeIndex([]), np.datetime64),
],
)
def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self, labels, dtype):
# GH7774
index = pd.Index(list("abc"))
assert index.reindex(labels)[0].dtype.type == dtype
def test_reindex_no_type_preserve_target_empty_mi(self):
index = pd.Index(list("abc"))
result = index.reindex(
pd.MultiIndex([pd.Int64Index([]), pd.Float64Index([])], [[], []])
)[0]
assert result.levels[0].dtype.type == np.int64
assert result.levels[1].dtype.type == np.float64
def test_groupby(self):
index = Index(range(5))
result = index.groupby(np.array([1, 1, 2, 2, 2]))
expected = {1: pd.Index([0, 1]), 2: pd.Index([2, 3, 4])}
tm.assert_dict_equal(result, expected)
@pytest.mark.parametrize(
"mi,expected",
[
(MultiIndex.from_tuples([(1, 2), (4, 5)]), np.array([True, True])),
(MultiIndex.from_tuples([(1, 2), (4, 6)]), np.array([True, False])),
],
)
def test_equals_op_multiindex(self, mi, expected):
# GH9785
# test comparisons of multiindex
df = pd.read_csv(StringIO("a,b,c\n1,2,3\n4,5,6"), index_col=[0, 1])
result = df.index == mi
tm.assert_numpy_array_equal(result, expected)
def test_equals_op_multiindex_identify(self):
df = pd.read_csv(StringIO("a,b,c\n1,2,3\n4,5,6"), index_col=[0, 1])
result = df.index == df.index
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"index",
[
MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]),
Index(["foo", "bar", "baz"]),
],
)
def test_equals_op_mismatched_multiindex_raises(self, index):
df = pd.read_csv(StringIO("a,b,c\n1,2,3\n4,5,6"), index_col=[0, 1])
with pytest.raises(ValueError, match="Lengths must match"):
df.index == index
def test_equals_op_index_vs_mi_same_length(self):
mi = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])
index = Index(["foo", "bar", "baz"])
result = mi == index
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dt_conv", [pd.to_datetime, pd.to_timedelta])
def test_dt_conversion_preserves_name(self, dt_conv):
# GH 10875
index = pd.Index(["01:02:03", "01:02:04"], name="label")
assert index.name == dt_conv(index).name
@pytest.mark.parametrize(
"index,expected",
[
# ASCII
# short
(
pd.Index(["a", "bb", "ccc"]),
"""Index(['a', 'bb', 'ccc'], dtype='object')""",
),
# multiple lines
(
pd.Index(["a", "bb", "ccc"] * 10),
"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc',
'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object')""",
),
# truncated
(
pd.Index(["a", "bb", "ccc"] * 100),
"""\
Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',
...
'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],
dtype='object', length=300)""",
),
# Non-ASCII
# short
(
pd.Index(["あ", "いい", "ううう"]),
"""Index(['あ', 'いい', 'ううう'], dtype='object')""",
),
# multiple lines
(
pd.Index(["あ", "いい", "ううう"] * 10),
(
"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
"'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう'],\n"
" dtype='object')"
),
),
# truncated
(
pd.Index(["あ", "いい", "ううう"] * 100),
(
"Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "
"'あ', 'いい', 'ううう', 'あ',\n"
" ...\n"
" 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう'],\n"
" dtype='object', length=300)"
),
),
],
)
def test_string_index_repr(self, index, expected):
result = repr(index)
assert result == expected
@pytest.mark.parametrize(
"index,expected",
[
# short
(
pd.Index(["あ", "いい", "ううう"]),
("Index(['あ', 'いい', 'ううう'], dtype='object')"),
),
# multiple lines
(
pd.Index(["あ", "いい", "ううう"] * 10),
(
"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ', 'いい', 'ううう'],\n"
" dtype='object')"
""
),
),
# truncated
(
pd.Index(["あ", "いい", "ううう"] * 100),
(
"Index(['あ', 'いい', 'ううう', 'あ', 'いい', "
"'ううう', 'あ', 'いい', 'ううう',\n"
" 'あ',\n"
" ...\n"
" 'ううう', 'あ', 'いい', 'ううう', 'あ', "
"'いい', 'ううう', 'あ', 'いい',\n"
" 'ううう'],\n"
" dtype='object', length=300)"
),
),
],
)
def test_string_index_repr_with_unicode_option(self, index, expected):
# Enable Unicode option -----------------------------------------
with cf.option_context("display.unicode.east_asian_width", True):
result = repr(index)
assert result == expected
def test_cached_properties_not_settable(self):
index = pd.Index([1, 2, 3])
with pytest.raises(AttributeError, match="Can't set attribute"):
index.is_unique = False
@async_mark()
async def test_tab_complete_warning(self, ip):
# https://github.com/pandas-dev/pandas/issues/16409
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.completer import provisionalcompleter
code = "import pandas as pd; idx = pd.Index([1, 2])"
await ip.run_code(code)
# GH 31324 newer jedi version raises Deprecation warning
import jedi
if jedi.__version__ < "0.16.0":
warning = tm.assert_produces_warning(None)
else:
warning = tm.assert_produces_warning(
DeprecationWarning, check_stacklevel=False
)
with warning:
with provisionalcompleter("ignore"):
list(ip.Completer.completions("idx.", 4))
def test_contains_method_removed(self, index):
# GH#30103 method removed for all types except IntervalIndex
if isinstance(index, pd.IntervalIndex):
index.contains(1)
else:
msg = f"'{type(index).__name__}' object has no attribute 'contains'"
with pytest.raises(AttributeError, match=msg):
index.contains(1)
class TestMixedIntIndex(Base):
# Mostly the tests from common.py for which the results differ
# in py2 and py3 because ints and strings are uncomparable in py3
# (GH 13514)
_holder = Index
@pytest.fixture(params=[[0, "a", 1, "b", 2, "c"]], ids=["mixedIndex"])
def index(self, request):
return Index(request.param)
def create_index(self) -> Index:
return Index([0, "a", 1, "b", 2, "c"])
def test_argsort(self):
index = self.create_index()
with pytest.raises(TypeError, match="'>|<' not supported"):
index.argsort()
def test_numpy_argsort(self):
index = self.create_index()
with pytest.raises(TypeError, match="'>|<' not supported"):
np.argsort(index)
def test_copy_name(self):
# Check that "name" argument passed at initialization is honoured
# GH12309
index = self.create_index()
first = type(index)(index, copy=True, name="mario")
second = type(first)(first, copy=False)
# Even though "copy=False", we want a new object.
assert first is not second
tm.assert_index_equal(first, second)
assert first.name == "mario"
assert second.name == "mario"
s1 = Series(2, index=first)
s2 = Series(3, index=second[:-1])
s3 = s1 * s2
assert s3.index.name == "mario"
def test_copy_name2(self):
# Check that adding a "name" parameter to the copy is honored
# GH14302
index = pd.Index([1, 2], name="MyName")
index1 = index.copy()
tm.assert_index_equal(index, index1)
index2 = index.copy(name="NewName")
tm.assert_index_equal(index, index2, check_names=False)
assert index.name == "MyName"
assert index2.name == "NewName"
index3 = index.copy(names=["NewName"])
tm.assert_index_equal(index, index3, check_names=False)
assert index.name == "MyName"
assert index.names == ["MyName"]
assert index3.name == "NewName"
assert index3.names == ["NewName"]
def test_unique_na(self):
idx = pd.Index([2, np.nan, 2, 1], name="my_index")
expected = pd.Index([2, np.nan, 1], name="my_index")
result = idx.unique()
tm.assert_index_equal(result, expected)
def test_logical_compat(self):
index = self.create_index()
assert index.all() == index.values.all()
assert index.any() == index.values.any()
@pytest.mark.parametrize("how", ["any", "all"])
@pytest.mark.parametrize("dtype", [None, object, "category"])
@pytest.mark.parametrize(
"vals,expected",
[
([1, 2, 3], [1, 2, 3]),
([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]),
([1.0, 2.0, np.nan, 3.0], [1.0, 2.0, 3.0]),
(["A", "B", "C"], ["A", "B", "C"]),
(["A", np.nan, "B", "C"], ["A", "B", "C"]),
],
)
def test_dropna(self, how, dtype, vals, expected):
# GH 6194
index = pd.Index(vals, dtype=dtype)
result = index.dropna(how=how)
expected = pd.Index(expected, dtype=dtype)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("how", ["any", "all"])
@pytest.mark.parametrize(
"index,expected",
[
(
pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),
pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),
),
(
pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", pd.NaT]),
pd.DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),
),
(
pd.TimedeltaIndex(["1 days", "2 days", "3 days"]),
pd.TimedeltaIndex(["1 days", "2 days", "3 days"]),
),
(
pd.TimedeltaIndex([pd.NaT, "1 days", "2 days", "3 days", pd.NaT]),
pd.TimedeltaIndex(["1 days", "2 days", "3 days"]),
),
(
pd.PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),
pd.PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),
),
(
pd.PeriodIndex(["2012-02", "2012-04", "NaT", "2012-05"], freq="M"),
pd.PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),
),
],
)
def test_dropna_dt_like(self, how, index, expected):
result = index.dropna(how=how)
tm.assert_index_equal(result, expected)
def test_dropna_invalid_how_raises(self):
msg = "invalid how option: xxx"
with pytest.raises(ValueError, match=msg):
pd.Index([1, 2, 3]).dropna(how="xxx")
def test_get_combined_index(self):
result = _get_combined_index([])
expected = Index([])
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"index",
[
pd.Index([np.nan]),
pd.Index([np.nan, 1]),
pd.Index([1, 2, np.nan]),
pd.Index(["a", "b", np.nan]),
pd.to_datetime(["NaT"]),
pd.to_datetime(["NaT", "2000-01-01"]),
pd.to_datetime(["2000-01-01", "NaT", "2000-01-02"]),
pd.to_timedelta(["1 day", "NaT"]),
],
)
def test_is_monotonic_na(self, index):
assert index.is_monotonic_increasing is False
assert index.is_monotonic_decreasing is False
assert index._is_strictly_monotonic_increasing is False
assert index._is_strictly_monotonic_decreasing is False
def test_repr_summary(self):
with cf.option_context("display.max_seq_items", 10):
result = repr(pd.Index(np.arange(1000)))
assert len(result) < 200
assert "..." in result
@pytest.mark.parametrize("klass", [Series, DataFrame])
def test_int_name_format(self, klass):
index = Index(["a", "b", "c"], name=0)
result = klass(list(range(3)), index=index)
assert "0" in repr(result)
def test_str_to_bytes_raises(self):
# GH 26447
index = Index([str(x) for x in range(10)])
msg = "^'str' object cannot be interpreted as an integer$"
with pytest.raises(TypeError, match=msg):
bytes(index)
def test_intersect_str_dates(self):
dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]
index1 = Index(dt_dates, dtype=object)
index2 = Index(["aa"], dtype=object)
result = index2.intersection(index1)
expected = Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_index_repr_bool_nan(self):
# GH32146
arr = Index([True, False, np.nan], dtype=object)
exp1 = arr.format()
out1 = ["True", "False", "NaN"]
assert out1 == exp1
exp2 = repr(arr)
out2 = "Index([True, False, nan], dtype='object')"
assert out2 == exp2
@pytest.mark.filterwarnings("ignore:elementwise comparison failed:FutureWarning")
def test_index_with_tuple_bool(self):
# GH34123
# TODO: remove tupleize_cols=False once correct behaviour is restored
# TODO: also this op right now produces FutureWarning from numpy
idx = Index([("a", "b"), ("b", "c"), ("c", "a")], tupleize_cols=False)
result = idx == ("c", "a")
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
class TestIndexUtils:
@pytest.mark.parametrize(
"data, names, expected",
[
([[1, 2, 3]], None, Index([1, 2, 3])),
([[1, 2, 3]], ["name"], Index([1, 2, 3], name="name")),
(
[["a", "a"], ["c", "d"]],
None,
MultiIndex([["a"], ["c", "d"]], [[0, 0], [0, 1]]),
),
(
[["a", "a"], ["c", "d"]],
["L1", "L2"],
MultiIndex([["a"], ["c", "d"]], [[0, 0], [0, 1]], names=["L1", "L2"]),
),
],
)
def test_ensure_index_from_sequences(self, data, names, expected):
result = ensure_index_from_sequences(data, names)
tm.assert_index_equal(result, expected)
def test_ensure_index_mixed_closed_intervals(self):
# GH27172
intervals = [
pd.Interval(0, 1, closed="left"),
pd.Interval(1, 2, closed="right"),
pd.Interval(2, 3, closed="neither"),
pd.Interval(3, 4, closed="both"),
]
result = ensure_index(intervals)
expected = Index(intervals, dtype=object)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"opname",
[
"eq",
"ne",
"le",
"lt",
"ge",
"gt",
"add",
"radd",
"sub",
"rsub",
"mul",
"rmul",
"truediv",
"rtruediv",
"floordiv",
"rfloordiv",
"pow",
"rpow",
"mod",
"divmod",
],
)
def test_generated_op_names(opname, index):
if isinstance(index, ABCIndex) and opname == "rsub":
# pd.Index.__rsub__ does not exist; though the method does exist
# for subclasses. see GH#19723
return
opname = f"__{opname}__"
method = getattr(index, opname)
assert method.__name__ == opname
@pytest.mark.parametrize("index_maker", tm.index_subclass_makers_generator())
def test_index_subclass_constructor_wrong_kwargs(index_maker):
# GH #19348
with pytest.raises(TypeError, match="unexpected keyword argument"):
index_maker(foo="bar")
def test_deprecated_fastpath():
msg = "[Uu]nexpected keyword argument"
with pytest.raises(TypeError, match=msg):
pd.Index(np.array(["a", "b"], dtype=object), name="test", fastpath=True)
with pytest.raises(TypeError, match=msg):
pd.Int64Index(np.array([1, 2, 3], dtype="int64"), name="test", fastpath=True)
with pytest.raises(TypeError, match=msg):
pd.RangeIndex(0, 5, 2, name="test", fastpath=True)
with pytest.raises(TypeError, match=msg):
pd.CategoricalIndex(["a", "b", "c"], name="test", fastpath=True)
def test_shape_of_invalid_index():
# Currently, it is possible to create "invalid" index objects backed by
# a multi-dimensional array (see https://github.com/pandas-dev/pandas/issues/27125
    # about this). However, as long as this is not solved in general, this test ensures
# that the returned shape is consistent with this underlying array for
# compat with matplotlib (see https://github.com/pandas-dev/pandas/issues/27775)
idx = pd.Index([0, 1, 2, 3])
with tm.assert_produces_warning(FutureWarning):
# GH#30588 multi-dimensional indexing deprecated
assert idx[:, None].shape == (4, 1)
def test_validate_1d_input():
# GH#27125 check that we do not have >1-dimensional input
msg = "Index data must be 1-dimensional"
arr = np.arange(8).reshape(2, 2, 2)
with pytest.raises(ValueError, match=msg):
pd.Index(arr)
with pytest.raises(ValueError, match=msg):
pd.Float64Index(arr.astype(np.float64))
with pytest.raises(ValueError, match=msg):
pd.Int64Index(arr.astype(np.int64))
with pytest.raises(ValueError, match=msg):
pd.UInt64Index(arr.astype(np.uint64))
df = pd.DataFrame(arr.reshape(4, 2))
with pytest.raises(ValueError, match=msg):
pd.Index(df)
# GH#13601 trying to assign a multi-dimensional array to an index is not
# allowed
ser = pd.Series(0, range(4))
with pytest.raises(ValueError, match=msg):
ser.index = np.array([[2, 3]] * 4)
def test_convert_almost_null_slice(index):
# slice with None at both ends, but not step
key = slice(None, None, "foo")
if isinstance(index, pd.IntervalIndex):
msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
with pytest.raises(ValueError, match=msg):
index._convert_slice_indexer(key, "loc")
else:
msg = "'>=' not supported between instances of 'str' and 'int'"
with pytest.raises(TypeError, match=msg):
index._convert_slice_indexer(key, "loc")
dtlike_dtypes = [
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
pd.DatetimeTZDtype("ns", "Asia/Tokyo"),
pd.PeriodDtype("ns"),
]
@pytest.mark.parametrize("ldtype", dtlike_dtypes)
@pytest.mark.parametrize("rdtype", dtlike_dtypes)
def test_get_indexer_non_unique_wrong_dtype(ldtype, rdtype):
vals = np.tile(3600 * 10 ** 9 * np.arange(3), 2)
def construct(dtype):
if dtype is dtlike_dtypes[-1]:
# PeriodArray will try to cast ints to strings
return pd.DatetimeIndex(vals).astype(dtype)
return pd.Index(vals, dtype=dtype)
left = construct(ldtype)
right = construct(rdtype)
result = left.get_indexer_non_unique(right)
if ldtype is rdtype:
ex1 = np.array([0, 3, 1, 4, 2, 5] * 2, dtype=np.intp)
ex2 = np.array([], dtype=np.intp)
tm.assert_numpy_array_equal(result[0], ex1)
tm.assert_numpy_array_equal(result[1], ex2)
else:
no_matches = np.array([-1] * 6, dtype=np.intp)
tm.assert_numpy_array_equal(result[0], no_matches)
tm.assert_numpy_array_equal(result[1], no_matches)
| bsd-3-clause |
kennethgillen/ansible | lib/ansible/modules/network/ldap_attr.py | 28 | 11014 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Peter Sagerson <[email protected]>
# (c) 2016, Jiri Tyr <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: ldap_attr
short_description: Add or remove LDAP attribute values.
description:
- Add or remove LDAP attribute values.
notes:
- This only deals with attributes on existing entries. To add or remove
whole entries, see M(ldap_entry).
- The default authentication settings will attempt to use a SASL EXTERNAL
bind over a UNIX domain socket. This works well with the default Ubuntu
install for example, which includes a cn=peercred,cn=external,cn=auth ACL
rule allowing root to modify the server configuration. If you need to use
a simple bind to access your server, pass the credentials in I(bind_dn)
and I(bind_pw).
- For I(state=present) and I(state=absent), all value comparisons are
performed on the server for maximum accuracy. For I(state=exact), values
have to be compared in Python, which obviously ignores LDAP matching
rules. This should work out in most cases, but it is theoretically
possible to see spurious changes when target and actual values are
semantically identical but lexically distinct.
version_added: '2.3'
author:
- Jiri Tyr (@jtyr)
requirements:
- python-ldap
options:
bind_dn:
required: false
default: null
description:
- A DN to bind with. If this is omitted, we'll try a SASL bind with
the EXTERNAL mechanism. If this is blank, we'll use an anonymous
bind.
bind_pw:
required: false
default: null
description:
- The password to use with I(bind_dn).
dn:
required: true
description:
- The DN of the entry to modify.
name:
required: true
description:
- The name of the attribute to modify.
server_uri:
required: false
default: ldapi:///
description:
- A URI to the LDAP server. The default value lets the underlying
LDAP client library look for a UNIX domain socket in its default
location.
start_tls:
required: false
choices: ['yes', 'no']
default: 'no'
description:
- If true, we'll use the START_TLS LDAP extension.
state:
required: false
choices: [present, absent, exact]
default: present
description:
- The state of the attribute values. If C(present), all given
values will be added if they're missing. If C(absent), all given
values will be removed if present. If C(exact), the set of values
will be forced to exactly those provided and no others. If
I(state=exact) and I(value) is empty, all values for this
attribute will be removed.
values:
required: true
description:
- The value(s) to add or remove. This can be a string or a list of
strings. The complex argument format is required in order to pass
a list of strings (see examples).
"""
EXAMPLES = """
- name: Configure directory number 1 for example.com
ldap_attr:
dn: olcDatabase={1}hdb,cn=config
name: olcSuffix
values: dc=example,dc=com
state: exact
# The complex argument format is required here to pass a list of ACL strings.
- name: Set up the ACL
ldap_attr:
dn: olcDatabase={1}hdb,cn=config
name: olcAccess
values:
- >-
{0}to attrs=userPassword,shadowLastChange
by self write
by anonymous auth
by dn="cn=admin,dc=example,dc=com" write
        by * none
- >-
{1}to dn.base="dc=example,dc=com"
by dn="cn=admin,dc=example,dc=com" write
by * read
state: exact
- name: Declare some indexes
ldap_attr:
dn: olcDatabase={1}hdb,cn=config
name: olcDbIndex
values: "{{ item }}"
with_items:
- objectClass eq
- uid eq
- name: Set up a root user, which we can use later to bootstrap the directory
ldap_attr:
dn: olcDatabase={1}hdb,cn=config
name: "{{ item.key }}"
values: "{{ item.value }}"
state: exact
with_dict:
olcRootDN: cn=root,dc=example,dc=com
olcRootPW: "{SSHA}tabyipcHzhwESzRaGA7oQ/SDoBZQOGND"
- name: Get rid of an unneeded attribute
ldap_attr:
dn: uid=jdoe,ou=people,dc=example,dc=com
name: shadowExpire
values: ""
state: exact
server_uri: ldap://localhost/
bind_dn: cn=admin,dc=example,dc=com
bind_pw: password
#
# The same as in the previous example but with the authentication details
# stored in the ldap_auth variable:
#
# ldap_auth:
# server_uri: ldap://localhost/
# bind_dn: cn=admin,dc=example,dc=com
# bind_pw: password
- name: Get rid of an unneeded attribute
ldap_attr:
dn: uid=jdoe,ou=people,dc=example,dc=com
name: shadowExpire
values: ""
state: exact
params: "{{ ldap_auth }}"
"""
RETURN = """
modlist:
description: list of modified parameters
returned: success
type: list
sample: '[[2, "olcRootDN", ["cn=root,dc=example,dc=com"]]]'
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
try:
import ldap
import ldap.sasl
HAS_LDAP = True
except ImportError:
HAS_LDAP = False
class LdapAttr(object):
def __init__(self, module):
# Shortcuts
self.module = module
self.bind_dn = self.module.params['bind_dn']
self.bind_pw = self.module.params['bind_pw']
self.dn = self.module.params['dn']
self.name = self.module.params['name']
self.server_uri = self.module.params['server_uri']
self.start_tls = self.module.params['start_tls']
self.state = self.module.params['state']
# Normalize values
if isinstance(self.module.params['values'], list):
self.values = map(str, self.module.params['values'])
else:
self.values = [str(self.module.params['values'])]
# Establish connection
self.connection = self._connect_to_ldap()
def add(self):
values_to_add = filter(self._is_value_absent, self.values)
if len(values_to_add) > 0:
modlist = [(ldap.MOD_ADD, self.name, values_to_add)]
else:
modlist = []
return modlist
def delete(self):
values_to_delete = filter(self._is_value_present, self.values)
if len(values_to_delete) > 0:
modlist = [(ldap.MOD_DELETE, self.name, values_to_delete)]
else:
modlist = []
return modlist
def exact(self):
try:
results = self.connection.search_s(
self.dn, ldap.SCOPE_BASE, attrlist=[self.name])
except ldap.LDAPError:
e = get_exception()
self.module.fail_json(
msg="Cannot search for attribute %s" % self.name,
details=str(e))
current = results[0][1].get(self.name, [])
modlist = []
if frozenset(self.values) != frozenset(current):
if len(current) == 0:
modlist = [(ldap.MOD_ADD, self.name, self.values)]
elif len(self.values) == 0:
modlist = [(ldap.MOD_DELETE, self.name, None)]
else:
modlist = [(ldap.MOD_REPLACE, self.name, self.values)]
return modlist
def _is_value_present(self, value):
""" True if the target attribute has the given value. """
try:
is_present = bool(
self.connection.compare_s(self.dn, self.name, value))
except ldap.NO_SUCH_ATTRIBUTE:
is_present = False
return is_present
def _is_value_absent(self, value):
""" True if the target attribute doesn't have the given value. """
return not self._is_value_present(value)
def _connect_to_ldap(self):
connection = ldap.initialize(self.server_uri)
if self.start_tls:
try:
connection.start_tls_s()
except ldap.LDAPError:
e = get_exception()
self.module.fail_json(msg="Cannot start TLS.", details=str(e))
try:
if self.bind_dn is not None:
connection.simple_bind_s(self.bind_dn, self.bind_pw)
else:
connection.sasl_interactive_bind_s('', ldap.sasl.external())
except ldap.LDAPError:
e = get_exception()
self.module.fail_json(
msg="Cannot bind to the server.", details=str(e))
return connection
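# Hedged illustration (not part of the original module): the modlist tuples built
# by LdapAttr above are the same 3-tuples that python-ldap's modify_s() consumes
# in main() below. Attribute names and values here are placeholders only.
def _example_modlists():  # hypothetical helper, never called by the module
    add_modlist = [(ldap.MOD_ADD, 'olcDbIndex', ['uid eq'])]  # state=present
    delete_modlist = [(ldap.MOD_DELETE, 'olcDbIndex', ['uid eq'])]  # state=absent
    replace_modlist = [(ldap.MOD_REPLACE, 'olcSuffix', ['dc=example,dc=com'])]  # state=exact
    return add_modlist, delete_modlist, replace_modlist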
def main():
module = AnsibleModule(
argument_spec={
'bind_dn': dict(default=None),
'bind_pw': dict(default='', no_log=True),
'dn': dict(required=True),
'name': dict(required=True),
'params': dict(type='dict'),
'server_uri': dict(default='ldapi:///'),
'start_tls': dict(default=False, type='bool'),
'state': dict(
default='present',
choices=['present', 'absent', 'exact']),
'values': dict(required=True, type='raw'),
},
supports_check_mode=True,
)
if not HAS_LDAP:
module.fail_json(
msg="Missing requried 'ldap' module (pip install python-ldap)")
# Update module parameters with user's parameters if defined
if 'params' in module.params and isinstance(module.params['params'], dict):
module.params.update(module.params['params'])
# Remove the params
module.params.pop('params', None)
# Instantiate the LdapAttr object
ldap = LdapAttr(module)
state = module.params['state']
# Perform action
if state == 'present':
modlist = ldap.add()
elif state == 'absent':
modlist = ldap.delete()
elif state == 'exact':
modlist = ldap.exact()
changed = False
if len(modlist) > 0:
changed = True
if not module.check_mode:
try:
ldap.connection.modify_s(ldap.dn, modlist)
except Exception:
e = get_exception()
module.fail_json(
msg="Attribute action failed.", details=str(e))
module.exit_json(changed=changed, modlist=modlist)
if __name__ == '__main__':
main()
| gpl-3.0 |
cchurch/ansible-modules-core | network/junos/junos_config.py | 11 | 11221 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: junos_config
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage configuration on devices running Juniper JUNOS
description:
- This module provides an implementation for working with the active
configuration running on Juniper JUNOS devices. It provides a set
of arguments for loading configuration, performing rollback operations
and zeroing the active configuration on the device.
extends_documentation_fragment: junos
options:
lines:
description:
- This argument takes a list of C(set) or C(delete) configuration
lines to push into the remote device. Each line must start with
either C(set) or C(delete). This argument is mutually exclusive
with the I(src) argument.
required: false
default: null
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) argument.
required: false
default: null
version_added: "2.2"
src_format:
description:
- The I(src_format) argument specifies the format of the configuration
        found in I(src). If the I(src_format) argument is not provided,
the module will attempt to determine the format of the configuration
file specified in I(src).
required: false
default: null
choices: ['xml', 'set', 'text', 'json']
version_added: "2.2"
rollback:
description:
- The C(rollback) argument instructs the module to rollback the
current configuration to the identifier specified in the
argument. If the specified rollback identifier does not
exist on the remote device, the module will fail. To rollback
to the most recent commit, set the C(rollback) argument to 0.
required: false
default: null
zeroize:
description:
- The C(zeroize) argument is used to completely sanitize the
remote device configuration back to initial defaults. This
argument will effectively remove all current configuration
statements on the remote device.
required: false
default: null
confirm:
description:
- The C(confirm) argument will configure a time out value for
the commit to be confirmed before it is automatically
rolled back. If the C(confirm) argument is set to False, this
argument is silently ignored. If the value for this argument
is set to 0, the commit is confirmed immediately.
required: false
default: 0
comment:
description:
- The C(comment) argument specifies a text string to be used
when committing the configuration. If the C(confirm) argument
is set to False, this argument is silently ignored.
required: false
default: configured by junos_config
replace:
description:
- The C(replace) argument will instruct the remote device to
replace the current configuration hierarchy with the one specified
in the corresponding hierarchy of the source configuration loaded
from this module.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the I(update) argument to C(replace). This argument
will be removed in a future release.
required: false
choices: ['yes', 'no']
default: false
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. The backup file is written to the C(backup)
folder in the playbook root directory. If the directory does not
exist, it is created.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
requirements:
- junos-eznc
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
netconf:
host: "{{ inventory_hostname }}"
username: ansible
password: Ansible
- name: load configure file into device
junos_config:
src: srx.cfg
comment: update config
provider: "{{ netconf }}"
- name: rollback the configuration to id 10
junos_config:
rollback: 10
provider: "{{ netconf }}"
- name: zero out the current configuration
junos_config:
zeroize: yes
provider: "{{ netconf }}"
- name: confirm a previous commit
junos_config:
provider: "{{ netconf }}"
"""
RETURN = """
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: path
sample: /playbooks/ansible/backup/config.2016-07-16@22:28:34
"""
import json
from xml.etree import ElementTree
import ansible.module_utils.junos
from ansible.module_utils.basic import get_exception
from ansible.module_utils.network import NetworkModule, NetworkError
from ansible.module_utils.netcfg import NetworkConfig
DEFAULT_COMMENT = 'configured by junos_config'
def guess_format(config):
try:
json.loads(config)
return 'json'
except ValueError:
pass
try:
ElementTree.fromstring(config)
return 'xml'
except ElementTree.ParseError:
pass
if config.startswith('set') or config.startswith('delete'):
return 'set'
return 'text'
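# Hedged illustration (not part of the original module): guess_format() tries JSON
# first, then XML, then falls back on the 'set'/'delete' prefix check. The sample
# snippets below are placeholders.
def _example_guess_format():  # hypothetical helper, never called by the module
    assert guess_format('{"system": {"host-name": "r1"}}') == 'json'
    assert guess_format('<configuration></configuration>') == 'xml'
    assert guess_format('set system host-name r1') == 'set'
    assert guess_format('system { host-name r1; }') == 'text'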
def config_to_commands(config):
set_format = config.startswith('set') or config.startswith('delete')
candidate = NetworkConfig(indent=4, contents=config, device_os='junos')
if not set_format:
candidate = [c.line for c in candidate.items]
commands = list()
# this filters out less specific lines
for item in candidate:
for index, entry in enumerate(commands):
if item.startswith(entry):
del commands[index]
break
commands.append(item)
else:
commands = str(candidate).split('\n')
return commands
def diff_commands(commands, config):
config = [unicode(c).replace("'", '') for c in config]
updates = list()
visited = set()
for item in commands:
if len(item) > 0:
if not item.startswith('set') and not item.startswith('delete'):
raise ValueError('line must start with either `set` or `delete`')
elif item.startswith('set') and item[4:] not in config:
updates.append(item)
elif item.startswith('delete'):
for entry in config:
if entry.startswith(item[7:]) and item not in visited:
updates.append(item)
visited.add(item)
return updates
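# Hedged illustration (not part of the original module): diff_commands() keeps
# 'set' lines whose body is missing from the flattened running config and drops
# 'delete' lines that match nothing. The configuration lines below are placeholders.
def _example_diff_commands():  # hypothetical helper, never called by the module
    running = ['system host-name r1']  # flattened config lines carry no 'set' prefix
    wanted = [
        'set system host-name r1',  # already present -> dropped
        'set system services ssh',  # missing -> kept
        'delete system services telnet',  # matches nothing -> dropped
    ]
    return diff_commands(wanted, running)  # -> ['set system services ssh']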
def load_config(module, result):
candidate = module.params['lines'] or module.params['src']
if isinstance(candidate, basestring):
candidate = candidate.split('\n')
kwargs = dict()
kwargs['comment'] = module.params['comment']
kwargs['confirm'] = module.params['confirm']
kwargs['replace'] = module.params['replace']
kwargs['commit'] = not module.check_mode
if module.params['src']:
config_format = module.params['src_format'] or guess_format(candidate)
elif module.params['lines']:
config_format = 'set'
kwargs['config_format'] = config_format
# this is done to filter out `delete ...` statements which map to
# nothing in the config as that will cause an exception to be raised
if config_format == 'set':
config = module.config.get_config()
config = config_to_commands(config)
candidate = diff_commands(candidate, config)
diff = module.config.load_config(candidate, **kwargs)
if diff:
result['changed'] = True
result['diff'] = dict(prepared=diff)
def rollback_config(module, result):
rollback = module.params['rollback']
    kwargs = dict(comment=module.params['comment'],
commit=not module.check_mode)
diff = module.connection.rollback_config(rollback, **kwargs)
if diff:
result['changed'] = True
result['diff'] = dict(prepared=diff)
def zeroize_config(module, result):
if not module.check_mode:
module.cli.run_commands('request system zeroize')
result['changed'] = True
def confirm_config(module, result):
checkonly = module.check_mode
result['changed'] = module.connection.confirm_commit(checkonly)
def run(module, result):
if module.params['rollback']:
return rollback_config(module, result)
elif module.params['zeroize']:
return zeroize_config(module, result)
elif not any((module.params['src'], module.params['lines'])):
return confirm_config(module, result)
else:
return load_config(module, result)
def main():
""" main entry point for module execution
"""
argument_spec = dict(
lines=dict(type='list'),
src=dict(type='path'),
src_format=dict(choices=['xml', 'text', 'set', 'json']),
# update operations
replace=dict(default=False, type='bool'),
confirm=dict(default=0, type='int'),
comment=dict(default=DEFAULT_COMMENT),
# config operations
backup=dict(type='bool', default=False),
rollback=dict(type='int'),
zeroize=dict(default=False, type='bool'),
transport=dict(default='netconf', choices=['netconf'])
)
mutually_exclusive = [('lines', 'rollback'), ('lines', 'zeroize'),
('rollback', 'zeroize'), ('lines', 'src'),
('src', 'zeroize'), ('src', 'rollback')]
required_if = [('replace', True, ['src'])]
module = NetworkModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
result = dict(changed=False)
if module.params['backup']:
result['__backup__'] = module.config.get_config()
try:
run(module, result)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc), **exc.kwargs)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
projecthamster/hamster-gtk | hamster_gtk/overview/widgets/misc.py | 1 | 5056 | # -*- coding: utf-8 -*-
# This file is part of 'hamster-gtk'.
#
# 'hamster-gtk' is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# 'hamster-gtk' is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with 'hamster-gtk'. If not, see <http://www.gnu.org/licenses/>.
"""This module provide widgets that did not fit in the other modules."""
from __future__ import absolute_import, unicode_literals
from gettext import gettext as _
from gi.repository import GObject, Gtk
from six import text_type
from hamster_gtk.helpers import get_parent_window
from hamster_gtk.misc.dialogs import DateRangeSelectDialog
from hamster_gtk.overview.dialogs import ExportDialog
class HeaderBar(Gtk.HeaderBar):
"""Headerbar used by the overview screen."""
def __init__(self, controller, *args, **kwargs):
"""Initialize headerbar."""
super(HeaderBar, self).__init__(*args, **kwargs)
self.set_show_close_button(True)
self.set_title(_("Overview"))
self._daterange_button = self._get_daterange_button()
self.pack_start(self._get_prev_daterange_button())
self.pack_start(self._get_next_daterange_button())
self.pack_start(self._daterange_button)
self.pack_end(self._get_export_button())
controller.signal_handler.connect('daterange-changed', self._on_daterange_changed)
# Widgets
def _get_export_button(self):
"""Return a button to export facts."""
button = Gtk.Button(_("Export"))
button.connect('clicked', self._on_export_button_clicked)
return button
def _get_daterange_button(self):
"""Return a button that opens the *select daterange* dialog."""
# We add a dummy label which will be set properly once a daterange is
# set.
button = Gtk.Button('')
button.connect('clicked', self._on_daterange_button_clicked)
return button
def _get_prev_daterange_button(self):
"""Return a 'previous dateframe' widget."""
button = Gtk.Button(_("Earlier"))
button.connect('clicked', self._on_previous_daterange_button_clicked)
return button
def _get_next_daterange_button(self):
"""Return a 'next dateframe' widget."""
button = Gtk.Button(_("Later"))
button.connect('clicked', self._on_next_daterange_button_clicked)
return button
# Callbacks
def _on_daterange_button_clicked(self, button):
"""Callback for when the 'daterange' button is clicked."""
parent = get_parent_window(self)
dialog = DateRangeSelectDialog(parent)
response = dialog.run()
if response == Gtk.ResponseType.APPLY:
parent._daterange = dialog.daterange
dialog.destroy()
def _on_daterange_changed(self, sender, daterange):
"""Callback to be triggered if the 'daterange' changed."""
def get_label_text(daterange):
start, end = daterange
if start == end:
text = text_type(start)
else:
text = '{} - {}'.format(start, end)
return text
self._daterange_button.set_label(get_label_text(daterange))
def _on_previous_daterange_button_clicked(self, button):
"""Callback for when the 'previous' button is clicked."""
get_parent_window(self).apply_previous_daterange()
def _on_next_daterange_button_clicked(self, button):
"""Callback for when the 'next' button is clicked."""
get_parent_window(self).apply_next_daterange()
def _on_export_button_clicked(self, button):
"""
Trigger fact export if button clicked.
This is the place to run extra logic about where to save/which format.
``parent._export_facts`` only deals with the actual export.
"""
parent = get_parent_window(self)
dialog = ExportDialog(parent)
response = dialog.run()
if response == Gtk.ResponseType.OK:
parent._export_facts(dialog.get_export_format(), dialog.get_filename())
else:
pass
dialog.destroy()
class Summary(Gtk.Box):
"""A widget that shows categories with highest commutative ``Fact.delta``."""
def __init__(self, category_totals):
"""Initialize widget."""
super(Summary, self).__init__()
for category, total in category_totals:
label = Gtk.Label()
label.set_markup("<b>{}:</b> {} minutes".format(
GObject.markup_escape_text(text_type(category)),
int(total.total_seconds() / 60)))
self.pack_start(label, False, False, 10)
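# Hedged usage sketch (not part of the original module): Summary expects a list of
# (category, datetime.timedelta) pairs and renders each total in minutes. Category
# names and durations below are placeholders.
def _example_summary():  # hypothetical helper, never called by hamster-gtk
    import datetime
    totals = [('work', datetime.timedelta(hours=2)), ('chores', datetime.timedelta(minutes=45))]
    return Summary(totals)  # labels read "work: 120 minutes" and "chores: 45 minutes"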
| gpl-3.0 |
healpy/healpy | healpy/newvisufunc.py | 1 | 17516 | __all__ = ["projview", "newprojplot"]
import numpy as np
from .pixelfunc import ang2pix, npix2nside
from .rotator import Rotator
import matplotlib.pyplot as plt
from matplotlib.projections.geo import GeoAxes
from matplotlib.ticker import MultipleLocator, FormatStrFormatter, AutoMinorLocator
import warnings
class ThetaFormatterCounterclockwisePhi(GeoAxes.ThetaFormatter):
"""Convert tick labels from rads to degs and shifts labelling from -180|-90|0|90|180 to conterclockwise periodic 180|90|0|270|180 """
def __call__(self, x, pos=None):
if x != 0:
x *= -1
if x < 0:
x += 2 * np.pi
return super(ThetaFormatterCounterclockwisePhi, self).__call__(x, pos)
class ThetaFormatterClockwisePhi(GeoAxes.ThetaFormatter):
"""Convert tick labels from rads to degs and shifts labelling from -180|-90|0|90|180 to clockwise periodic 180|270|0|90|180 """
def __call__(self, x, pos=None):
if x < 0:
x += 2 * np.pi
# return super(ThetaFormatterShiftPhi, self).__call__(x, pos)
return super(ThetaFormatterClockwisePhi, self).__call__(x, pos)
class ThetaFormatterSymmetricPhi(GeoAxes.ThetaFormatter):
"""Just convert phi ticks from rad to degs and keep the true -180|-90|0|90|180 """
def __call__(self, x, pos=None):
return super(ThetaFormatterSymmetricPhi, self).__call__(x, pos)
class ThetaFormatterTheta(GeoAxes.ThetaFormatter):
"""Convert theta ticks from rads to degs"""
def __call__(self, x, pos=None):
return super(ThetaFormatterTheta, self).__call__(x, pos)
def lonlat(theta, phi):
"""Converts theta and phi to longitude and latitude"""
longitude = np.asarray(phi)
latitude = np.pi / 2 - np.asarray(theta)
return longitude, latitude
def update_dictionary(main_dict, update_dict):
for key, key_val in main_dict.items():
if key in update_dict:
main_dict[key] = update_dict[key]
return main_dict
def projview(
m=None,
rot=None,
coord=None,
unit="",
xsize=1000,
nest=False,
min=None,
max=None,
flip="astro",
format="%g",
cbar=True,
cmap="viridis",
norm=None,
graticule=False,
graticule_labels=False,
return_only_data=False,
projection_type="mollweide",
cb_orientation="horizontal",
xlabel=None,
ylabel=None,
longitude_grid_spacing=60,
latitude_grid_spacing=30,
override_plot_properties=None,
title=None,
xtick_label_color="black",
ytick_label_color="black",
graticule_color=None,
fontsize=None,
phi_convention="counterclockwise",
custom_xtick_labels=None,
custom_ytick_labels=None,
**kwargs
):
"""Plot a healpix map (given as an array) in the chosen projection.
See examples of using this function in the documentation under "Other tutorials".
Overplot points or lines using :func:`newprojplot`.
.. warning::
this function is work in progress, the aim is to reimplement the healpy
plot functions using the new features of matplotlib and remove most
of the custom projection code.
Please report bugs or submit feature requests via Github.
The interface will change in future releases.
Parameters
----------
map : float, array-like or None
An array containing the map, supports masked maps, see the `ma` function.
If None, will display a blank map, useful for overplotting.
rot : scalar or sequence, optional
Describe the rotation to apply.
In the form (lon, lat, psi) (unit: degrees) : the point at
longitude *lon* and latitude *lat* will be at the center. An additional rotation
of angle *psi* around this direction is applied.
coord : sequence of character, optional
Either one of 'G', 'E' or 'C' to describe the coordinate
system of the map, or a sequence of 2 of these to rotate
the map from the first to the second coordinate system.
unit : str, optional
A text describing the unit of the data. Default: ''
xsize : int, optional
        The size of the image. Default: 1000
nest : bool, optional
If True, ordering scheme is NESTED. Default: False (RING)
min : float, optional
The minimum range value
max : float, optional
The maximum range value
flip : {'astro', 'geo'}, optional
Defines the convention of projection : 'astro' (default, east towards left, west towards right)
        or 'geo' (east towards right, west towards left)
It creates the `healpy_flip` attribute on the Axes to save the convention in the figure.
format : str, optional
The format of the scale label. Default: '%g'
cbar : bool, optional
Display the colorbar. Default: True
norm : {'hist', 'log', None}
Color normalization, hist= histogram equalized color mapping,
log= logarithmic color mapping, default: None (linear color mapping)
kwargs : keywords
any additional keyword is passed to pcolormesh
graticule : bool
add graticule
graticule_labels : bool
longitude and latitude labels
projection_type : {'aitoff', 'hammer', 'lambert', 'mollweide', 'cart', '3d', 'polar'}
type of the plot
cb_orientation : {'horizontal', 'vertical'}
color bar orientation
xlabel : str
set x axis label
ylabel : str
set y axis label
longitude_grid_spacing : float
set x axis grid spacing
latitude_grid_spacing : float
set y axis grid spacing
override_plot_properties : dict
        Override the following plot properties: "cbar_shrink", "cbar_pad", "cbar_label_pad", "figure_width", "figure_size_ratio".
title : str
set title of the plot
    xtick_label_color, ytick_label_color : str
        change the color of the longitude/latitude tick labels, some color maps make it hard to read black tick labels
fontsize: dict
Override fontsize of labels: "xlabel", "ylabel", "title", "xtick_label", "ytick_label", "cbar_label", "cbar_tick_label".
phi_convention : string
convention on x-axis (phi), 'counterclockwise' (default), 'clockwise', 'symmetrical' (phi as it is truly given)
if `flip` is "geo", `phi_convention` should be set to 'clockwise'.
custom_xtick_labels : list
override x-axis tick labels
custom_ytick_labels : list
override y-axis tick labels
"""
geographic_projections = ["aitoff", "hammer", "lambert", "mollweide"]
if not m is None:
# auto min and max
if min is None:
min = m.min()
if max is None:
max = m.max()
    # do this to find how many decimals are in the colorbar labels, so that the padding in the vertical cbar can be done properly
def find_number_of_decimals(number):
try:
return len(str(number).split(".")[1])
except:
return 0
# default font sizes
fontsize_defaults = {
"xlabel": 12,
"ylabel": 12,
"title": 14,
"xtick_label": 12,
"ytick_label": 12,
"cbar_label": 12,
"cbar_tick_label": 12,
}
if fontsize is not None:
fontsize_defaults = update_dictionary(fontsize_defaults, fontsize)
# default plot settings
decs = np.max([find_number_of_decimals(min), find_number_of_decimals(max)])
if decs >= 3:
lpad = -27
else:
lpad = -9 * decs
ratio = 0.63
if projection_type == "3d":
if cb_orientation == "vertical":
shrink = 0.55
pad = 0.02
lpad = lpad
width = 11.5
if cb_orientation == "horizontal":
shrink = 0.2
pad = 0
lpad = -10
width = 14
if projection_type in geographic_projections:
if cb_orientation == "vertical":
shrink = 0.6
pad = 0.01
lpad = lpad
width = 10
if cb_orientation == "horizontal":
shrink = 0.6
pad = 0.05
lpad = -8
width = 8.5
if projection_type == "cart":
if cb_orientation == "vertical":
shrink = 1
pad = 0.01
lpad = lpad
width = 9.6
ratio = 0.42
if cb_orientation == "horizontal":
shrink = 0.4
pad = 0.1
lpad = -12
width = 8.8
if xlabel == None:
pad = 0.01
ratio = 0.63
if projection_type == "polar":
if cb_orientation == "vertical":
shrink = 1
pad = 0.01
lpad = lpad
width = 10
if cb_orientation == "horizontal":
shrink = 0.4
pad = 0.01
lpad = 0
width = 12
# pass the default settings to the plot_properties dictionary
plot_properties = {
"cbar_shrink": shrink,
"cbar_pad": pad,
"cbar_label_pad": lpad,
"figure_width": width,
"figure_size_ratio": ratio,
}
if override_plot_properties is not None:
warnings.warn(
"\n *** Overriding default plot properies: " + str(plot_properties) + " ***"
)
plot_properties = update_dictionary(plot_properties, override_plot_properties)
warnings.warn("\n *** New plot properies: " + str(plot_properties) + " ***")
# not implemented features
if not (norm is None):
raise NotImplementedError()
# Create the figure
if not return_only_data: # supress figure creation when only dumping the data
width = width # 8.5
fig = plt.figure(
figsize=(
plot_properties["figure_width"],
plot_properties["figure_width"] * plot_properties["figure_size_ratio"],
)
)
if projection_type == "cart":
ax = fig.add_subplot(111)
else:
ax = fig.add_subplot(111, projection=projection_type)
# FIXME: make a more general axes creation that works also with subplots
# ax = plt.gcf().add_axes((.125, .1, .9, .9), projection="mollweide")
# remove white space around the image
plt.subplots_adjust(left=0.02, right=0.98, top=0.95, bottom=0.05)
# end if not
if graticule and graticule_labels:
plt.subplots_adjust(left=0.04, right=0.98, top=0.95, bottom=0.05)
# allow callers to override the hold state by passing hold=True|False
# washold = ax.ishold() # commented out
hold = kwargs.pop("hold", None)
# if hold is not None:
# ax.hold(hold)
# try:
ysize = xsize // 2
theta = np.linspace(np.pi, 0, ysize)
phi = np.linspace(-np.pi, np.pi, xsize)
longitude = np.radians(np.linspace(-180, 180, xsize))
if flip == "astro":
longitude = longitude[::-1]
if not return_only_data:
# set property on ax so it can be used in newprojplot
ax.healpy_flip = flip
latitude = np.radians(np.linspace(-90, 90, ysize))
# project the map to a rectangular matrix xsize x ysize
PHI, THETA = np.meshgrid(phi, theta)
# coord or rotation
if coord or rot:
r = Rotator(coord=coord, rot=rot, inv=True)
THETA, PHI = r(THETA.flatten(), PHI.flatten())
THETA = THETA.reshape(ysize, xsize)
PHI = PHI.reshape(ysize, xsize)
nside = npix2nside(len(m))
if not m is None:
grid_pix = ang2pix(nside, THETA, PHI, nest=nest)
grid_map = m[grid_pix]
# plot
if return_only_data: # exit here when dumping the data
return [longitude, latitude, grid_map]
if projection_type != "3d": # test for 3d plot
ret = plt.pcolormesh(
longitude,
latitude,
grid_map,
vmin=min,
vmax=max,
rasterized=True,
cmap=cmap,
shading="auto",
**kwargs
)
elif projection_type == "3d": # test for 3d plot
LONGITUDE, LATITUDE = np.meshgrid(longitude, latitude)
ret = ax.plot_surface(
LONGITUDE,
LATITUDE,
grid_map,
cmap=cmap,
vmin=min,
vmax=max,
rasterized=True,
**kwargs
)
# graticule
if graticule_color is None:
plt.grid(graticule)
else:
plt.grid(graticule, color=graticule_color)
if graticule:
if projection_type in geographic_projections:
longitude_grid_spacing = longitude_grid_spacing # deg 60
ax.set_longitude_grid(longitude_grid_spacing)
ax.set_latitude_grid(latitude_grid_spacing)
ax.set_longitude_grid_ends(90)
else:
longitude_grid_spacing = longitude_grid_spacing # deg
latitude_grid_spacing = latitude_grid_spacing # deg
ax.xaxis.set_major_locator(
MultipleLocator(np.deg2rad(longitude_grid_spacing))
) # longitude
ax.yaxis.set_major_locator(
MultipleLocator(np.deg2rad(latitude_grid_spacing))
            ) # latitude
# labelling
if graticule_labels & graticule:
if phi_convention == "counterclockwise":
xtick_formatter = ThetaFormatterCounterclockwisePhi(longitude_grid_spacing)
elif phi_convention == "clockwise":
xtick_formatter = ThetaFormatterClockwisePhi(longitude_grid_spacing)
elif phi_convention == "symmetrical":
xtick_formatter = ThetaFormatterSymmetricPhi(longitude_grid_spacing)
ax.xaxis.set_major_formatter(xtick_formatter)
ax.yaxis.set_major_formatter(ThetaFormatterTheta(latitude_grid_spacing))
if custom_xtick_labels is not None:
try:
ax.xaxis.set_ticklabels(custom_xtick_labels)
except:
warnings.warn(
"Put names for all "
+ str(len(ax.xaxis.get_ticklabels()))
+ " x-tick labels!. No re-labelling done."
)
if custom_ytick_labels is not None:
try:
ax.yaxis.set_ticklabels(custom_ytick_labels)
except:
warnings.warn(
"Put names for all "
+ str(len(ax.yaxis.get_ticklabels()))
+ " y-tick labels!. No re-labelling done."
)
if not graticule:
# remove longitude and latitude labels
ax.xaxis.set_ticklabels([])
ax.yaxis.set_ticklabels([])
ax.tick_params(axis=u"both", which=u"both", length=0)
ax.set_title(title, fontsize=fontsize_defaults["title"])
# tick font size
ax.tick_params(
axis="x", labelsize=fontsize_defaults["xtick_label"], colors=xtick_label_color
)
ax.tick_params(
axis="y", labelsize=fontsize_defaults["ytick_label"], colors=ytick_label_color
)
# colorbar
if projection_type == "cart":
ax.set_aspect(1)
extend = "neither"
if min > np.min(m):
extend = "min"
if max < np.max(m):
extend = "max"
if min > np.min(m) and max < np.max(m):
extend = "both"
if cbar:
cb = fig.colorbar(
ret,
orientation=cb_orientation,
shrink=plot_properties["cbar_shrink"],
pad=plot_properties["cbar_pad"],
ticks=[min, max],
extend=extend,
)
if cb_orientation == "horizontal":
cb.ax.xaxis.set_label_text(unit, fontsize=fontsize_defaults["cbar_label"])
cb.ax.tick_params(axis="x", labelsize=fontsize_defaults["cbar_tick_label"])
cb.ax.xaxis.labelpad = plot_properties["cbar_label_pad"]
if cb_orientation == "vertical":
cb.ax.yaxis.set_label_text(unit, fontsize=fontsize_defaults["cbar_label"])
cb.ax.tick_params(axis="y", labelsize=fontsize_defaults["cbar_tick_label"])
cb.ax.yaxis.labelpad = plot_properties["cbar_label_pad"]
# workaround for issue with viewers, see colorbar docstring
cb.solids.set_edgecolor("face")
ax.set_xlabel(xlabel, fontsize=fontsize_defaults["xlabel"])
ax.set_ylabel(ylabel, fontsize=fontsize_defaults["ylabel"])
plt.draw()
# except:
# pass
return ret
def newprojplot(theta, phi, fmt=None, **kwargs):
"""newprojplot is a wrapper around :func:`matplotlib.Axes.plot` to support
colatitude theta and longitude phi and take into account the longitude convention
(see the `flip` keyword of :func:`projview`)
You can call this function as::
newprojplot(theta, phi) # plot a line going through points at coord (theta, phi)
newprojplot(theta, phi, 'bo') # plot 'o' in blue at coord (theta, phi)
Parameters
----------
theta, phi : float, array-like
Coordinates of point to plot in radians.
fmt : str
A format string (see :func:`matplotlib.Axes.plot` for details)
Notes
-----
Other keywords are passed to :func:`matplotlib.Axes.plot`.
"""
import matplotlib.pyplot as plt
ax = plt.gca()
flip = getattr(ax, "healpy_flip", "astro")
longitude, latitude = lonlat(theta, phi)
if flip == "astro":
longitude = longitude * -1
if fmt is None:
ret = plt.plot(longitude, latitude, **kwargs)
else:
ret = plt.plot(longitude, latitude, fmt, **kwargs)
return ret
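# Hedged usage sketch (not part of the original module): a healpix map drawn with
# projview() and a marker overplotted with newprojplot(). The map values, coordinate
# frame and marker position are illustrative only.
def _example_projview_usage():  # hypothetical helper, never run on import
    m = np.arange(12.0 * 32 ** 2)  # any RING-ordered healpix map (here nside=32)
    projview(m, coord=["G"], graticule=True, graticule_labels=True,
             unit="arbitrary units", projection_type="mollweide",
             title="example map")
    # overplot a point at colatitude 60 deg, longitude 30 deg
    newprojplot(theta=np.radians(60.0), phi=np.radians(30.0), fmt="ro")
    plt.show()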
| gpl-2.0 |
codenote/chromium-test | tools/diagnose-me.py | 50 | 3016 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Diagnose some common system configuration problems on Linux, and
suggest fixes."""
import os
import subprocess
import sys
all_checks = []
def Check(name):
"""Decorator that defines a diagnostic check."""
def wrap(func):
all_checks.append((name, func))
return func
return wrap
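# Hedged illustration (not part of the original script): a new diagnostic would be
# registered simply by decorating a function that returns None when healthy or an
# error string otherwise, e.g.
#
#   @Check("$HOME is set")
#   def CheckHome():
#       return None if os.environ.get('HOME') else "HOME is unset.\n"
#
# RunChecks() below then picks it up automatically from the all_checks list.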
@Check("/usr/bin/ld is not gold")
def CheckSystemLd():
proc = subprocess.Popen(['/usr/bin/ld', '-v'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
if 'GNU gold' in stdout:
return ("When /usr/bin/ld is gold, system updates can silently\n"
"corrupt your graphics drivers.\n"
"Try 'sudo apt-get remove binutils-gold'.\n")
return None
@Check("random lds are not in the $PATH")
def CheckPathLd():
proc = subprocess.Popen(['which', '-a', 'ld'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
instances = stdout.split()
if len(instances) > 1:
return ("You have multiple 'ld' binaries in your $PATH:\n"
+ '\n'.join(' - ' + i for i in instances) + "\n"
"You should delete all of them but your system one.\n"
"gold is hooked into your build via gyp.\n")
return None
@Check("/usr/bin/ld doesn't point to gold")
def CheckLocalGold():
# Check /usr/bin/ld* symlinks.
for path in ('ld.bfd', 'ld'):
path = '/usr/bin/' + path
try:
target = os.readlink(path)
except OSError, e:
if e.errno == 2:
continue # No such file
if e.errno == 22:
continue # Not a symlink
raise
if '/usr/local/gold' in target:
return ("%s is a symlink into /usr/local/gold.\n"
"It's difficult to make a recommendation, because you\n"
"probably set this up yourself. But you should make\n"
"/usr/bin/ld be the standard linker, which you likely\n"
"renamed /usr/bin/ld.bfd or something like that.\n" % path)
return None
@Check("random ninja binaries are not in the $PATH")
def CheckPathNinja():
proc = subprocess.Popen(['which', 'ninja'], stdout=subprocess.PIPE)
stdout = proc.communicate()[0]
if not 'depot_tools' in stdout:
return ("The ninja binary in your path isn't from depot_tools:\n"
+ " " + stdout +
"Remove custom ninjas from your path so that the one\n"
"in depot_tools is used.\n")
return None
def RunChecks():
for name, check in all_checks:
sys.stdout.write("* Checking %s: " % name)
sys.stdout.flush()
error = check()
if not error:
print "ok"
else:
print "FAIL"
print error
if __name__ == '__main__':
RunChecks()
| bsd-3-clause |
indykish/heroku-buildpack-python | vendor/distribute-0.6.36/setuptools/command/setopt.py | 167 | 5053 | import distutils, os
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
"""Get the filename of the distutils, local, global, or per-user config
`kind` must be one of "local", "global", or "user"
"""
if kind=='local':
return 'setup.cfg'
if kind=='global':
return os.path.join(
os.path.dirname(distutils.__file__),'distutils.cfg'
)
if kind=='user':
dot = os.name=='posix' and '.' or ''
return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
raise ValueError(
"config_file() type must be 'local', 'global', or 'user'", kind
)
def edit_config(filename, settings, dry_run=False):
"""Edit a configuration file to include `settings`
`settings` is a dictionary of dictionaries or ``None`` values, keyed by
command/section name. A ``None`` value means to delete the entire section,
while a dictionary lists settings to be changed or deleted in that section.
A setting of ``None`` means to delete that setting.
"""
from ConfigParser import RawConfigParser
log.debug("Reading configuration from %s", filename)
opts = RawConfigParser()
opts.read([filename])
for section, options in settings.items():
if options is None:
log.info("Deleting section [%s] from %s", section, filename)
opts.remove_section(section)
else:
if not opts.has_section(section):
log.debug("Adding new section [%s] to %s", section, filename)
opts.add_section(section)
for option,value in options.items():
if value is None:
log.debug("Deleting %s.%s from %s",
section, option, filename
)
opts.remove_option(section,option)
if not opts.options(section):
log.info("Deleting empty [%s] section from %s",
section, filename)
opts.remove_section(section)
else:
log.debug(
"Setting %s.%s to %r in %s",
section, option, value, filename
)
opts.set(section,option,value)
log.info("Writing %s", filename)
if not dry_run:
f = open(filename,'w'); opts.write(f); f.close()
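# Hedged usage sketch (not part of the original file): the nested-dict layout that
# edit_config() expects, as described in its docstring. Section and option names
# below are placeholders.
def _example_edit_config():  # hypothetical helper, never called by setuptools
    edit_config('setup.cfg', {
        'easy_install': {'index_url': 'https://example.invalid/simple'},  # set an option
        'bdist_egg': {'exclude_source_files': None},  # delete a single option
        'aliases': None,  # delete the whole section
    }, dry_run=True)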
class option_base(Command):
"""Abstract base class for commands that mess with config files"""
user_options = [
('global-config', 'g',
"save options to the site-wide distutils.cfg file"),
('user-config', 'u',
"save options to the current user's pydistutils.cfg file"),
('filename=', 'f',
"configuration file to use (default=setup.cfg)"),
]
boolean_options = [
'global-config', 'user-config',
]
def initialize_options(self):
self.global_config = None
self.user_config = None
self.filename = None
def finalize_options(self):
filenames = []
if self.global_config:
filenames.append(config_file('global'))
if self.user_config:
filenames.append(config_file('user'))
if self.filename is not None:
filenames.append(self.filename)
if not filenames:
filenames.append(config_file('local'))
if len(filenames)>1:
raise DistutilsOptionError(
"Must specify only one configuration file option",
filenames
)
self.filename, = filenames
class setopt(option_base):
"""Save command-line options to a file"""
description = "set an option in setup.cfg or another config file"
user_options = [
('command=', 'c', 'command to set an option for'),
('option=', 'o', 'option to set'),
('set-value=', 's', 'value of the option'),
('remove', 'r', 'remove (unset) the value'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.command = None
self.option = None
self.set_value = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.command is None or self.option is None:
raise DistutilsOptionError("Must specify --command *and* --option")
if self.set_value is None and not self.remove:
raise DistutilsOptionError("Must specify --set-value or --remove")
def run(self):
edit_config(
self.filename, {
self.command: {self.option.replace('-','_'):self.set_value}
},
self.dry_run
)
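# Illustrative note (not part of the original module): the setopt command maps
# command-line flags onto an edit_config() call. A hypothetical invocation such as
#   python setup.py setopt --command easy_install --option index-url --set-value https://example.org/simple
# would be roughly equivalent to
#   edit_config('setup.cfg', {'easy_install': {'index_url': 'https://example.org/simple'}})
# since run() replaces '-' with '_' in the option name before writing it.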
| mit |
ntt-pf-lab/backup_openstackx | openstackx/auth/tokens.py | 1 | 1872 | from openstackx.api import base
class Tenant(base.Resource):
def __repr__(self):
return "<Tenant %s>" % self._info
@property
def id(self):
return self._info['id']
@property
def description(self):
return self._info['description']
@property
def enabled(self):
return self._info['enabled']
class Token(base.Resource):
def __repr__(self):
return "<Token %s>" % self._info
@property
def id(self):
return self._info['token']['id']
@property
def username(self):
try:
return self._info['user']['username']
except (KeyError, TypeError):
return "?"
@property
def tenant_id(self):
try:
return self._info['user']['tenantId']
except (KeyError, TypeError):
return "?"
def delete(self):
self.manager.delete(self)
class TokenManager(base.ManagerWithFind):
resource_class = Token
def create(self, tenant, username, password):
params = {"auth": {"passwordCredentials": {"username": username,
"password": password},
"tenantId": tenant}}
return self._create('tokens', params, "access")
def create_scoped_with_token(self, tenant, token):
params = {"auth": {"tenantId": tenant, "tokenId": token}}
return self._create('tokens', params, "access")
class TenantManager(base.ManagerWithFind):
resource_class = Tenant
def for_token(self, token):
# FIXME(ja): now that tenants & tokens are separate managers we shouldn't
# need the ugliness of setting the token this way?
orig = self.api.connection.auth_token
self.api.connection.auth_token = token
rval = self._list('tenants', "tenants")
self.api.connection.auth_token = orig
return rval
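# Illustrative sketch (not part of the original module): the request bodies the
# two TokenManager.create* methods post to the 'tokens' endpoint. The values
# used here are hypothetical placeholders.
def _example_token_payloads():
    password_auth = {"auth": {"passwordCredentials": {"username": "demo",
                                                      "password": "secret"},
                              "tenantId": "tenant-1"}}
    token_auth = {"auth": {"tenantId": "tenant-1", "tokenId": "abc123"}}
    return password_auth, token_auth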
| bsd-3-clause |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/django/contrib/contenttypes/models.py | 49 | 6717 | from __future__ import unicode_literals
from django.apps import apps
from django.db import models
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
class ContentTypeManager(models.Manager):
use_in_migrations = True
def __init__(self, *args, **kwargs):
super(ContentTypeManager, self).__init__(*args, **kwargs)
# Cache shared by all the get_for_* methods to speed up
# ContentType retrieval.
self._cache = {}
def get_by_natural_key(self, app_label, model):
try:
ct = self._cache[self.db][(app_label, model)]
except KeyError:
ct = self.get(app_label=app_label, model=model)
self._add_to_cache(self.db, ct)
return ct
def _get_opts(self, model, for_concrete_model):
if for_concrete_model:
model = model._meta.concrete_model
return model._meta
def _get_from_cache(self, opts):
key = (opts.app_label, opts.model_name)
return self._cache[self.db][key]
def get_for_model(self, model, for_concrete_model=True):
"""
Returns the ContentType object for a given model, creating the
ContentType if necessary. Lookups are cached so that subsequent lookups
for the same model don't hit the database.
"""
opts = self._get_opts(model, for_concrete_model)
try:
return self._get_from_cache(opts)
except KeyError:
pass
# The ContentType entry was not found in the cache, therefore we
# proceed to load or create it.
try:
# Start with get() and not get_or_create() in order to use
# the db_for_read (see #20401).
ct = self.get(app_label=opts.app_label, model=opts.model_name)
except self.model.DoesNotExist:
# Not found in the database; we proceed to create it. This time
# use get_or_create to take care of any race conditions.
ct, created = self.get_or_create(
app_label=opts.app_label,
model=opts.model_name,
)
self._add_to_cache(self.db, ct)
return ct
def get_for_models(self, *models, **kwargs):
"""
Given *models, returns a dictionary mapping {model: content_type}.
"""
for_concrete_models = kwargs.pop('for_concrete_models', True)
# Final results
results = {}
# models that aren't already in the cache
needed_app_labels = set()
needed_models = set()
needed_opts = set()
for model in models:
opts = self._get_opts(model, for_concrete_models)
try:
ct = self._get_from_cache(opts)
except KeyError:
needed_app_labels.add(opts.app_label)
needed_models.add(opts.model_name)
needed_opts.add(opts)
else:
results[model] = ct
if needed_opts:
cts = self.filter(
app_label__in=needed_app_labels,
model__in=needed_models
)
for ct in cts:
model = ct.model_class()
if model._meta in needed_opts:
results[model] = ct
needed_opts.remove(model._meta)
self._add_to_cache(self.db, ct)
for opts in needed_opts:
# These weren't in the cache, or the DB, create them.
ct = self.create(
app_label=opts.app_label,
model=opts.model_name,
)
self._add_to_cache(self.db, ct)
results[ct.model_class()] = ct
return results
def get_for_id(self, id):
"""
Look up a ContentType by ID. Uses the same shared cache as get_for_model
(though ContentTypes are obviously not created on-the-fly by get_by_id).
"""
try:
ct = self._cache[self.db][id]
except KeyError:
# This could raise a DoesNotExist; that's correct behavior and will
# make sure that only correct ctypes get stored in the cache dict.
ct = self.get(pk=id)
self._add_to_cache(self.db, ct)
return ct
def clear_cache(self):
"""
Clear out the content-type cache. This needs to happen during database
flushes to prevent caching of "stale" content type IDs (see
django.contrib.contenttypes.management.update_contenttypes for where
this gets called).
"""
self._cache.clear()
def _add_to_cache(self, using, ct):
"""Insert a ContentType into the cache."""
# Note it's possible for ContentType objects to be stale; model_class() will return None.
# Hence, there is no reliance on model._meta.app_label here, just using the model fields instead.
key = (ct.app_label, ct.model)
self._cache.setdefault(using, {})[key] = ct
self._cache.setdefault(using, {})[ct.id] = ct
@python_2_unicode_compatible
class ContentType(models.Model):
app_label = models.CharField(max_length=100)
model = models.CharField(_('python model class name'), max_length=100)
objects = ContentTypeManager()
class Meta:
verbose_name = _('content type')
verbose_name_plural = _('content types')
db_table = 'django_content_type'
unique_together = (('app_label', 'model'),)
def __str__(self):
return self.name
@property
def name(self):
model = self.model_class()
if not model:
return self.model
return force_text(model._meta.verbose_name)
def model_class(self):
"Returns the Python model class for this type of content."
try:
return apps.get_model(self.app_label, self.model)
except LookupError:
return None
def get_object_for_this_type(self, **kwargs):
"""
Returns an object of this type for the keyword arguments given.
Basically, this is a proxy around this object_type's get_object() model
method. The ObjectNotExist exception, if thrown, will not be caught,
so code that calls this method should catch it.
"""
return self.model_class()._base_manager.using(self._state.db).get(**kwargs)
def get_all_objects_for_this_type(self, **kwargs):
"""
Returns all objects of this type for the keyword arguments given.
"""
return self.model_class()._base_manager.using(self._state.db).filter(**kwargs)
def natural_key(self):
return (self.app_label, self.model)
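# Illustrative sketch (not part of Django): typical use of the manager and model
# defined above. Assumes a configured Django project with django.contrib.auth
# installed; the pk value is a hypothetical example.
def _example_contenttype_usage():
    from django.contrib.auth import get_user_model
    user_model = get_user_model()
    ct = ContentType.objects.get_for_model(user_model)  # cached after the first lookup
    obj = ct.get_object_for_this_type(pk=1)             # proxies to user_model's manager
    return ct.natural_key(), obj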
| mit |
Princeton-CDH/derrida-django | derrida/people/migrations/0001_initial.py | 1 | 3871 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-06 19:54
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('places', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Person',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('notes', models.TextField(blank=True)),
('start_year', models.PositiveIntegerField(blank=True, null=True)),
('end_year', models.PositiveIntegerField(blank=True, null=True)),
('authorized_name', models.CharField(max_length=255)),
('viaf_id', models.URLField(blank=True, null=True)),
('sort_name', models.CharField(blank=True, max_length=255)),
('family_group', models.CharField(blank=True, max_length=255)),
],
options={
'ordering': ['authorized_name'],
'verbose_name_plural': 'People',
},
),
migrations.CreateModel(
name='Relationship',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('notes', models.TextField(blank=True)),
('start_year', models.PositiveIntegerField(blank=True, null=True)),
('end_year', models.PositiveIntegerField(blank=True, null=True)),
('from_person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='from_relationships', to='people.Person')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='RelationshipType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('notes', models.TextField(blank=True)),
('is_symmetric', models.BooleanField(default=False)),
],
options={
'ordering': ['name'],
'abstract': False,
},
),
migrations.CreateModel(
name='Residence',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('notes', models.TextField(blank=True)),
('start_year', models.PositiveIntegerField(blank=True, null=True)),
('end_year', models.PositiveIntegerField(blank=True, null=True)),
('person', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='people.Person')),
('place', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='places.Place')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='relationship',
name='relationship_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='people.RelationshipType'),
),
migrations.AddField(
model_name='relationship',
name='to_person',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='to_relationships', to='people.Person'),
),
migrations.AddField(
model_name='person',
name='relationships',
field=models.ManyToManyField(related_name='related_to', through='people.Relationship', to='people.Person'),
),
]
| apache-2.0 |
lz1988/company-site | django/conf/locale/ru/formats.py | 107 | 1134 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j E Y г.'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j E Y г. G:i:s'
YEAR_MONTH_FORMAT = 'F Y г.'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y', # '25.10.06'
)
DATETIME_INPUT_FORMATS = (
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
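# Illustrative sketch (not part of Django): how the *_INPUT_FORMATS strings above
# map onto Python's strptime/strftime. The date value is a hypothetical example.
def _example_ru_date_roundtrip():
    from datetime import datetime
    parsed = datetime.strptime('25.10.2006 14:30', '%d.%m.%Y %H:%M')  # second DATETIME_INPUT_FORMATS entry
    return parsed.strftime(DATE_INPUT_FORMATS[0])                     # -> '25.10.2006'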
| bsd-3-clause |
earonesty/bitcoin | test/functional/p2p-segwit.py | 5 | 90114 | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import CECKey, CPubKey
import time
import random
from binascii import hexlify
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_PERIOD = 144
VB_ACTIVATION_THRESHOLD = 108
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
# Calculate the virtual size of a witness block:
# (base + witness/4)
def get_virtual_size(witness_block):
base_size = len(witness_block.serialize())
total_size = len(witness_block.serialize(with_witness=True))
# the "+3" is so we round up
vsize = int((3*base_size + total_size + 3)/4)
return vsize
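# Worked example (illustrative, numbers are hypothetical): for a block whose
# stripped serialization is 400,000 bytes and whose full serialization is
# 1,000,000 bytes, the witness data accounts for 600,000 bytes and
#   vsize = int((3*400000 + 1000000 + 3) / 4) = 550000
# which is base + witness/4; the "+ 3" only matters when 3*base + total is not
# a multiple of 4, in which case it rounds the result up.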
class TestNode(NodeConnCB):
def __init__(self):
super().__init__()
self.getdataset = set()
def on_getdata(self, conn, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.send_message(msg_inv(inv=[CInv(1, tx.sha256)]))
self.wait_for_getdata(timeout)
def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
with mininode_lock:
self.last_message.pop("getdata", None)
self.last_message.pop("getheaders", None)
msg = msg_headers()
msg.headers = [ CBlockHeader(block) ]
if use_header:
self.send_message(msg)
else:
self.send_message(msg_inv(inv=[CInv(2, block.sha256)]))
self.wait_for_getheaders()
self.send_message(msg)
self.wait_for_getdata()
def request_block(self, blockhash, inv_type, timeout=60):
with mininode_lock:
self.last_message.pop("block", None)
self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None):
tx_message = msg_tx(tx)
if with_witness:
tx_message = msg_witness_tx(tx)
self.send_message(tx_message)
self.sync_with_ping()
assert_equal(tx.hash in self.connection.rpc.getrawmempool(), accepted)
if (reason != None and not accepted):
# Check the rejection reason as well.
with mininode_lock:
assert_equal(self.last_message["reject"].reason, reason)
# Test whether a witness block had the correct effect on the tip
def test_witness_block(self, block, accepted, with_witness=True):
if with_witness:
self.send_message(msg_witness_block(block))
else:
self.send_message(msg_block(block))
self.sync_with_ping()
assert_equal(self.connection.rpc.getbestblockhash() == block.hash, accepted)
# Used to keep track of anyone-can-spend outputs that we can use in the tests
class UTXO(object):
def __init__(self, sha256, n, nValue):
self.sha256 = sha256
self.n = n
self.nValue = nValue
# Helper for getting the script associated with a P2PKH
def GetP2PKHScript(pubkeyhash):
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
# Add signature for a P2PK witness program.
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
txTo.rehash()
class SegWitTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-whitelist=127.0.0.1"], ["-whitelist=127.0.0.1", "-acceptnonstdtxn=0"], ["-whitelist=127.0.0.1", "-bip9params=segwit:0:0"]]
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
self.sync_all()
''' Helpers '''
# Build a block on top of node0's tip.
def build_next_block(self, nVersion=4):
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = nVersion
block.rehash()
return block
# Adds list of transactions to block, adds witness commitment, then solves.
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
return
''' Individual tests '''
def test_witness_services(self):
self.log.info("Verifying NODE_WITNESS service bit")
assert((self.test_node.connection.nServices & NODE_WITNESS) != 0)
# See if sending a regular transaction works, and create a utxo
# to use in later tests.
def test_non_witness_transaction(self):
# Mine a block with an anyone-can-spend coinbase,
# let it mature, then try to spend it.
self.log.info("Testing non-witness transaction")
block = self.build_next_block(nVersion=1)
block.solve()
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping() # make sure the block was processed
txid = block.vtx[0].sha256
self.nodes[0].generate(99) # let the block mature
# Create a transaction that spends the coinbase
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
tx.calc_sha256()
# Check that serializing it with or without witness is the same
# This is a sanity check of our testing framework.
assert_equal(msg_tx(tx).serialize(), msg_witness_tx(tx).serialize())
self.test_node.send_message(msg_witness_tx(tx))
self.test_node.sync_with_ping() # make sure the tx was processed
assert(tx.hash in self.nodes[0].getrawmempool())
# Save this transaction for later
self.utxo.append(UTXO(tx.sha256, 0, 49*100000000))
self.nodes[0].generate(1)
# Verify that blocks with witnesses are rejected before activation.
def test_unnecessary_witness_before_segwit_activation(self):
self.log.info("Testing behavior of unnecessary witnesses")
# For now, rely on earlier tests to have created at least one utxo for
# us to use
assert(len(self.utxo) > 0)
assert(get_bip9_status(self.nodes[0], 'segwit')['status'] != 'active')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]
# Verify the hash with witness differs from the txid
# (otherwise our testing framework must be broken!)
tx.rehash()
assert(tx.sha256 != tx.calc_sha256(with_witness=True))
# Construct a segwit-signaling block that includes the transaction.
block = self.build_next_block(nVersion=(VB_TOP_BITS|(1 << VB_WITNESS_BIT)))
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
self.test_node.test_witness_block(block, accepted=False)
# TODO: fix synchronization so we can test reject reason
# Right now, bitcoind delays sending reject messages for blocks
# until the future, making synchronization here difficult.
#assert_equal(self.test_node.last_message["reject"].reason, "unexpected-witness")
# But it should not be permanently marked bad...
# Resend without witness information.
self.test_node.send_message(msg_block(block))
self.test_node.sync_with_ping()
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
sync_blocks(self.nodes)
# Create a p2sh output -- this is so we can pass the standardness
# rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
# in P2SH).
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# Now check that unnecessary witnesses can't be used to blind a node
# to a transaction, e.g. by violating standardness checks.
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
tx2.rehash()
self.test_node.test_transaction_acceptance(tx2, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# We'll add an unnecessary witness to this transaction that would cause
# it to be non-standard, to test that violating policy with a witness before
# segwit activation doesn't blind a node to a transaction. Transactions
# rejected for having a witness before segwit activation shouldn't be added
# to the rejection cache.
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey))
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
tx3.rehash()
# Note that this should be rejected for the premature witness reason,
# rather than a policy check, since segwit hasn't activated yet.
self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
self.std_node.test_transaction_acceptance(tx3, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, CScript([OP_TRUE])))
tx4.rehash()
self.test_node.test_transaction_acceptance(tx3, False, True)
self.test_node.test_transaction_acceptance(tx4, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Update our utxo list; we spent the first entry.
self.utxo.pop(0)
self.utxo.append(UTXO(tx4.sha256, 0, tx4.vout[0].nValue))
# Mine enough blocks for segwit's vb state to be 'started'.
def advance_to_segwit_started(self):
height = self.nodes[0].getblockcount()
# Will need to rewrite the tests here if we are past the first period
assert(height < VB_PERIOD - 1)
# Genesis block is 'defined'.
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'defined')
# Advance to end of period, status should now be 'started'
self.nodes[0].generate(VB_PERIOD-height-1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Mine enough blocks to lock in segwit, but don't activate.
# TODO: we could verify that lockin only happens at the right threshold of
# signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_lockin(self):
height = self.nodes[0].getblockcount()
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
# Advance to end of period, and verify lock-in happens at the end
self.nodes[0].generate(VB_PERIOD-1)
height = self.nodes[0].getblockcount()
assert((height % VB_PERIOD) == VB_PERIOD - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'started')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
# Mine enough blocks to activate segwit.
# TODO: we could verify that activation only happens at the right threshold
# of signalling blocks, rather than just at the right period boundary.
def advance_to_segwit_active(self):
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
height = self.nodes[0].getblockcount()
self.nodes[0].generate(VB_PERIOD - (height%VB_PERIOD) - 2)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'locked_in')
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'segwit')['status'], 'active')
# This test can only be run after segwit has activated
def test_witness_commitments(self):
self.log.info("Testing witness commitments")
# First try a correct witness commitment.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Test the test -- witness serialization should be different
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
self.test_node.test_witness_block(block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
add_witness_commitment(block_2, nonce=28)
block_2.solve()
# The commitment should have changed!
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
self.test_node.test_witness_block(block_2, accepted=True)
# Now test commitments with actual transactions
assert (len(self.utxo) > 0)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# Let's construct a witness program
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# tx2 will spend tx1, and send back to a regular anyone-can-spend address
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
# Add an extra OP_RETURN output that matches the witness commitment template,
# even though it has extra data after the incorrect commitment.
# This block should fail.
block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
# This should succeed (nValue shouldn't affect finding the
# witness commitment).
add_witness_commitment(block_3, nonce=0)
block_3.vtx[0].vout[0].nValue -= 1
block_3.vtx[0].vout[-1].nValue += 1
block_3.vtx[0].rehash()
block_3.hashMerkleRoot = block_3.calc_merkle_root()
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
self.test_node.test_witness_block(block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
block_4 = self.build_next_block()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx.vout[0].nValue-1000, witness_program))
tx3.rehash()
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
self.test_node.test_witness_block(block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_block_malleability(self):
self.log.info("Testing witness block malleability")
# Make sure that a block that has too big a virtual size
# because of a too-large coinbase witness is not permanently
# marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a'*5000000)
assert(get_virtual_size(block) > MAX_BLOCK_BASE_SIZE)
# We can't send over the p2p network, because this is too big to relay
# TODO: repeat this test with a block that can be relayed
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
assert(get_virtual_size(block) < MAX_BLOCK_BASE_SIZE)
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() == block.hash)
# Now make sure that malleating the witness nonce doesn't
# result in a block permanently marked bad.
block = self.build_next_block()
add_witness_commitment(block)
block.solve()
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
self.test_node.test_witness_block(block, accepted=False)
# Changing the witness nonce doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
self.test_node.test_witness_block(block, accepted=True)
def test_witness_block_size(self):
self.log.info("Testing witness block size limit")
# TODO: Test that non-witness carrying blocks can't exceed 1MB
# Skipping this test for now; this is covered in p2p-fullblocktest.py
# Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
block = self.build_next_block()
assert(len(self.utxo) > 0)
# Create a P2WSH transaction.
# The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
# This should give us plenty of room to tweak the spending tx's
# virtual size.
NUM_DROPS = 200 # 201 max ops per script!
NUM_OUTPUTS = 50
witness_program = CScript([OP_2DROP]*NUM_DROPS + [OP_TRUE])
witness_hash = uint256_from_str(sha256(witness_program))
scriptPubKey = CScript([OP_0, ser_uint256(witness_hash)])
prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
value = self.utxo[0].nValue
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value/NUM_OUTPUTS)
for i in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, scriptPubKey))
parent_tx.vout[0].nValue -= 50000
assert(parent_tx.vout[0].nValue > 0)
parent_tx.rehash()
child_tx = CTransaction()
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
for i in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a'*195]*(2*NUM_DROPS) + [witness_program]
child_tx.rehash()
self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
vsize = get_virtual_size(block)
additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize)*4
i = 0
while additional_bytes > 0:
# Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
extra_bytes = min(additional_bytes+1, 55)
block.vtx[-1].wit.vtxinwit[int(i/(2*NUM_DROPS))].scriptWitness.stack[i%(2*NUM_DROPS)] = b'a'*(195+extra_bytes)
additional_bytes -= extra_bytes
i += 1
block.vtx[0].vout.pop() # Remove old commitment
add_witness_commitment(block)
block.solve()
vsize = get_virtual_size(block)
assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
# Make sure that our test case would exceed the old max-network-message
# limit
assert(len(block.serialize(True)) > 2*1024*1024)
self.test_node.test_witness_block(block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(cur_length-1)
block.vtx[0].vout.pop()
add_witness_commitment(block)
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
self.test_node.test_witness_block(block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
# submitblock will try to add the nonce automatically, so that mining
# software doesn't need to worry about doing so itself.
def test_submit_block(self):
block = self.build_next_block()
# Try using a custom nonce and then don't supply it.
# This shouldn't possibly work.
add_witness_commitment(block, nonce=1)
block.vtx[0].wit = CTxWitness() # drop the nonce
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert(self.nodes[0].getbestblockhash() != block.hash)
# Now redo commitment with the standard nonce, but let bitcoind fill it in.
add_witness_commitment(block, nonce=0)
block.vtx[0].wit = CTxWitness()
block.solve()
self.nodes[0].submitblock(bytes_to_hex_str(block.serialize(True)))
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
# This time, add a tx with non-empty witness, but don't supply
# the commitment.
block_2 = self.build_next_block()
add_witness_commitment(block_2)
block_2.solve()
# Drop commitment and nonce -- submitblock should not fill in.
block_2.vtx[0].vout.pop()
block_2.vtx[0].wit = CTxWitness()
self.nodes[0].submitblock(bytes_to_hex_str(block_2.serialize(True)))
# Tip should not advance!
assert(self.nodes[0].getbestblockhash() != block_2.hash)
# Consensus tests of extra witness data in a transaction.
def test_extra_witness_data(self):
self.log.info("Testing extra witness data in tx")
assert(len(self.utxo) > 0)
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First try extra witness data on a tx that doesn't require a witness
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-2000, scriptPubKey))
tx.vout.append(CTxOut(1000, CScript([OP_TRUE]))) # non-witness output
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
tx.rehash()
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
self.test_node.test_witness_block(block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
block.vtx[1].vin[0].scriptSig = CScript([OP_0])
block.vtx[1].rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b"")) # witness output
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b"")) # non-witness
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
tx2.wit.vtxinwit[0].scriptWitness.stack = [ CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program ]
tx2.wit.vtxinwit[1].scriptWitness.stack = [ CScript([OP_TRUE]) ]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
tx2.vin[1].scriptSig = CScript([OP_TRUE])
tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
tx2.wit.vtxinwit[1].scriptWitness.stack = []
tx2.rehash()
add_witness_commitment(block)
block.solve()
# This has extra signature data for a witness input, so it should fail.
self.test_node.test_witness_block(block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
tx2.vin[0].scriptSig = b""
tx2.rehash()
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_push_length(self):
''' Should only allow up to 520 byte pushes in witness stack '''
self.log.info("Testing maximum witness push size")
MAX_SCRIPT_ELEMENT_SIZE = 520
assert(len(self.utxo))
block = self.build_next_block()
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
# First try a 521-byte stack element
tx2.wit.vtxinwit[0].scriptWitness.stack = [ b'a'*(MAX_SCRIPT_ELEMENT_SIZE+1), witness_program ]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
self.test_node.test_witness_block(block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_max_witness_program_length(self):
# Can create witness outputs that are long, but can't be greater than
# 10k bytes to successfully spend
self.log.info("Testing maximum witness program length")
assert(len(self.utxo))
MAX_PROGRAM_LENGTH = 10000
# This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
long_witness_program = CScript([b'a'*520]*19 + [OP_DROP]*63 + [OP_TRUE])
assert(len(long_witness_program) == MAX_PROGRAM_LENGTH+1)
long_witness_hash = sha256(long_witness_program)
long_scriptPubKey = CScript([OP_0, long_witness_hash])
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, long_scriptPubKey))
tx.rehash()
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*44 + [long_witness_program]
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
assert(len(witness_program) == MAX_PROGRAM_LENGTH)
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx.vout[0] = CTxOut(tx.vout[0].nValue, scriptPubKey)
tx.rehash()
tx2.vin[0].prevout.hash = tx.sha256
tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a']*43 + [witness_program]
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_input_length(self):
''' Ensure that vin length matches vtxinwit length '''
self.log.info("Testing witness input length")
assert(len(self.utxo))
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# Create a transaction that splits our utxo into many outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
nValue = self.utxo[0].nValue
for i in range(10):
tx.vout.append(CTxOut(int(nValue/10), scriptPubKey))
tx.vout[0].nValue -= 1000
assert(tx.vout[0].nValue >= 0)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
# the length of vtxinwit.
class BrokenCTransaction(CTransaction):
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
tx2 = BrokenCTransaction()
for i in range(10):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.vout.append(CTxOut(nValue-3000, CScript([OP_TRUE])))
# First try using a too long vtxinwit
for i in range(11):
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
tx2.wit.vtxinwit.pop()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
tx2.wit.vtxinwit[5].scriptWitness.stack = [ witness_program ]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
def test_witness_tx_relay_before_segwit_activation(self):
self.log.info("Testing relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected for premature-witness, but should
# not be added to recently rejected list.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
tx_value = tx.vout[0].nValue
# Verify that if a peer doesn't set nServices to include NODE_WITNESS,
# the getdata is just for the non-witness portion.
self.old_node.announce_tx_and_wait_for_getdata(tx)
assert(self.old_node.last_message["getdata"].inv[0].type == 1)
# Since we haven't delivered the tx yet, inv'ing the same tx from
# a witness transaction ought not result in a getdata.
try:
self.test_node.announce_tx_and_wait_for_getdata(tx, timeout=2)
self.log.error("Error: duplicate tx getdata!")
assert(False)
except AssertionError as e:
pass
# Delivering this transaction with witness should fail (no matter who
# it's from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx_hash, 0, tx_value))
# After segwit activates, verify that mempool:
# - rejects transactions with unnecessary/extra witnesses
# - accepts transactions with valid witnesses
# and that witness transactions are relayed to non-upgraded peers.
def test_tx_relay_after_segwit_activation(self):
self.log.info("Testing relay of witness transactions")
# Generate a transaction that doesn't require a witness, but send it
# with a witness. Should be rejected because we can't use a witness
# when spending a non-witness output.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a' ]
tx.rehash()
tx_hash = tx.sha256
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
tx2.rehash()
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
# Add a witness that is too large for IsStandard, and check that it does not enter the reject filter
p2sh_program = CScript([OP_TRUE])
p2sh_pubkey = hash160(p2sh_program)
witness_program2 = CScript([b'a'*400000])
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue-1000, CScript([OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
# hash, size, vsize
raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
vsize = (len(tx3.serialize_with_witness()) + 3*len(tx3.serialize_without_witness()) + 3) / 4
assert_equal(raw_tx["vsize"], vsize)
assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
assert_equal(raw_tx["vin"][0]["txinwitness"][0], hexlify(witness_program).decode('ascii'))
assert(vsize != raw_tx["size"])
# Cleanup: mine the transactions and update utxo for next test
self.nodes[0].generate(1)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
# Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG
# This is true regardless of segwit activation.
# Also test that we don't ask for blocks from unupgraded peers
def test_block_relay(self, segwit_activated):
self.log.info("Testing block relay")
blocktype = 2|MSG_WITNESS_FLAG
# test_node has set NODE_WITNESS, so all getdata requests should be for
# witness blocks.
# Test announcing a block via inv results in a getdata, and that
# announcing a version 4 or random VB block with a header results in a getdata
block1 = self.build_next_block()
block1.solve()
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
self.test_node.test_witness_block(block1, True)
block2 = self.build_next_block(nVersion=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
self.test_node.test_witness_block(block2, True)
block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
self.test_node.test_witness_block(block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
if segwit_activated == False:
# Before activation, we should be able to request old blocks with
# or without witness, and they should be the same.
chain_height = self.nodes[0].getblockcount()
# Pick 10 random blocks on main chain, and verify that getdata's
# for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
all_heights = list(range(chain_height+1))
random.shuffle(all_heights)
all_heights = all_heights[0:10]
for height in all_heights:
block_hash = self.nodes[0].getblockhash(height)
rpc_block = self.nodes[0].getblock(block_hash, False)
block_hash = int(block_hash, 16)
block = self.test_node.request_block(block_hash, 2)
wit_block = self.test_node.request_block(block_hash, 2|MSG_WITNESS_FLAG)
assert_equal(block.serialize(True), wit_block.serialize(True))
assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
else:
# After activation, witness blocks and non-witness blocks should
# be different. Verify rpc getblock() returns witness blocks, while
# getdata respects the requested type.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [])
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
self.test_node.test_witness_block(block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
wit_block = self.test_node.request_block(block.sha256, 2|MSG_WITNESS_FLAG)
assert_equal(wit_block.serialize(True), hex_str_to_bytes(rpc_block))
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
assert_equal(wit_block.serialize(True), block.serialize(True))
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
weight = 3*len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)
# Upgraded node should not ask for blocks from unupgraded peers
block4 = self.build_next_block(nVersion=4)
block4.solve()
self.old_node.getdataset = set()
# Blocks can be requested via direct-fetch (immediately upon processing the announcement)
# or via parallel download (with an indeterminate delay from processing the announcement)
# so to test that a block is NOT requested, we could guess a time period to sleep for,
# and then check. We can avoid the sleep() by taking advantage of transaction getdata's
# being processed after block getdata's, and announce a transaction as well,
# and then check to see if that particular getdata has been received.
# Since 0.14, inv's will only be responded to with a getheaders, so send a header
# to announce this block.
msg = msg_headers()
msg.headers = [ CBlockHeader(block4) ]
self.old_node.send_message(msg)
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
# V0 segwit outputs should be standard after activation, but not before.
def test_standardness_v0(self, segwit_activated):
self.log.info("Testing standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
assert(len(self.utxo))
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
p2sh_pubkey = hash160(witness_program)
p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
# First prepare a p2sh output (so that spending it will pass standardness)
p2sh_tx = CTransaction()
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-1000, p2sh_scriptPubKey)]
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Now test standardness of v0 P2WSH outputs.
# Start by creating a transaction with two outputs.
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-10000, scriptPubKey)]
tx.vout.append(CTxOut(8000, scriptPubKey)) # Might burn this later
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
# Now create something that looks like a P2WPKH output. This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
tx2 = CTransaction()
if segwit_activated:
# if tx was accepted, then we spend the second output.
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
tx2.vout = [CTxOut(7000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
else:
# if tx wasn't accepted, we just re-spend the p2sh output we started with.
tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, scriptPubKey)]
tx2.rehash()
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
# Now update self.utxo for later tests.
tx3 = CTransaction()
if segwit_activated:
# tx and tx2 were both accepted. Don't bother trying to reclaim the
# P2PKH output; just send tx's first output back to an anyone-can-spend.
sync_mempools([self.nodes[0], self.nodes[1]])
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx3.vout = [CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))]
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
else:
# tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, witness_program)]
tx3.rehash()
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.utxo.pop(0)
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
assert_equal(len(self.nodes[1].getrawmempool()), 0)
# Verify that future segwit upgraded transactions are non-standard,
# but valid in blocks. Can run this before and after segwit activation.
def test_segwit_versions(self):
self.log.info("Testing standardness/consensus for segwit versions (0-16)")
assert(len(self.utxo))
NUM_TESTS = 17 # will test OP_0, OP_1, ..., OP_16
if (len(self.utxo) < NUM_TESTS):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_TESTS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
sync_blocks(self.nodes)
temp_utxo = []
tx = CTransaction()
count = 0
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
for version in list(range(OP_1, OP_16+1)) + [OP_0]:
count += 1
# First try to spend to a future version segwit scriptPubKey.
scriptPubKey = CScript([CScriptOp(version), witness_hash])
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue-1000, scriptPubKey)]
tx.rehash()
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
self.nodes[0].generate(1) # Mine all the transactions
sync_blocks(self.nodes)
assert(len(self.nodes[0].getrawmempool()) == 0)
# Finally, verify that version 0 -> version 1 transactions
# are non-standard
scriptPubKey = CScript([CScriptOp(OP_1), witness_hash])
tx2 = CTransaction()
tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
tx2.vout = [CTxOut(tx.vout[0].nValue-1000, scriptPubKey)]
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
tx2.rehash()
# Gets accepted to test_node, because standardness of outputs isn't
# checked with fRequireStandard
self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
# Spend everything in temp_utxo back to an OP_TRUE output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason)
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
def test_premature_coinbase_witness_spend(self):
self.log.info("Testing premature coinbase witness spend")
block = self.build_next_block()
# Change the output of the block to be a witness output.
witness_program = CScript([OP_TRUE])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
block.vtx[0].vout[0].scriptPubKey = scriptPubKey
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
spend_tx.rehash()
# Now test a premature spend.
self.nodes[0].generate(98)
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
self.test_node.test_witness_block(block2, accepted=True)
sync_blocks(self.nodes)
def test_signature_version_1(self):
self.log.info("Testing segwit signature hash version 1")
key = CECKey()
key.set_secretbytes(b"9")
pubkey = CPubKey(key.get_pubkey())
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
# First create a witness output for use in the tests.
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
# Test each hashtype
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
hashtype |= sigflag
block = self.build_next_block()
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
tx.vout.append(CTxOut(prev_utxo.nValue - 1000, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
# Too-large input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Too-small input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=False)
# Now try correct value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
# Test combinations of signature hashes.
# Split the utxo into a lot of outputs.
# Randomly choose up to 10 to spend, sign with different hashtypes, and
# output to a random number of outputs. Repeat NUM_TESTS times.
# Ensure that we've tested a situation where we use SIGHASH_SINGLE with
# an input index > number of outputs.
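# For reference, the sighash flags used below follow the standard Bitcoin
# encoding: SIGHASH_ALL=1, SIGHASH_NONE=2, SIGHASH_SINGLE=3, and
# SIGHASH_ANYONECANPAY=0x80 is OR'd in -- which is why the loop below can draw
# randint(1, 3) for the base hashtype and OR in the anyonecanpay bit.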
NUM_TESTS = 500
temp_utxos = []
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_TESTS
for i in range(NUM_TESTS):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
for i in range(NUM_TESTS):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
for i in range(NUM_TESTS):
# Ping regularly to keep the connection alive
if (not i % 100):
self.test_node.sync_with_ping()
# Choose random number of inputs to use.
num_inputs = random.randint(1, 10)
# Create a slight bias for producing more utxos
num_outputs = random.randint(1, 11)
random.shuffle(temp_utxos)
assert(len(temp_utxos) > num_inputs)
tx = CTransaction()
total_value = 0
for i in range(num_inputs):
tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
for i in range(num_outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
anyonecanpay = 0
if random.randint(0, 1):
anyonecanpay = SIGHASH_ANYONECANPAY
hashtype = random.randint(1, 3) | anyonecanpay
sign_P2PK_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
used_sighash_single_out_of_bounds = True
tx.rehash()
for i in range(num_outputs):
temp_utxos.append(UTXO(tx.sha256, i, split_value))
temp_utxos = temp_utxos[num_inputs:]
block.vtx.append(tx)
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
self.test_node.test_witness_block(block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
tx.vout.append(CTxOut(temp_utxos[0].nValue, scriptPKH))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
# Check that we can't have a scriptSig
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
self.test_node.test_witness_block(block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
tx2.vin[0].scriptSig = b""
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
temp_utxos.pop(0)
# Update self.utxo for later tests. Just spend everything in
# temp_utxos to a corresponding entry in self.utxo
tx = CTransaction()
index = 0
for i in temp_utxos:
# Just spend to our usual anyone-can-spend output
# Use SIGHASH_SINGLE|SIGHASH_ANYONECANPAY so we can build up
# the signatures as we go.
tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
tx.vout.append(CTxOut(i.nValue, CScript([OP_TRUE])))
tx.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx, index, SIGHASH_SINGLE|SIGHASH_ANYONECANPAY, i.nValue, key)
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
# Test P2SH wrapped witness programs.
def test_p2sh_witness(self, segwit_activated):
self.log.info("Testing P2SH witness transactions")
assert(len(self.utxo))
# Prepare the p2sh-wrapped witness output
witness_program = CScript([OP_DROP, OP_TRUE])
witness_hash = sha256(witness_program)
p2wsh_pubkey = CScript([OP_0, witness_hash])
p2sh_witness_hash = hash160(p2wsh_pubkey)
scriptPubKey = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([p2wsh_pubkey]) # a push of the redeem script
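# Layering recap: witness_program is committed to by its sha256 inside
# p2wsh_pubkey (OP_0 <32-byte hash>); p2wsh_pubkey then serves as the P2SH
# redeem script, so the on-chain scriptPubKey is
# OP_HASH160 <hash160(p2wsh_pubkey)> OP_EQUAL and the scriptSig is just a
# push of that redeem script.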
# Fund the P2SH output
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
# Verify mempool acceptance and block validity
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
spend_tx = CTransaction()
spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), scriptSig))
spend_tx.vout.append(CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE])))
spend_tx.rehash()
# This transaction should not be accepted into the mempool pre- or
# post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind versions that are not
# segwit-aware would also reject this for failing CLEANSTACK.
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
spend_tx.vin[0].scriptSig = scriptSig
spend_tx.rehash()
spend_tx.wit.vtxinwit.append(CTxInWitness())
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
# Verify mempool acceptance
self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
# If we're before activation, then sending this without witnesses
# should be valid. If we're after activation, then sending this with
# witnesses should be valid.
if segwit_activated:
self.test_node.test_witness_block(block, accepted=True)
else:
self.test_node.test_witness_block(block, accepted=True, with_witness=False)
# Update self.utxo
self.utxo.pop(0)
self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
# Test the behavior of starting up a segwit-aware node after the softfork
# has activated. As segwit requires different block data than pre-segwit
# nodes would have stored, this requires special handling.
# To enable this test, pass --oldbinary=<path-to-pre-segwit-bitcoind> to
# the test.
def test_upgrade_after_activation(self, node, node_id):
self.log.info("Testing software upgrade after softfork activation")
assert(node_id != 0) # node0 is assumed to be a segwit-active bitcoind
# Make sure the nodes are all up
sync_blocks(self.nodes)
# Restart with the new binary
stop_node(node, node_id)
self.nodes[node_id] = start_node(node_id, self.options.tmpdir)
connect_nodes(self.nodes[0], node_id)
sync_blocks(self.nodes)
# Make sure that this peer thinks segwit has activated.
assert(get_bip9_status(node, 'segwit')['status'] == "active")
# Make sure this peer's blocks match those of node0.
height = node.getblockcount()
while height >= 0:
block_hash = node.getblockhash(height)
assert_equal(block_hash, self.nodes[0].getblockhash(height))
assert_equal(self.nodes[0].getblock(block_hash), node.getblock(block_hash))
height -= 1
def test_witness_sigops(self):
'''Ensure sigop counting is correct inside witnesses.'''
self.log.info("Testing sigops limit")
assert(len(self.utxo))
# Keep this under MAX_OPS_PER_SCRIPT (201)
witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG]*5 + [OP_CHECKSIG]*193 + [OP_ENDIF])
witness_hash = sha256(witness_program)
scriptPubKey = CScript([OP_0, witness_hash])
sigops_per_script = 20*5 + 193*1
# We'll produce 2 extra outputs, one with a program that would take us
# over max sig ops, and one with a program that would exactly reach max
# sig ops
outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
extra_sigops_available = MAX_SIGOP_COST % sigops_per_script
# We chose the number of checkmultisigs/checksigs to make this work:
assert(extra_sigops_available < 100) # steer clear of MAX_OPS_PER_SCRIPT
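# Worked numbers (assuming MAX_SIGOP_COST is 80000, the BIP141 block
# sigop-cost limit used by the test framework): sigops_per_script = 100 + 193
# = 293, so outputs = 80000 // 293 + 2 = 275 and
# extra_sigops_available = 80000 % 293 = 11, comfortably under 100.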
# This script, when spent with the first
# N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
# would push us just over the block sigop limit.
witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available + 1) + [OP_ENDIF])
witness_hash_toomany = sha256(witness_program_toomany)
scriptPubKey_toomany = CScript([OP_0, witness_hash_toomany])
# If we spend this script instead, we would exactly reach our sigop
# limit (for witness sigops).
witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG]*(extra_sigops_available) + [OP_ENDIF])
witness_hash_justright = sha256(witness_program_justright)
scriptPubKey_justright = CScript([OP_0, witness_hash_justright])
# First split our available utxo into a bunch of outputs
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
for i in range(outputs):
tx.vout.append(CTxOut(split_value, scriptPubKey))
tx.vout[-2].scriptPubKey = scriptPubKey_toomany
tx.vout[-1].scriptPubKey = scriptPubKey_justright
tx.rehash()
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
self.test_node.test_witness_block(block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
# too many sigops.
total_value = 0
for i in range(outputs-1):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program ]
total_value += tx.vout[i].nValue
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_toomany ]
tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
tx2.rehash()
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
self.test_node.test_witness_block(block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
checksig_count = (extra_sigops_available // 4) + 1
scriptPubKey_checksigs = CScript([OP_CHECKSIG]*checksig_count)
tx2.vout.append(CTxOut(0, scriptPubKey_checksigs))
tx2.vin.pop()
tx2.wit.vtxinwit.pop()
tx2.vout[0].nValue -= tx.vout[-2].nValue
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
self.test_node.test_witness_block(block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
self.test_node.test_witness_block(block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
for x in self.nodes:
x.invalidateblock(block_4.hash)
# Try replacing the last input of tx2 to be spending the last
# output of tx
block_5 = self.build_next_block()
tx2.vout.pop()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs-1), b""))
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
self.test_node.test_witness_block(block_5, accepted=True)
# TODO: test p2sh sigop counting
def test_getblocktemplate_before_lockin(self):
self.log.info("Testing getblocktemplate setting of segwit versionbit (before lockin)")
# Node0 is segwit aware, node2 is not.
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate()
block_version = gbt_results['version']
# If we're not indicating segwit support, we will still be
# signalling for segwit activation.
assert_equal((block_version & (1 << VB_WITNESS_BIT) != 0), node == self.nodes[0])
# If we don't specify the segwit rule, then we won't get a default
# commitment.
assert('default_witness_commitment' not in gbt_results)
# Workaround:
# Can either change the tip, or change the mempool and wait 5 seconds
# to trigger a recomputation of getblocktemplate.
txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)
# Using mocktime lets us avoid sleep()
sync_mempools(self.nodes)
self.nodes[0].setmocktime(int(time.time())+10)
self.nodes[2].setmocktime(int(time.time())+10)
for node in [self.nodes[0], self.nodes[2]]:
gbt_results = node.getblocktemplate({"rules" : ["segwit"]})
block_version = gbt_results['version']
if node == self.nodes[2]:
# If this is a non-segwit node, we should still not get a witness
# commitment, nor a version bit signalling segwit.
assert_equal(block_version & (1 << VB_WITNESS_BIT), 0)
assert('default_witness_commitment' not in gbt_results)
else:
# For segwit-aware nodes, check the version bit and the witness
# commitment are correct.
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
assert('default_witness_commitment' in gbt_results)
witness_commitment = gbt_results['default_witness_commitment']
# Check that default_witness_commitment is present.
witness_root = CBlock.get_merkle_root([ser_uint256(0),
ser_uint256(txid)])
script = get_witness_script(witness_root, 0)
assert_equal(witness_commitment, bytes_to_hex_str(script))
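# (Per BIP141 the commitment is an OP_RETURN output whose payload starts with
# the 0xaa21a9ed tag followed by sha256d(witness_merkle_root || witness_nonce);
# the coinbase's own wtxid is defined as all zeros -- the ser_uint256(0) entry
# above -- and get_witness_script() is given a zero nonce here.)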
# undo mocktime
self.nodes[0].setmocktime(0)
self.nodes[2].setmocktime(0)
# Uncompressed pubkeys are no longer supported in default relay policy,
# but (for now) are still valid in blocks.
def test_uncompressed_pubkey(self):
self.log.info("Testing uncompressed pubkeys")
# Segwit transactions using uncompressed pubkeys are not accepted
# under default policy, but should still pass consensus.
key = CECKey()
key.set_secretbytes(b"9")
key.set_compressed(False)
pubkey = CPubKey(key.get_pubkey())
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
assert(len(self.utxo) > 0)
utxo = self.utxo.pop(0)
# Test 1: P2WPKH
# First create a P2WPKH output that uses an uncompressed pubkey
pubkeyhash = hash160(pubkey)
scriptPKH = CScript([OP_0, pubkeyhash])
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
tx.vout.append(CTxOut(utxo.nValue-1000, scriptPKH))
tx.rehash()
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
self.test_node.test_witness_block(block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
witness_hash = sha256(witness_program)
scriptWSH = CScript([OP_0, witness_hash])
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptWSH))
script = GetP2PKHScript(pubkeyhash)
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx2.wit.vtxinwit.append(CTxInWitness())
tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
tx2.rehash()
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
self.test_node.test_witness_block(block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
p2sh_witness_hash = hash160(scriptWSH)
scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
scriptSig = CScript([scriptWSH])
tx3 = CTransaction()
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptP2SH))
tx3.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
self.test_node.test_witness_block(block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
# Send it to a P2PKH output, which we'll use in the next test.
scriptPubKey = GetP2PKHScript(pubkeyhash)
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, scriptPubKey))
tx4.wit.vtxinwit.append(CTxInWitness())
sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
self.test_node.test_witness_block(block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
tx5 = CTransaction()
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
tx5.vout.append(CTxOut(tx4.vout[0].nValue-1000, CScript([OP_TRUE])))
(sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
self.test_node.test_transaction_acceptance(tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
self.test_node.test_witness_block(block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
def test_non_standard_witness(self):
self.log.info("Testing detection of non-standard P2WSH witness")
pad = chr(1).encode('latin-1')
# Create scripts for tests
scripts = []
scripts.append(CScript([OP_DROP] * 100))
scripts.append(CScript([OP_DROP] * 99))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
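# scripts[0] and scripts[1] are spent below with witness stacks that exceed the
# 100-element and 80-byte-per-element standardness limits; by this arithmetic,
# scripts[2] serializes to 3600 bytes (59 pushes of 60 bytes each plus 60
# OP_DROPs), right at the witnessScript standardness limit, while scripts[3]
# comes to 3601 bytes, one byte past it.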
p2wsh_scripts = []
assert(len(self.utxo))
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
for i in scripts:
p2wsh = CScript([OP_0, sha256(i)])
p2sh = hash160(p2wsh)
p2wsh_scripts.append(p2wsh)
tx.vout.append(CTxOut(outputvalue, p2wsh))
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Creating transactions for tests
p2wsh_txs = []
p2sh_txs = []
for i in range(len(scripts)):
p2wsh_tx = CTransaction()
p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
p2wsh_tx.rehash()
p2wsh_txs.append(p2wsh_tx)
p2sh_tx = CTransaction()
p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
p2sh_tx.rehash()
p2sh_txs.append(p2sh_tx)
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
self.utxo.pop(0)
def run_test(self):
# Setup the p2p connections and start up the network thread.
self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK
self.old_node = TestNode() # only NODE_NETWORK
self.std_node = TestNode() # for testing node1 (fRequireStandard=true)
self.p2p_connections = [self.test_node, self.old_node]
self.connections = []
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node, services=NODE_NETWORK|NODE_WITNESS))
self.connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.old_node, services=NODE_NETWORK))
self.connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], self.std_node, services=NODE_NETWORK|NODE_WITNESS))
self.test_node.add_connection(self.connections[0])
self.old_node.add_connection(self.connections[1])
self.std_node.add_connection(self.connections[2])
NetworkThread().start() # Start up network handling in another thread
# Keep a place to store utxos that can be used in later tests
self.utxo = []
# Test logic begins here
self.test_node.wait_for_verack()
self.log.info("Starting tests before segwit lock in:")
self.test_witness_services() # Verifies NODE_WITNESS
self.test_non_witness_transaction() # non-witness tx's are accepted
self.test_unnecessary_witness_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
# Advance to segwit being 'started'
self.advance_to_segwit_started()
sync_blocks(self.nodes)
self.test_getblocktemplate_before_lockin()
sync_blocks(self.nodes)
# At lockin, nothing should change.
self.log.info("Testing behavior post lockin, pre-activation")
self.advance_to_segwit_lockin()
# Retest unnecessary witnesses
self.test_unnecessary_witness_before_segwit_activation()
self.test_witness_tx_relay_before_segwit_activation()
self.test_block_relay(segwit_activated=False)
self.test_p2sh_witness(segwit_activated=False)
self.test_standardness_v0(segwit_activated=False)
sync_blocks(self.nodes)
# Now activate segwit
self.log.info("Testing behavior after segwit activation")
self.advance_to_segwit_active()
sync_blocks(self.nodes)
# Test P2SH witness handling again
self.test_p2sh_witness(segwit_activated=True)
self.test_witness_commitments()
self.test_block_malleability()
self.test_witness_block_size()
self.test_submit_block()
self.test_extra_witness_data()
self.test_max_witness_push_length()
self.test_max_witness_program_length()
self.test_witness_input_length()
self.test_block_relay(segwit_activated=True)
self.test_tx_relay_after_segwit_activation()
self.test_standardness_v0(segwit_activated=True)
self.test_segwit_versions()
self.test_premature_coinbase_witness_spend()
self.test_uncompressed_pubkey()
self.test_signature_version_1()
self.test_non_standard_witness()
sync_blocks(self.nodes)
self.test_upgrade_after_activation(self.nodes[2], 2)
self.test_witness_sigops()
if __name__ == '__main__':
SegWitTest().main()
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/idlelib/Bindings.py | 7 | 2976 | """Define the menu contents, hotkeys, and event bindings.
There is additional configuration information in the EditorWindow class (and
subclasses): the menus are created there based on the menu_specs (class)
variable, and menus not created are silently skipped in the code here. This
makes it possible, for example, to define a Debug menu which is only present in
the PythonShell window, and a Format menu which is only present in the Editor
windows.
"""
from idlelib.configHandler import idleConf
# Warning: menudefs is altered in macosxSupport.overrideRootMenu()
# after it is determined that an OS X Aqua Tk is in use,
# which cannot be done until after Tk() is first called.
# Do not alter the 'file', 'options', or 'help' cascades here
# without altering overrideRootMenu() as well.
# TODO: Make this more robust
menudefs = [
# a leading underscore in a label marks the character to underline (the menu mnemonic)
('file', [
('_New File', '<<open-new-window>>'),
('_Open...', '<<open-window-from-file>>'),
('Open _Module...', '<<open-module>>'),
('Class _Browser', '<<open-class-browser>>'),
('_Path Browser', '<<open-path-browser>>'),
None,
('_Save', '<<save-window>>'),
('Save _As...', '<<save-window-as-file>>'),
('Save Cop_y As...', '<<save-copy-of-window-as-file>>'),
None,
('Prin_t Window', '<<print-window>>'),
None,
('_Close', '<<close-window>>'),
('E_xit', '<<close-all-windows>>'),
]),
('edit', [
('_Undo', '<<undo>>'),
('_Redo', '<<redo>>'),
None,
('Cu_t', '<<cut>>'),
('_Copy', '<<copy>>'),
('_Paste', '<<paste>>'),
('Select _All', '<<select-all>>'),
None,
('_Find...', '<<find>>'),
('Find A_gain', '<<find-again>>'),
('Find _Selection', '<<find-selection>>'),
('Find in Files...', '<<find-in-files>>'),
('R_eplace...', '<<replace>>'),
('Go to _Line', '<<goto-line>>'),
]),
('format', [
('_Indent Region', '<<indent-region>>'),
('_Dedent Region', '<<dedent-region>>'),
('Comment _Out Region', '<<comment-region>>'),
('U_ncomment Region', '<<uncomment-region>>'),
('Tabify Region', '<<tabify-region>>'),
('Untabify Region', '<<untabify-region>>'),
('Toggle Tabs', '<<toggle-tabs>>'),
('New Indent Width', '<<change-indentwidth>>'),
]),
('run', [
('Python Shell', '<<open-python-shell>>'),
]),
('shell', [
('_View Last Restart', '<<view-restart>>'),
('_Restart Shell', '<<restart-shell>>'),
None,
('_Interrupt Execution', '<<interrupt-execution>>'),
]),
('debug', [
('_Go to File/Line', '<<goto-file-line>>'),
('!_Debugger', '<<toggle-debugger>>'),
('_Stack Viewer', '<<open-stack-viewer>>'),
('!_Auto-open Stack Viewer', '<<toggle-jit-stack-viewer>>'),
]),
('options', [
('Configure _IDLE', '<<open-config-dialog>>'),
None,
]),
('help', [
('_About IDLE', '<<about-idle>>'),
None,
('_IDLE Help', '<<help>>'),
('Python _Docs', '<<python-docs>>'),
]),
]
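# Each entry above pairs a menu label (with an optional underscore marking the
# mnemonic character) with the virtual Tk event it triggers; as consumed by
# EditorWindow.fill_menus, None entries mark separators and a '!' prefix marks
# a checkbutton item.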
default_keydefs = idleConf.GetCurrentKeySet()
| gpl-3.0 |
johndpope/tensorflow | tensorflow/python/kernel_tests/shape_ops_test.py | 12 | 22678 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for various tensorflow.ops.tf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test
# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
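# Zero out entries below `thresh`, then repack the surviving non-zeros as a
# SparseTensor (indices, values, dense_shape) and also return how many values
# were kept.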
x[x < thresh] = 0
non_zero = np.where(x)
x_indices = np.vstack(non_zero).astype(index_dtype).T
x_values = x[non_zero]
x_shape = x.shape
return sparse_tensor.SparseTensor(
indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
class ShapeOpsTest(test.TestCase):
def _compareShape(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.shape(x)
tf_ans_64 = array_ops.shape(x, out_type=dtypes.int64)
result = tf_ans.eval()
result_64 = tf_ans_64.eval()
self.assertAllEqual(np_ans, result)
self.assertAllEqual(np_ans, result_64)
self.assertShapeEqual(np_ans, tf_ans)
def _compareShapeSparse(self, x_np, use_gpu=False):
np_ans = np.array(np.shape(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.shape(x_tf)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareShapeN(self, x, use_gpu=False):
np_ans = np.array(np.shape(x))
with self.test_session(use_gpu=use_gpu) as sess:
tf_ans = array_ops.shape_n([x, x, x])
tf_ans_64 = array_ops.shape_n([x, x, x], out_type=dtypes.int64)
result = sess.run(tf_ans)
result_64 = sess.run(tf_ans_64)
for i in range(3):
self.assertAllEqual(np_ans, result[i])
self.assertAllEqual(np_ans, result_64[i])
self.assertShapeEqual(np_ans, tf_ans[i])
def _compareRank(self, x, use_gpu=False):
np_ans = np.asarray(np.ndim(x))
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.rank(x)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareRankSparse(self, x_np, use_gpu=False):
np_ans = np.asarray(np.ndim(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.rank(x_tf)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _compareSize(self, x, use_gpu=False):
np_ans = np.asarray(np.size(x))
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.size(x)
result = tf_ans.eval()
tf_ans_64 = array_ops.size(x, out_type=dtypes.int64)
result_64 = tf_ans_64.eval()
self.assertAllEqual(np_ans, result)
self.assertAllEqual(np_ans, result_64)
self.assertShapeEqual(np_ans, tf_ans)
def _compareSizeSparse(self, x_np, use_gpu=False):
np_ans = np.asarray(np.size(x_np))
x_tf, unused_nnz = _sparsify(x_np)
with self.test_session(use_gpu=use_gpu):
tf_ans = array_ops.size(x_tf)
result = tf_ans.eval()
self.assertAllEqual(np_ans, result)
self.assertShapeEqual(np_ans, tf_ans)
def _testCpu(self, x):
self._compareShape(x, use_gpu=False)
self._compareShapeN(x, use_gpu=False)
self._compareRank(x, use_gpu=False)
self._compareSize(x, use_gpu=False)
self._compareShapeSparse(x, use_gpu=False)
self._compareRankSparse(x, use_gpu=False)
self._compareSizeSparse(x, use_gpu=False)
def _testGpu(self, x):
self._compareShape(x, use_gpu=True)
self._compareShapeN(x, use_gpu=True)
self._compareRank(x, use_gpu=True)
self._compareSize(x, use_gpu=True)
self._compareShapeSparse(x, use_gpu=True)
self._compareRankSparse(x, use_gpu=True)
self._compareSizeSparse(x, use_gpu=True)
def _testAll(self, x):
self._testCpu(x)
self._testGpu(x)
def testBasic(self):
self._testAll(np.random.randn(2))
self._testAll(np.random.randn(2, 3))
self._testAll(np.random.randn(2, 3, 5))
self._testAll(np.random.randn(2, 3, 5, 7))
self._testAll(np.random.randn(2, 3, 5, 7, 11))
self._testAll(np.random.randn(2, 3, 5, 7, 11, 13))
# Disabled because it takes too long to run, but manually verified
# as passing at time of writing.
def _test64BitOutput(self):
with self.test_session():
inp = array_ops.zeros([2**31])
num_elements = array_ops.size_internal(
inp, optimize=False, out_type=dtypes.int64)
self.assertEqual(2**31, num_elements.eval())
# Too large for tf.int32 output.
with self.assertRaises(errors_impl.InvalidArgumentError):
with self.test_session():
inp = array_ops.zeros([2**31])
num_elements = array_ops.size_internal(
inp, optimize=False, out_type=dtypes.int32)
self.assertEqual(2**31, num_elements.eval())
def _compareExpandDims(self, x, dim, use_gpu):
np_ans = np.expand_dims(x, axis=dim)
with self.test_session(use_gpu=use_gpu):
tensor = array_ops.expand_dims(x, dim)
tf_ans = tensor.eval()
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareExpandDimsAll(self, x, dim):
self._compareExpandDims(x, dim, False)
self._compareExpandDims(x, dim, True)
def testExpandDims(self):
self._compareExpandDimsAll(np.zeros([2]), 0)
self._compareExpandDimsAll(np.zeros([2]), 1)
self._compareExpandDimsAll(np.zeros([2]), -1)
self._compareExpandDimsAll(np.zeros([2, 3]), 0)
self._compareExpandDimsAll(np.zeros([2, 3]), 1)
self._compareExpandDimsAll(np.zeros([2, 3]), 2)
self._compareExpandDimsAll(np.zeros([2, 3]), -1)
self._compareExpandDimsAll(np.zeros([2, 3]), -2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 0)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 1)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), 3)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -1)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -2)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -3)
self._compareExpandDimsAll(np.zeros([2, 3, 5]), -4)
def testExpandDimsErrors(self):
with self.test_session():
self.assertRaises(ValueError, array_ops.expand_dims,
np.zeros([2, 3, 5]), -5)
self.assertRaises(ValueError, array_ops.expand_dims,
np.zeros([2, 3, 5]), 4)
def testExpandDimsGradient(self):
with self.test_session():
inp = constant_op.constant(
np.random.rand(4, 2).astype("f"), dtype=dtypes.float32)
squeezed = array_ops.expand_dims(inp, 1)
err = gradient_checker.compute_gradient_error(inp, [4, 2], squeezed,
[4, 1, 2])
self.assertLess(err, 1e-3)
def testExpandDimsScalar(self):
with self.test_session():
inp = constant_op.constant(7)
self.assertAllEqual([7], array_ops.expand_dims(inp, 0).eval())
self.assertAllEqual([7], array_ops.expand_dims(inp, -1).eval())
def _compareSqueeze(self, x, squeeze_dims, use_gpu):
with self.test_session(use_gpu=use_gpu):
if squeeze_dims:
np_ans = np.squeeze(x, axis=tuple(squeeze_dims))
tensor = array_ops.squeeze(x, squeeze_dims)
tf_ans = tensor.eval()
else:
np_ans = np.squeeze(x)
tensor = array_ops.squeeze(x)
tf_ans = tensor.eval()
self.assertShapeEqual(np_ans, tensor)
self.assertAllEqual(np_ans, tf_ans)
def _compareSqueezeAll(self, x, squeeze_dims=None):
if squeeze_dims is None:
squeeze_dims = []
self._compareSqueeze(x, squeeze_dims, False)
self._compareSqueeze(x, squeeze_dims, True)
def testSqueeze(self):
# Nothing to squeeze.
self._compareSqueezeAll(np.zeros([2]))
self._compareSqueezeAll(np.zeros([2, 3]))
# Squeeze the middle element away.
self._compareSqueezeAll(np.zeros([2, 1, 2]))
# Squeeze on both ends.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]))
def testSqueezeSpecificDimension(self):
# Positive squeeze dim index.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [2, 4])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0, 4, 2])
# Negative squeeze dim index.
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-1])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5])
self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5, -1])
def testSqueezeAllOnes(self):
# Numpy squeezes a 1 element tensor into a zero dimensional tensor.
# Verify that we do the same.
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
tensor = array_ops.squeeze(np.zeros([1, 1, 1]), [])
self.assertEqual(np.shape(1), tensor.get_shape())
tf_ans = tensor.eval()
self.assertEqual(np.shape(1), tf_ans.shape)
def testSqueezeOnlyOnes(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
input_1x1x3 = np.zeros([1, 1, 3])
self._compareSqueezeAll(input_1x1x3)
self._compareSqueezeAll(input_1x1x3, [0])
self._compareSqueezeAll(input_1x1x3, [1])
self.assertRaises(ValueError, array_ops.squeeze, input_1x1x3, [2])
def testSqueezeErrors(self):
for use_gpu in [False, True]:
with self.test_session(use_gpu=use_gpu):
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [-4])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [0, -4])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [3])
self.assertRaises(ValueError, array_ops.squeeze,
np.zeros([1, 2, 1]), [2, 3])
def testSqueezeGradient(self):
with self.test_session():
inp = np.random.rand(4, 2).astype("f")
a = array_ops.reshape(inp, [4, 1, 2])
squeezed = array_ops.squeeze(a, [])
err = gradient_checker.compute_gradient_error(a, [4, 1, 2], squeezed,
[4, 2])
self.assertLess(err, 1e-3)
def testSqueezeGradientWithSqueezeDims(self):
with self.test_session():
inp = np.random.rand(4, 2).astype("f")
a = array_ops.reshape(inp, [4, 1, 2, 1])
squeezed = array_ops.squeeze(a, [1])
err = gradient_checker.compute_gradient_error(a, [4, 1, 2, 1], squeezed,
[4, 2, 1])
self.assertLess(err, 1e-3)
def testSqueezeWithUnknownShape(self):
with self.test_session():
a = array_ops.placeholder(dtypes.float32, shape=[2, None])
squeezed = array_ops.squeeze(a, [1])
self.assertEqual([2], squeezed.get_shape().as_list())
squeezed = array_ops.squeeze(a)
self.assertEqual(None, squeezed.get_shape())
self.assertRaises(ValueError, array_ops.squeeze, a, [0])
self.assertRaises(ValueError, array_ops.squeeze, a, [100])
class TileTest(test.TestCase):
def testScalar(self):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
a = constant_op.constant(7, shape=[], dtype=dtypes.float32)
tiled = array_ops.tile(a, [])
result = tiled.eval()
self.assertEqual(result.shape, ())
self.assertEqual([], tiled.get_shape())
self.assertEqual(7, result)
def testSimple(self):
with self.test_session():
inp = np.random.rand(4, 1).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, [1, 4])
result = tiled.eval()
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
self.assertTrue((result == np.tile(inp, (1, 4))).all())
def testIdentityTileAndGrad(self):
with self.test_session():
inp = np.random.rand(4, 1).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, [1, 1])
result = tiled.eval()
self.assertEqual(result.shape, (4, 1))
self.assertEqual([4, 1], tiled.get_shape())
self.assertTrue((result == np.tile(inp, (1, 1))).all())
def testEmpty(self):
with self.test_session():
inp = np.random.rand(2, 3).astype(np.float32)
a = constant_op.constant(inp)
tiled = array_ops.tile(a, [5, 0])
result = tiled.eval()
self.assertEqual(result.shape, (10, 0))
self.assertEqual([10, 0], tiled.get_shape())
def testUnknownInputShape(self):
"""Importing can call _TileShape without shape of <multiples> known."""
with self.test_session():
inp = array_ops.placeholder(dtypes.float32) # unknown shape
multiples = constant_op.constant([1, 2, 3, 4], dtype=np.int32)
tiled = array_ops.tile(inp, multiples)
gdef = tiled.graph.as_graph_def()
# Move the tile op to the start of the graph so that shapes of its inputs
# are not available when the shape function runs on import.
swapped = False
for i, n in enumerate(gdef.node):
if n.op == "Tile":
# Swap tile op to be first in gdef.node
assert i != 0
new_node = node_def_pb2.NodeDef()
new_node.CopyFrom(gdef.node[i])
gdef.node[i].CopyFrom(gdef.node[0])
gdef.node[0].CopyFrom(new_node)
swapped = True
assert swapped
tiled_imported, = importer.import_graph_def(
gdef, return_elements=[tiled.name])
self.assertEqual(4, tiled_imported.get_shape().ndims)
def testTypes(self):
types_to_test = {
"bool": (dtypes.bool, bool),
"float32": (dtypes.float32, float),
"float64": (dtypes.float64, float),
"complex64": (dtypes.complex64, complex),
"complex128": (dtypes.complex128, complex),
"uint8": (dtypes.uint8, int),
"int32": (dtypes.int32, int),
"int64": (dtypes.int64, int),
bytes: (dtypes.string, bytes)
}
for dtype_np, (dtype_tf, cast) in types_to_test.items():
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 1).astype(dtype_np)
a = constant_op.constant(
[cast(x) for x in inp.ravel(order="C")],
shape=[4, 1],
dtype=dtype_tf)
tiled = array_ops.tile(a, [1, 4])
result = tiled.eval()
self.assertEqual(result.shape, (4, 4))
self.assertEqual([4, 4], tiled.get_shape())
self.assertAllEqual(result, np.tile(inp, (1, 4)))
def testInvalidDim(self):
with self.test_session():
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=[4, 1],
dtype=dtypes.float32)
# Wrong length of multiples.
with self.assertRaises(ValueError):
array_ops.tile(a, [1, 4, 2])
# Wrong rank for multiples.
with self.assertRaises(ValueError):
array_ops.tile(a, [[2, 3], [3, 4]]).eval()
def _RunAndVerifyResult(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
# Random dims of rank 5
input_shape = np.random.randint(1, 4, size=5)
inp = np.random.rand(*input_shape).astype("f")
a = constant_op.constant(
[float(x) for x in inp.ravel(order="C")],
shape=input_shape,
dtype=dtypes.float32)
multiples = np.random.randint(1, 4, size=5).astype(np.int32)
tiled = array_ops.tile(a, multiples)
result = tiled.eval()
self.assertTrue((np.array(multiples) * np.array(inp.shape) == np.array(
result.shape)).all())
self.assertAllEqual(result, np.tile(inp, tuple(multiples)))
self.assertShapeEqual(result, tiled)
def testRandom(self):
for _ in range(5):
self._RunAndVerifyResult(use_gpu=False)
for _ in range(5):
self._RunAndVerifyResult(use_gpu=True)
def testGradientSimpleReduction(self):
with self.test_session():
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 4])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = grad.eval()
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
def testGradientStridedReduction(self):
with self.test_session():
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
self.assertShapeEqual(inp, grad)
result = grad.eval()
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
self.assertTrue((np.abs(expected - result) < 1e-3).all())
def testGradientSimpleReductionOnGPU(self):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 1).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 4])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
result = grad.eval()
self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)
def testGradientStridedReductionOnGPU(self):
with self.test_session(use_gpu=True):
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
grad_shape = [4, 4]
grad_inp = np.random.rand(*grad_shape).astype("f")
grad_tensor = constant_op.constant(
[float(x) for x in grad_inp.flatten()], shape=grad_shape)
grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
result = grad.eval()
expected_shape = [4, 2]
expected = np.zeros(expected_shape)
expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
self.assertAllClose(expected, result, 1e-3)
def _RunAndVerifyGradientResult(self, input_shape, multiples):
for use_gpu in False, True:
with self.test_session(use_gpu=use_gpu):
# Random values
inp = np.asarray(np.random.rand(*input_shape))
a = constant_op.constant(inp, dtype=dtypes.float64)
tiled = array_ops.tile(a, multiples)
grad_shape = list(np.array(multiples) * np.array(inp.shape))
err = gradient_checker.compute_gradient_error(
a, list(input_shape), tiled, grad_shape, x_init_value=inp)
print("tile(float) error = ", err)
self.assertLess(err, 1e-3)
def testGradientRandomScalar(self):
self._RunAndVerifyGradientResult([], [])
def testGradientRandom(self):
self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 1, 1, 1, 1])
self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 2, 1, 3, 1])
self._RunAndVerifyGradientResult([2, 3, 1, 1, 3], [3, 1, 1, 2, 2])
self._RunAndVerifyGradientResult([2, 1, 3, 3, 2], [1, 3, 3, 1, 2])
def testGradientStridedReductionGC(self):
with self.test_session():
inp = np.random.rand(4, 2).astype("f")
a = constant_op.constant(
[float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
tiled = array_ops.tile(a, [1, 2])
err = gradient_checker.compute_gradient_error(a, [4, 2], tiled, [4, 4])
self.assertLess(err, 1e-3)
def testShapeFunctionEdgeCases(self):
# Unknown multiples shape.
inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input shape.
inp = array_ops.placeholder(dtypes.float32)
tiled = array_ops.tile(inp, [2, 2, 2, 2])
self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
# Unknown input and multiples shape.
inp = array_ops.placeholder(dtypes.float32)
tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
self.assertIs(None, tiled.get_shape().ndims)
# Known input and partially known multiples.
inp = constant_op.constant(0.0, shape=[1, 1])
tiled = array_ops.tile(inp, [array_ops.placeholder(dtypes.int32), 7])
self.assertEqual([None, 7], tiled.get_shape().as_list())
# Mismatched input rank and multiples length.
inp = array_ops.placeholder(dtypes.float32, shape=[None, None])
with self.assertRaises(ValueError):
tiled = array_ops.tile(
inp, array_ops.placeholder(
dtypes.int32, shape=[3]))
if __name__ == "__main__":
test.main()
| apache-2.0 |
ideasman42/nirw-search | _misc/readme_update_helptext.py | 1 | 1880 | #!/usr/bin/env python3
# GPL License, Version 3.0 or later
import os
import subprocess
import textwrap
import re
BASE_DIR = os.path.join(os.path.dirname(__file__), "..")
COMMAND_NAME = 'nirw-search'
def patch_help_test(help_output):
help_output = help_output.replace(
'usage: ' + COMMAND_NAME,
'usage::\n'
'\n'
' ' + COMMAND_NAME,
)
help_output = help_output.replace(
"{auto,always,never}", "<auto,always,never>",
)
return help_output
def main():
p = subprocess.run(
[
'python3',
os.path.join(BASE_DIR, COMMAND_NAME),
'--help',
],
stdout=subprocess.PIPE,
)
help_output = (
p.stdout.decode('utf-8').rstrip() +
'\n\n'
)
# strip trailing space
help_output = re.sub(r'[ \t]+(\n|\Z)', r'\1', help_output)
help_output = patch_help_test(help_output)
# Try writing reStructuredText directly!
# help_output = textwrap.indent(help_output, ' ')
help_output = (
'\nOutput of ``' + COMMAND_NAME + ' --help``\n\n' +
help_output
)
with open('readme.rst', 'r', encoding='utf-8') as f:
data = f.read()
help_begin_text = '.. BEGIN HELP TEXT'
help_end_text = '.. END HELP TEXT'
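# readme.rst is expected to contain both markers, each on its own line; the
# regenerated help text replaces whatever currently sits between them, e.g.:
#
#   .. BEGIN HELP TEXT
#   (output of `nirw-search --help` is written here)
#   .. END HELP TEXT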
help_begin_index = data.find(help_begin_text)
help_end_index = data.find(help_end_text, help_begin_index)
if help_begin_index == -1:
print('Error: {!r} not found'.format(help_begin_text))
return
if help_end_index == -1:
print('Error: {!r} not found'.format(help_end_text))
return
help_begin_index += len(help_begin_text) + 1
data_update = data[:help_begin_index] + help_output + data[help_end_index:]
with open('readme.rst', 'w', encoding='utf-8') as f:
f.write(data_update)
if __name__ == "__main__":
main()
| gpl-3.0 |
andresgz/django | tests/migrations/test_graph.py | 99 | 11829 | import warnings
from django.db.migrations.exceptions import (
CircularDependencyError, NodeNotFoundError,
)
from django.db.migrations.graph import RECURSION_DEPTH_WARNING, MigrationGraph
from django.test import SimpleTestCase
from django.utils.encoding import force_text
class GraphTests(SimpleTestCase):
"""
Tests the digraph structure.
"""
def test_simple_graph(self):
"""
Tests a basic dependency graph:
app_a: 0001 <-- 0002 <--- 0003 <-- 0004
/
app_b: 0001 <-- 0002 <-/
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_a", "0004"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_dependency("app_a.0004", ("app_a", "0004"), ("app_a", "0003"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_b", "0002"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
# Test root migration case
self.assertEqual(
graph.forwards_plan(("app_a", "0001")),
[('app_a', '0001')],
)
# Test branch B only
self.assertEqual(
graph.forwards_plan(("app_b", "0002")),
[("app_b", "0001"), ("app_b", "0002")],
)
# Test whole graph
self.assertEqual(
graph.forwards_plan(("app_a", "0004")),
[
('app_b', '0001'), ('app_b', '0002'), ('app_a', '0001'),
('app_a', '0002'), ('app_a', '0003'), ('app_a', '0004'),
],
)
# Test reverse to b:0002
self.assertEqual(
graph.backwards_plan(("app_b", "0002")),
[('app_a', '0004'), ('app_a', '0003'), ('app_b', '0002')],
)
# Test roots and leaves
self.assertEqual(
graph.root_nodes(),
[('app_a', '0001'), ('app_b', '0001')],
)
self.assertEqual(
graph.leaf_nodes(),
[('app_a', '0004'), ('app_b', '0002')],
)
def test_complex_graph(self):
"""
Tests a complex dependency graph:
        app_a:  0001 <-- 0002 <--- 0003 <-- 0004
                      \        \ /         /
        app_b:  0001 <-\         0002 <-X /
                        \       \        /
        app_c:           \ 0001 <-- 0002 <-
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_a", "0004"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_node(("app_c", "0001"), None)
graph.add_node(("app_c", "0002"), None)
graph.add_dependency("app_a.0004", ("app_a", "0004"), ("app_a", "0003"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_b", "0002"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
graph.add_dependency("app_a.0004", ("app_a", "0004"), ("app_c", "0002"))
graph.add_dependency("app_c.0002", ("app_c", "0002"), ("app_c", "0001"))
graph.add_dependency("app_c.0001", ("app_c", "0001"), ("app_b", "0001"))
graph.add_dependency("app_c.0002", ("app_c", "0002"), ("app_a", "0002"))
# Test branch C only
self.assertEqual(
graph.forwards_plan(("app_c", "0002")),
[('app_b', '0001'), ('app_c', '0001'), ('app_a', '0001'), ('app_a', '0002'), ('app_c', '0002')],
)
# Test whole graph
self.assertEqual(
graph.forwards_plan(("app_a", "0004")),
[
('app_b', '0001'), ('app_c', '0001'), ('app_a', '0001'),
('app_a', '0002'), ('app_c', '0002'), ('app_b', '0002'),
('app_a', '0003'), ('app_a', '0004'),
],
)
# Test reverse to b:0001
self.assertEqual(
graph.backwards_plan(("app_b", "0001")),
[
('app_a', '0004'), ('app_c', '0002'), ('app_c', '0001'),
('app_a', '0003'), ('app_b', '0002'), ('app_b', '0001'),
],
)
# Test roots and leaves
self.assertEqual(
graph.root_nodes(),
[('app_a', '0001'), ('app_b', '0001'), ('app_c', '0001')],
)
self.assertEqual(
graph.leaf_nodes(),
[('app_a', '0004'), ('app_b', '0002'), ('app_c', '0002')],
)
def test_circular_graph(self):
"""
Tests a circular dependency graph.
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency("app_a.0001", ("app_a", "0001"), ("app_b", "0002"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
graph.add_dependency("app_b.0001", ("app_b", "0001"), ("app_a", "0003"))
# Test whole graph
self.assertRaises(
CircularDependencyError,
graph.forwards_plan, ("app_a", "0003"),
)
def test_circular_graph_2(self):
graph = MigrationGraph()
graph.add_node(('A', '0001'), None)
graph.add_node(('C', '0001'), None)
graph.add_node(('B', '0001'), None)
graph.add_dependency('A.0001', ('A', '0001'), ('B', '0001'))
graph.add_dependency('B.0001', ('B', '0001'), ('A', '0001'))
graph.add_dependency('C.0001', ('C', '0001'), ('B', '0001'))
self.assertRaises(
CircularDependencyError,
graph.forwards_plan, ('C', '0001')
)
def test_graph_recursive(self):
graph = MigrationGraph()
root = ("app_a", "1")
graph.add_node(root, None)
expected = [root]
for i in range(2, 750):
parent = ("app_a", str(i - 1))
child = ("app_a", str(i))
graph.add_node(child, None)
graph.add_dependency(str(i), child, parent)
expected.append(child)
leaf = expected[-1]
forwards_plan = graph.forwards_plan(leaf)
self.assertEqual(expected, forwards_plan)
backwards_plan = graph.backwards_plan(root)
self.assertEqual(expected[::-1], backwards_plan)
def test_graph_iterative(self):
graph = MigrationGraph()
root = ("app_a", "1")
graph.add_node(root, None)
expected = [root]
for i in range(2, 1000):
parent = ("app_a", str(i - 1))
child = ("app_a", str(i))
graph.add_node(child, None)
graph.add_dependency(str(i), child, parent)
expected.append(child)
leaf = expected[-1]
with warnings.catch_warnings(record=True) as w:
forwards_plan = graph.forwards_plan(leaf)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(str(w[-1].message), RECURSION_DEPTH_WARNING)
self.assertEqual(expected, forwards_plan)
with warnings.catch_warnings(record=True) as w:
backwards_plan = graph.backwards_plan(root)
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, RuntimeWarning))
self.assertEqual(str(w[-1].message), RECURSION_DEPTH_WARNING)
self.assertEqual(expected[::-1], backwards_plan)
def test_plan_invalid_node(self):
"""
Tests for forwards/backwards_plan of nonexistent node.
"""
graph = MigrationGraph()
message = "Node ('app_b', '0001') not a valid node"
with self.assertRaisesMessage(NodeNotFoundError, message):
graph.forwards_plan(("app_b", "0001"))
with self.assertRaisesMessage(NodeNotFoundError, message):
graph.backwards_plan(("app_b", "0001"))
def test_missing_parent_nodes(self):
"""
Tests for missing parent nodes.
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
msg = "Migration app_a.0001 dependencies reference nonexistent parent node ('app_b', '0002')"
with self.assertRaisesMessage(NodeNotFoundError, msg):
graph.add_dependency("app_a.0001", ("app_a", "0001"), ("app_b", "0002"))
def test_missing_child_nodes(self):
"""
Tests for missing child nodes.
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
msg = "Migration app_a.0002 dependencies reference nonexistent child node ('app_a', '0002')"
with self.assertRaisesMessage(NodeNotFoundError, msg):
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
def test_infinite_loop(self):
"""
Tests a complex dependency graph:
        app_a:        0001 <-
                            \
        app_b:       0001 <- x 0002 <-
                      /               \
        app_c:   0001<-  <------------ x 0002
And apply squashing on app_c.
"""
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_node(("app_c", "0001_squashed_0002"), None)
graph.add_dependency("app_b.0001", ("app_b", "0001"), ("app_c", "0001_squashed_0002"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_a", "0001"))
graph.add_dependency("app_b.0002", ("app_b", "0002"), ("app_b", "0001"))
graph.add_dependency("app_c.0001_squashed_0002", ("app_c", "0001_squashed_0002"), ("app_b", "0002"))
with self.assertRaises(CircularDependencyError):
graph.forwards_plan(("app_c", "0001_squashed_0002"))
def test_stringify(self):
graph = MigrationGraph()
self.assertEqual(force_text(graph), "Graph: 0 nodes, 0 edges")
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_dependency("app_a.0002", ("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency("app_a.0003", ("app_a", "0003"), ("app_b", "0002"))
self.assertEqual(force_text(graph), "Graph: 5 nodes, 3 edges")
self.assertEqual(repr(graph), "<MigrationGraph: nodes=5, edges=3>")
| bsd-3-clause |
ex1usive-m4d/TemplateDocx | controllers/phpdocx/lib/openoffice/openoffice.org/basis3.4/program/python-core-2.6.1/lib/encodings/unicode_internal.py | 827 | 1196 | """ Python 'unicode-internal' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.unicode_internal_encode
decode = codecs.unicode_internal_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.unicode_internal_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.unicode_internal_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='unicode-internal',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
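# Minimal usage sketch (standard Python 2 codec machinery assumed): the
# encodings package locates this module by name, so a lookup dispatches to
# getregentry() above.
#
#   import codecs
#   info = codecs.lookup('unicode-internal')
#   data, consumed = info.encode(u'abc')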
| bsd-3-clause |
galtys/odoo | addons/website/models/ir_actions.py | 363 | 3074 | # -*- coding: utf-8 -*-
import urlparse
from openerp.http import request
from openerp.osv import fields, osv
class actions_server(osv.Model):
""" Add website option in server actions. """
_name = 'ir.actions.server'
_inherit = ['ir.actions.server']
def _compute_website_url(self, cr, uid, id, website_path, xml_id, context=None):
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url', context=context)
link = website_path or xml_id or (id and '%d' % id) or ''
if base_url and link:
path = '%s/%s' % ('/website/action', link)
return '%s' % urlparse.urljoin(base_url, path)
return ''
def _get_website_url(self, cr, uid, ids, name, args, context=None):
res = dict.fromkeys(ids, False)
for action in self.browse(cr, uid, ids, context=context):
if action.state == 'code' and action.website_published:
res[action.id] = self._compute_website_url(cr, uid, action.id, action.website_path, action.xml_id, context=context)
return res
_columns = {
'xml_id': fields.function(
osv.osv.get_xml_id, type='char', string="External ID",
help="ID of the action if defined in a XML file"),
'website_path': fields.char('Website Path'),
'website_url': fields.function(
_get_website_url, type='char', string='Website URL',
help='The full URL to access the server action through the website.'),
'website_published': fields.boolean(
'Available on the Website', copy=False,
            help='A code server action can be executed from the website, using a dedicated '
                 'controller. The address is <base>/website/action/<website_path>. '
                 'Set this field to True to allow users to run this action. If it '
                 'is set to False the action cannot be run through the website.'),
}
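    # Illustrative example (hypothetical values): with the 'web.base.url'
    # parameter set to 'https://example.com' and website_path 'my_action',
    # _compute_website_url() returns
    # 'https://example.com/website/action/my_action'.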
def on_change_website_path(self, cr, uid, ids, website_path, xml_id, context=None):
values = {
'website_url': self._compute_website_url(cr, uid, ids and ids[0] or None, website_path, xml_id, context=context)
}
return {'value': values}
def _get_eval_context(self, cr, uid, action, context=None):
""" Override to add the request object in eval_context. """
eval_context = super(actions_server, self)._get_eval_context(cr, uid, action, context=context)
if action.state == 'code':
eval_context['request'] = request
return eval_context
def run_action_code_multi(self, cr, uid, action, eval_context=None, context=None):
""" Override to allow returning response the same way action is already
returned by the basic server action behavior. Note that response has
priority over action, avoid using both. """
res = super(actions_server, self).run_action_code_multi(cr, uid, action, eval_context, context)
if 'response' in eval_context:
return eval_context['response']
return res
| agpl-3.0 |
schumi2004/NOT_UPDATED_Sick-Beard-Dutch | lib/requests/compat.py | 289 | 2433 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import charade as chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)
#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)
#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)
#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)
#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)
#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)
#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)
#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)
#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4) # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------
# Syntax sugar.
_ver = sys.version.lower()
is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)
# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))
# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()
# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower()) # Complete guess.
is_solaris = ('solar==' in str(sys.platform).lower()) # Complete guess.
try:
import simplejson as json
except ImportError:
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
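# Typical usage sketch (calling module assumed): the rest of the package
# imports the re-exported names so its code stays identical on Python 2 and 3.
#
#   from .compat import urlparse, str, builtin_str
#   host = urlparse('http://example.com/path').netloc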
| gpl-3.0 |
Distrotech/PyQt-x11 | examples/animation/stickman/stickman_rc3.py | 5 | 44472 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Wed Mar 20 11:18:27 2013
# by: The Resource Compiler for PyQt (Qt v4.8.4)
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore
qt_resource_data = b"\
\x00\x00\x05\x1c\
\xff\
\xff\xff\xff\x00\x00\x00\x05\x00\x00\x00\x10\x00\x00\x00\x00\x00\
\x00\x00\x00\xc0\x62\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\xc0\x59\x00\x00\x00\x00\x00\x00\xc0\x49\x00\x00\x00\
\x00\x00\x00\xc0\x49\x00\x00\x00\x00\x00\x00\x40\x49\x00\x00\x00\
\x00\x00\x00\xc0\x49\x00\x00\x00\x00\x00\x00\xc0\x39\x00\x00\x00\
\x00\x00\x00\x40\x49\x00\x00\x00\x00\x00\x00\x40\x39\x00\x00\x00\
\x00\x00\x00\x40\x49\x00\x00\x00\x00\x00\x00\xc0\x59\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc0\x5f\x40\x00\x00\
\x00\x00\x00\x40\x49\x00\x00\x00\x00\x00\x00\x40\x59\x00\x00\x00\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x40\x5f\x40\x00\x00\
\x00\x00\x00\x40\x49\x00\x00\x00\x00\x00\x00\xc0\x41\x80\x00\x00\
\x00\x00\x00\x40\x52\xc0\x00\x00\x00\x00\x00\x40\x41\x80\x00\x00\
\x00\x00\x00\x40\x52\xc0\x00\x00\x00\x00\x00\xc0\x39\x00\x00\x00\
\x00\x00\x00\x40\x69\x00\x00\x00\x00\x00\x00\xc0\x3e\x00\x00\x00\
\x00\x00\x00\x40\x72\xc0\x00\x00\x00\x00\x00\x40\x39\x00\x00\x00\
\x00\x00\x00\x40\x69\x00\x00\x00\x00\x00\x00\x40\x3e\x00\x00\x00\
\x00\x00\x00\x40\x72\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x10\xc0\
\x3c\x89\x78\xef\x64\x41\xf9\xc0\x66\xd9\xe8\x90\xe1\x7d\x15\xc0\
\x31\xe3\x24\x6e\x1a\x35\x4b\xc0\x60\xbe\xa8\xcb\xa3\x98\xe8\xc0\
\x4b\x26\x9c\xdd\x08\xb6\xce\xc0\x52\x55\xf3\x5e\x1e\x9a\xcc\x40\
\x45\x5c\xfa\x9b\x0f\x7d\x05\xc0\x58\x63\x42\x06\x4d\xc8\xaa\xc0\
\x17\x5f\xc1\xb2\xd2\x1c\x6d\x40\x31\x9d\xf7\x2f\xd9\x40\x6c\x40\
\x45\x56\x51\xec\xae\x7d\xee\x40\x16\x28\xbe\x3a\x2e\x6a\x36\xc0\
\x55\x8e\x3f\x19\x86\x36\x44\xc0\x24\x7d\xe2\xe5\x56\x5d\x8e\xc0\
\x5d\x38\xe5\xb0\x63\x56\x32\x40\x42\x3f\x2b\xb1\x33\xe6\xab\x40\
\x47\x27\x75\x17\xb2\xff\xe1\xc0\x65\x05\x80\x69\xc9\x45\xe4\x40\
\x46\xa5\x8b\x3a\x6c\xa7\xa8\xc0\x6c\x02\x0f\xa6\x73\xff\xbc\xc0\
\x23\x07\xbb\xb5\xb5\x51\x7d\x40\x46\x24\x27\xde\x37\xae\x65\x40\
\x4d\x34\xca\x20\x80\xfa\x8f\x40\x3b\x64\xbd\x21\x5f\x80\x81\xc0\
\x37\xe9\x56\x97\x59\x09\x15\x40\x65\x1b\x2f\x86\xb4\x04\x4d\xc0\
\x3c\xb5\xb5\xc5\x5a\xd5\x30\x40\x70\xcd\xc0\x3b\x0e\xef\xd6\x40\
\x43\xce\x38\x01\x33\x34\xdd\x40\x62\xec\x1a\xba\x71\x62\x00\x40\
\x3f\x5e\x72\xef\x9e\x8c\x25\x40\x6f\x65\x3a\x4d\xed\xc0\xd9\x00\
\x00\x00\x10\xc0\x3c\x89\xb8\xa7\xeb\xa9\x01\xc0\x66\xda\x0d\x17\
\x53\x31\x17\xc0\x31\xe1\x8c\xf0\xba\xdf\x75\xc0\x60\xbe\xd1\xa6\
\x64\x43\x82\xc0\x4b\x26\x88\x07\x92\xca\xbf\xc0\x52\x56\x94\xaa\
\x30\x5d\x72\x40\x45\x5d\x52\x6d\x57\x9c\xd9\xc0\x58\x62\xcc\x60\
\x74\xfe\x54\xc0\x17\x69\xa4\x0b\x5f\xcf\x0e\x40\x31\xa0\xd0\x0e\
\xd0\x62\xed\x40\x45\x54\xc3\x45\xee\xd2\xcc\x40\x16\x23\xd3\x38\
\xd6\x69\x28\xc0\x55\x8e\x03\x71\x6e\x9d\x31\xc0\x24\x82\xdb\x79\
\xc3\x1b\x6e\xc0\x5d\x38\x8a\xc7\x14\xca\x74\x40\x42\x3e\x16\x8f\
\x10\x50\x33\x40\x47\x27\x55\xb2\x6a\xa5\x2d\xc0\x65\x05\x3e\xa8\
\x00\x01\x15\x40\x46\xa5\x8f\xfd\x66\x67\x23\xc0\x6c\x01\xce\x0e\
\xaa\x42\x46\xc0\x23\x05\x39\x8c\x95\xa5\x0f\x40\x46\x26\xe0\xb5\
\x34\x5e\x5e\x40\x4d\x34\xad\x03\x49\x41\x5e\x40\x3b\x5f\xf9\x28\
\x4b\xa6\x62\xc0\x37\xea\x6c\x93\xfc\xa6\xb3\x40\x65\x1b\xec\xb0\
\x7b\xa8\xfc\xc0\x3c\xb5\xfe\xd2\xbb\xf3\x4b\x40\x70\xce\x1f\x6d\
\x37\x5e\x7d\x40\x43\xce\xcc\x1c\x47\x26\xc2\x40\x62\xeb\x73\x80\
\x3a\x8b\xaf\x40\x3f\x5e\xe2\xb7\xae\x5f\xb6\x40\x6f\x64\x91\x2c\
\x26\xca\x21\x00\x00\x00\x10\xc0\x38\x01\x30\x6b\xba\xdc\x1b\xc0\
\x64\xeb\xbe\xf4\x99\xf2\x9b\xc0\x13\xa3\x04\x1c\x30\xdb\xb6\xc0\
\x5e\x4a\x12\x9f\x80\xef\xb8\xc0\x48\xf6\x8c\x7d\xe9\xbd\xf2\xc0\
\x50\xa8\x10\x7e\x26\x5b\x6d\x40\x48\xcf\x7a\x59\xa1\x24\xc5\xc0\
\x53\x09\x35\x0a\x19\xb4\xbb\xc0\x2f\x0b\x6a\x35\x97\xd3\x7e\x40\
\x3e\x88\x20\x7b\xc1\xb4\x2f\x40\x41\x20\x0b\x54\x51\x2e\x7e\x40\
\x39\xc9\x9e\xf7\xb2\xd8\xc6\xc0\x55\xfe\xc6\x23\xb0\x4f\x25\xc0\
\x1c\x1c\x60\x3d\x66\x92\xe7\xc0\x5d\xba\xce\x5e\x2a\x54\x25\x40\
\x43\xc4\x24\x4f\x00\x90\x62\x40\x58\x0b\xce\x4b\xd9\x22\xe1\xc0\
\x36\xed\xde\xb4\x40\x2c\x73\x40\x5a\xd1\x4e\xef\xc4\x03\x6b\x40\
\x3f\xdc\xbc\xa5\x13\x81\x98\xc0\x37\x1b\xea\x35\xa6\x66\xff\x40\
\x4c\x2e\x2d\x1e\x28\x01\x86\x40\x47\x49\xa3\x1a\xd5\xe2\xf0\x40\
\x48\xdd\x6f\xf6\xf0\x01\x68\xc0\x38\xc1\x32\x29\x64\xfb\x5b\x40\
\x66\xb7\xea\x61\x5e\xb1\x57\xc0\x3d\x45\x4c\x15\xef\x6f\x50\x40\
\x71\x9c\x53\x80\x4b\x64\x0c\x40\x42\x8c\x98\x55\x36\x48\x2a\x40\
\x65\xd8\xb5\x79\xf6\x83\x89\x40\x3d\xa6\xc3\x54\x49\x2b\x98\x40\
\x71\x29\xea\x7e\x10\x5d\xc3\x00\x00\x00\x10\x40\x31\xc2\x13\x60\
\x88\x8b\x4e\xc0\x68\x3c\xec\xdf\xc9\x5c\x2f\x40\x19\x5a\x83\xca\
\x1f\xc9\xc5\xc0\x62\x27\x36\xb4\x9b\x52\x60\xc0\x48\x60\x10\xa3\
\xc6\x74\xb4\xc0\x59\x39\x43\xd2\xf9\x7b\x4b\x40\x49\x55\xa6\x3b\
\x8b\xb1\xc9\xc0\x56\x88\xbb\x33\x01\xdc\x6b\xc0\x41\x53\xb3\x9c\
\x85\xff\x25\x3f\xf3\x70\x84\x8f\xed\xe9\x9b\x40\x2e\x1c\xbe\xb0\
\x48\x52\x79\x40\x1a\x5f\x89\xdb\x5b\x45\xc0\xc0\x50\xc0\xce\xa2\
\xc3\xe5\x44\xc0\x65\x26\x9c\x40\xed\xa0\x1a\xc0\x54\xa2\x72\xde\
\x2f\x48\xc9\xc0\x6b\xdd\x16\xce\x9e\x56\xb3\x40\x55\xdc\x36\x69\
\xb4\x6e\x69\xc0\x3d\xbd\x2f\xf0\x4e\x71\xd7\x40\x5a\x39\x0b\xe0\
\xb9\x21\xc7\x40\x37\x5e\x81\xe7\xe6\x46\x06\xc0\x47\xa4\x74\xb8\
\xaf\xb7\xee\x40\x38\xfe\xaa\x2d\x0e\x97\x7e\x40\x36\x4f\x1c\x98\
\xc9\x38\x7d\x40\x40\x43\x0a\x66\x30\x52\xdc\xc0\x3f\xd3\xf0\xcb\
\x87\x9d\x38\x40\x62\xae\x04\x10\x53\xde\x8c\xc0\x3d\x00\x25\x84\
\x92\xd2\x45\x40\x6f\x30\xbc\x58\x1f\xbc\xdf\x40\x41\x44\xa6\xc0\
\x0b\x29\xd8\x40\x63\xaa\x6a\x7a\x0d\xd9\x34\x40\x3e\xab\x98\x09\
\xd9\x42\x57\x40\x70\x16\x03\x15\xe6\x2f\x0a\
\x00\x00\x01\x0c\
\xff\
\xff\xff\xff\x00\x00\x00\x01\x00\x00\x00\x10\xc0\x72\x75\xd0\xbc\
\x4a\x7e\x3b\x40\x6e\xa8\x3a\x76\xcc\x4a\x4f\xc0\x6e\xc4\x15\x8d\
\xdb\x8a\x81\x40\x6d\x91\x95\x4a\xaf\x90\x71\xc0\x69\x4c\xfe\x50\
\x6c\x77\x6e\x40\x72\x41\xf5\x29\xa3\x44\xca\xc0\x67\xd1\xbb\x6b\
\x66\xf4\xfc\x40\x68\x1a\x7c\x41\xab\x27\xf0\xc0\x59\x09\x41\xf0\
\xd6\x29\x49\x40\x71\x72\x68\x78\x60\x60\x84\xc0\x57\x8e\x4b\x36\
\x5e\x8b\x6f\x40\x6c\xb0\x0b\xce\x10\xc5\x22\xc0\x70\xf5\x12\xcb\
\xa7\x78\x95\x40\x71\x44\x4e\x7a\x50\x85\x9f\xc0\x74\x72\x66\x5a\
\x1e\x1b\xae\x40\x71\x17\xf1\xe4\xf6\x7f\x7e\xc0\x64\x2e\x57\x5b\
\xe3\x6f\x6c\x40\x70\x14\x53\x5e\x5b\x6a\x7a\xc0\x60\x2d\xc9\x35\
\x26\xaf\xd0\x40\x72\xf1\x84\xb6\xf0\xd8\xae\xc0\x53\x20\x21\x2d\
\x64\x5b\xb3\x40\x72\x40\xa0\x97\xbf\x55\x8f\xc0\x51\x0d\xcf\x0f\
\x1b\x48\x3c\x40\x6b\xd1\x05\xa8\x2a\xdf\x62\x40\x44\x77\x7b\xec\
\x05\x04\x13\x40\x6f\x01\xfc\x1b\x65\x69\x69\x40\x61\x21\xf2\x32\
\xa1\xd8\xd9\x40\x71\x41\x25\x0f\x44\xd0\xa8\x40\x4c\x88\xe7\x4e\
\xbb\xeb\xb8\x40\x6c\x7b\xe4\x15\x28\x7a\x0a\x40\x62\x87\x8c\xab\
\xb5\x95\xf1\x40\x70\xd4\x4b\x4c\x03\x9d\x83\
\x00\x00\x19\x6c\
\xff\
\xff\xff\xff\x00\x00\x00\x19\x00\x00\x00\x10\x40\x38\x4c\xca\x3f\
\x4a\x24\xde\xc0\x62\x97\x1b\x32\x01\x5d\xf3\x40\x3a\x05\x25\x01\
\xce\x14\x45\xc0\x58\xb0\x20\xdd\x20\x86\x54\xc0\x40\xc3\x31\xca\
\x2d\xe7\xe8\xc0\x4e\x4e\xc6\x85\x2f\x62\xd8\x40\x50\x0a\x03\xb2\
\x01\xc4\x5d\xc0\x43\x9a\x8e\xca\x3e\xac\x2f\xc0\x3e\x83\x6a\xb1\
\x60\x3b\xb1\x40\x45\x35\x2f\x6e\x30\x18\x5c\x40\x32\x53\x99\xcb\
\xc5\x8d\xfc\x40\x4a\x8f\xc7\x9e\xd3\xec\x98\xc0\x44\x7e\x7d\xc3\
\xb1\x2d\x7e\xc0\x60\x5d\xcb\xb7\xd6\xde\xd5\x40\x22\xa2\xe4\x37\
\x5a\x12\x12\xc0\x63\x69\xe1\x79\x61\x98\x47\x40\x50\x78\x0c\x5a\
\xfa\x18\xc4\xc0\x5b\x79\x6e\x7b\xba\xd3\x77\x40\x3c\x78\x0c\xfd\
\x1b\x24\x7a\xc0\x62\xee\x0e\xa0\x41\x5e\x50\xc0\x46\xd1\x51\x1c\
\xc7\x57\xf6\x40\x50\x2c\x59\x00\x05\x07\x96\x40\x36\xbd\x5a\xba\
\x55\x33\x3c\x40\x53\xeb\xd5\xdc\x95\x84\x02\xc0\x64\x7c\xe6\xa7\
\x82\xd4\x73\x40\x5a\x98\x3a\x9d\x99\xd2\x25\xc0\x67\x9f\x24\xfc\
\xa8\x8b\x88\x40\x69\x6a\x0c\xdc\x04\x07\xad\x40\x61\x88\x18\xa6\
\x50\x33\xdf\x40\x5e\xdc\xf7\xac\x43\x1d\x93\x40\x60\xa1\xe2\xfa\
\x78\x8e\x27\x40\x6b\xea\x33\x3e\x3c\x8e\x55\x00\x00\x00\x10\x40\
\x38\x4c\xcb\x75\x7c\x04\xe4\xc0\x62\x97\x16\xcb\x46\x05\x9e\x40\
\x3a\x04\xf9\x90\xa0\xa1\x90\xc0\x58\xb0\x1c\x43\xe6\x16\xc6\xc0\
\x40\xc3\x37\x29\xb0\xad\x71\xc0\x4e\x4e\xa0\x29\xf8\xaf\x63\x40\
\x50\x0a\x04\xf7\x7a\x75\xf9\xc0\x43\x9a\xa3\xc7\xff\x8e\xe7\xc0\
\x3e\x83\x24\xc7\x88\x06\x31\x40\x45\x35\x0f\x82\x42\x98\x23\x40\
\x32\x53\xbe\x35\xfa\x10\xf1\x40\x4a\x8f\xeb\x86\x7e\xa9\xe7\xc0\
\x44\x7e\x73\x44\xb1\xe1\x51\xc0\x60\x5d\xc2\x5a\x55\xc7\x63\x40\
\x22\xa2\xcb\x69\x59\x50\x4d\xc0\x63\x69\xe0\xb9\x0f\x32\xde\x40\
\x50\x78\x10\xb9\x1c\xd7\xaa\xc0\x5b\x79\x7a\x66\xb3\x79\x6b\x40\
\x3c\x77\xfd\x1b\x3d\x06\x10\xc0\x62\xee\x10\xd4\xaa\xf0\xf0\xc0\
\x46\xd1\x4e\xde\x36\xe2\x04\x40\x50\x2c\x31\x73\xb4\xd5\x4d\x40\
\x36\xbd\x01\xb5\xb5\x00\xbf\x40\x53\xeb\xec\x32\x09\x0f\xf3\xc0\
\x64\x7c\xde\xd4\xf9\xea\x5a\x40\x5a\x98\x39\x7f\xc7\xf1\x43\xc0\
\x67\x9f\x24\x5c\x76\xd5\xf3\x40\x69\x6a\x0a\x70\x9e\x4e\xa4\x40\
\x61\x88\x12\x1f\x05\xdd\x03\x40\x5e\xdc\xf4\xca\xc1\xac\x79\x40\
\x60\xa1\xe3\x0c\x44\xff\x6e\x40\x6b\xea\x32\x47\x27\x58\xb5\x00\
\x00\x00\x10\x40\x38\x4c\xcb\x75\x7c\x04\xe4\xc0\x62\x97\x16\xcb\
\x46\x05\x9e\x40\x3a\x04\xf9\x90\xa0\xa1\x90\xc0\x58\xb0\x1c\x43\
\xe6\x16\xc6\xc0\x40\xc3\x37\x29\xb0\xad\x71\xc0\x4e\x4e\xa0\x29\
\xf8\xaf\x63\x40\x50\x0a\x04\xf7\x7a\x75\xf9\xc0\x43\x9a\xa3\xc7\
\xff\x8e\xe7\xc0\x3e\x83\x24\xc7\x88\x06\x31\x40\x45\x35\x0f\x82\
\x42\x98\x23\x40\x32\x53\xbe\x35\xfa\x10\xf1\x40\x4a\x8f\xeb\x86\
\x7e\xa9\xe7\xc0\x44\x7e\x73\x44\xb1\xe1\x51\xc0\x60\x5d\xc2\x5a\
\x55\xc7\x63\x40\x22\xa2\xcb\x69\x59\x50\x4d\xc0\x63\x69\xe0\xb9\
\x0f\x32\xde\x40\x50\x78\x10\xb9\x1c\xd7\xaa\xc0\x5b\x79\x7a\x66\
\xb3\x79\x6b\x40\x3c\x77\xfd\x1b\x3d\x06\x10\xc0\x62\xee\x10\xd4\
\xaa\xf0\xf0\xc0\x46\xd1\x4e\xde\x36\xe2\x04\x40\x50\x2c\x31\x73\
\xb4\xd5\x4d\x40\x36\xbd\x01\xb5\xb5\x00\xbf\x40\x53\xeb\xec\x32\
\x09\x0f\xf3\xc0\x64\x7c\xde\xd4\xf9\xea\x5a\x40\x5a\x98\x39\x7f\
\xc7\xf1\x43\xc0\x67\x9f\x24\x5c\x76\xd5\xf3\x40\x69\x6a\x0a\x70\
\x9e\x4e\xa4\x40\x61\x88\x12\x1f\x05\xdd\x03\x40\x5e\xdc\xf4\xca\
\xc1\xac\x79\x40\x60\xa1\xe3\x0c\x44\xff\x6e\x40\x6b\xea\x32\x47\
\x27\x58\xb5\x00\x00\x00\x10\x40\x38\x4c\xc9\x02\x38\xef\xcd\xc0\
\x62\x97\x1f\xb1\x2b\xf9\x2a\x40\x3a\x05\x52\xd2\x95\xd7\xab\xc0\
\x58\xb0\x25\xaa\x53\xdb\xfb\xc0\x40\xc3\x2b\xf2\x52\x75\x9c\xc0\
\x4e\x4e\xee\xcf\x9b\x90\x20\x40\x50\x0a\x02\x68\x2b\x0a\x79\xc0\
\x43\x9a\x79\x01\x1a\xe6\xb9\xc0\x3e\x83\xb4\x36\x4e\x91\x1d\x40\
\x45\x35\x51\x17\x06\xbb\x13\x40\x32\x53\x73\x48\x82\xb8\xb3\x40\
\x4a\x8f\xa1\xde\xcc\x91\x1f\xc0\x44\x7e\x88\xc8\x99\xcc\xe3\xc0\
\x60\x5d\xd5\x8c\xe2\xd4\xd6\x40\x22\xa2\xfe\x41\x73\x93\xb3\xc0\
\x63\x69\xe2\x43\x44\x69\xd7\x40\x50\x78\x07\xd5\xb0\x97\xb1\xc0\
\x5b\x79\x62\x25\xed\x42\x8d\x40\x3c\x78\x1d\x6d\x64\xc2\xa2\xc0\
\x62\xee\x0c\x58\x0f\x63\xac\xc0\x46\xd1\x53\xa5\xee\xcf\x4a\x40\
\x50\x2c\x82\x94\x9a\xd5\x79\x40\x36\xbd\xb8\x76\x84\x73\x91\x40\
\x53\xeb\xbe\x59\x7d\x1d\xee\xc0\x64\x7c\xee\xed\xf6\x5d\x09\x40\
\x5a\x98\x3b\xcb\xed\xe6\x6c\xc0\x67\x9f\x25\xa6\x1c\x6e\xe9\x40\
\x69\x6a\x0f\x6b\x3b\xad\xe5\x40\x61\x88\x1f\x86\x72\xb7\xce\x40\
\x5e\xdc\xfa\xb5\x02\x69\xbf\x40\x60\xa1\xe2\xe7\xb9\x5c\x6a\x40\
\x6b\xea\x34\x42\x75\x16\xa6\x00\x00\x00\x10\x40\x38\x4c\xc8\xf1\
\x73\x46\x04\xc0\x62\x97\x1f\xee\x0c\xf5\x37\x40\x3a\x05\x55\x49\
\x9a\x94\xdf\xc0\x58\xb0\x25\xec\x1f\xd6\x66\xc0\x40\xc3\x2b\xa0\
\x8b\xcc\x68\xc0\x4e\x4e\xf0\xf9\x4c\x7d\xc5\x40\x50\x0a\x02\x56\
\xcf\x1b\x74\xc0\x43\x9a\x77\xd7\x9d\x67\xb9\xc0\x3e\x83\xb8\x29\
\x68\x18\xac\x40\x45\x35\x52\xe6\x88\x0c\x08\x40\x32\x53\x71\x35\
\x56\x27\xbe\x40\x4a\x8f\x9f\xd7\xb3\x3a\x64\xc0\x44\x7e\x89\x5f\
\xfb\xd6\xa6\xc0\x60\x5d\xd6\x13\xf6\xcd\x94\x40\x22\xa2\xff\xa7\
\x2c\xa7\x01\xc0\x63\x69\xe2\x4e\x19\xcc\x87\x40\x50\x78\x07\x98\
\x0d\x09\xcb\xc0\x5b\x79\x61\x7d\xbc\xc0\x33\x40\x3c\x78\x1e\x4d\
\x88\xf4\x35\xc0\x62\xee\x0c\x38\xf1\xc4\xe8\xc0\x46\xd1\x53\xca\
\x0b\x08\xfb\x40\x50\x2c\x84\xd0\x5d\x55\x8c\x40\x36\xbd\xbd\x80\
\x5a\xad\x4a\x40\x53\xeb\xbd\x15\xf5\x07\xab\xc0\x64\x7c\xef\x60\
\x1c\x2f\x98\x40\x5a\x98\x3b\xdc\x37\xef\xe9\xc0\x67\x9f\x25\xaf\
\x3d\xbf\xd4\x40\x69\x6a\x0f\x8e\x89\x77\x9f\x40\x61\x88\x1f\xe5\
\x0f\x7e\xd1\x40\x5e\xdc\xfa\xde\xc3\x4f\x44\x40\x60\xa1\xe2\xe6\
\xb7\x5f\xeb\x40\x6b\xea\x34\x50\x71\xf2\x5e\x00\x00\x00\x10\x40\
\x38\x4c\xc8\xe4\xe9\x4a\xe6\xc0\x62\x97\x20\x1b\x90\x2c\x4f\x40\
\x3a\x05\x57\x22\x0c\x24\x7d\xc0\x58\xb0\x26\x1d\x5d\x86\x5d\xc0\
\x40\xc3\x2b\x63\x3b\xcf\x8c\xc0\x4e\x4e\xf2\x97\xc3\x66\xc7\x40\
\x50\x0a\x02\x49\xd6\x8c\x4d\xc0\x43\x9a\x76\xf9\x0d\x28\xdf\xc0\
\x3e\x83\xbb\x1e\x53\x10\x07\x40\x45\x35\x54\x41\x8c\xf4\x10\x40\
\x32\x53\x6f\xa7\x9a\x5e\xd5\x40\x4a\x8f\x9e\x53\x1a\xe6\x20\xc0\
\x44\x7e\x89\xd1\x4c\x49\xec\xc0\x60\x5d\xd6\x79\x12\xe1\x41\x40\
\x22\xa3\x00\xb2\xf0\x65\xf7\xc0\x63\x69\xe2\x56\x35\xbe\x3d\x40\
\x50\x78\x07\x69\xf1\x50\x4e\xc0\x5b\x79\x60\xff\xec\xcb\x67\x40\
\x3c\x78\x1e\xf5\x33\x9c\xb3\xc0\x62\xee\x0c\x21\xab\x29\xe4\xc0\
\x46\xd1\x53\xe5\x27\x99\x97\x40\x50\x2c\x86\x7c\x61\xad\xa6\x40\
\x36\xbd\xc1\x45\xf8\xa0\x3f\x40\x53\xeb\xbc\x23\xbf\xbf\x32\xc0\
\x64\x7c\xef\xb5\x95\x62\x2d\x40\x5a\x98\x3b\xe8\x6a\x6d\xa5\xc0\
\x67\x9f\x25\xb6\x13\xea\x8f\x40\x69\x6a\x0f\xa8\xf8\xea\x0c\x40\
\x61\x88\x20\x2b\xe3\xfb\xb4\x40\x5e\xdc\xfa\xfe\x05\x65\x16\x40\
\x60\xa1\xe2\xe5\xf6\x3c\x86\x40\x6b\xea\x34\x5a\xea\xb2\xfe\x00\
\x00\x00\x10\x40\x38\x4c\xc8\xd4\x84\xf3\x4d\xc0\x62\x97\x20\x57\
\x0f\x3b\xd7\x40\x3a\x05\x59\x8c\x8a\x5e\xce\xc0\x58\xb0\x26\x5d\
\xcd\x2f\x15\xc0\x40\xc3\x2b\x12\xda\xcc\x87\xc0\x4e\x4e\xf4\xb6\
\x3e\xa1\x52\x40\x50\x0a\x02\x38\xe4\x10\x8c\xc0\x43\x9a\x75\xd5\
\xe6\xa1\x5c\xc0\x3e\x83\xbe\xfd\x1d\x6c\x0f\x40\x45\x35\x56\x07\
\xd6\x3e\xb6\x40\x32\x53\x6d\x9e\xdc\x7b\x7c\x40\x4a\x8f\x9c\x56\
\x73\x8e\x7b\xc0\x44\x7e\x8a\x65\x9b\x9a\xc2\xc0\x60\x5d\xd6\xfd\
\x68\xf0\x6a\x40\x22\xa3\x02\x11\x65\xf2\x80\xc0\x63\x69\xe2\x60\
\xd2\xcd\x1f\x40\x50\x78\x07\x2d\xa2\x09\x53\xc0\x5b\x79\x60\x5b\
\x5c\xc5\x6c\x40\x3c\x78\x1f\xd0\x82\x80\xbc\xc0\x62\xee\x0c\x03\
\x39\x4a\xd4\xc0\x46\xd1\x54\x08\xbe\xbb\x9e\x40\x50\x2c\x88\xac\
\xa3\x4c\x7c\x40\x36\xbd\xc6\x35\xfc\x61\x64\x40\x53\xeb\xba\xe6\
\xb1\x05\x48\xc0\x64\x7c\xf0\x25\x7e\x80\x23\x40\x5a\x98\x3b\xf8\
\x62\xb1\x2e\xc0\x67\x9f\x25\xbf\x07\x67\xde\x40\x69\x6a\x0f\xcb\
\x95\x87\xec\x40\x61\x88\x20\x88\x9b\xc5\x2f\x40\x5e\xdc\xfb\x26\
\xf0\x4e\x91\x40\x60\xa1\xe2\xe4\xf9\x69\xcb\x40\x6b\xea\x34\x68\
\x9f\xdc\x0a\x00\x00\x00\x10\x40\x38\x4c\xc8\xcf\x45\x6d\x70\xc0\
\x62\x97\x20\x6a\x1b\x9c\x23\x40\x3a\x05\x5a\x52\xc4\x7b\x1c\xc0\
\x58\xb0\x26\x72\x72\x66\xea\xc0\x40\xc3\x2a\xf9\x10\xed\xb9\xc0\
\x4e\x4e\xf5\x64\x15\x69\xb0\x40\x50\x0a\x02\x33\x77\xa5\x56\xc0\
\x43\x9a\x75\x78\xa3\x77\x6b\xc0\x3e\x83\xc0\x3a\xa2\x9b\x18\x40\
\x45\x35\x56\x99\x6e\xfb\xd2\x40\x32\x53\x6c\xf7\xf3\x6f\x59\x40\
\x4a\x8f\x9b\xb3\x71\xd9\x7e\xc0\x44\x7e\x8a\x95\x22\x08\xc3\xc0\
\x60\x5d\xd7\x27\xd0\xff\x24\x40\x22\xa3\x02\x81\xb3\x6a\x1c\xc0\
\x63\x69\xe2\x64\x39\x77\x1e\x40\x50\x78\x07\x1a\x50\xf1\xd4\xc0\
\x5b\x79\x60\x26\xa7\x76\x2b\x40\x3c\x78\x20\x16\xc0\xae\xfd\xc0\
\x62\xee\x0b\xf9\x78\xf7\x9a\xc0\x46\xd1\x54\x14\x2c\xd4\x30\x40\
\x50\x2c\x89\x60\x2e\x92\x45\x40\x36\xbd\xc7\xcb\x13\x78\x86\x40\
\x53\xeb\xba\x81\x14\xb0\x4f\xc0\x64\x7c\xf0\x49\x5d\x6b\x81\x40\
\x5a\x98\x3b\xfd\x81\x19\xae\xc0\x67\x9f\x25\xc1\xe5\xe5\x65\x40\
\x69\x6a\x0f\xd6\xad\x9f\xef\x40\x61\x88\x20\xa6\x52\x94\x24\x40\
\x5e\xdc\xfb\x34\x0d\x4b\xd1\x40\x60\xa1\xe2\xe4\xa8\x63\x78\x40\
\x6b\xea\x34\x6d\x04\x78\x67\x00\x00\x00\x10\x40\x38\x4c\xc8\xc3\
\x53\xf0\xd7\xc0\x62\x97\x20\x95\x74\xbf\xc7\x40\x3a\x05\x5c\x16\
\x3e\x0f\x2d\xc0\x58\xb0\x26\xa1\x74\xe5\x62\xc0\x40\xc3\x2a\xbe\
\x48\x1a\xc9\xc0\x4e\x4e\xf6\xef\xf7\x35\xd6\x40\x50\x0a\x02\x27\
\x21\x30\x73\xc0\x43\x9a\x74\xa4\x51\xd1\x0d\xc0\x3e\x83\xc3\x0d\
\xc2\x0e\xf4\x40\x45\x35\x57\xe5\x09\x34\x5f\x40\x32\x53\x6b\x7b\
\xc7\x91\x61\x40\x4a\x8f\x9a\x40\x37\x7d\xc1\xc0\x44\x7e\x8b\x01\
\x5c\x12\x71\xc0\x60\x5d\xd7\x88\x63\x00\x6f\x40\x22\xa3\x03\x81\
\x71\x1e\xdd\xc0\x63\x69\xe2\x6b\xf8\x2e\x8c\x40\x50\x78\x06\xee\
\x57\xe8\x39\xc0\x5b\x79\x5f\xae\xab\x35\x22\x40\x3c\x78\x20\xb6\
\xa7\x72\x0b\xc0\x62\xee\x0b\xe3\x46\x4f\xd6\xc0\x46\xd1\x54\x2e\
\x40\x0a\x1b\x40\x50\x2c\x8a\xf9\x12\xa8\xf6\x40\x36\xbd\xcb\x65\
\xa5\xcc\xd1\x40\x53\xeb\xb9\x99\xaa\xcf\x03\xc0\x64\x7c\xf0\x9b\
\x11\xbc\x05\x40\x5a\x98\x3c\x09\x29\xdc\x73\xc0\x67\x9f\x25\xc8\
\x6e\xde\xfd\x40\x69\x6a\x0f\xef\xf2\x9d\x38\x40\x61\x88\x20\xe9\
\xfe\xa9\x62\x40\x5e\xdc\xfb\x51\xea\xa9\x9f\x40\x60\xa1\xe2\xe3\
\xef\xdb\xd6\x40\x6b\xea\x34\x77\x05\xb5\xb1\x00\x00\x00\x10\x40\
\x38\x4c\xc8\xc2\x67\xdd\x57\xc0\x62\x97\x20\x98\xcd\x94\x1a\x40\
\x3a\x05\x5c\x39\x1f\x5d\xdb\xc0\x58\xb0\x26\xa5\x16\x7d\x84\xc0\
\x40\xc3\x2a\xb9\xbc\xba\x60\xc0\x4e\x4e\xf7\x0e\x8c\x4a\xb1\x40\
\x50\x0a\x02\x26\x2d\x64\x0a\xc0\x43\x9a\x74\x93\xeb\xe8\x2d\xc0\
\x3e\x83\xc3\x45\x9f\x4d\x2f\x40\x45\x35\x57\xfe\xa7\x9e\xb3\x40\
\x32\x53\x6b\x5e\x68\x2f\x32\x40\x4a\x8f\x9a\x23\x89\xc3\x22\xc0\
\x44\x7e\x8b\x09\xb8\x5e\x6c\xc0\x60\x5d\xd7\x8f\xd8\xc8\x46\x40\
\x22\xa3\x03\x95\x32\xa9\x80\xc0\x63\x69\xe2\x6c\x91\x58\xc3\x40\
\x50\x78\x06\xea\xf2\x89\xaa\xc0\x5b\x79\x5f\xa5\x67\x05\xe0\x40\
\x3c\x78\x20\xc3\x00\xcb\xb0\xc0\x62\xee\x0b\xe1\x8f\x72\x2a\xc0\
\x46\xd1\x54\x30\x44\x61\xe5\x40\x50\x2c\x8b\x18\xa9\x44\x23\x40\
\x36\xbd\xcb\xac\xeb\xcc\x19\x40\x53\xeb\xb9\x87\xca\x0f\x26\xc0\
\x64\x7c\xf0\xa1\x61\xc8\xea\x40\x5a\x98\x3c\x0a\x10\x78\x9d\xc0\
\x67\x9f\x25\xc8\xf0\x21\x1c\x40\x69\x6a\x0f\xf1\xe6\x6c\xcd\x40\
\x61\x88\x20\xef\x39\x09\x1b\x40\x5e\xdc\xfb\x54\x39\x4e\x8a\x40\
\x60\xa1\xe2\xe3\xe1\x9a\x53\x40\x6b\xea\x34\x77\xcb\x94\x20\x00\
\x00\x00\x10\x40\x38\x4c\xc8\xc1\x32\xe4\xf9\xc0\x62\x97\x20\x9d\
\x2e\xf8\xde\x40\x3a\x05\x5c\x66\xc6\xea\x0b\xc0\x58\xb0\x26\xa9\
\xd7\x32\x7d\xc0\x40\xc3\x2a\xb3\xc9\xdf\x14\xc0\x4e\x4e\xf7\x36\
\x93\x96\x7f\x40\x50\x0a\x02\x24\xee\x51\x65\xc0\x43\x9a\x74\x7e\
\x75\x91\x91\xc0\x3e\x83\xc3\x8e\xbe\x00\x43\x40\x45\x35\x58\x20\
\x2f\xea\x4a\x40\x32\x53\x6b\x37\xf6\x02\x6f\x40\x4a\x8f\x99\xfe\
\x00\x4b\x9f\xc0\x44\x7e\x8b\x14\xa9\xbf\x4b\xc0\x60\x5d\xd7\x99\
\x9c\x71\xc0\x40\x22\xa3\x03\xaf\x0e\x58\x32\xc0\x63\x69\xe2\x6d\
\x59\xd1\xf7\x40\x50\x78\x06\xe6\x80\xb1\x85\xc0\x5b\x79\x5f\x99\
\x46\x45\x3d\x40\x3c\x78\x20\xd3\x2a\x6b\xb5\xc0\x62\xee\x0b\xdf\
\x51\x0d\x21\xc0\x46\xd1\x54\x32\xe8\x5b\x66\x40\x50\x2c\x8b\x42\
\x01\xb4\x84\x40\x36\xbd\xcc\x0a\x35\xc8\xee\x40\x53\xeb\xb9\x70\
\x63\x9a\x1b\xc0\x64\x7c\xf0\xa9\xa5\x0c\xbe\x40\x5a\x98\x3c\x0b\
\x3e\x51\xa0\xc0\x67\x9f\x25\xc9\x99\x51\x18\x40\x69\x6a\x0f\xf4\
\x74\xa1\xea\x40\x61\x88\x20\xf6\x10\xd1\x9d\x40\x5e\xdc\xfb\x57\
\x3e\x65\x10\x40\x60\xa1\xe2\xe3\xce\xf1\x84\x40\x6b\xea\x34\x78\
\xce\x91\x2c\x00\x00\x00\x10\x40\x38\x4c\xc8\xc1\x32\xe4\xf9\xc0\
\x62\x97\x20\x9d\x2e\xf8\xde\x40\x3a\x05\x5c\x66\xc6\xea\x0b\xc0\
\x58\xb0\x26\xa9\xd7\x32\x7d\xc0\x40\xc3\x2a\xb3\xc9\xdf\x14\xc0\
\x4e\x4e\xf7\x36\x93\x96\x7f\x40\x50\x0a\x02\x24\xee\x51\x65\xc0\
\x43\x9a\x74\x7e\x75\x91\x91\xc0\x3e\x83\xc3\x8e\xbe\x00\x43\x40\
\x45\x35\x58\x20\x2f\xea\x4a\x40\x32\x53\x6b\x37\xf6\x02\x6f\x40\
\x4a\x8f\x99\xfe\x00\x4b\x9f\xc0\x44\x7e\x8b\x14\xa9\xbf\x4b\xc0\
\x60\x5d\xd7\x99\x9c\x71\xc0\x40\x22\xa3\x03\xaf\x0e\x58\x32\xc0\
\x63\x69\xe2\x6d\x59\xd1\xf7\x40\x50\x78\x06\xe6\x80\xb1\x85\xc0\
\x5b\x79\x5f\x99\x46\x45\x3d\x40\x3c\x78\x20\xd3\x2a\x6b\xb5\xc0\
\x62\xee\x0b\xdf\x51\x0d\x21\xc0\x46\xd1\x54\x32\xe8\x5b\x66\x40\
\x50\x2c\x8b\x42\x01\xb4\x84\x40\x36\xbd\xcc\x0a\x35\xc8\xee\x40\
\x53\xeb\xb9\x70\x63\x9a\x1b\xc0\x64\x7c\xf0\xa9\xa5\x0c\xbe\x40\
\x5a\x98\x3c\x0b\x3e\x51\xa0\xc0\x67\x9f\x25\xc9\x99\x51\x18\x40\
\x69\x6a\x0f\xf4\x74\xa1\xea\x40\x61\x88\x20\xf6\x10\xd1\x9d\x40\
\x5e\xdc\xfb\x57\x3e\x65\x10\x40\x60\xa1\xe2\xe3\xce\xf1\x84\x40\
\x6b\xea\x34\x78\xce\x91\x2c\x00\x00\x00\x10\x40\x38\x4c\xc8\xbf\
\xcf\xfc\x3c\xc0\x62\x97\x20\xa2\x37\x18\xbc\x40\x3a\x05\x5c\x9b\
\x39\x94\x7a\xc0\x58\xb0\x26\xaf\x4c\xea\x97\xc0\x40\xc3\x2a\xac\
\xf4\x31\x63\xc0\x4e\x4e\xf7\x64\x8f\x9a\x25\x40\x50\x0a\x02\x23\
\x7f\xd0\x06\xc0\x43\x9a\x74\x65\xcd\xff\xd0\xc0\x3e\x83\xc3\xe2\
\xbe\x25\x12\x40\x45\x35\x58\x46\xb5\xb3\x85\x40\x32\x53\x6b\x0b\
\xcb\x0d\x92\x40\x4a\x8f\x99\xd2\xe0\xed\x42\xc0\x44\x7e\x8b\x21\
\x3b\xf0\xf6\xc0\x60\x5d\xd7\xa4\xd4\x07\xaf\x40\x22\xa3\x03\xcc\
\xc2\xf6\xcf\xc0\x63\x69\xe2\x6e\x40\x1f\x33\x40\x50\x78\x06\xe1\
\x65\x9e\x9c\xc0\x5b\x79\x5f\x8b\x57\xc0\xf2\x40\x3c\x78\x20\xe5\
\xbb\x6d\x77\xc0\x62\xee\x0b\xdc\xbd\x3a\x54\xc0\x46\xd1\x54\x35\
\xf1\x1f\xf9\x40\x50\x2c\x8b\x71\x81\x27\x09\x40\x36\xbd\xcc\x75\
\x61\x96\xaf\x40\x53\xeb\xb9\x55\x81\xb9\x30\xc0\x64\x7c\xf0\xb3\
\x23\x22\x31\x40\x5a\x98\x3c\x0c\x99\x17\x47\xc0\x67\x9f\x25\xca\
\x5b\xaf\x44\x40\x69\x6a\x0f\xf7\x64\x34\xea\x40\x61\x88\x20\xfd\
\xed\x47\xed\x40\x5e\xdc\xfb\x5a\xb6\x86\x41\x40\x60\xa1\xe2\xe3\
\xb9\x81\xe1\x40\x6b\xea\x34\x79\xf8\x18\x55\x00\x00\x00\x10\x40\
\x38\x4c\xc8\xc0\x4e\x4f\x3d\xc0\x62\x97\x20\xa0\x6c\x9c\x10\x40\
\x3a\x05\x5c\x88\x8e\x66\x1c\xc0\x58\xb0\x26\xad\x5b\x68\x24\xc0\
\x40\xc3\x2a\xaf\x63\x02\x46\xc0\x4e\x4e\xf7\x54\x31\x68\xc3\x40\
\x50\x0a\x02\x24\x02\x43\xaf\xc0\x43\x9a\x74\x6e\x94\x89\x25\xc0\
\x3e\x83\xc3\xc4\xd7\xce\x09\x40\x45\x35\x58\x38\xff\x69\xa6\x40\
\x32\x53\x6b\x1b\x83\xc1\xd0\x40\x4a\x8f\x99\xe2\x3a\x5a\x44\xc0\
\x44\x7e\x8b\x1c\xc2\x70\xf4\xc0\x60\x5d\xd7\xa0\xd5\xe7\x48\x40\
\x22\xa3\x03\xc2\x30\x23\x5c\xc0\x63\x69\xe2\x6d\xee\x25\xab\x40\
\x50\x78\x06\xe3\x36\xdb\x68\xc0\x5b\x79\x5f\x90\x4d\x36\x90\x40\
\x3c\x78\x20\xdf\x1f\xa6\xa9\xc0\x62\xee\x0b\xdd\xa8\x15\x8e\xc0\
\x46\xd1\x54\x34\xdc\x9c\x16\x40\x50\x2c\x8b\x60\x99\x08\x6c\x40\
\x36\xbd\xcc\x4f\x3b\xe3\xde\x40\x53\xeb\xb9\x5f\x13\x4e\xf5\xc0\
\x64\x7c\xf0\xaf\xc2\x25\x0e\x40\x5a\x98\x3c\x0c\x1d\xa8\x48\xc0\
\x67\x9f\x25\xca\x16\x7f\xd6\x40\x69\x6a\x0f\xf6\x58\xaf\x08\x40\
\x61\x88\x20\xfb\x20\xf3\x53\x40\x5e\xdc\xfb\x59\x7a\x65\xa7\x40\
\x60\xa1\xe2\xe3\xc1\x23\x31\x40\x6b\xea\x34\x79\x8e\x30\xd7\x00\
\x00\x00\x10\x40\x38\x4c\xc8\xc0\x24\xc9\x42\xc0\x62\x97\x20\xa1\
\x03\x51\x04\x40\x3a\x05\x5c\x8e\xb1\x4a\x18\xc0\x58\xb0\x26\xad\
\xfe\xf0\x2a\xc0\x40\xc3\x2a\xae\x96\x4c\x84\xc0\x4e\x4e\xf7\x59\
\x92\xb9\xe9\x40\x50\x0a\x02\x23\xd7\x61\x93\xc0\x43\x9a\x74\x6b\
\xb2\x17\xa4\xc0\x3e\x83\xc3\xce\xab\xca\x54\x40\x45\x35\x58\x3d\
\x81\x3f\x26\x40\x32\x53\x6b\x16\x58\xd6\x2c\x40\x4a\x8f\x99\xdd\
\x2e\xc0\x07\xc0\x44\x7e\x8b\x1e\x3a\xf7\xb4\xc0\x60\x5d\xd7\xa2\
\x25\xe0\x7c\x40\x22\xa3\x03\xc5\xa9\xdf\x80\xc0\x63\x69\xe2\x6e\
\x09\x17\x9f\x40\x50\x78\x06\xe2\x9d\xee\x8d\xc0\x5b\x79\x5f\x8e\
\xab\xef\xf2\x40\x3c\x78\x20\xe1\x4b\xbe\x79\xc0\x62\xee\x0b\xdd\
\x5a\xe2\xd6\xc0\x46\xd1\x54\x35\x37\x7f\x10\x40\x50\x2c\x8b\x66\
\x27\xb0\x54\x40\x36\xbd\xcc\x5b\xc5\xe0\xc4\x40\x53\xeb\xb9\x5b\
\xee\x21\x36\xc0\x64\x7c\xf0\xb0\xde\x77\x49\x40\x5a\x98\x3c\x0c\
\x46\x3a\xd9\xc0\x67\x9f\x25\xca\x2d\x3d\x92\x40\x69\x6a\x0f\xf6\
\xb0\x9e\x4b\x40\x61\x88\x20\xfc\x0c\x68\x9b\x40\x5e\xdc\xfb\x59\
\xe2\x4e\xeb\x40\x60\xa1\xe2\xe3\xbe\xa1\x23\x40\x6b\xea\x34\x79\
\xb1\x00\x66\x00\x00\x00\x10\x40\x38\x4c\xc8\xbf\xee\x70\x14\xc0\
\x62\x97\x20\xa1\xc8\x92\x26\x40\x3a\x05\x5c\x96\xb9\x6c\xbd\xc0\
\x58\xb0\x26\xae\xd4\xfb\x26\xc0\x40\xc3\x2a\xad\x8a\x57\xfb\xc0\
\x4e\x4e\xf7\x60\x9d\x78\x70\x40\x50\x0a\x02\x23\x9f\x41\xd1\xc0\
\x43\x9a\x74\x67\xeb\x91\x0b\xc0\x3e\x83\xc3\xdb\x88\xea\x29\x40\
\x45\x35\x58\x43\x67\x7a\x47\x40\x32\x53\x6b\x0f\x95\x49\xb5\x40\
\x4a\x8f\x99\xd6\x94\x32\x19\xc0\x44\x7e\x8b\x20\x27\xcb\x9d\xc0\
\x60\x5d\xd7\xa3\xdd\xa0\x33\x40\x22\xa3\x03\xca\x36\x6d\xdb\xc0\
\x63\x69\xe2\x6e\x2c\x5c\x37\x40\x50\x78\x06\xe1\xd5\xc5\xef\xc0\
\x5b\x79\x5f\x8c\x89\xc7\x6e\x40\x3c\x78\x20\xe4\x23\x98\x6b\xc0\
\x62\xee\x0b\xdc\xf5\xd8\x14\xc0\x46\xd1\x54\x35\xae\x76\x39\x40\
\x50\x2c\x8b\x6d\x6d\xc6\x31\x40\x36\xbd\xcc\x6c\x2f\x5e\xd3\x40\
\x53\xeb\xb9\x57\xd0\x3e\xef\xc0\x64\x7c\xf0\xb2\x52\x9c\x24\x40\
\x5a\x98\x3c\x0c\x7b\x55\xad\xc0\x67\x9f\x25\xca\x4b\x01\x8e\x40\
\x69\x6a\x0f\xf7\x23\xb6\xf6\x40\x61\x88\x20\xfd\x40\x98\x5e\x40\
\x5e\xdc\xfb\x5a\x6a\x50\xcc\x40\x60\xa1\xe2\xe3\xbb\x58\xc4\x40\
\x6b\xea\x34\x79\xde\x90\x91\x00\x00\x00\x10\x40\x38\x4c\xc8\xbf\
\xdd\x05\x9e\xc0\x62\x97\x20\xa2\x07\xc7\xe2\x40\x3a\x05\x5c\x99\
\x4c\x59\xcf\xc0\x58\xb0\x26\xaf\x19\x92\x90\xc0\x40\xc3\x2a\xad\
\x34\x79\x79\xc0\x4e\x4e\xf7\x62\xdf\x2a\xa1\x40\x50\x0a\x02\x23\
\x8d\x45\xef\xc0\x43\x9a\x74\x66\xb5\xd7\xe9\xc0\x3e\x83\xc3\xdf\
\xa8\x34\xb2\x40\x45\x35\x58\x45\x4b\x6f\x42\x40\x32\x53\x6b\x0d\
\x6a\x68\xe4\x40\x4a\x8f\x99\xd4\x76\x73\x36\xc0\x44\x7e\x8b\x20\
\xc5\xb9\x45\xc0\x60\x5d\xd7\xa4\x6a\x8b\x7a\x40\x22\xa3\x03\xcb\
\xab\x9d\x74\xc0\x63\x69\xe2\x6e\x37\xa9\x75\x40\x50\x78\x06\xe1\
\x95\xa1\xdb\xc0\x5b\x79\x5f\x8b\xda\xc3\x2a\x40\x3c\x78\x20\xe5\
\x0c\xd5\xed\xc0\x62\xee\x0b\xdc\xd5\x77\x11\xc0\x46\xd1\x54\x35\
\xd4\x96\x1e\x40\x50\x2c\x8b\x6f\xc2\x7c\x13\x40\x36\xbd\xcc\x71\
\x71\xc0\xb2\x40\x53\xeb\xb9\x56\x7e\x86\x5a\xc0\x64\x7c\xf0\xb2\
\xc9\xdd\x72\x40\x5a\x98\x3c\x0c\x8c\x5a\x33\xc0\x67\x9f\x25\xca\
\x54\x8b\x69\x40\x69\x6a\x0f\xf7\x48\x99\x09\x40\x61\x88\x20\xfd\
\xa3\x5a\xc4\x40\x5e\xdc\xfb\x5a\x95\xe6\x4c\x40\x60\xa1\xe2\xe3\
\xba\x4b\x77\x40\x6b\xea\x34\x79\xed\x2a\x63\x00\x00\x00\x10\x40\
\x38\x4c\xc8\xbf\xa9\x3c\xf7\xc0\x62\x97\x20\xa2\xc3\xba\x2f\x40\
\x3a\x05\x5c\xa0\xf4\xb7\xf1\xc0\x58\xb0\x26\xaf\xe5\x96\xe4\xc0\
\x40\xc3\x2a\xac\x34\xe6\xf4\xc0\x4e\x4e\xf7\x69\x95\xca\xcd\x40\
\x50\x0a\x02\x23\x57\xdb\x46\xc0\x43\x9a\x74\x63\x1c\x4c\x49\xc0\
\x3e\x83\xc3\xeb\xeb\x58\x7a\x40\x45\x35\x58\x4a\xeb\x0b\xde\x40\
\x32\x53\x6b\x06\xf7\xe1\xda\x40\x4a\x8f\x99\xce\x2a\xed\x2d\xc0\
\x44\x7e\x8b\x22\x9b\x7b\xcb\xc0\x60\x5d\xd7\xa6\x0d\xc7\x6c\x40\
\x22\xa3\x03\xd0\x01\xd2\x5f\xc0\x63\x69\xe2\x6e\x59\x48\xae\x40\
\x50\x78\x06\xe0\xd6\xe9\xc6\xc0\x5b\x79\x5f\x89\xd1\xfd\x18\x40\
\x3c\x78\x20\xe7\xc2\xd5\xed\xc0\x62\xee\x0b\xdc\x75\x1f\x54\xc0\
\x46\xd1\x54\x36\x45\xdf\xf2\x40\x50\x2c\x8b\x76\xb1\x88\xd7\x40\
\x36\xbd\xcc\x81\x16\xe5\x42\x40\x53\xeb\xb9\x52\x91\xe6\x77\xc0\
\x64\x7c\xf0\xb4\x2c\x9e\xd1\x40\x5a\x98\x3c\x0c\xbe\x99\x62\xc0\
\x67\x9f\x25\xca\x70\xda\x68\x40\x69\x6a\x0f\xf7\xb6\x0f\x69\x40\
\x61\x88\x20\xfe\xc9\x33\xea\x40\x5e\xdc\xfb\x5b\x17\x3b\x26\x40\
\x60\xa1\xe2\xe3\xb7\x2f\x1f\x40\x6b\xea\x34\x7a\x18\x57\x7f\x00\
\x00\x00\x10\x40\x39\x3a\xfb\x80\x37\xf3\x20\xc0\x61\xfc\xcb\xa3\
\xcd\x26\xb3\x40\x36\xe1\xc0\x3b\x54\x6a\xc7\xc0\x57\x7d\x1f\x83\
\x86\xf3\x1c\xc0\x42\x8a\xc6\x53\xae\xe3\x90\xc0\x4c\x3d\xf4\x90\
\xee\x6c\x3f\x40\x4e\x2d\x2a\x66\xb7\xf4\x4a\xc0\x40\xfe\x98\xb5\
\x59\x0c\x29\xc0\x41\x9c\x26\x3c\x27\x7a\x60\x40\x47\x49\xd1\x47\
\xd2\xac\x49\x40\x2a\xff\x46\x2d\x2a\xf5\xdd\x40\x4c\xe9\x81\x90\
\xd0\x9c\x7c\xc0\x43\xec\x12\xf4\xe3\xc3\x79\xc0\x5f\xc9\x03\x0f\
\xff\x7b\x76\x40\x21\x4c\x2e\x53\x8d\xf5\xb6\xc0\x63\x5e\x70\x2e\
\xf1\x12\x4a\x40\x60\x35\xaf\x9d\x23\xb9\xb3\xc0\x47\xf5\xbd\xee\
\x88\x2b\x80\x40\x5f\x27\xde\x05\x0a\xe0\xcd\xc0\x59\xe5\xec\x76\
\x6e\x20\x9e\xc0\x49\x4b\x2f\xeb\x9c\x1a\x49\x40\x51\x2b\xee\x40\
\x71\xae\x98\x40\x31\x9e\x54\x8b\xa5\x0f\x9e\x40\x55\x1b\xb6\xcb\
\xa1\xcb\x36\xc0\x65\x42\x19\x74\x05\x9a\xf9\x40\x5a\xb0\xf1\x49\
\x6b\xdf\x29\xc0\x67\xab\x60\x4e\xa0\x19\x85\x40\x69\xa0\x71\xdc\
\x2e\x28\xe1\x40\x61\x1f\xc5\xf2\x50\xd7\x8a\x40\x5e\xb5\x77\xa5\
\x7e\x2d\xd8\x40\x60\xa2\xa4\x00\xc9\xca\x1b\x40\x6b\xdc\x49\x66\
\x6f\x08\x2d\x00\x00\x00\x10\x40\x39\x6d\xdd\x13\x57\xa1\xd5\xc0\
\x61\x88\xae\xf7\xb1\x07\xb9\x40\x3d\xcf\xf4\x8d\x6a\x17\x82\xc0\
\x56\x9d\xa9\x7a\xdd\x4a\xee\xc0\x3f\xd1\x0f\xe0\xc8\xca\x1b\xc0\
\x4b\xe5\xb8\x99\x8a\x88\x81\x40\x50\x1e\xc0\x45\x2f\x34\x10\xc0\
\x3c\xd5\xaf\x28\x25\x54\xfa\xc0\x41\x59\xfd\x10\xd6\x49\xbe\x40\
\x47\x9e\x2c\x2b\x96\x3e\x8e\x40\x2a\xe2\xd5\x8c\x54\xd6\x15\x40\
\x4e\x5c\xbb\xef\xb1\x92\x66\xc0\x43\xbf\xfb\xa6\x99\x4b\x21\xc0\
\x5f\x85\x8a\xa8\x9e\x7f\x25\x40\x20\xe3\xcd\x5f\xf2\x68\xe2\xc0\
\x63\x5a\x9f\x77\xe2\x62\x79\x40\x60\xd6\xe3\xac\xcc\x5f\x90\xc0\
\x34\xa5\x13\xa5\x48\xae\xc2\x40\x64\xe3\x80\x6b\x21\x27\x96\xc0\
\x50\x8d\x0b\x23\xa9\xc3\xe4\xc0\x49\x89\xcb\x5b\xbe\x67\xd3\x40\
\x51\x26\xf3\x40\xd2\x0d\xc1\x40\x30\x53\x3d\xec\x2b\xcb\xe3\x40\
\x55\xdf\xb6\x87\x83\x72\xc9\xc0\x65\x50\x90\x7a\x79\xec\x90\x40\
\x5a\xb3\x5d\xfd\x75\x1b\xbb\xc0\x67\xac\x23\x85\x4c\x3e\xc1\x40\
\x69\xa4\x50\xfd\xc1\x1a\xbe\x40\x61\x15\x08\x2c\x04\x80\x35\x40\
\x5e\xb2\x16\xd8\x69\xeb\x99\x40\x60\xa2\xb0\x72\x32\x30\x48\x40\
\x6b\xdb\x00\x4c\xb4\x72\x34\x00\x00\x00\x10\x40\x39\x79\xe6\xd8\
\x96\x75\x3a\xc0\x61\xc0\x16\x90\x87\x0e\xed\x40\x38\x8f\xec\xd9\
\xc7\x51\xb7\xc0\x57\x00\xb8\x99\x9b\x0c\x77\xc0\x42\x03\xf3\x87\
\x9c\x0b\x95\xc0\x4b\xc8\x06\x2f\x17\xcb\x6f\x40\x4e\x81\x5b\x7b\
\x07\x5f\xac\xc0\x3f\x6b\x1b\x49\x9f\xef\xa8\xc0\x41\xf5\x1a\xbf\
\x81\x76\xdc\x40\x47\xc1\xb0\x33\x3e\x15\x87\x40\x29\x35\xe9\x42\
\xe0\x6f\x26\x40\x4d\xcb\x37\x6d\xa6\xc4\xbd\xc0\x43\xc2\xd2\x31\
\x5b\xf1\xae\xc0\x5f\x8b\xf9\x65\x4b\xb3\xb9\x40\x20\xf2\x38\xea\
\x61\xb5\x68\xc0\x63\x5b\x25\xb1\x29\x73\x44\x40\x60\x74\xb0\xf1\
\xaf\x14\x87\xc0\x3c\x2b\x5b\x9f\xd5\xcc\x3a\x40\x5f\xd1\xff\x1a\
\xb3\xb4\x69\xc0\x54\xf9\x9f\x72\x33\xf9\x81\xc0\x49\xd3\xfa\x57\
\xad\xf2\x84\x40\x51\x56\xe6\x30\xad\x36\xf7\x40\x30\x45\x7a\x8e\
\xd7\x55\xf9\x40\x55\x90\xcf\xcc\x04\x2a\x2d\xc0\x65\x6a\x17\x30\
\x58\x3c\xd3\x40\x5a\xb7\x3e\x31\xfb\xc0\xbc\xc0\x67\xad\x5a\x2e\
\xcc\x97\xbb\x40\x69\xaa\xd2\x12\x76\xcf\xc6\x40\x61\x08\x27\xfd\
\xad\x8d\xa9\x40\x5e\xad\xed\x6b\x24\xb9\x5c\x40\x60\xa2\xbe\x9a\
\x9d\xff\x01\x40\x6b\xd9\x5b\x1b\x78\x99\x99\x00\x00\x00\x10\x40\
\x3a\x1c\xba\xdc\xfa\x1a\x5d\xc0\x60\xf5\xc8\x41\xf4\x13\xe6\x40\
\x41\xfa\x03\xbb\xf4\x1d\x4a\xc0\x55\xaa\x26\xe9\xec\x4c\xf0\xc0\
\x3b\x21\x4c\xc6\x54\x3f\x5c\xc0\x4b\x5b\x50\x0c\x7c\x8f\x98\x40\
\x50\xf9\x7f\xcb\x19\x0b\xae\xc0\x37\x93\x4b\x4f\x3b\xf3\x58\xc0\
\x41\x41\x68\xf8\x0c\x13\x54\x40\x48\x0c\x99\x85\x2c\x7f\x15\x40\
\x2a\x01\x64\x6f\x7e\xaa\x70\x40\x4f\xd5\xa4\x26\x6b\xe0\x61\xc0\
\x43\x74\x8b\x1b\xb1\x98\xf2\xc0\x5f\x1b\xe4\x8e\x02\x7c\x6d\x40\
\x20\x42\xe5\x35\xba\xbe\x4c\xc0\x63\x54\x64\xe8\xa8\x03\x5e\x40\
\x61\x53\x48\x12\x1c\xc4\xf0\xc0\x38\x89\xd7\xe7\xcb\xc4\x43\x40\
\x65\x76\x6e\x3f\xaf\x20\x22\xc0\x51\x65\xa7\xda\xaa\x26\x75\xc0\
\x49\xe6\x46\x82\xd0\x24\xb9\x40\x51\x2f\x67\xdc\xf2\x10\xf5\x40\
\x2d\x70\x6e\xe1\xf3\xcb\xfa\x40\x56\xa2\x91\x0e\xe0\x3f\x9d\xc0\
\x65\x68\x72\x72\x08\xfb\x25\x40\x5a\xb7\x12\xd7\xa9\xc0\x26\xc0\
\x67\xad\x49\xf8\x95\x22\x31\x40\x69\xaa\x71\xed\xc5\x2f\x89\x40\
\x60\xfd\xd6\x05\x90\x17\x3d\x40\x5e\xab\x02\x86\x36\xeb\xc5\x40\
\x60\xa2\xc7\x56\x35\x46\xe9\x40\x6b\xd8\x35\x4f\xe4\xc4\x52\x00\
\x00\x00\x10\x40\x3a\x22\xa0\xc0\xc8\x8c\x68\xc0\x61\x7a\xe1\x71\
\x57\xe2\xd0\x40\x37\x88\x57\x2b\xb4\x27\x65\xc0\x56\x7a\x19\x92\
\x2d\x24\x6c\xc0\x42\xaa\x2f\xed\x9b\x21\x34\xc0\x4a\xf4\xa5\x85\
\xef\xfb\x8d\x40\x4d\xc3\xc6\xac\x45\x1a\x79\xc0\x3d\x0b\xa3\xc1\
\xb3\x70\xbf\xc0\x42\xfd\x76\x03\x11\x38\x1a\x40\x48\x95\x81\x53\
\xec\x54\xe6\x40\x24\xe6\xbc\x4c\xf9\xb4\xf7\x40\x4e\xcc\x4f\xe2\
\x80\x6a\x4e\xc0\x43\x7b\x0d\x6e\x7d\x0a\xc0\xc0\x5f\x26\x9b\xe0\
\x90\x2d\x70\x40\x20\x57\xdc\x86\xe0\xd3\xec\xc0\x63\x55\x29\x47\
\x39\xb3\x08\x40\x60\x47\xa0\x6e\xe2\x7b\xe5\xc0\x3d\x8e\x03\x79\
\xb3\xc5\xd8\x40\x59\xf4\xd9\x6e\xa3\x7d\xca\xc0\x53\xb4\xae\x8b\
\xb8\xe6\x2d\xc0\x4a\xf0\xb2\x63\x73\x25\x8f\x40\x51\xb9\x89\xae\
\x4e\xeb\xcd\x40\x2b\xd9\x2c\xa9\xd0\xb1\x70\x40\x56\x13\x03\x84\
\xe7\x17\x50\xc0\x65\xbe\x60\xdc\xb9\x87\x4b\x40\x5a\xc4\x9a\x9e\
\x26\x0d\xb2\xc0\x67\xb0\xd3\x88\x9b\xee\x09\x40\x69\xbf\x4a\x43\
\x04\x51\xc2\x40\x60\xd2\x70\x29\x96\x40\xc4\x40\x5e\x9e\x5d\xed\
\x9c\x03\x41\x40\x60\xa2\xe4\x5f\xbf\xef\x51\x40\x6b\xd2\xd4\x01\
\xf5\xda\x8f\x00\x00\x00\x10\x40\x39\x93\x07\x18\x7c\x00\x57\xc0\
\x61\xed\x38\x48\x04\xf6\x05\x40\x39\xce\xd4\x3d\x16\x4f\xe1\xc0\
\x57\x5a\x79\x80\xd1\xee\x90\xc0\x41\x5a\xb0\x5b\xd9\x84\xa7\xc0\
\x4c\x6b\x4b\xa6\xc5\x05\x67\x40\x4f\x31\x11\x79\x69\xca\xc4\xc0\
\x40\x72\xd8\x87\x3f\x4a\xdb\xc0\x41\x30\x33\x06\x0e\xbd\x6d\x40\
\x47\x1e\x92\xf6\x5f\x87\x9d\x40\x2c\x56\xb7\x92\x4c\x0f\x54\x40\
\x4d\x1a\xcc\x86\x22\xf3\x62\xc0\x43\xed\x68\x86\x6d\xf1\xf7\xc0\
\x5f\xd7\x21\x49\x55\x4f\x8d\x40\x21\x78\xc7\xca\xdd\xae\x33\xc0\
\x63\x60\x08\x68\x37\x94\x7d\x40\x55\x61\x16\x46\xec\xb7\xbc\xc0\
\x58\xed\xdd\x9f\xdd\x31\xf6\x40\x4d\x34\xd5\x5d\xce\x73\x18\xc0\
\x62\x93\x74\x7f\x87\x8c\xe0\xc0\x49\x09\x16\x63\x43\xbe\xc4\x40\
\x51\x07\x88\xc1\x11\x6b\x6e\x40\x31\xe4\xaf\x64\x08\xd2\xab\x40\
\x55\x38\x17\x72\x80\xb5\xac\xc0\x65\x2c\x52\x57\x7f\x38\xd6\x40\
\x5a\xad\x44\x85\x31\xfe\xd5\xc0\x67\xaa\x28\xdf\x87\x91\xd0\x40\
\x69\x9a\x80\x20\x4d\x49\x84\x40\x61\x2c\x4a\x92\x0c\x64\x0a\x40\
\x5e\xba\x92\xb8\xd5\x32\x10\x40\x60\xa2\x8e\x79\x47\xf5\xf0\x40\
\x6b\xde\x52\x7b\xcd\xff\x76\x00\x00\x00\x10\x40\x39\xa0\x87\x98\
\xb8\xe0\xec\xc0\x62\x2e\x9b\xf4\x0c\x9a\x1f\x40\x38\x3b\x99\x1e\
\x7f\x21\x57\xc0\x57\xde\x76\x78\x0a\x75\x02\xc0\x41\xdb\xc9\xce\
\x7f\xdb\x4c\xc0\x4c\xfd\x56\x17\x6b\xc4\x02\x40\x4e\xdd\x63\x67\
\xe8\x83\x36\xc0\x41\xc3\x56\x92\x55\x6f\xcc\xc0\x40\xe7\x7e\x05\
\xfc\x43\x82\x40\x46\x8a\x57\x00\x41\xd6\xd1\x40\x2d\xd4\x62\x54\
\xe0\x04\x59\x40\x4c\x27\x56\xc2\xcd\x8b\x08\xc0\x42\x5a\xf0\x4d\
\x5a\xcb\xe6\xc0\x60\x15\xda\x3c\x46\xaf\x80\x40\x2e\xe6\x21\xc4\
\x8c\x1a\x04\xc0\x62\x99\x50\xe9\xf4\x73\xa0\x40\x50\x8a\xee\x4b\
\xfe\x05\x2c\xc0\x5a\x86\x37\xad\x7c\x15\x19\x40\x3e\x0c\x8f\x47\
\x89\x42\x31\xc0\x62\x98\x5b\xa9\x88\xc3\x3a\xc0\x48\x95\x4f\x39\
\x7f\x7d\xf0\x40\x50\xcc\x9d\xf9\xdf\xe9\xd6\x40\x33\x0b\xd4\x0c\
\x5e\xaf\x8e\x40\x54\xba\x84\x35\x41\xd8\x1e\xc0\x65\x06\xae\x6d\
\x56\x5a\x3b\x40\x5a\xa7\x7d\x7e\x97\xc5\xb4\xc0\x67\xa7\xfe\xa3\
\xda\x60\x69\x40\x69\x90\x32\x32\xe0\xf7\x43\x40\x61\x3b\xaa\x06\
\x78\xb7\xc1\x40\x5e\xc0\x47\x72\x5e\x23\xb6\x40\x60\xa2\x75\x46\
\x84\x56\xc3\x40\x6b\xe0\x78\xd5\xcc\x8c\x07\
\x00\x00\x09\x2c\
\xff\
\xff\xff\xff\x00\x00\x00\x09\x00\x00\x00\x10\xc0\x36\xc1\x31\xf7\
\x71\x07\xcd\xc0\x62\xc4\x31\x65\x15\x96\x34\xc0\x22\x59\x75\x1b\
\x5e\x3f\xf0\xc0\x59\x80\x9a\x15\x13\xe9\x09\xc0\x47\x52\xc9\x1c\
\x2f\x1a\x7a\xc0\x45\x06\x29\x2a\x67\x31\x79\x40\x49\x64\xe4\x36\
\x8c\xfe\x06\xc0\x50\x22\x71\x64\xe5\x3f\xcb\x3f\xc9\x7e\x46\xc2\
\x82\x74\x41\x40\x48\xe2\x7c\x67\xa7\x02\xf2\x40\x48\x75\xb8\xda\
\x6c\xe4\xac\x40\x43\x43\x8c\xb1\x63\x97\xf6\xc0\x59\x94\x32\xd7\
\x73\x76\xf3\x3f\xf8\xd1\x11\x6b\x2c\x82\x25\xc0\x5f\x82\xbf\x09\
\xb5\x16\xad\x40\x4a\x15\x6d\x1b\x87\x1c\x19\x40\x55\x32\x73\xaa\
\x94\xec\x77\xc0\x04\x4e\x49\x68\xe6\x95\xd3\x40\x60\xe5\xde\x62\
\x15\x6c\x95\xc0\x3a\xbb\x5f\xa9\x86\x06\x7e\xc0\x0f\x68\xca\xdb\
\xdc\xdd\x36\x40\x53\x18\x3a\x74\x13\x56\x51\x40\x50\x12\x0e\xb5\
\x6f\x61\x5c\x40\x4e\x52\x58\x0d\xd3\x0c\x6d\xc0\x38\xbe\x69\x34\
\x34\x0d\xd7\x40\x69\x01\x3e\xaa\x3a\x4e\xf3\xc0\x3d\xff\xa9\xa5\
\x4e\x81\xa7\x40\x72\xc0\x69\xcc\x0a\xb2\x0e\x40\x3f\xa2\xaf\xee\
\xb2\x7a\xa3\x40\x66\xb7\x06\x0c\x87\xf6\x2d\x40\x3d\xae\x9a\x40\
\x3c\x67\x56\x40\x71\x9d\x34\xa4\xbb\x47\x7d\x00\x00\x00\x10\xc0\
\x30\xaf\xc1\x69\x97\x5a\x95\xc0\x63\x49\x63\x4e\x8e\x9f\xdc\xc0\
\x1e\xaf\xb7\x04\x39\xf0\x0c\xc0\x5a\x46\x95\xdc\xfb\x24\x8e\xc0\
\x49\x9c\xd2\x5c\x93\x47\x4c\xc0\x48\xb5\xcf\xe8\x99\x9c\xdd\x40\
\x48\x02\x8f\xa5\xd6\xdc\x31\xc0\x4e\xc7\x1a\x4e\x1b\x4a\x1c\xc0\
\x2c\x94\xbf\x13\x77\x29\x41\x40\x47\x68\xf0\xdd\xb4\xba\x6a\x40\
\x41\xac\x5d\x06\x4f\x13\x18\x40\x44\x62\x29\x5f\x79\x2c\x69\xc0\
\x58\x78\xf9\x10\x42\x39\x8c\x40\x0d\x9e\x34\x27\x91\xbd\x70\xc0\
\x63\x03\xb5\x2e\x0b\xf6\xb5\x40\x31\x49\xc5\x70\x34\x57\xdc\x40\
\x55\x55\x0b\xb2\x96\x65\xb2\xbf\xf7\xd0\xc3\xa3\x37\x60\x82\x40\
\x60\xe6\x74\x90\x7c\xd3\xd5\xc0\x3a\xbd\xb7\xf4\x49\xd6\x6d\xc0\
\x35\x33\x14\x59\x01\x4b\xa9\x40\x52\x36\xaa\xf8\xcd\xd9\x51\x40\
\x48\x24\x95\xbf\x77\x92\xaf\x40\x50\x19\x5d\x72\x39\xa4\xee\xc0\
\x38\x3c\x43\x48\x09\x4c\x70\x40\x68\xc7\x21\x6e\x01\xfb\xd3\xc0\
\x3d\xe6\x09\x44\x84\xd2\x4f\x40\x72\xa2\xff\xdd\x04\x92\x32\x40\
\x3d\x30\xda\x3a\xb4\x2e\xfe\x40\x67\x8a\xdf\x98\x83\xb8\xf6\x40\
\x3d\xa4\x89\x7f\x23\xcd\xd5\x40\x72\x07\x6b\x4d\x22\x90\x7e\x00\
\x00\x00\x10\x3f\xe8\xdc\xd8\x6c\x4e\xe2\x7a\xc0\x63\x6c\xe1\x17\
\xf2\xa0\xbf\xc0\x1c\xa0\x1f\x76\xa3\x67\x0e\xc0\x5a\x82\x4b\xa2\
\xc3\x07\x44\xc0\x4e\x6a\xa4\x92\xae\xdd\x89\xc0\x4e\x00\xb1\x06\
\xb8\xfd\xe4\x40\x43\x6f\xe2\x4f\x4e\xd0\x7e\xc0\x4a\x2d\xf6\xac\
\x0f\xbe\x52\xc0\x45\xc6\xbd\x2e\xcf\xf5\x06\x40\x44\xce\x84\x52\
\x4a\x5a\x37\x40\x19\x34\x31\x96\xfb\x44\x7f\x40\x46\xb7\xe1\xb0\
\xec\xbd\xa9\xc0\x58\xfd\xf7\xfd\x56\xdc\x11\xbf\xf1\xca\x67\x3c\
\x06\x01\x31\xc0\x63\x1e\x02\x33\x3c\x22\x49\xc0\x32\xfa\x9d\x13\
\x66\xaf\x41\x40\x54\x9e\xa9\xbe\x7c\x61\xe3\x40\x0a\x72\xb1\x55\
\x98\x28\x33\x40\x61\x32\xb3\x01\xb0\xbc\x2d\xc0\x18\x63\x1b\xa0\
\x7e\x49\xee\xc0\x4b\xb7\xac\x9e\xf7\xef\xea\x40\x50\x71\xa3\x37\
\xe4\x2e\xa7\x40\x2c\xb8\x60\xb8\xfe\xc6\xc2\x40\x51\xc8\x31\x75\
\xbd\x97\x6c\xc0\x3b\x25\x5d\xe3\x55\xb6\xc3\x40\x67\x7e\x28\x3a\
\x2f\xcf\x04\xc0\x3d\x79\xfb\x74\x2a\xf4\x86\x40\x72\x00\xa4\xae\
\xf2\x81\x95\x40\x3c\xfa\x8b\x66\xc9\x22\x30\x40\x68\x75\x84\x1e\
\x7d\x6d\xd4\x40\x3d\xb5\xf8\x8b\xe7\x75\x6d\x40\x72\x7c\xb6\xc6\
\x9a\xe9\x9a\x00\x00\x00\x10\x3f\xe4\x9d\xa9\x42\x5e\xe7\x43\xc0\
\x63\x56\x3a\x89\xf9\xf5\x03\xc0\x1b\xb9\xb9\x7a\xc0\xda\x37\xc0\
\x5a\x51\x6d\x68\xd6\x38\x91\xc0\x4f\xf0\xb8\xf8\x76\x60\x54\xc0\
\x4f\xad\xc6\x15\x51\x43\xe4\x40\x41\x7d\xdd\x1f\xa6\x7e\x97\xc0\
\x48\x29\x8c\x8c\x88\xc5\xd5\xc0\x4b\x19\x1c\xf8\xef\x04\xe9\x40\
\x43\xa1\x52\x38\x86\xcc\xe1\xc0\x13\x11\x86\xaf\xfa\x18\xd1\x40\
\x47\x64\x45\xcd\x84\x52\xf0\xc0\x5d\x6b\xc7\x4d\xed\xcd\x82\xc0\
\x31\x79\xa5\x95\x16\x3c\x6c\xc0\x64\x78\xba\xa8\x85\x58\x74\xc0\
\x48\x8e\x36\x95\x37\x4b\xa5\x40\x54\xcc\x34\x76\x69\xe8\x4e\x40\
\x0b\x3e\xdf\x5f\x9e\x8d\xa3\x40\x60\x65\x54\x53\xc4\x38\x71\x40\
\x40\x0c\xb3\x84\x9f\xd2\x9c\xc0\x50\xf6\x2c\x1c\x95\xbd\x9f\x40\
\x4f\x3b\xb0\x51\x91\x96\x07\x3f\xf5\xb6\xbb\x4c\x89\xfb\xa7\x40\
\x52\x40\x4d\xcb\x0f\x40\xae\xc0\x3e\x45\x9a\x76\x56\xfb\xac\x40\
\x66\xc3\x4b\x9c\xe2\xcb\x0b\xc0\x3d\x73\xb5\x3b\x21\x32\xb6\x40\
\x71\xa3\x97\xbc\x79\xdf\x84\x40\x3c\xd5\xf8\x27\x62\x64\x9d\x40\
\x68\x6b\x6d\x0c\x69\x1b\x7e\x40\x3d\xb5\x37\x81\x76\xb1\xee\x40\
\x72\x77\xa6\xa6\x21\xce\x5f\x00\x00\x00\x10\xbf\xe3\x54\xf9\xd1\
\xc3\xbb\x96\xc0\x62\x39\x5d\x48\x40\x62\x61\xc0\x13\xa4\x40\xfe\
\x23\x41\x66\xc0\x57\xfe\x9e\x7a\x5a\x8c\x65\xc0\x51\x53\x87\x2f\
\x98\x9f\x2f\xc0\x50\xb1\x13\x00\xa7\xbf\x57\x40\x38\x4d\x1d\xa7\
\x39\x9a\x03\xc0\x3f\x95\x6d\x6a\x72\xf6\x4d\xc0\x54\x45\x8b\x2f\
\x57\x8e\x27\x40\x41\xd1\x53\x04\xab\x6e\xf7\xc0\x41\x24\x47\xc5\
\x1a\x30\x9b\x40\x4a\x9d\x0a\xaa\x84\xfc\x0d\xc0\x61\x26\xda\xae\
\x5a\xbe\x0d\xc0\x47\x87\xad\x31\xe6\x56\x49\xc0\x65\xf1\xab\xa2\
\x9d\x71\x7a\xc0\x55\xef\x9f\x23\x26\xbf\xfd\x40\x44\x02\xa4\x34\
\x49\x99\xb4\x40\x42\xad\xc5\x39\xfc\xc3\xc9\x40\x54\x77\x1a\x63\
\x55\xdd\x55\x3f\xd2\x3b\x7b\x22\x6a\xd6\x1f\xc0\x58\xcf\x8d\xc1\
\xb5\x07\x54\x40\x4b\xc2\x62\x63\xab\x19\x62\xc0\x40\xdb\xfa\x46\
\x22\x87\x89\x40\x54\x09\x64\xf2\x9a\x80\x2a\xc0\x44\xeb\x29\x9a\
\x2f\x94\x49\x40\x64\xe0\x36\x35\x4c\x3c\x43\xc0\x3e\x7e\x4a\x3b\
\x7b\x99\xfd\x40\x70\xa7\xca\x20\xaa\x48\xd9\x40\x39\x90\x46\xa5\
\x32\x46\xfc\x40\x67\xd4\xc0\x6c\x37\x15\xaa\x40\x3d\x93\xe0\x30\
\xd5\xa0\xe6\x40\x72\x2b\x16\x2e\xda\x91\xd2\x00\x00\x00\x10\xbf\
\xd7\x9d\xe7\xa1\x97\x39\xc0\xc0\x62\xbc\xd5\xa4\x38\x2b\x33\xc0\
\x2b\x24\x07\xa2\xf7\xbc\x3a\xc0\x59\x6b\x39\x1d\x48\x8d\x2d\xc0\
\x50\xfc\x22\x5d\x98\xd8\xe0\xc0\x4c\x3b\x80\x46\xaf\xdc\x40\x40\
\x3f\xa3\xe1\x10\x37\xbc\x7c\xc0\x47\xa7\x33\xb0\x1d\xd6\x38\xc0\
\x4a\x1a\x00\x9f\xcb\xe1\x48\x40\x46\xb3\xbc\xc6\xce\x1c\xfe\xc0\
\x03\x4e\xc7\x08\x76\xc7\x1c\x40\x48\xfd\xf5\x33\x7e\x4b\x81\xc0\
\x60\x62\xd0\x37\xd0\x3e\x25\xc0\x38\xa5\xfa\x53\xfc\x7a\x7d\xc0\
\x64\x52\x8c\x8e\xde\xf0\x6d\xc0\x51\xb5\x7b\xf0\xe3\x85\x21\x40\
\x52\x61\x8e\x3b\xeb\xd1\xde\x40\x23\x54\x72\x4d\x7e\x66\xb6\x40\
\x5a\xc6\x30\x18\x78\x65\x5e\xc0\x41\x84\x35\xad\x81\xc8\x84\xc0\
\x50\x1c\xe1\x3c\xef\x41\x96\x40\x51\x58\x7f\x41\x27\xa7\x22\x40\
\x15\x04\xa4\x11\x8b\xa5\x4c\x40\x52\xf2\xe2\x10\x1d\xbe\x94\xc0\
\x3a\xad\xdb\xf6\xe0\xcd\x29\x40\x67\x9e\xa8\x6e\xa4\x7f\x43\xc0\
\x3d\xa4\xca\x4b\x1f\x0f\x46\x40\x72\x10\xa0\x16\xce\x0d\xd1\x40\
\x3d\xed\xb9\x7b\x1d\xf5\x8a\x40\x68\xd7\xc6\xd6\xaf\x31\x50\x40\
\x3d\xb4\xa6\x32\xb3\x72\x09\x40\x72\xad\xe2\x15\x3d\xe8\xd8\x00\
\x00\x00\x10\xc0\x34\xf7\x01\x4a\xe8\x31\x37\xc0\x63\x08\xe7\x3c\
\xd7\x52\x6e\xc0\x32\x87\x01\x16\xce\x22\x54\xc0\x59\x95\x8a\xc3\
\xc9\x1c\xb8\xc0\x51\x35\xc9\xdc\xb0\xdf\xb7\xc0\x4a\x53\xba\xaa\
\x59\xe9\x6d\x40\x3f\x27\xe3\x21\x77\x13\xbc\xc0\x4a\x03\x47\xa6\
\x3c\x27\xd1\xc0\x46\x3c\x28\x27\x7d\x72\x7b\x40\x47\xbf\xcc\xb2\
\x9b\x75\xf7\x40\x16\x1c\xbe\x7d\x45\x21\x89\x40\x47\xe9\x2b\xf5\
\x69\xa9\x7a\xc0\x5e\xfa\xe4\x52\xe2\xee\x3d\xc0\x20\x9f\xc3\x12\
\xc5\x03\x2a\xc0\x63\x2d\x67\xd4\x68\xd9\xd5\xc0\x4b\xe6\x04\xf7\
\xc6\xd3\xb2\x40\x56\x9a\x17\x7f\xf0\x8a\x61\xc0\x2a\xde\xdc\xce\
\x99\x86\xc2\x40\x5d\x35\xe2\x20\x89\xba\x10\xc0\x4f\x58\x95\x85\
\x01\x84\xd4\xc0\x4b\x51\x42\xfa\x2b\xa5\xef\x40\x52\x1b\x84\x34\
\xad\x36\xdf\x40\x2e\xba\x76\xa5\xb1\xb8\x45\x40\x52\x39\x02\x49\
\xaa\x30\x3e\xc0\x37\xb9\x6f\x87\xfd\xd0\x77\x40\x68\x3e\xb5\xb9\
\xe8\xcd\x8a\xc0\x3d\xdf\x2f\x40\x7a\xc8\xd0\x40\x72\x5e\x54\xd0\
\xa5\x34\x44\x40\x3d\x8f\xfd\xe1\x90\x52\x8a\x40\x68\xaf\x88\xff\
\xd8\xfc\x30\x40\x3d\xb4\x96\xe1\xc9\x47\x14\x40\x72\x99\xc3\xc3\
\x16\xad\x07\x00\x00\x00\x10\xc0\x44\x1f\xc5\xb0\x8d\xff\xf8\xc0\
\x62\xd7\xcc\xdb\x51\x37\xe9\xc0\x38\x70\x1f\x26\x3f\xdd\xd1\xc0\
\x59\xd3\xca\x12\xd1\x6d\x66\xc0\x50\x9b\x5f\xdf\x54\x98\x82\xc0\
\x47\x35\x38\xde\x3e\x59\xd1\x40\x40\x3a\x4b\xde\xb1\x11\x5e\xc0\
\x4e\xa8\xe3\x73\xc2\x1e\x04\xc0\x3a\xcd\xa4\x3c\x9c\xa3\x4a\x40\
\x48\x5e\xe9\xc8\x41\x4a\x06\x40\x36\xa3\x67\x5c\xdf\x00\x72\x40\
\x44\xa5\x11\xd4\x30\xbb\xbe\xc0\x58\xc8\x06\xa1\x52\x29\xa4\x40\
\x30\x47\xe1\xaa\x31\x64\x3e\xc0\x63\x5d\xb2\xe9\xcc\x7a\xe2\x40\
\x29\xe9\x00\x97\x5b\x70\xaa\x40\x58\x50\x0c\x6f\xef\x08\x06\xc0\
\x40\x80\xf5\x5f\x55\x35\x51\x40\x60\xea\x26\xcc\x19\xc3\x62\xc0\
\x52\x7c\x87\x94\x54\x11\xcd\xc0\x40\x7b\x99\x9a\x58\x7d\x4e\x40\
\x52\xbc\xf9\x47\x74\x9c\xd3\x40\x42\x20\x54\x4b\xe3\x87\xa0\x40\
\x50\x21\x47\x81\x50\x58\x25\xc0\x35\xd8\x1b\x0b\x45\x8c\xdd\x40\
\x68\xfb\x74\x42\x1b\x4c\x50\xc0\x3e\x4d\x9f\x09\xae\x3a\xcb\x40\
\x72\xb9\xff\x8f\xad\x92\x5a\x40\x3d\x6b\xf8\x3a\xfe\x5b\xcc\x40\
\x67\xb7\x76\x0a\x83\x1d\x2b\x40\x3d\xaf\x2e\x02\x24\x00\xa5\x40\
\x72\x1d\xb9\x4a\x83\x5a\x36\x00\x00\x00\x10\xc0\x56\x21\x7b\x1a\
\x2d\x27\x74\xc0\x5d\x7f\x81\xf7\x72\x97\xeb\xc0\x45\x6a\xce\x4b\
\x4a\x59\x0e\xc0\x58\x6b\x98\xca\x1a\xce\x5b\xc0\x50\x79\x21\x8d\
\x02\xed\x0a\xc0\x3e\xd5\xf2\x5c\x01\x40\xa0\x40\x38\x02\xd4\x2c\
\x79\x7b\xb7\xc0\x52\xa7\xde\x66\xb0\xaa\x42\x3f\xd7\xb5\xe2\x7b\
\x89\xa4\xca\x40\x48\x0f\x83\x05\x1a\x5b\x33\x40\x46\xa9\x42\x70\
\x82\xf2\xfc\x40\x3a\x3a\x42\xb9\x85\x37\x29\xc0\x54\xe2\xc5\x27\
\x1d\x51\x6a\x40\x42\xd1\x7b\x2a\xa1\x08\x15\xc0\x61\x0b\x0a\x8b\
\x23\x7a\xc0\x40\x33\x47\x9a\x49\x82\xe6\x81\x40\x57\xac\xeb\x9b\
\x79\x30\xfa\xc0\x53\x13\x15\xe5\x3e\x1d\x05\x40\x60\x43\xab\x51\
\xbf\x87\xef\xc0\x5d\xe3\x5c\x69\xf3\x2d\x89\x40\x02\x9d\x80\x74\
\x60\xb9\xea\x40\x52\xbe\x74\x1e\x0d\x17\x2d\x40\x50\x50\x9b\xe2\
\xa5\xba\x4e\x40\x46\x29\x92\xd8\xeb\x51\xb2\xc0\x32\x67\x07\xb9\
\x5f\xcb\x99\x40\x68\xd4\xcc\x9f\x63\x2f\x21\xc0\x3e\x08\x79\x67\
\x07\x43\x5b\x40\x72\xa1\x8d\xaf\x2d\x86\xe8\x40\x43\x38\x54\x39\
\x89\x5f\xf6\x40\x64\xda\x52\xe8\xbe\xbd\x10\x40\x3e\xaa\x65\xda\
\xb3\xd9\xcf\x40\x70\xaa\x52\xf4\x99\xff\x0c\
"
qt_resource_name = b"\
\x00\x0a\
\x03\x88\x10\x53\
\x00\x61\
\x00\x6e\x00\x69\x00\x6d\x00\x61\x00\x74\x00\x69\x00\x6f\x00\x6e\x00\x73\
\x00\x07\
\x01\xc4\x70\xa7\
\x00\x6a\
\x00\x75\x00\x6d\x00\x70\x00\x69\x00\x6e\x00\x67\
\x00\x04\
\x00\x06\xab\x74\
\x00\x64\
\x00\x65\x00\x61\x00\x64\
\x00\x08\
\x0f\x03\x25\x67\
\x00\x63\
\x00\x68\x00\x69\x00\x6c\x00\x6c\x00\x69\x00\x6e\x00\x67\
\x00\x07\
\x0a\x84\xa0\x87\
\x00\x64\
\x00\x61\x00\x6e\x00\x63\x00\x69\x00\x6e\x00\x67\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x02\
\x00\x00\x00\x2e\x00\x00\x00\x00\x00\x01\x00\x00\x05\x20\
\x00\x00\x00\x1a\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\x52\x00\x00\x00\x00\x00\x01\x00\x00\x1f\xa0\
\x00\x00\x00\x3c\x00\x00\x00\x00\x00\x01\x00\x00\x06\x30\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| gpl-2.0 |
ctsit/nacculator | nacc/uds3/np/forms.py | 1 | 32292 | ###############################################################################
# Copyright 2015-2020 University of Florida. All rights reserved.
# This file is part of UF CTS-IT's NACCulator project.
# Use of this source code is governed by the license found in the LICENSE file.
###############################################################################
import nacc.uds3
### BEGIN non-generated code
# WARNING: When generating new forms, do not overwrite this section
from datetime import date
# WARNING: When generating new forms, use CURRENT_YEAR instead of "2014"
# WARNING: When generating new forms, use CURRENT_YEAR-15 instead of "1999"
CURRENT_YEAR = date.today().year
### END non-generated code
def header_fields():
fields = {}
fields['FORMVER'] = nacc.uds3.Field(name='FORMVER', typename='Num', position=(906, 907), length=2, inclusive_range=(10, 10), allowable_values=[], blanks=[])
fields['ADCID'] = nacc.uds3.Field(name='ADCID', typename='Num', position=(1, 2), length=2, inclusive_range=(1, 99), allowable_values=[], blanks=[])
fields['PTID'] = nacc.uds3.Field(name='PTID', typename='Char', position=(4, 13), length=10, inclusive_range=None, allowable_values=[], blanks=[])
return fields
class FormNP(nacc.uds3.FieldBag):
def __init__(self):
self.fields = header_fields()
self.fields['NPFORMMO'] = nacc.uds3.Field(name='NPFORMMO', typename='Num', position=(15, 16), length=2, inclusive_range=(1, 12), allowable_values=[], blanks=[])
self.fields['NPFORMDY'] = nacc.uds3.Field(name='NPFORMDY', typename='Num', position=(18, 19), length=2, inclusive_range=(1, 31), allowable_values=[], blanks=[])
self.fields['NPFORMYR'] = nacc.uds3.Field(name='NPFORMYR', typename='Num', position=(21, 24), length=4, inclusive_range=(2001, CURRENT_YEAR), allowable_values=[], blanks=[])
self.fields['NPID'] = nacc.uds3.Field(name='NPID', typename='Char', position=(26, 35), length=10, inclusive_range=(), allowable_values=[], blanks=[])
self.fields['NPSEX'] = nacc.uds3.Field(name='NPSEX', typename='Num', position=(37, 37), length=1, inclusive_range=(1, 2), allowable_values=[], blanks=[])
self.fields['NPDAGE'] = nacc.uds3.Field(name='NPDAGE', typename='Num', position=(39, 41), length=3, inclusive_range=(0, 130), allowable_values=[], blanks=[])
self.fields['NPDODMO'] = nacc.uds3.Field(name='NPDODMO', typename='Num', position=(43, 44), length=2, inclusive_range=(1, 12), allowable_values=[], blanks=[])
self.fields['NPDODDY'] = nacc.uds3.Field(name='NPDODDY', typename='Num', position=(46, 47), length=2, inclusive_range=(1, 31), allowable_values=[], blanks=[])
self.fields['NPDODYR'] = nacc.uds3.Field(name='NPDODYR', typename='Num', position=(49, 52), length=4, inclusive_range=(1984, CURRENT_YEAR), allowable_values=[], blanks=[])
self.fields['NPPMIH'] = nacc.uds3.Field(name='NPPMIH', typename='Num', position=(54, 57), length=4, inclusive_range=(0, 98.9), allowable_values=['99.9'], blanks=[])
self.fields['NPFIX'] = nacc.uds3.Field(name='NPFIX', typename='Num', position=(59, 59), length=1, inclusive_range=(1, 2), allowable_values=['7'], blanks=[])
self.fields['NPFIXX'] = nacc.uds3.Field(name='NPFIXX', typename='Char', position=(61, 90), length=30, inclusive_range=(), allowable_values=[], blanks=['Blank if Question 8 NPFIX ne 7 (Other)'])
self.fields['NPWBRWT'] = nacc.uds3.Field(name='NPWBRWT', typename='Num', position=(92, 95), length=4, inclusive_range=(100, 2500), allowable_values=['9999'], blanks=[])
self.fields['NPWBRF'] = nacc.uds3.Field(name='NPWBRF', typename='Num', position=(97, 97), length=1, inclusive_range=(1, 2), allowable_values=['8'], blanks=[])
self.fields['NPGRCCA'] = nacc.uds3.Field(name='NPGRCCA', typename='Num', position=(99, 99), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=[])
self.fields['NPGRLA'] = nacc.uds3.Field(name='NPGRLA', typename='Num', position=(101, 101), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPGRHA'] = nacc.uds3.Field(name='NPGRHA', typename='Num', position=(103, 103), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=[])
self.fields['NPGRSNH'] = nacc.uds3.Field(name='NPGRSNH', typename='Num', position=(105, 105), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=[])
self.fields['NPGRLCH'] = nacc.uds3.Field(name='NPGRLCH', typename='Num', position=(107, 107), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=[])
self.fields['NPAVAS'] = nacc.uds3.Field(name='NPAVAS', typename='Num', position=(109, 109), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=[])
self.fields['NPTAN'] = nacc.uds3.Field(name='NPTAN', typename='Num', position=(111, 111), length=1, inclusive_range=(1, 4), allowable_values=['7', '8'], blanks=[])
self.fields['NPTANX'] = nacc.uds3.Field(name='NPTANX', typename='Char', position=(113, 142), length=30, inclusive_range=(), allowable_values=[], blanks=['Blank if Question 10a NPTAN ne 7 (Other)'])
self.fields['NPABAN'] = nacc.uds3.Field(name='NPABAN', typename='Num', position=(144, 144), length=1, inclusive_range=(1, 2), allowable_values=['7', '8'], blanks=[])
self.fields['NPABANX'] = nacc.uds3.Field(name='NPABANX', typename='Char', position=(146, 175), length=30, inclusive_range=(), allowable_values=[], blanks=['Blank if Question 10b NPABAN ne 7 (Other)'])
self.fields['NPASAN'] = nacc.uds3.Field(name='NPASAN', typename='Num', position=(177, 177), length=1, inclusive_range=(1, 2), allowable_values=['7', '8'], blanks=[])
self.fields['NPASANX'] = nacc.uds3.Field(name='NPASANX', typename='Char', position=(179, 208), length=30, inclusive_range=(), allowable_values=[], blanks=['Blank if Question 10c NPASAN ne 7 (Other)'])
self.fields['NPTDPAN'] = nacc.uds3.Field(name='NPTDPAN', typename='Num', position=(210, 210), length=1, inclusive_range=(1, 2), allowable_values=['7', '8'], blanks=[])
self.fields['NPTDPANX'] = nacc.uds3.Field(name='NPTDPANX', typename='Char', position=(212, 241), length=30, inclusive_range=(), allowable_values=[], blanks=['Blank if Question 10d NPTDPAN ne 7 (Other)'])
self.fields['NPHISMB'] = nacc.uds3.Field(name='NPHISMB', typename='Num', position=(243, 243), length=1, inclusive_range=(0, 1), allowable_values=[], blanks=[])
self.fields['NPHISG'] = nacc.uds3.Field(name='NPHISG', typename='Num', position=(245, 245), length=1, inclusive_range=(0, 1), allowable_values=[], blanks=[])
self.fields['NPHISSS'] = nacc.uds3.Field(name='NPHISSS', typename='Num', position=(247, 247), length=1, inclusive_range=(0, 1), allowable_values=[], blanks=[])
self.fields['NPHIST'] = nacc.uds3.Field(name='NPHIST', typename='Num', position=(249, 249), length=1, inclusive_range=(0, 1), allowable_values=[], blanks=[])
self.fields['NPHISO'] = nacc.uds3.Field(name='NPHISO', typename='Num', position=(251, 251), length=1, inclusive_range=(0, 1), allowable_values=[], blanks=[])
self.fields['NPHISOX'] = nacc.uds3.Field(name='NPHISOX', typename='Char', position=(253, 282), length=30, inclusive_range=(), allowable_values=[], blanks=['Blank if Question 10e5 NPHISO ne 1 (Yes)'])
self.fields['NPTHAL'] = nacc.uds3.Field(name='NPTHAL', typename='Num', position=(284, 284), length=1, inclusive_range=(0, 5), allowable_values=['8', '9'], blanks=[])
self.fields['NPBRAAK'] = nacc.uds3.Field(name='NPBRAAK', typename='Num', position=(286, 286), length=1, inclusive_range=(0, 9), allowable_values=[], blanks=[])
self.fields['NPNEUR'] = nacc.uds3.Field(name='NPNEUR', typename='Num', position=(288, 288), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=[])
self.fields['NPADNC'] = nacc.uds3.Field(name='NPADNC', typename='Num', position=(290, 290), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=[])
self.fields['NPDIFF'] = nacc.uds3.Field(name='NPDIFF', typename='Num', position=(292, 292), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=[])
self.fields['NPAMY'] = nacc.uds3.Field(name='NPAMY', typename='Num', position=(294, 294), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=[])
self.fields['NPINF'] = nacc.uds3.Field(name='NPINF', typename='Num', position=(296, 296), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['If Question 12a NPINF ne 0 then skip to Question 12b NPHEMO'])
self.fields['NPINF1A'] = nacc.uds3.Field(name='NPINF1A', typename='Num', position=(298, 299), length=2, inclusive_range=(0, 87), allowable_values=['88', '99'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)'])
self.fields['NPINF1B'] = nacc.uds3.Field(name='NPINF1B', typename='Num', position=(301, 304), length=4, inclusive_range=(0, 20), allowable_values=['88.8', '99.9'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)', 'Blank if Question 121a NPINF1A = 0', 'Blank if Question 121a NPINF1A = 88', 'Blank if Question 121a NPINF1A = 99'])
self.fields['NPINF1D'] = nacc.uds3.Field(name='NPINF1D', typename='Num', position=(306, 309), length=4, inclusive_range=(0, 20), allowable_values=['88.8', '99.9'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)', 'Blank if Question 121a NPINF1A = 0', 'Blank if Question 121a NPINF1A = 1', 'Blank if Question 121a NPINF1A = 88', 'Blank if Question 121a NPINF1A = 99'])
self.fields['NPINF1F'] = nacc.uds3.Field(name='NPINF1F', typename='Num', position=(311, 314), length=4, inclusive_range=(0, 20), allowable_values=['88.8', '99.9'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)', 'Blank if Question 121a NPINF1A = 0', 'Blank if Question 121a NPINF1A = 1', 'Blank if Question 121a NPINF1A = 2', 'Blank if Question 121a NPINF1A = 88', 'Blank if Question 121a NPINF1A = 99'])
self.fields['NPINF2A'] = nacc.uds3.Field(name='NPINF2A', typename='Num', position=(316, 317), length=2, inclusive_range=(0, 87), allowable_values=['88', '99'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)'])
self.fields['NPINF2B'] = nacc.uds3.Field(name='NPINF2B', typename='Num', position=(319, 322), length=4, inclusive_range=(0, 20), allowable_values=['88.8', '99.9'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)', 'Blank if Question 122a NPINF1A = 0', 'Blank if Question 122a NPINF1A = 88', 'Blank if Question 121a NPINF1A = 99'])
self.fields['NPINF2D'] = nacc.uds3.Field(name='NPINF2D', typename='Num', position=(324, 327), length=4, inclusive_range=(0, 20), allowable_values=['88.8', '99.9'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)', 'Blank if Question 122a NPINF1A = 0', 'Blank if Question 122a NPINF1A = 1', 'Blank if Question 122a NPINF1A = 88', 'Blank if Question 122a NPINF1A = 99'])
self.fields['NPINF2F'] = nacc.uds3.Field(name='NPINF2F', typename='Num', position=(329, 332), length=4, inclusive_range=(0, 20), allowable_values=['88.8', '99.9'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)', 'Blank if Question 122a NPINF1A = 0', 'Blank if Question 122a NPINF1A = 1', 'Blank if Question 122a NPINF1A = 2', 'Blank if Question 122a NPINF1A = 88', 'Blank if Question 122a NPINF1A = 99'])
self.fields['NPINF3A'] = nacc.uds3.Field(name='NPINF3A', typename='Num', position=(334, 335), length=2, inclusive_range=(0, 87), allowable_values=['88', '99'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)'])
self.fields['NPINF3B'] = nacc.uds3.Field(name='NPINF3B', typename='Num', position=(337, 340), length=4, inclusive_range=(0, 20), allowable_values=['88.8', '99.9'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)', 'Blank if Question 123a NPINF1A = 0', 'Blank if Question 123a NPINF1A = 88', 'Blank if Question 123a NPINF1A = 99'])
self.fields['NPINF3D'] = nacc.uds3.Field(name='NPINF3D', typename='Num', position=(342, 345), length=4, inclusive_range=(0, 20), allowable_values=['88.8', '99.9'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)', 'Blank if Question 123a NPINF1A = 0', 'Blank if Question 123a NPINF1A = 1', 'Blank if Question 123a NPINF1A = 88', 'Blank if Question 123a NPINF1A = 99'])
self.fields['NPINF3F'] = nacc.uds3.Field(name='NPINF3F', typename='Num', position=(347, 350), length=4, inclusive_range=(0, 20), allowable_values=['88.8', '99.9'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)', 'Blank if Question 123a NPINF1A = 0', 'Blank if Question 123a NPINF1A = 1', 'Blank if Question 123a NPINF1A = 2', 'Blank if Question 123a NPINF1A = 88', 'Blank if Question 123a NPINF1A = 99'])
self.fields['NPINF4A'] = nacc.uds3.Field(name='NPINF4A', typename='Num', position=(352, 353), length=2, inclusive_range=(0, 87), allowable_values=['88', '99'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)'])
self.fields['NPINF4B'] = nacc.uds3.Field(name='NPINF4B', typename='Num', position=(355, 358), length=4, inclusive_range=(0, 20), allowable_values=['88.8', '99.9'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)', 'Blank if Question 124a NPINF1A = 0', 'Blank if Question 124a NPINF1A = 88', 'Blank if Question 124a NPINF1A = 99'])
self.fields['NPINF4D'] = nacc.uds3.Field(name='NPINF4D', typename='Num', position=(360, 363), length=4, inclusive_range=(0, 20), allowable_values=['88.8', '99.9'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)', 'Blank if Question 124a NPINF1A = 0', 'Blank if Question 124a NPINF1A = 1', 'Blank if Question 124a NPINF1A = 88', 'Blank if Question 124a NPINF1A = 99'])
self.fields['NPINF4F'] = nacc.uds3.Field(name='NPINF4F', typename='Num', position=(365, 368), length=4, inclusive_range=(0, 20), allowable_values=['88.8', '99.9'], blanks=['Blank if Question 12a NPINF ne 1 (Yes)', 'Blank if Question 124a NPINF1A = 0', 'Blank if Question 124a NPINF1A = 1', 'Blank if Question 124a NPINF1A = 2', 'Blank if Question 124a NPINF1A = 88', 'Blank if Question 124a NPINF1A = 99'])
self.fields['NPHEMO'] = nacc.uds3.Field(name='NPHEMO', typename='Num', position=(370, 370), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['If Question 12b NPHEMO ne 1 then skip to Question 12c NPOLD'])
self.fields['NPHEMO1'] = nacc.uds3.Field(name='NPHEMO1', typename='Num', position=(372, 372), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank if Question 12b NPINF ne 1 (Yes)'])
self.fields['NPHEMO2'] = nacc.uds3.Field(name='NPHEMO2', typename='Num', position=(374, 374), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank if Question 12b NPINF ne 1 (Yes)'])
self.fields['NPHEMO3'] = nacc.uds3.Field(name='NPHEMO3', typename='Num', position=(376, 376), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank if Question 12b NPINF ne 1 (Yes)'])
self.fields['NPOLD'] = nacc.uds3.Field(name='NPOLD', typename='Num', position=(378, 378), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['If Question 12c NPOLD ne 1 then skip to Question 12d NPOLDD'])
self.fields['NPOLD1'] = nacc.uds3.Field(name='NPOLD1', typename='Num', position=(380, 380), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=['Blank if Question 12c NPOLD ne 1 (Yes)'])
self.fields['NPOLD2'] = nacc.uds3.Field(name='NPOLD2', typename='Num', position=(382, 382), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=['Blank if Question 12c NPOLD ne 1 (Yes)'])
self.fields['NPOLD3'] = nacc.uds3.Field(name='NPOLD3', typename='Num', position=(384, 384), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=['Blank if Question 12c NPOLD ne 1 (Yes)'])
self.fields['NPOLD4'] = nacc.uds3.Field(name='NPOLD4', typename='Num', position=(386, 386), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=['Blank if Question 12c NPOLD ne 1 (Yes)'])
self.fields['NPOLDD'] = nacc.uds3.Field(name='NPOLDD', typename='Num', position=(388, 388), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['If Question 12d NPOLDD ne 1 then skip to Question 12e NPARTER'])
self.fields['NPOLDD1'] = nacc.uds3.Field(name='NPOLDD1', typename='Num', position=(390, 390), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=['Blank if Question 12d NPOLDD ne 1 (Yes)'])
self.fields['NPOLDD2'] = nacc.uds3.Field(name='NPOLDD2', typename='Num', position=(392, 392), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=['Blank if Question 12d NPOLDD ne 1 (Yes)'])
self.fields['NPOLDD3'] = nacc.uds3.Field(name='NPOLDD3', typename='Num', position=(394, 394), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=['Blank if Question 12d NPOLDD ne 1 (Yes)'])
self.fields['NPOLDD4'] = nacc.uds3.Field(name='NPOLDD4', typename='Num', position=(396, 396), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=['Blank if Question 12d NPOLDD ne 1 (Yes)'])
self.fields['NPARTER'] = nacc.uds3.Field(name='NPARTER', typename='Num', position=(398, 398), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=[])
self.fields['NPWMR'] = nacc.uds3.Field(name='NPWMR', typename='Num', position=(400, 400), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=[])
self.fields['NPPATH'] = nacc.uds3.Field(name='NPPATH', typename='Num', position=(402, 402), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['If Question 12g NPPATH ne 1 then skip to Question 13 NPLBOD'])
self.fields['NPNEC'] = nacc.uds3.Field(name='NPNEC', typename='Num', position=(404, 404), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 12g NPPATH ne 1 (Yes)'])
self.fields['NPPATH2'] = nacc.uds3.Field(name='NPPATH2', typename='Num', position=(406, 406), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 12g NPPATH ne 1 (Yes)'])
self.fields['NPPATH3'] = nacc.uds3.Field(name='NPPATH3', typename='Num', position=(408, 408), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 12g NPPATH ne 1 (Yes)'])
self.fields['NPPATH4'] = nacc.uds3.Field(name='NPPATH4', typename='Num', position=(410, 410), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 12g NPPATH ne 1 (Yes)'])
self.fields['NPPATH5'] = nacc.uds3.Field(name='NPPATH5', typename='Num', position=(412, 412), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 12g NPPATH ne 1 (Yes)'])
self.fields['NPPATH6'] = nacc.uds3.Field(name='NPPATH6', typename='Num', position=(414, 414), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 12g NPPATH ne 1 (Yes)'])
self.fields['NPPATH7'] = nacc.uds3.Field(name='NPPATH7', typename='Num', position=(416, 416), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 12g NPPATH ne 1 (Yes)'])
self.fields['NPPATH8'] = nacc.uds3.Field(name='NPPATH8', typename='Num', position=(418, 418), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 12g NPPATH ne 1 (Yes)'])
self.fields['NPPATH9'] = nacc.uds3.Field(name='NPPATH9', typename='Num', position=(420, 420), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 12g NPPATH ne 1 (Yes)'])
self.fields['NPPATH10'] = nacc.uds3.Field(name='NPPATH10', typename='Num', position=(422, 422), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 12g NPPATH ne 1 (Yes)'])
self.fields['NPPATH11'] = nacc.uds3.Field(name='NPPATH11', typename='Num', position=(424, 424), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 12g NPPATH ne 1 (Yes)'])
self.fields['NPPATHO'] = nacc.uds3.Field(name='NPPATHO', typename='Num', position=(426, 426), length=1, inclusive_range=(0, 1), allowable_values=[], blanks=['Blank If Question 12g NPPATH ne 1 (Yes)'])
self.fields['NPPATHOX'] = nacc.uds3.Field(name='NPPATHOX', typename='Char', position=(428, 457), length=30, inclusive_range=(), allowable_values=[], blanks=['Blank If Question 12g NPPATH ne 1 (Yes)', 'Blank If Question 1212 NPPATHO ne 1 (Yes)'])
self.fields['NPLBOD'] = nacc.uds3.Field(name='NPLBOD', typename='Num', position=(459, 459), length=1, inclusive_range=(0, 5), allowable_values=['8', '9'], blanks=[])
self.fields['NPNLOSS'] = nacc.uds3.Field(name='NPNLOSS', typename='Num', position=(461, 461), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=[])
self.fields['NPHIPSCL'] = nacc.uds3.Field(name='NPHIPSCL', typename='Num', position=(463, 463), length=1, inclusive_range=(0, 3), allowable_values=['8', '9'], blanks=[])
self.fields['NPTDPA'] = nacc.uds3.Field(name='NPTDPA', typename='Num', position=(465, 465), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPTDPB'] = nacc.uds3.Field(name='NPTDPB', typename='Num', position=(467, 467), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPTDPC'] = nacc.uds3.Field(name='NPTDPC', typename='Num', position=(469, 469), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPTDPD'] = nacc.uds3.Field(name='NPTDPD', typename='Num', position=(471, 471), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPTDPE'] = nacc.uds3.Field(name='NPTDPE', typename='Num', position=(473, 473), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPFTDTAU'] = nacc.uds3.Field(name='NPFTDTAU', typename='Num', position=(475, 475), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['If Question 17a NPFTDTAU ne 1 then skip to Question 17c NPFTDTDP'])
self.fields['NPPICK'] = nacc.uds3.Field(name='NPPICK', typename='Num', position=(477, 477), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17a NPFTDTAU ne 1 (Yes)'])
self.fields['NPFTDT2'] = nacc.uds3.Field(name='NPFTDT2', typename='Num', position=(479, 479), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17a NPFTDTAU ne 1 (Yes)'])
self.fields['NPCORT'] = nacc.uds3.Field(name='NPCORT', typename='Num', position=(481, 481), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17a NPFTDTAU ne 1 (Yes)'])
self.fields['NPPROG'] = nacc.uds3.Field(name='NPPROG', typename='Num', position=(483, 483), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17a NPFTDTAU ne 1 (Yes)'])
self.fields['NPFTDT5'] = nacc.uds3.Field(name='NPFTDT5', typename='Num', position=(485, 485), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17a NPFTDTAU ne 1 (Yes)'])
self.fields['NPFTDT6'] = nacc.uds3.Field(name='NPFTDT6', typename='Num', position=(487, 487), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17a NPFTDTAU ne 1 (Yes)'])
self.fields['NPFTDT7'] = nacc.uds3.Field(name='NPFTDT7', typename='Num', position=(489, 489), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17a NPFTDTAU ne 1 (Yes)'])
self.fields['NPFTDT8'] = nacc.uds3.Field(name='NPFTDT8', typename='Num', position=(491, 491), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17a NPFTDTAU ne 1 (Yes)'])
self.fields['NPFTDT9'] = nacc.uds3.Field(name='NPFTDT9', typename='Num', position=(493, 493), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17a NPFTDTAU ne 1 (Yes)'])
self.fields['NPFTDT10'] = nacc.uds3.Field(name='NPFTDT10', typename='Num', position=(495, 495), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17a NPFTDTAU ne 1 (Yes)'])
self.fields['NPFTDTDP'] = nacc.uds3.Field(name='NPFTDTDP', typename='Num', position=(497, 497), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPALSMND'] = nacc.uds3.Field(name='NPALSMND', typename='Num', position=(499, 499), length=1, inclusive_range=(0, 5), allowable_values=['8', '9'], blanks=[])
self.fields['NPOFTD'] = nacc.uds3.Field(name='NPOFTD', typename='Num', position=(501, 501), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['If Question 17e FPOFTD ne 1 then skip to Question 18a NPPDXA'])
self.fields['NPOFTD1'] = nacc.uds3.Field(name='NPOFTD1', typename='Num', position=(503, 503), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17e NPOFTD ne 1 (Yes)'])
self.fields['NPOFTD2'] = nacc.uds3.Field(name='NPOFTD2', typename='Num', position=(505, 505), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17e NPOFTD ne 1 (Yes)'])
self.fields['NPOFTD3'] = nacc.uds3.Field(name='NPOFTD3', typename='Num', position=(507, 507), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17e NPOFTD ne 1 (Yes)'])
self.fields['NPOFTD4'] = nacc.uds3.Field(name='NPOFTD4', typename='Num', position=(509, 509), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17e NPOFTD ne 1 (Yes)'])
self.fields['NPOFTD5'] = nacc.uds3.Field(name='NPOFTD5', typename='Num', position=(511, 511), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=['Blank If Question 17e NPOFTD ne 1 (Yes)'])
self.fields['NPPDXA'] = nacc.uds3.Field(name='NPPDXA', typename='Num', position=(513, 513), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXB'] = nacc.uds3.Field(name='NPPDXB', typename='Num', position=(515, 515), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXC'] = nacc.uds3.Field(name='NPPDXC', typename='Num', position=(517, 517), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXD'] = nacc.uds3.Field(name='NPPDXD', typename='Num', position=(519, 519), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXE'] = nacc.uds3.Field(name='NPPDXE', typename='Num', position=(521, 521), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXF'] = nacc.uds3.Field(name='NPPDXF', typename='Num', position=(523, 523), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXG'] = nacc.uds3.Field(name='NPPDXG', typename='Num', position=(525, 525), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXH'] = nacc.uds3.Field(name='NPPDXH', typename='Num', position=(527, 527), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXI'] = nacc.uds3.Field(name='NPPDXI', typename='Num', position=(529, 529), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXJ'] = nacc.uds3.Field(name='NPPDXJ', typename='Num', position=(531, 531), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXK'] = nacc.uds3.Field(name='NPPDXK', typename='Num', position=(533, 533), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXL'] = nacc.uds3.Field(name='NPPDXL', typename='Num', position=(535, 535), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXM'] = nacc.uds3.Field(name='NPPDXM', typename='Num', position=(537, 537), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXN'] = nacc.uds3.Field(name='NPPDXN', typename='Num', position=(539, 539), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXO'] = nacc.uds3.Field(name='NPPDXO', typename='Num', position=(541, 541), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXP'] = nacc.uds3.Field(name='NPPDXP', typename='Num', position=(543, 543), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXQ'] = nacc.uds3.Field(name='NPPDXQ', typename='Num', position=(545, 545), length=1, inclusive_range=(0, 1), allowable_values=['8', '9'], blanks=[])
self.fields['NPPDXR'] = nacc.uds3.Field(name='NPPDXR', typename='Num', position=(547, 547), length=1, inclusive_range=(0, 1), allowable_values=[], blanks=[])
self.fields['NPPDXRX'] = nacc.uds3.Field(name='NPPDXRX', typename='Char', position=(549, 578), length=30, inclusive_range=(), allowable_values=[], blanks=['Blank If Question 18r NPPDXR = 0 (No)'])
self.fields['NPPDXS'] = nacc.uds3.Field(name='NPPDXS', typename='Num', position=(580, 580), length=1, inclusive_range=(0, 1), allowable_values=[], blanks=[])
self.fields['NPPDXSX'] = nacc.uds3.Field(name='NPPDXSX', typename='Char', position=(582, 611), length=30, inclusive_range=(), allowable_values=[], blanks=['Blank If Question 18s NPPDXS = 0 (No)'])
self.fields['NPPDXT'] = nacc.uds3.Field(name='NPPDXT', typename='Num', position=(613, 613), length=1, inclusive_range=(0, 1), allowable_values=[], blanks=[])
self.fields['NPPDXTX'] = nacc.uds3.Field(name='NPPDXTX', typename='Char', position=(615, 644), length=30, inclusive_range=(), allowable_values=[], blanks=['Blank If Question 18t NPPDXT = 0 (No)'])
self.fields['NPBNKA'] = nacc.uds3.Field(name='NPBNKA', typename='Num', position=(646, 646), length=1, inclusive_range=(0, 1), allowable_values=['9'], blanks=[])
self.fields['NPBNKB'] = nacc.uds3.Field(name='NPBNKB', typename='Num', position=(648, 648), length=1, inclusive_range=(0, 1), allowable_values=['9'], blanks=[])
self.fields['NPBNKC'] = nacc.uds3.Field(name='NPBNKC', typename='Num', position=(650, 650), length=1, inclusive_range=(0, 1), allowable_values=['9'], blanks=[])
self.fields['NPBNKD'] = nacc.uds3.Field(name='NPBNKD', typename='Num', position=(652, 652), length=1, inclusive_range=(0, 1), allowable_values=['9'], blanks=[])
self.fields['NPBNKE'] = nacc.uds3.Field(name='NPBNKE', typename='Num', position=(654, 654), length=1, inclusive_range=(0, 1), allowable_values=['9'], blanks=[])
self.fields['NPBNKF'] = nacc.uds3.Field(name='NPBNKF', typename='Num', position=(656, 656), length=1, inclusive_range=(0, 1), allowable_values=['9'], blanks=[])
self.fields['NPBNKG'] = nacc.uds3.Field(name='NPBNKG', typename='Num', position=(658, 658), length=1, inclusive_range=(0, 1), allowable_values=['9'], blanks=[])
self.fields['NPFAUT'] = nacc.uds3.Field(name='NPFAUT', typename='Num', position=(660, 660), length=1, inclusive_range=(0, 1), allowable_values=['9'], blanks=[])
self.fields['NPFAUT1'] = nacc.uds3.Field(name='NPFAUT1', typename='Char', position=(662, 721), length=60, inclusive_range=(), allowable_values=[], blanks=['Blank if Question 19h NPFAUT ne 1 (Yes)'])
self.fields['NPFAUT2'] = nacc.uds3.Field(name='NPFAUT2', typename='Char', position=(723, 782), length=60, inclusive_range=(), allowable_values=[], blanks=['Blank if Question 19h NPFAUT ne 1 (Yes)'])
self.fields['NPFAUT3'] = nacc.uds3.Field(name='NPFAUT3', typename='Char', position=(784, 843), length=60, inclusive_range=(), allowable_values=[], blanks=['Blank if Question 19h NPFAUT ne 1 (Yes)'])
self.fields['NPFAUT4'] = nacc.uds3.Field(name='NPFAUT4', typename='Char', position=(845, 904), length=60, inclusive_range=(), allowable_values=[], blanks=['Blank if Question 19h NPFAUT ne 1 (Yes)']) | bsd-2-clause |
AICP/external_chromium_org | tools/cr/cr/commands/init.py | 59 | 5608 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the init command."""
import os
import cr
# The set of variables to store in the per output configuration.
OUT_CONFIG_VARS = [
'CR_VERSION',
cr.Platform.SELECTOR, cr.BuildType.SELECTOR, cr.Arch.SELECTOR,
'CR_OUT_BASE', 'CR_OUT_FULL',
]
class InitCommand(cr.Command):
"""The implementation of the init command.
The init command builds or updates an output directory.
It then uses the Prepare and Select commands to get that directory
ready to use.
"""
def __init__(self):
super(InitCommand, self).__init__()
self.requires_build_dir = False
self.help = 'Create and configure an output directory'
self.description = ("""
If the .cr directory is not present, build it and add
the specified configuration.
If the file already exists, update the configuration with any
additional settings.
""")
self._settings = []
def AddArguments(self, subparsers):
"""Overridden from cr.Command."""
parser = super(InitCommand, self).AddArguments(subparsers)
cr.Platform.AddArguments(parser)
cr.BuildType.AddArguments(parser)
cr.Arch.AddArguments(parser)
cr.SelectCommand.AddPrepareArguments(parser)
parser.add_argument(
'-s', '--set', dest='_settings', metavar='settings',
action='append',
help='Configuration overrides.'
)
return parser
def EarlyArgProcessing(self):
base_settings = getattr(cr.context.args, '_settings', None)
if base_settings:
self._settings.extend(base_settings)
# Do not call super early processing, we do not want to apply
# the output arg...
out = cr.base.client.GetOutArgument()
if out:
# Output directory is fully specified
# We need to deduce other settings from its name
base, buildtype = os.path.split(out)
if not (base and buildtype):
print 'Specified output directory must be two levels'
exit(1)
if not cr.BuildType.FindPlugin(buildtype):
print 'Specified build type', buildtype, 'is not valid'
print 'Must be one of', ','.join(p.name for p in cr.BuildType.Plugins())
exit(1)
if (cr.context.args.CR_BUILDTYPE and
cr.context.args.CR_BUILDTYPE != buildtype):
print 'If --type and --out are both specified, they must match'
print 'Got', cr.context.args.CR_BUILDTYPE, 'and', buildtype
exit(1)
platform = cr.context.args.CR_PLATFORM
if not platform:
# Try to guess platform based on output name
platforms = [p.name for p in cr.Platform.AllPlugins()]
matches = [p for p in platforms if p in base]
if len(matches) != 1:
print 'Platform is not set, and could not be guessed from', base
print 'Should be one of', ','.join(platforms)
if len(matches) > 1:
print 'Matched all of', ','.join(matches)
exit(1)
platform = matches[0]
cr.context.derived.Set(
CR_OUT_FULL=out,
CR_OUT_BASE=base,
CR_PLATFORM=platform,
CR_BUILDTYPE=buildtype,
)
if not 'CR_OUT_BASE' in cr.context:
cr.context.derived['CR_OUT_BASE'] = 'out_{CR_PLATFORM}'
if not 'CR_OUT_FULL' in cr.context:
cr.context.derived['CR_OUT_FULL'] = os.path.join(
'{CR_OUT_BASE}', '{CR_BUILDTYPE}')
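# A small worked illustration of the deduction above (the path is hypothetical,
# not a required value): an explicit --out such as 'out_linux/Debug' is split as
#
#   base, buildtype = os.path.split('out_linux/Debug')
#   # -> base == 'out_linux' (CR_OUT_BASE), buildtype == 'Debug' (CR_BUILDTYPE),
#   # and the platform ('linux') is then guessed from the base name when it
#   # was not given explicitly.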
def Run(self):
"""Overridden from cr.Command."""
src_path = cr.context.Get('CR_SRC')
if not os.path.isdir(src_path):
print cr.context.Substitute('Path {CR_SRC} is not a valid client')
exit(1)
# Ensure we have an output directory override ready to fill in
# This will only be missing if we are creating a brand new output
# directory
build_package = cr.auto.build
# Collect the old version (and float convert)
old_version = cr.context.Find('CR_VERSION')
try:
old_version = float(old_version)
except (ValueError, TypeError):
old_version = 0.0
is_new = not hasattr(build_package, 'config')
if is_new:
class FakeModule(object):
OVERRIDES = cr.Config('OVERRIDES')
def __init__(self):
self.__name__ = 'config'
old_version = None
config = FakeModule()
setattr(build_package, 'config', config)
cr.plugin.ChainModuleConfigs(config)
# Force override the version
build_package.config.OVERRIDES.Set(CR_VERSION=cr.base.client.VERSION)
# Add all the variables that we always want to have
for name in OUT_CONFIG_VARS:
value = cr.context.Find(name)
build_package.config.OVERRIDES[name] = value
# Apply the settings from the command line
for setting in self._settings:
name, separator, value = setting.partition('=')
name = name.strip()
if not separator:
value = True
else:
value = cr.Config.ParseValue(value.strip())
build_package.config.OVERRIDES[name] = value
# Run all the output directory init hooks
for hook in cr.InitHook.Plugins():
hook.Run(old_version, build_package.config)
# Redo activations, they might have changed
cr.plugin.Activate()
# Write out the new configuration, and select it as the default
cr.base.client.WriteConfig(cr.context.Get('CR_BUILD_DIR'),
build_package.config.OVERRIDES.exported)
# Prepare the platform in here, using the updated config
cr.Platform.Prepare()
cr.SelectCommand.Select()
| bsd-3-clause |
jve/rabbittop | rabbittop/terminal.py | 1 | 5872 | """
The MIT License (MIT)
Copyright (c) 2014 Jozef van Eenbergen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import curses
class Terminal(object):
def __init__(self, scrn=None):
self._screen = scrn if scrn else curses.initscr()
curses.noecho()
curses.cbreak()
# curses.curs_set(0)
self._screen.keypad(1)
self._refresh_rate = 3
self._screen.timeout(self._refresh_rate * 1000)
self.selected_row = None
self.start_row = 0
curses.start_color()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_RED)
curses.init_pair(3, curses.COLOR_WHITE, curses.COLOR_GREEN)
curses.init_pair(4, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.init_pair(5, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
curses.init_pair(6, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(7, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(8, curses.COLOR_BLUE, curses.COLOR_BLACK)
curses.init_pair(9, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
curses.init_pair(10, curses.COLOR_BLACK, curses.COLOR_YELLOW)
curses.init_pair(11, curses.COLOR_BLACK, curses.COLOR_WHITE)
self._colors_list = {
'DEFAULT': curses.color_pair(1),
'UNDERLINE': curses.A_UNDERLINE,
'BOLD': curses.A_BOLD,
'SORT': curses.A_BOLD,
'OK': curses.color_pair(7),
'TITLE': curses.A_BOLD,
'PROCESS': curses.color_pair(7),
'STATUS': curses.color_pair(7),
'NICE': curses.color_pair(9),
'CAREFUL': curses.color_pair(8),
'WARNING': curses.color_pair(9),
'CRITICAL': curses.color_pair(6),
'OK_LOG': curses.color_pair(3),
'CAREFUL_LOG': curses.color_pair(4),
'WARNING_LOG': curses.color_pair(5),
'CRITICAL_LOG': curses.color_pair(2),
'SEPARATOR': curses.color_pair(10),
'REVERSE': curses.color_pair(11),
}
self._panels = {}
self._windows = {}
@property
def colors(self):
return self._colors_list
@property
def panels(self):
return self._panels
@property
def windows(self):
return self._windows
def getch(self):
return self._screen.getch()
def refresh(self):
return self._screen.refresh()
def get_size(self):
return self._screen.getmaxyx()
def stop(self):
curses.nocbreak()
self._screen.keypad(0)
curses.echo()
curses.endwin()
def create_window(self, name, height, width, top, left):
panel = Window(height, width, top, left, self)
self._windows[name] = panel
return panel
def create_panel(self, name, height, width):
panel = Panel(height, width)
self._panels[name] = panel
return panel
def add_line(self, text, top, left, color=None):
self._screen.addstr(top, left, text, color)
def up(self):
self.selected_row -= 1
def down(self):
self.selected_row += 1
class Window(object):
def __init__(self, height, width, top, left, parent):
self._panel = parent._screen.subwin(height, width, top, left)
self._parent = parent
self._panel.scrollok(1)
self._panel.idlok(1)
self._panel.touchwin()
def add_line(self, text, top, left, color=None):
self._panel.addstr(top, left, text, color)
def refresh(self):
return self._panel.refresh()
class Panel(object):
""" Wrapped newpad object
"""
def __init__(self, height, width):
self._panel = curses.newpad(height, width)
self.selected_row = 0
self.ptopy = 0
self.ptopx = 0
self.stopy = 0
self.stopx = 0
self.sbottomy = 0
self.sbottomx = 0
self.max = height
def set_max(self, value):
self.max = value
def add_line(self, text, top, left, color=None):
self._panel.addstr(top, left, text, color)
def refresh(self, ptopy, ptopx, stopy, stopx, sbottomy, sbottomx):
self.ptopx = ptopx
self.ptopy = ptopy
self.stopy = stopy
self.stopx = stopx
self.sbottomy = sbottomy
self.sbottomx = sbottomx
return self._panel.refresh(self.ptopy, self.ptopx, self.stopy, self.stopx, self.sbottomy, self.sbottomx)
def getch(self):
return self._panel.getch()
def scroll_up(self):
self.refresh(max(self.ptopy - 1, 0), self.ptopx, self.stopy, self.stopx, self.sbottomy, self.sbottomx)
def scroll_down(self):
self.refresh(min(self.ptopy+1, self.max), self.ptopx, self.stopy, self.stopx, self.sbottomy, self.sbottomx) | mit |
manasapte/pants | src/python/pants/java/nailgun_client.py | 7 | 6693 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import signal
import socket
import sys
from pants.java.nailgun_io import NailgunStreamReader
from pants.java.nailgun_protocol import ChunkType, NailgunProtocol
from pants.util.socket import RecvBufferedSocket
logger = logging.getLogger(__name__)
class NailgunClientSession(NailgunProtocol):
"""Handles a single nailgun client session."""
def __init__(self, sock, in_fd, out_fd, err_fd):
self._sock = sock
self._input_reader = NailgunStreamReader(in_fd, self._sock) if in_fd else None
self._stdout = out_fd
self._stderr = err_fd
self.remote_pid = None
def _maybe_start_input_reader(self):
if self._input_reader:
self._input_reader.start()
def _maybe_stop_input_reader(self):
if self._input_reader:
self._input_reader.stop()
def _process_session(self):
"""Process the outputs of the nailgun session."""
try:
for chunk_type, payload in self.iter_chunks(self._sock, return_bytes=True):
if chunk_type == ChunkType.STDOUT:
self._stdout.write(payload)
self._stdout.flush()
elif chunk_type == ChunkType.STDERR:
self._stderr.write(payload)
self._stderr.flush()
elif chunk_type == ChunkType.EXIT:
self._stdout.flush()
self._stderr.flush()
return int(payload)
elif chunk_type == ChunkType.PID:
self.remote_pid = int(payload)
elif chunk_type == ChunkType.START_READING_INPUT:
self._maybe_start_input_reader()
else:
raise self.ProtocolError('received unexpected chunk {} -> {}'.format(chunk_type, payload))
finally:
# Bad chunk types received from the server can throw NailgunProtocol.ProtocolError in
# NailgunProtocol.iter_chunks(). This ensures the NailgunStreamReader is always stopped.
self._maybe_stop_input_reader()
def execute(self, working_dir, main_class, *arguments, **environment):
# Send the nailgun request.
self.send_request(self._sock, working_dir, main_class, *arguments, **environment)
# Process the remainder of the nailgun session.
return self._process_session()
class NailgunClient(object):
"""A python nailgun client (see http://martiansoftware.com/nailgun for more info)."""
class NailgunError(Exception):
"""Indicates an error interacting with a nailgun server."""
class NailgunConnectionError(NailgunError):
"""Indicates an error upon initial connect to the nailgun server."""
# For backwards compatibility with nails expecting the ng c client special env vars.
ENV_DEFAULTS = dict(NAILGUN_FILESEPARATOR=os.sep, NAILGUN_PATHSEPARATOR=os.pathsep)
DEFAULT_NG_HOST = '127.0.0.1'
DEFAULT_NG_PORT = 2113
def __init__(self, host=DEFAULT_NG_HOST, port=DEFAULT_NG_PORT, ins=sys.stdin, out=None, err=None,
workdir=None):
"""Creates a nailgun client that can be used to issue zero or more nailgun commands.
:param string host: the nailgun server to contact (defaults to '127.0.0.1')
:param int port: the port the nailgun server is listening on (defaults to the default nailgun
port: 2113)
:param file ins: a file to read command standard input from (defaults to stdin) - can be None
in which case no input is read
:param file out: a stream to write command standard output to (defaults to stdout)
:param file err: a stream to write command standard error to (defaults to stderr)
:param string workdir: the default working directory for all nailgun commands (defaults to CWD)
"""
self._host = host
self._port = port
self._stdin = ins
self._stdout = out or sys.stdout
self._stderr = err or sys.stderr
self._workdir = workdir or os.path.abspath(os.path.curdir)
self._session = None
def try_connect(self):
"""Creates a socket, connects it to the nailgun and returns the connected socket.
:returns: a connected `socket.socket`.
:raises: `NailgunClient.NailgunConnectionError` on failure to connect.
"""
sock = RecvBufferedSocket(socket.socket(socket.AF_INET, socket.SOCK_STREAM))
try:
sock.connect((self._host, self._port))
except (socket.error, socket.gaierror) as e:
logger.debug('Encountered socket exception {!r} when attempting connect to nailgun'.format(e))
sock.close()
raise self.NailgunConnectionError(
'Problem connecting to nailgun server at {}:{}: {!r}'.format(self._host, self._port, e))
else:
return sock
def send_control_c(self):
"""Sends SIGINT to a nailgun server using pid information from the active session."""
if self._session and self._session.remote_pid is not None:
os.kill(self._session.remote_pid, signal.SIGINT)
def execute(self, main_class, cwd=None, *args, **environment):
"""Executes the given main_class with any supplied args in the given environment.
:param string main_class: the fully qualified class name of the main entrypoint
:param string cwd: Set the working directory for this command
:param list args: any arguments to pass to the main entrypoint
:param dict environment: an env mapping made available to native nails via the nail context
:returns: the exit code of the main_class.
"""
environment = dict(self.ENV_DEFAULTS.items() + environment.items())
cwd = cwd or self._workdir
# N.B. This can throw NailgunConnectionError (catchable via NailgunError).
sock = self.try_connect()
self._session = NailgunClientSession(sock, self._stdin, self._stdout, self._stderr)
try:
return self._session.execute(cwd, main_class, *args, **environment)
except socket.error as e:
raise self.NailgunError('Problem communicating with nailgun server at {}:{}: {!r}'
.format(self._host, self._port, e))
except NailgunProtocol.ProtocolError as e:
raise self.NailgunError('Problem in nailgun protocol with nailgun server at {}:{}: {!r}'
.format(self._host, self._port, e))
finally:
sock.close()
self._session = None
def __repr__(self):
return 'NailgunClient(host={!r}, port={!r}, workdir={!r})'.format(self._host,
self._port,
self._workdir)
| apache-2.0 |
detrout/pykolab | pykolab/cli/cmd_remove_mailaddress.py | 1 | 3308 | # -*- coding: utf-8 -*-
# Copyright 2010-2012 Kolab Systems AG (http://www.kolabsys.com)
#
# Jeroen van Meeuwen (Kolab Systems) <vanmeeuwen a kolabsys.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 3 or, at your option, any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
import sys
import commands
import pykolab
from pykolab.auth import Auth
from pykolab import utils
from pykolab.translate import _
log = pykolab.getLogger('pykolab.cli')
conf = pykolab.getConf()
def __init__():
commands.register('remove_mail', execute, description=description())
def description():
return """Remove a recipient's mail address."""
def execute(*args, **kw):
try:
email_address = conf.cli_args.pop(0)
except IndexError, errmsg:
email_address = utils.ask_question("Email address to remove")
# Get the domain from the email address
if len(email_address.split('@')) > 1:
domain = email_address.split('@')[1]
else:
log.error(_("Invalid or unqualified email address."))
sys.exit(1)
auth = Auth()
auth.connect(domain=domain)
recipients = auth.find_recipient(email_address)
if len(recipients) == 0:
log.error(_("No recipient found for email address %r") % (email_address))
sys.exit(1)
log.debug(_("Found the following recipient(s): %r") % (recipients), level=8)
mail_attributes = conf.get_list(domain, 'mail_attributes')
if mail_attributes == None or len(mail_attributes) < 1:
mail_attributes = conf.get_list(conf.get('kolab', 'auth_mechanism'), 'mail_attributes')
log.debug(_("Using the following mail attributes: %r") % (mail_attributes), level=8)
if isinstance(recipients, basestring):
recipient = recipients
# Only a single recipient found, remove the address
attributes = auth.get_entry_attributes(domain, recipient, mail_attributes)
# See which attribute holds the value we're trying to remove
for attribute in attributes.keys():
if isinstance(attributes[attribute], list):
if email_address in attributes[attribute]:
attributes[attribute].pop(attributes[attribute].index(email_address))
replace_attributes = {
attribute: attributes[attribute]
}
auth.set_entry_attributes(domain, recipient, replace_attributes)
else:
if email_address == attributes[attribute]:
auth.set_entry_attributes(domain, recipient, {attribute: None})
pass
else:
print >> sys.stderr, _("Found the following recipients:")
for recipient in recipients:
print recipient
| gpl-3.0 |
klonage/nlt-gcs | packages/IronPython.StdLib.2.7.4/content/Lib/pickle.py | 42 | 46516 | """Create portable serialized representations of Python objects.
See module cPickle for a (much) faster implementation.
See module copy_reg for a mechanism for registering custom picklers.
See module pickletools source for extensive comments.
Classes:
Pickler
Unpickler
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
Misc variables:
__version__
format_version
compatible_formats
"""
__version__ = "$Revision$" # Code version
from types import *
from copy_reg import dispatch_table
from copy_reg import _extension_registry, _inverted_registry, _extension_cache
import marshal
import sys
import struct
import re
__all__ = ["PickleError", "PicklingError", "UnpicklingError", "Pickler",
"Unpickler", "dump", "dumps", "load", "loads"]
# These are purely informational; no code uses these.
format_version = "2.0" # File format version we write
compatible_formats = ["1.0", # Original protocol 0
"1.1", # Protocol 0 with INST added
"1.2", # Original protocol 1
"1.3", # Protocol 1 with BINFLOAT added
"2.0", # Protocol 2
] # Old format versions we can read
# Keep in synch with cPickle. This is the highest protocol number we
# know how to read.
HIGHEST_PROTOCOL = 2
# Why use struct.pack() for pickling but marshal.loads() for
# unpickling? struct.pack() is 40% faster than marshal.dumps(), but
# marshal.loads() is twice as fast as struct.unpack()!
mloads = marshal.loads
class PickleError(Exception):
"""A common base class for the other pickling exceptions."""
pass
class PicklingError(PickleError):
"""This exception is raised when an unpicklable object is passed to the
dump() method.
"""
pass
class UnpicklingError(PickleError):
"""This exception is raised when there is a problem unpickling an object,
such as a security violation.
Note that other exceptions may also be raised during unpickling, including
(but not necessarily limited to) AttributeError, EOFError, ImportError,
and IndexError.
"""
pass
# An instance of _Stop is raised by Unpickler.load_stop() in response to
# the STOP opcode, passing the object that is the result of unpickling.
class _Stop(Exception):
def __init__(self, value):
self.value = value
# Jython has PyStringMap; it's a dict subclass with string keys
try:
from org.python.core import PyStringMap
except ImportError:
PyStringMap = None
# UnicodeType may or may not be exported (normally imported from types)
try:
UnicodeType
except NameError:
UnicodeType = None
# Pickle opcodes. See pickletools.py for extensive docs. The listing
# here is in kind-of alphabetical order of 1-character pickle code.
# pickletools groups them by purpose.
MARK = '(' # push special markobject on stack
STOP = '.' # every pickle ends with STOP
POP = '0' # discard topmost stack item
POP_MARK = '1' # discard stack top through topmost markobject
DUP = '2' # duplicate top stack item
FLOAT = 'F' # push float object; decimal string argument
INT = 'I' # push integer or bool; decimal string argument
BININT = 'J' # push four-byte signed int
BININT1 = 'K' # push 1-byte unsigned int
LONG = 'L' # push long; decimal string argument
BININT2 = 'M' # push 2-byte unsigned int
NONE = 'N' # push None
PERSID = 'P' # push persistent object; id is taken from string arg
BINPERSID = 'Q' # " " " ; " " " " stack
REDUCE = 'R' # apply callable to argtuple, both on stack
STRING = 'S' # push string; NL-terminated string argument
BINSTRING = 'T' # push string; counted binary string argument
SHORT_BINSTRING = 'U' # " " ; " " " " < 256 bytes
UNICODE = 'V' # push Unicode string; raw-unicode-escaped'd argument
BINUNICODE = 'X' # " " " ; counted UTF-8 string argument
APPEND = 'a' # append stack top to list below it
BUILD = 'b' # call __setstate__ or __dict__.update()
GLOBAL = 'c' # push self.find_class(modname, name); 2 string args
DICT = 'd' # build a dict from stack items
EMPTY_DICT = '}' # push empty dict
APPENDS = 'e' # extend list on stack by topmost stack slice
GET = 'g' # push item from memo on stack; index is string arg
BINGET = 'h' # " " " " " " ; " " 1-byte arg
INST = 'i' # build & push class instance
LONG_BINGET = 'j' # push item from memo on stack; index is 4-byte arg
LIST = 'l' # build list from topmost stack items
EMPTY_LIST = ']' # push empty list
OBJ = 'o' # build & push class instance
PUT = 'p' # store stack top in memo; index is string arg
BINPUT = 'q' # " " " " " ; " " 1-byte arg
LONG_BINPUT = 'r' # " " " " " ; " " 4-byte arg
SETITEM = 's' # add key+value pair to dict
TUPLE = 't' # build tuple from topmost stack items
EMPTY_TUPLE = ')' # push empty tuple
SETITEMS = 'u' # modify dict by adding topmost key+value pairs
BINFLOAT = 'G' # push float; arg is 8-byte float encoding
TRUE = 'I01\n' # not an opcode; see INT docs in pickletools.py
FALSE = 'I00\n' # not an opcode; see INT docs in pickletools.py
# Protocol 2
PROTO = '\x80' # identify pickle protocol
NEWOBJ = '\x81' # build object by applying cls.__new__ to argtuple
EXT1 = '\x82' # push object from extension registry; 1-byte index
EXT2 = '\x83' # ditto, but 2-byte index
EXT4 = '\x84' # ditto, but 4-byte index
TUPLE1 = '\x85' # build 1-tuple from stack top
TUPLE2 = '\x86' # build 2-tuple from two topmost stack items
TUPLE3 = '\x87' # build 3-tuple from three topmost stack items
NEWTRUE = '\x88' # push True
NEWFALSE = '\x89' # push False
LONG1 = '\x8a' # push long from < 256 bytes
LONG4 = '\x8b' # push really big long
_tuplesize2code = [EMPTY_TUPLE, TUPLE1, TUPLE2, TUPLE3]
__all__.extend([x for x in dir() if re.match("[A-Z][A-Z0-9_]+$",x)])
del x
# Pickling machinery
class Pickler:
def __init__(self, file, protocol=None):
"""This takes a file-like object for writing a pickle data stream.
The optional protocol argument tells the pickler to use the
given protocol; supported protocols are 0, 1, 2. The default
protocol is 0, to be backwards compatible. (Protocol 0 is the
only protocol that can be written to a file opened in text
mode and read back successfully. When using a protocol higher
than 0, make sure the file is opened in binary mode, both when
pickling and unpickling.)
Protocol 1 is more efficient than protocol 0; protocol 2 is
more efficient than protocol 1.
Specifying a negative protocol version selects the highest
protocol version supported. The higher the protocol used, the
more recent the version of Python needed to read the pickle
produced.
The file parameter must have a write() method that accepts a single
string argument. It can thus be an open file object, a StringIO
object, or any other custom object that meets this interface.
"""
if protocol is None:
protocol = 0
if protocol < 0:
protocol = HIGHEST_PROTOCOL
elif not 0 <= protocol <= HIGHEST_PROTOCOL:
raise ValueError("pickle protocol must be <= %d" % HIGHEST_PROTOCOL)
self.write = file.write
self.memo = {}
self.proto = int(protocol)
self.bin = protocol >= 1
self.fast = 0
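# Illustrative note (not part of the upstream module): a negative protocol
# argument selects the newest protocol this module can write, so
#
#   Pickler(f, -1)
#
# behaves the same as
#
#   Pickler(f, HIGHEST_PROTOCOL)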
def clear_memo(self):
"""Clears the pickler's "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects are
pickled by reference and not by value. This method is useful when
re-using picklers.
"""
self.memo.clear()
def dump(self, obj):
"""Write a pickled representation of obj to the open file."""
if self.proto >= 2:
self.write(PROTO + chr(self.proto))
self.save(obj)
self.write(STOP)
def memoize(self, obj):
"""Store an object in the memo."""
# The Pickler memo is a dictionary mapping object ids to 2-tuples
# that contain the Unpickler memo key and the object being memoized.
# The memo key is written to the pickle and will become
# the key in the Unpickler's memo. The object is stored in the
# Pickler memo so that transient objects are kept alive during
# pickling.
# The use of the Unpickler memo length as the memo key is just a
# convention. The only requirement is that the memo values be unique.
# But there appears no advantage to any other scheme, and this
# scheme allows the Unpickler memo to be implemented as a plain (but
# growable) array, indexed by memo key.
if self.fast:
return
assert id(obj) not in self.memo
memo_len = len(self.memo)
self.write(self.put(memo_len))
self.memo[id(obj)] = memo_len, obj
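# Illustrative sketch (not part of the original module): the memo is what
# turns repeated references into GET opcodes instead of duplicate copies.
# Using the module-level dumps()/loads() helpers defined near the end of
# this file:
#
#   shared = [1, 2, 3]
#   blob = dumps([shared, shared], 2)
#   a, b = loads(blob)
#   assert a is b    # the second occurrence was resolved through the memo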
# Return a PUT (BINPUT, LONG_BINPUT) opcode string, with argument i.
def put(self, i, pack=struct.pack):
if self.bin:
if i < 256:
return BINPUT + chr(i)
else:
return LONG_BINPUT + pack("<i", i)
return PUT + repr(i) + '\n'
# Return a GET (BINGET, LONG_BINGET) opcode string, with argument i.
def get(self, i, pack=struct.pack):
if self.bin:
if i < 256:
return BINGET + chr(i)
else:
return LONG_BINGET + pack("<i", i)
return GET + repr(i) + '\n'
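# Opcode sketch (illustrative only): for memo index 5 the two helpers above
# emit the following strings --
#
#   put(5)  ->  'q\x05'  (BINPUT, binary protocols)  or  'p5\n'  (PUT, proto 0)
#   get(5)  ->  'h\x05'  (BINGET, binary protocols)  or  'g5\n'  (GET, proto 0)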
def save(self, obj):
# Check for persistent id (defined by a subclass)
pid = self.persistent_id(obj)
if pid:
self.save_pers(pid)
return
# Check the memo
x = self.memo.get(id(obj))
if x:
self.write(self.get(x[0]))
return
# Check the type dispatch table
t = type(obj)
f = self.dispatch.get(t)
if f:
f(self, obj) # Call unbound method with explicit self
return
# Check for a class with a custom metaclass; treat as regular class
try:
issc = issubclass(t, TypeType)
except TypeError: # t is not a class (old Boost; see SF #502085)
issc = 0
if issc:
self.save_global(obj)
return
# Check copy_reg.dispatch_table
reduce = dispatch_table.get(t)
if reduce:
rv = reduce(obj)
else:
# Check for a __reduce_ex__ method, fall back to __reduce__
reduce = getattr(obj, "__reduce_ex__", None)
if reduce:
rv = reduce(self.proto)
else:
reduce = getattr(obj, "__reduce__", None)
if reduce:
rv = reduce()
else:
raise PicklingError("Can't pickle %r object: %r" %
(t.__name__, obj))
# Check for string returned by reduce(), meaning "save as global"
if type(rv) is StringType:
self.save_global(obj, rv)
return
# Assert that reduce() returned a tuple
if type(rv) is not TupleType:
raise PicklingError("%s must return string or tuple" % reduce)
# Assert that it returned an appropriately sized tuple
l = len(rv)
if not (2 <= l <= 5):
raise PicklingError("Tuple returned by %s must have "
"two to five elements" % reduce)
# Save the reduce() output and finally memoize the object
self.save_reduce(obj=obj, *rv)
def persistent_id(self, obj):
# This exists so a subclass can override it
return None
def save_pers(self, pid):
# Save a persistent id reference
if self.bin:
self.save(pid)
self.write(BINPERSID)
else:
self.write(PERSID + str(pid) + '\n')
def save_reduce(self, func, args, state=None,
listitems=None, dictitems=None, obj=None):
# This API is called by some subclasses
# Assert that args is a tuple or None
if not isinstance(args, TupleType):
raise PicklingError("args from reduce() should be a tuple")
# Assert that func is callable
if not hasattr(func, '__call__'):
raise PicklingError("func from reduce should be callable")
save = self.save
write = self.write
# Protocol 2 special case: if func's name is __newobj__, use NEWOBJ
if self.proto >= 2 and getattr(func, "__name__", "") == "__newobj__":
# A __reduce__ implementation can direct protocol 2 to
# use the more efficient NEWOBJ opcode, while still
# allowing protocol 0 and 1 to work normally. For this to
# work, the function returned by __reduce__ should be
# called __newobj__, and its first argument should be a
# new-style class. The implementation for __newobj__
# should be as follows, although pickle has no way to
# verify this:
#
# def __newobj__(cls, *args):
# return cls.__new__(cls, *args)
#
# Protocols 0 and 1 will pickle a reference to __newobj__,
# while protocol 2 (and above) will pickle a reference to
# cls, the remaining args tuple, and the NEWOBJ code,
# which calls cls.__new__(cls, *args) at unpickling time
# (see load_newobj below). If __reduce__ returns a
# three-tuple, the state from the third tuple item will be
# pickled regardless of the protocol, calling __setstate__
# at unpickling time (see load_build below).
#
# Note that no standard __newobj__ implementation exists;
# you have to provide your own. This is to enforce
# compatibility with Python 2.2 (pickles written using
# protocol 0 or 1 in Python 2.3 should be unpicklable by
# Python 2.2).
cls = args[0]
if not hasattr(cls, "__new__"):
raise PicklingError(
"args[0] from __newobj__ args has no __new__")
if obj is not None and cls is not obj.__class__:
raise PicklingError(
"args[0] from __newobj__ args has the wrong class")
args = args[1:]
save(cls)
save(args)
write(NEWOBJ)
else:
save(func)
save(args)
write(REDUCE)
if obj is not None:
self.memoize(obj)
# More new special cases (that work with older protocols as
# well): when __reduce__ returns a tuple with 4 or 5 items,
# the 4th and 5th item should be iterators that provide list
# items and dict items (as (key, value) tuples), or None.
if listitems is not None:
self._batch_appends(listitems)
if dictitems is not None:
self._batch_setitems(dictitems)
if state is not None:
save(state)
write(BUILD)
# Methods below this point are dispatched through the dispatch table
dispatch = {}
def save_none(self, obj):
self.write(NONE)
dispatch[NoneType] = save_none
def save_bool(self, obj):
if self.proto >= 2:
self.write(obj and NEWTRUE or NEWFALSE)
else:
self.write(obj and TRUE or FALSE)
dispatch[bool] = save_bool
def save_int(self, obj, pack=struct.pack):
if self.bin:
# If the int is small enough to fit in a signed 4-byte 2's-comp
# format, we can store it more efficiently than the general
# case.
# First one- and two-byte unsigned ints:
if obj >= 0:
if obj <= 0xff:
self.write(BININT1 + chr(obj))
return
if obj <= 0xffff:
self.write("%c%c%c" % (BININT2, obj&0xff, obj>>8))
return
# Next check for 4-byte signed ints:
high_bits = obj >> 31 # note that Python shift sign-extends
if high_bits == 0 or high_bits == -1:
# All high bits are copies of bit 2**31, so the value
# fits in a 4-byte signed int.
self.write(BININT + pack("<i", obj))
return
# Text pickle, or int too big to fit in signed 4-byte format.
self.write(INT + repr(obj) + '\n')
dispatch[IntType] = save_int
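# Worked examples for save_int (illustrative, not from the original source),
# assuming a binary protocol:
#
#   255    ->  BININT1 + chr(255)          ('K\xff')
#   65535  ->  BININT2, little-endian      ('M\xff\xff')
#   -1     ->  BININT + pack('<i', -1)     ('J\xff\xff\xff\xff')
#
# An int too large for a signed 4-byte encoding falls through to the text
# form ('I...\n'), exactly as in protocol 0.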
def save_long(self, obj, pack=struct.pack):
if self.proto >= 2:
bytes = encode_long(obj)
n = len(bytes)
if n < 256:
self.write(LONG1 + chr(n) + bytes)
else:
self.write(LONG4 + pack("<i", n) + bytes)
return
self.write(LONG + repr(obj) + '\n')
dispatch[LongType] = save_long
def save_float(self, obj, pack=struct.pack):
if self.bin:
self.write(BINFLOAT + pack('>d', obj))
else:
self.write(FLOAT + repr(obj) + '\n')
dispatch[FloatType] = save_float
def save_string(self, obj, pack=struct.pack):
if self.bin:
n = len(obj)
if n < 256:
self.write(SHORT_BINSTRING + chr(n) + obj)
else:
self.write(BINSTRING + pack("<i", n) + obj)
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_unicode(self, obj, pack=struct.pack):
if self.bin:
encoding = obj.encode('utf-8')
n = len(encoding)
self.write(BINUNICODE + pack("<i", n) + encoding)
else:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
self.write(UNICODE + obj.encode('raw-unicode-escape') + '\n')
self.memoize(obj)
dispatch[UnicodeType] = save_unicode
if StringType is UnicodeType:
# This is true for Jython
def save_string(self, obj, pack=struct.pack):
unicode = obj.isunicode()
if self.bin:
if unicode:
obj = obj.encode("utf-8")
l = len(obj)
if l < 256 and not unicode:
self.write(SHORT_BINSTRING + chr(l) + obj)
else:
s = pack("<i", l)
if unicode:
self.write(BINUNICODE + s + obj)
else:
self.write(BINSTRING + s + obj)
else:
if unicode:
obj = obj.replace("\\", "\\u005c")
obj = obj.replace("\n", "\\u000a")
obj = obj.encode('raw-unicode-escape')
self.write(UNICODE + obj + '\n')
else:
self.write(STRING + repr(obj) + '\n')
self.memoize(obj)
dispatch[StringType] = save_string
def save_tuple(self, obj):
write = self.write
proto = self.proto
n = len(obj)
if n == 0:
if proto:
write(EMPTY_TUPLE)
else:
write(MARK + TUPLE)
return
save = self.save
memo = self.memo
if n <= 3 and proto >= 2:
for element in obj:
save(element)
# Subtle. Same as in the big comment below.
if id(obj) in memo:
get = self.get(memo[id(obj)][0])
write(POP * n + get)
else:
write(_tuplesize2code[n])
self.memoize(obj)
return
# proto 0 or proto 1 and tuple isn't empty, or proto > 1 and tuple
# has more than 3 elements.
write(MARK)
for element in obj:
save(element)
if id(obj) in memo:
# Subtle. d was not in memo when we entered save_tuple(), so
# the process of saving the tuple's elements must have saved
# the tuple itself: the tuple is recursive. The proper action
# now is to throw away everything we put on the stack, and
# simply GET the tuple (it's already constructed). This check
# could have been done in the "for element" loop instead, but
# recursive tuples are a rare thing.
get = self.get(memo[id(obj)][0])
if proto:
write(POP_MARK + get)
else: # proto 0 -- POP_MARK not available
write(POP * (n+1) + get)
return
# No recursion.
self.write(TUPLE)
self.memoize(obj)
dispatch[TupleType] = save_tuple
# save_empty_tuple() isn't used by anything in Python 2.3. However, I
# found a Pickler subclass in Zope3 that calls it, so it's not harmless
# to remove it.
def save_empty_tuple(self, obj):
self.write(EMPTY_TUPLE)
def save_list(self, obj):
write = self.write
if self.bin:
write(EMPTY_LIST)
else: # proto 0 -- can't use EMPTY_LIST
write(MARK + LIST)
self.memoize(obj)
self._batch_appends(iter(obj))
dispatch[ListType] = save_list
# Keep in synch with cPickle's BATCHSIZE. Nothing will break if it gets
# out of synch, though.
_BATCHSIZE = 1000
def _batch_appends(self, items):
# Helper to batch up APPENDS sequences
save = self.save
write = self.write
if not self.bin:
for x in items:
save(x)
write(APPEND)
return
r = xrange(self._BATCHSIZE)
while items is not None:
tmp = []
for i in r:
try:
x = items.next()
tmp.append(x)
except StopIteration:
items = None
break
n = len(tmp)
if n > 1:
write(MARK)
for x in tmp:
save(x)
write(APPENDS)
elif n:
save(tmp[0])
write(APPEND)
# else tmp is empty, and we're done
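# Batching note (illustrative, not from the original source): with
# _BATCHSIZE = 1000, a 2500-element list pickled under a binary protocol is
# written as three MARK ... APPENDS groups (1000 + 1000 + 500 items) rather
# than 2500 individual APPEND opcodes.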
def save_dict(self, obj):
write = self.write
if self.bin:
write(EMPTY_DICT)
else: # proto 0 -- can't use EMPTY_DICT
write(MARK + DICT)
self.memoize(obj)
self._batch_setitems(obj.iteritems())
dispatch[DictionaryType] = save_dict
if not PyStringMap is None:
dispatch[PyStringMap] = save_dict
def _batch_setitems(self, items):
# Helper to batch up SETITEMS sequences; proto >= 1 only
save = self.save
write = self.write
if not self.bin:
for k, v in items:
save(k)
save(v)
write(SETITEM)
return
r = xrange(self._BATCHSIZE)
while items is not None:
tmp = []
for i in r:
try:
tmp.append(items.next())
except StopIteration:
items = None
break
n = len(tmp)
if n > 1:
write(MARK)
for k, v in tmp:
save(k)
save(v)
write(SETITEMS)
elif n:
k, v = tmp[0]
save(k)
save(v)
write(SETITEM)
# else tmp is empty, and we're done
def save_inst(self, obj):
cls = obj.__class__
memo = self.memo
write = self.write
save = self.save
if hasattr(obj, '__getinitargs__'):
args = obj.__getinitargs__()
len(args) # XXX Assert it's a sequence
_keep_alive(args, memo)
else:
args = ()
write(MARK)
if self.bin:
save(cls)
for arg in args:
save(arg)
write(OBJ)
else:
for arg in args:
save(arg)
write(INST + cls.__module__ + '\n' + cls.__name__ + '\n')
self.memoize(obj)
try:
getstate = obj.__getstate__
except AttributeError:
stuff = obj.__dict__
else:
stuff = getstate()
_keep_alive(stuff, memo)
save(stuff)
write(BUILD)
dispatch[InstanceType] = save_inst
def save_global(self, obj, name=None, pack=struct.pack):
write = self.write
memo = self.memo
if name is None:
name = obj.__name__
module = getattr(obj, "__module__", None)
if module is None:
module = whichmodule(obj, name)
try:
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
except (ImportError, KeyError, AttributeError):
raise PicklingError(
"Can't pickle %r: it's not found as %s.%s" %
(obj, module, name))
else:
if klass is not obj:
raise PicklingError(
"Can't pickle %r: it's not the same object as %s.%s" %
(obj, module, name))
if self.proto >= 2:
code = _extension_registry.get((module, name))
if code:
assert code > 0
if code <= 0xff:
write(EXT1 + chr(code))
elif code <= 0xffff:
write("%c%c%c" % (EXT2, code&0xff, code>>8))
else:
write(EXT4 + pack("<i", code))
return
write(GLOBAL + module + '\n' + name + '\n')
self.memoize(obj)
dispatch[ClassType] = save_global
dispatch[FunctionType] = save_global
dispatch[BuiltinFunctionType] = save_global
dispatch[TypeType] = save_global
# Pickling helpers
def _keep_alive(x, memo):
"""Keeps a reference to the object x in the memo.
Because we remember objects by their id, we have
to assure that possibly temporary objects are kept
alive by referencing them.
We store a reference at the id of the memo, which should
normally not be used unless someone tries to deepcopy
the memo itself...
"""
try:
memo[id(memo)].append(x)
except KeyError:
# aha, this is the first one :-)
memo[id(memo)]=[x]
# A cache for whichmodule(), mapping a function object to the name of
# the module in which the function was found.
classmap = {} # called classmap for backwards compatibility
def whichmodule(func, funcname):
"""Figure out the module in which a function occurs.
Search sys.modules for the module.
Cache in classmap.
Return a module name.
If the function cannot be found, return "__main__".
"""
# Python functions should always get an __module__ from their globals.
mod = getattr(func, "__module__", None)
if mod is not None:
return mod
if func in classmap:
return classmap[func]
for name, module in sys.modules.items():
if module is None:
continue # skip dummy package entries
if name != '__main__' and getattr(module, funcname, None) is func:
break
else:
name = '__main__'
classmap[func] = name
return name
# Unpickling machinery
class Unpickler:
def __init__(self, file):
"""This takes a file-like object for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so no
proto argument is needed.
The file-like object must have two methods, a read() method that
takes an integer argument, and a readline() method that requires no
arguments. Both methods should return a string. Thus file-like
object can be a file object opened for reading, a StringIO object,
or any other custom object that meets this interface.
"""
self.readline = file.readline
self.read = file.read
self.memo = {}
def load(self):
"""Read a pickled object representation from the open file.
Return the reconstituted object hierarchy specified in the file.
"""
self.mark = object() # any new unique object
self.stack = []
self.append = self.stack.append
read = self.read
dispatch = self.dispatch
try:
while 1:
key = read(1)
dispatch[key](self)
except _Stop, stopinst:
return stopinst.value
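# Illustrative only: an Unpickler can be driven directly instead of going
# through the module-level load()/loads() helpers, e.g.
#
#   >>> from StringIO import StringIO
#   >>> Unpickler(StringIO(dumps((1, 2, 3)))).load()
#   (1, 2, 3)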
# Return largest index k such that self.stack[k] is self.mark.
# If the stack doesn't contain a mark, eventually raises IndexError.
# This could be sped by maintaining another stack, of indices at which
# the mark appears. For that matter, the latter stack would suffice,
# and we wouldn't need to push mark objects on self.stack at all.
# Doing so is probably a good thing, though, since if the pickle is
# corrupt (or hostile) we may get a clue from finding self.mark embedded
# in unpickled objects.
def marker(self):
stack = self.stack
mark = self.mark
k = len(stack)-1
while stack[k] is not mark: k = k-1
return k
dispatch = {}
def load_eof(self):
raise EOFError
dispatch[''] = load_eof
def load_proto(self):
proto = ord(self.read(1))
if not 0 <= proto <= 2:
raise ValueError, "unsupported pickle protocol: %d" % proto
dispatch[PROTO] = load_proto
def load_persid(self):
pid = self.readline()[:-1]
self.append(self.persistent_load(pid))
dispatch[PERSID] = load_persid
def load_binpersid(self):
pid = self.stack.pop()
self.append(self.persistent_load(pid))
dispatch[BINPERSID] = load_binpersid
def load_none(self):
self.append(None)
dispatch[NONE] = load_none
def load_false(self):
self.append(False)
dispatch[NEWFALSE] = load_false
def load_true(self):
self.append(True)
dispatch[NEWTRUE] = load_true
def load_int(self):
data = self.readline()
if data == FALSE[1:]:
val = False
elif data == TRUE[1:]:
val = True
else:
try:
val = int(data)
except ValueError:
val = long(data)
self.append(val)
dispatch[INT] = load_int
def load_binint(self):
self.append(mloads('i' + self.read(4)))
dispatch[BININT] = load_binint
def load_binint1(self):
self.append(ord(self.read(1)))
dispatch[BININT1] = load_binint1
def load_binint2(self):
self.append(mloads('i' + self.read(2) + '\000\000'))
dispatch[BININT2] = load_binint2
def load_long(self):
self.append(long(self.readline()[:-1], 0))
dispatch[LONG] = load_long
def load_long1(self):
n = ord(self.read(1))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG1] = load_long1
def load_long4(self):
n = mloads('i' + self.read(4))
bytes = self.read(n)
self.append(decode_long(bytes))
dispatch[LONG4] = load_long4
def load_float(self):
self.append(float(self.readline()[:-1]))
dispatch[FLOAT] = load_float
def load_binfloat(self, unpack=struct.unpack):
self.append(unpack('>d', self.read(8))[0])
dispatch[BINFLOAT] = load_binfloat
def load_string(self):
rep = self.readline()[:-1]
for q in "\"'": # double or single quote
if rep.startswith(q):
if not rep.endswith(q):
raise ValueError, "insecure string pickle"
rep = rep[len(q):-len(q)]
break
else:
raise ValueError, "insecure string pickle"
self.append(rep.decode("string-escape"))
dispatch[STRING] = load_string
def load_binstring(self):
len = mloads('i' + self.read(4))
self.append(self.read(len))
dispatch[BINSTRING] = load_binstring
def load_unicode(self):
self.append(unicode(self.readline()[:-1],'raw-unicode-escape'))
dispatch[UNICODE] = load_unicode
def load_binunicode(self):
len = mloads('i' + self.read(4))
self.append(unicode(self.read(len),'utf-8'))
dispatch[BINUNICODE] = load_binunicode
def load_short_binstring(self):
len = ord(self.read(1))
self.append(self.read(len))
dispatch[SHORT_BINSTRING] = load_short_binstring
def load_tuple(self):
k = self.marker()
self.stack[k:] = [tuple(self.stack[k+1:])]
dispatch[TUPLE] = load_tuple
def load_empty_tuple(self):
self.stack.append(())
dispatch[EMPTY_TUPLE] = load_empty_tuple
def load_tuple1(self):
self.stack[-1] = (self.stack[-1],)
dispatch[TUPLE1] = load_tuple1
def load_tuple2(self):
self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
dispatch[TUPLE2] = load_tuple2
def load_tuple3(self):
self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
dispatch[TUPLE3] = load_tuple3
def load_empty_list(self):
self.stack.append([])
dispatch[EMPTY_LIST] = load_empty_list
def load_empty_dictionary(self):
self.stack.append({})
dispatch[EMPTY_DICT] = load_empty_dictionary
def load_list(self):
k = self.marker()
self.stack[k:] = [self.stack[k+1:]]
dispatch[LIST] = load_list
def load_dict(self):
k = self.marker()
d = {}
items = self.stack[k+1:]
for i in range(0, len(items), 2):
key = items[i]
value = items[i+1]
d[key] = value
self.stack[k:] = [d]
dispatch[DICT] = load_dict
# INST and OBJ differ only in how they get a class object. It's not
# only sensible to do the rest in a common routine, the two routines
# previously diverged and grew different bugs.
# klass is the class to instantiate, and k points to the topmost mark
# object, following which are the arguments for klass.__init__.
def _instantiate(self, klass, k):
args = tuple(self.stack[k+1:])
del self.stack[k:]
instantiated = 0
if (not args and
type(klass) is ClassType and
not hasattr(klass, "__getinitargs__")):
try:
value = _EmptyClass()
value.__class__ = klass
instantiated = 1
except RuntimeError:
# In restricted execution, assignment to inst.__class__ is
# prohibited
pass
if not instantiated:
try:
value = klass(*args)
except TypeError, err:
raise TypeError, "in constructor for %s: %s" % (
klass.__name__, str(err)), sys.exc_info()[2]
self.append(value)
def load_inst(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
self._instantiate(klass, self.marker())
dispatch[INST] = load_inst
def load_obj(self):
# Stack is ... markobject classobject arg1 arg2 ...
k = self.marker()
klass = self.stack.pop(k+1)
self._instantiate(klass, k)
dispatch[OBJ] = load_obj
def load_newobj(self):
args = self.stack.pop()
cls = self.stack[-1]
obj = cls.__new__(cls, *args)
self.stack[-1] = obj
dispatch[NEWOBJ] = load_newobj
def load_global(self):
module = self.readline()[:-1]
name = self.readline()[:-1]
klass = self.find_class(module, name)
self.append(klass)
dispatch[GLOBAL] = load_global
def load_ext1(self):
code = ord(self.read(1))
self.get_extension(code)
dispatch[EXT1] = load_ext1
def load_ext2(self):
code = mloads('i' + self.read(2) + '\000\000')
self.get_extension(code)
dispatch[EXT2] = load_ext2
def load_ext4(self):
code = mloads('i' + self.read(4))
self.get_extension(code)
dispatch[EXT4] = load_ext4
def get_extension(self, code):
nil = []
obj = _extension_cache.get(code, nil)
if obj is not nil:
self.append(obj)
return
key = _inverted_registry.get(code)
if not key:
raise ValueError("unregistered extension code %d" % code)
obj = self.find_class(*key)
_extension_cache[code] = obj
self.append(obj)
def find_class(self, module, name):
# Subclasses may override this
__import__(module)
mod = sys.modules[module]
klass = getattr(mod, name)
return klass
def load_reduce(self):
stack = self.stack
args = stack.pop()
func = stack[-1]
value = func(*args)
stack[-1] = value
dispatch[REDUCE] = load_reduce
def load_pop(self):
del self.stack[-1]
dispatch[POP] = load_pop
def load_pop_mark(self):
k = self.marker()
del self.stack[k:]
dispatch[POP_MARK] = load_pop_mark
def load_dup(self):
self.append(self.stack[-1])
dispatch[DUP] = load_dup
def load_get(self):
self.append(self.memo[self.readline()[:-1]])
dispatch[GET] = load_get
def load_binget(self):
i = ord(self.read(1))
self.append(self.memo[repr(i)])
dispatch[BINGET] = load_binget
def load_long_binget(self):
i = mloads('i' + self.read(4))
self.append(self.memo[repr(i)])
dispatch[LONG_BINGET] = load_long_binget
def load_put(self):
self.memo[self.readline()[:-1]] = self.stack[-1]
dispatch[PUT] = load_put
def load_binput(self):
i = ord(self.read(1))
self.memo[repr(i)] = self.stack[-1]
dispatch[BINPUT] = load_binput
def load_long_binput(self):
i = mloads('i' + self.read(4))
self.memo[repr(i)] = self.stack[-1]
dispatch[LONG_BINPUT] = load_long_binput
def load_append(self):
stack = self.stack
value = stack.pop()
list = stack[-1]
list.append(value)
dispatch[APPEND] = load_append
def load_appends(self):
stack = self.stack
mark = self.marker()
list = stack[mark - 1]
list.extend(stack[mark + 1:])
del stack[mark:]
dispatch[APPENDS] = load_appends
def load_setitem(self):
stack = self.stack
value = stack.pop()
key = stack.pop()
dict = stack[-1]
dict[key] = value
dispatch[SETITEM] = load_setitem
def load_setitems(self):
stack = self.stack
mark = self.marker()
dict = stack[mark - 1]
for i in range(mark + 1, len(stack), 2):
dict[stack[i]] = stack[i + 1]
del stack[mark:]
dispatch[SETITEMS] = load_setitems
def load_build(self):
stack = self.stack
state = stack.pop()
inst = stack[-1]
setstate = getattr(inst, "__setstate__", None)
if setstate:
setstate(state)
return
slotstate = None
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if state:
try:
d = inst.__dict__
try:
for k, v in state.iteritems():
d[intern(k)] = v
# keys in state don't have to be strings
# don't blow up, but don't go out of our way
except TypeError:
d.update(state)
except RuntimeError:
# XXX In restricted execution, the instance's __dict__
# is not accessible. Use the old way of unpickling
# the instance variables. This is a semantic
# difference when unpickling in restricted
# vs. unrestricted modes.
# Note, however, that cPickle has never tried to do the
# .update() business, and always uses
# PyObject_SetItem(inst.__dict__, key, value) in a
# loop over state.items().
for k, v in state.items():
setattr(inst, k, v)
if slotstate:
for k, v in slotstate.items():
setattr(inst, k, v)
dispatch[BUILD] = load_build
def load_mark(self):
self.append(self.mark)
dispatch[MARK] = load_mark
def load_stop(self):
value = self.stack.pop()
raise _Stop(value)
dispatch[STOP] = load_stop
# Helper class for load_inst/load_obj
class _EmptyClass:
pass
# Encode/decode longs in linear time.
import binascii as _binascii
def encode_long(x):
r"""Encode a long to a two's complement little-endian binary string.
Note that 0L is a special case, returning an empty string, to save a
byte in the LONG1 pickling context.
# bug 24549
#>>> encode_long(0L)
#''
#>>> encode_long(255L)
#'\xff\x00'
#>>> encode_long(32767L)
#'\xff\x7f'
#>>> encode_long(-256L)
#'\x00\xff'
#>>> encode_long(-32768L)
#'\x00\x80'
#>>> encode_long(-128L)
#'\x80'
#>>> encode_long(127L)
#'\x7f'
#>>>
"""
if x == 0:
return ''
if x > 0:
ashex = hex(x)
assert ashex.startswith("0x")
njunkchars = 2 + ashex.endswith('L')
nibbles = len(ashex) - njunkchars
if nibbles & 1:
# need an even # of nibbles for unhexlify
ashex = "0x0" + ashex[2:]
elif int(ashex[2], 16) >= 8:
# "looks negative", so need a byte of sign bits
ashex = "0x00" + ashex[2:]
else:
# Build the 256's-complement: (1L << nbytes) + x. The trick is
# to find the number of bytes in linear time (although that should
# really be a constant-time task).
ashex = hex(-x)
assert ashex.startswith("0x")
njunkchars = 2 + ashex.endswith('L')
nibbles = len(ashex) - njunkchars
if nibbles & 1:
# Extend to a full byte.
nibbles += 1
nbits = nibbles * 4
x += 1L << nbits
assert x > 0
ashex = hex(x)
njunkchars = 2 + ashex.endswith('L')
newnibbles = len(ashex) - njunkchars
if newnibbles < nibbles:
ashex = "0x" + "0" * (nibbles - newnibbles) + ashex[2:]
if int(ashex[2], 16) < 8:
# "looks positive", so need a byte of sign bits
ashex = "0xff" + ashex[2:]
if ashex.endswith('L'):
ashex = ashex[2:-1]
else:
ashex = ashex[2:]
assert len(ashex) & 1 == 0, (x, ashex)
binary = _binascii.unhexlify(ashex)
return binary[::-1]
def decode_long(data):
r"""Decode a long from a two's complement little-endian binary string.
>>> decode_long('')
0L
>>> decode_long("\xff\x00")
255L
>>> decode_long("\xff\x7f")
32767L
>>> decode_long("\x00\xff")
-256L
>>> decode_long("\x00\x80")
-32768L
>>> decode_long("\x80")
-128L
>>> decode_long("\x7f")
127L
"""
nbytes = len(data)
if nbytes == 0:
return 0L
ashex = _binascii.hexlify(data[::-1])
n = long(ashex, 16) # quadratic time before Python 2.3; linear now
if data[-1] >= '\x80':
n -= 1L << (nbytes * 8)
return n
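# Round-trip sketch (illustrative, not part of the original file):
# encode_long() and decode_long() are inverses for any value, e.g.
#
#   >>> decode_long(encode_long(255L))
#   255L
#   >>> decode_long(encode_long(-32768L))
#   -32768L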
# Shorthands
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
def dump(obj, file, protocol=None):
Pickler(file, protocol).dump(obj)
def dumps(obj, protocol=None):
file = StringIO()
Pickler(file, protocol).dump(obj)
return file.getvalue()
def load(file):
return Unpickler(file).load()
def loads(str):
file = StringIO(str)
return Unpickler(file).load()
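# Minimal usage sketch (illustrative only):
#
#   >>> blob = dumps({'spam': [1, 2, 3]}, 2)
#   >>> loads(blob)
#   {'spam': [1, 2, 3]}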
# Doctest
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
_test()
| gpl-3.0 |
jjmleiro/hue | desktop/core/ext-py/cffi-1.5.2/demo/pyobj.py | 13 | 3399 |
referents = [] # list "object descriptor -> python object"
freelist = None
def store(x):
"Store the object 'x' and returns a new object descriptor for it."
global freelist
p = freelist
if p is None:
p = len(referents)
referents.append(x)
else:
freelist = referents[p]
referents[p] = x
return p
def discard(p):
"""Discard (i.e. close) the object descriptor 'p'.
Return the original object that was attached to 'p'."""
global freelist
x = referents[p]
referents[p] = freelist
freelist = p
return x
class Ref(object):
"""For use in 'with Ref(x) as ob': open an object descriptor
and returns it in 'ob', and close it automatically when the
'with' statement finishes."""
def __init__(self, x):
self.x = x
def __enter__(self):
self.p = p = store(self.x)
return p
def __exit__(self, *args):
discard(self.p)
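# Usage sketch (illustrative, not part of the original demo): descriptors can
# be managed by hand with store()/discard(), or automatically with Ref():
#
#   p = store([1, 2, 3])          # returns a small integer descriptor
#   assert referents[p] == [1, 2, 3]
#   discard(p)                    # slot is recycled via the freelist
#
#   with Ref("hello") as q:       # descriptor is valid only inside the block
#       assert referents[q] == "hello"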
def count_pyobj_alive():
result = len(referents)
p = freelist
while p is not None:
assert result > 0
result -= 1
p = referents[p]
return result
# ------------------------------------------------------------
if __name__ == '__main__':
import api
ffi = api.PythonFFI()
ffi.cdef("""
typedef int pyobj_t;
int sum_integers(pyobj_t p_list);
pyobj_t sum_objects(pyobj_t p_list, pyobj_t p_initial);
""")
@ffi.pyexport("int(pyobj_t)")
def length(p_list):
list = referents[p_list]
return len(list)
@ffi.pyexport("int(pyobj_t, int)")
def getitem(p_list, index):
list = referents[p_list]
return list[index]
@ffi.pyexport("pyobj_t(pyobj_t)")
def pyobj_dup(p):
return store(referents[p])
@ffi.pyexport("void(pyobj_t)")
def pyobj_close(p):
discard(p)
@ffi.pyexport("pyobj_t(pyobj_t, int)")
def pyobj_getitem(p_list, index):
list = referents[p_list]
return store(list[index])
@ffi.pyexport("pyobj_t(pyobj_t, pyobj_t)")
def pyobj_add(p1, p2):
return store(referents[p1] + referents[p2])
lib = ffi.verify("""
typedef int pyobj_t; /* an "object descriptor" number */
int sum_integers(pyobj_t p_list) {
/* this a demo function written in C, using the API
defined above: length() and getitem(). */
int i, result = 0;
int count = length(p_list);
for (i=0; i<count; i++) {
int n = getitem(p_list, i);
result += n;
}
return result;
}
pyobj_t sum_objects(pyobj_t p_list, pyobj_t p_initial) {
/* same as above, but keeps all additions as Python objects */
int i;
int count = length(p_list);
pyobj_t p1 = pyobj_dup(p_initial);
for (i=0; i<count; i++) {
pyobj_t p2 = pyobj_getitem(p_list, i);
pyobj_t p3 = pyobj_add(p1, p2);
pyobj_close(p2);
pyobj_close(p1);
p1 = p3;
}
return p1;
}
""")
with Ref([10, 20, 30, 40]) as p_list:
print lib.sum_integers(p_list)
with Ref(5) as p_initial:
result = discard(lib.sum_objects(p_list, p_initial))
print result
assert count_pyobj_alive() == 0
| apache-2.0 |
ActiveState/code | recipes/Python/579037_How_execute_x86_64bit_assembly_code_directly/recipe-579037.py | 1 | 1537 | #!/usr/bin/env python
import subprocess, os, tempfile
from ctypes import *
PAGE_SIZE = 4096
class AssemblerFunction(object):
def __init__(self, code, ret_type, *arg_types):
# Run Nasm
fd, source = tempfile.mkstemp(".S", "assembly", os.getcwd())
os.write(fd, code)
os.close(fd)
target = os.path.splitext(source)[0]
subprocess.check_call(["nasm",source])
os.unlink(source)
binary = file(target,"rb").read()
os.unlink(target)
bin_len = len(binary)
# align our code on page boundary.
self.code_buffer = create_string_buffer(PAGE_SIZE*2+bin_len)
addr = (addressof(self.code_buffer) + PAGE_SIZE) & (~(PAGE_SIZE-1))
memmove(addr, binary, bin_len)
# Change memory protection
self.mprotect = cdll.LoadLibrary("libc.so.6").mprotect
mp_ret = self.mprotect(addr, bin_len, 4) # execute only.
if mp_ret: raise OSError("Unable to change memory protection")
self.func = CFUNCTYPE(ret_type, *arg_types)(addr)
self.addr = addr
self.bin_len = bin_len
def __call__(self, *args):
return self.func(*args)
def __del__(self):
# Revert memory protection
if hasattr(self,"mprotect"):
self.mprotect(self.addr, self.bin_len, 3)
if __name__ == "__main__":
add_func = """ BITS 64
mov rax, rdi ; Move the first parameter
add rax, rsi ; add the second parameter
ret ; rax will be returned
"""
Add = AssemblerFunction(add_func, c_int, c_int, c_int)
print Add(1, 2)
| mit |