repo_name | path | copies | size | content | license
---|---|---|---|---|---|
a-doumoulakis/tensorflow | tensorflow/contrib/slim/python/slim/data/dataset.py | 163 | 2444 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of a Dataset.
A Dataset is a collection of several components: (1) a list of data sources,
(2) a Reader class that can read those sources and return possibly encoded
samples of data, (3) a decoder that decodes each sample of data provided by the
reader, (4) the total number of samples, and (5) an optional dictionary mapping
the list of items returned to a description of those items.
Data can be loaded from a dataset specification using a dataset_data_provider:
dataset = CreateMyDataset(...)
provider = dataset_data_provider.DatasetDataProvider(
dataset, shuffle=False)
image, label = provider.get(['image', 'label'])
See slim.data.dataset_data_provider for additional examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Dataset(object):
"""Represents a Dataset specification."""
def __init__(self, data_sources, reader, decoder, num_samples,
items_to_descriptions, **kwargs):
"""Initializes the dataset.
Args:
data_sources: A list of files that make up the dataset.
reader: The reader class, a subclass of BaseReader such as TextLineReader
or TFRecordReader.
decoder: An instance of a data_decoder.
num_samples: The number of samples in the dataset.
items_to_descriptions: A map from the items that the dataset provides to
the descriptions of those items.
**kwargs: Any remaining dataset-specific fields.
"""
kwargs['data_sources'] = data_sources
kwargs['reader'] = reader
kwargs['decoder'] = decoder
kwargs['num_samples'] = num_samples
kwargs['items_to_descriptions'] = items_to_descriptions
self.__dict__.update(kwargs)
| apache-2.0 |
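For orientation, here is a minimal sketch of constructing a Dataset like the one defined above using TF-Slim's example decoder; the shard pattern, sample count, and feature keys are illustrative assumptions, not values from this file.

import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import dataset
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder

# Hypothetical TF-Example feature spec for an image classification set.
keys_to_features = {
    'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
    'image/format': tf.FixedLenFeature((), tf.string, default_value='jpeg'),
    'image/class/label': tf.FixedLenFeature([], tf.int64, default_value=0),
}
items_to_handlers = {
    'image': tfexample_decoder.Image(),
    'label': tfexample_decoder.Tensor('image/class/label'),
}

my_dataset = dataset.Dataset(
    data_sources=['/tmp/train-*.tfrecord'],  # hypothetical shard pattern
    reader=tf.TFRecordReader,
    decoder=tfexample_decoder.TFExampleDecoder(keys_to_features,
                                               items_to_handlers),
    num_samples=10000,  # hypothetical count
    items_to_descriptions={'image': 'A color image.',
                           'label': 'An integer class label.'})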
jianglu/mojo | tools/clang/scripts/run_tool.py | 34 | 11858 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Wrapper script to help run clang tools across Chromium code.
How to use this tool:
If you want to run the tool across all Chromium code:
run_tool.py <tool> <path/to/compiledb>
If you want to include all files mentioned in the compilation database:
run_tool.py <tool> <path/to/compiledb> --all
If you want to run the tool across just chrome/browser and content/browser:
run_tool.py <tool> <path/to/compiledb> chrome/browser content/browser
Please see https://code.google.com/p/chromium/wiki/ClangToolRefactoring, which
documents the entire automated refactoring flow in Chromium, for more details.
Why use this tool:
The clang tool implementation doesn't take advantage of multiple cores, and if
it fails mysteriously in the middle, all the generated replacements will be
lost.
Unfortunately, if the work is simply sharded across multiple cores by running
multiple RefactoringTools, problems arise when they attempt to rewrite a file at
the same time. To work around that, clang tools that are run using this tool
should output edits to stdout in the following format:
==== BEGIN EDITS ====
r:::<file path>:::<offset>:::<length>:::<replacement text>
r:::<file path>:::<offset>:::<length>:::<replacement text>
...etc...
==== END EDITS ====
Any generated edits are applied once the clang tool has finished running
across Chromium, regardless of whether some instances failed or not.
"""
import collections
import functools
import json
import multiprocessing
import os.path
import pipes
import subprocess
import sys
Edit = collections.namedtuple(
'Edit', ('edit_type', 'offset', 'length', 'replacement'))
def _GetFilesFromGit(paths = None):
"""Gets the list of files in the git repository.
Args:
paths: Prefix filter for the returned paths. May contain multiple entries.
"""
args = []
if sys.platform == 'win32':
args.append('git.bat')
else:
args.append('git')
args.append('ls-files')
if paths:
args.extend(paths)
command = subprocess.Popen(args, stdout=subprocess.PIPE)
output, _ = command.communicate()
return [os.path.realpath(p) for p in output.splitlines()]
def _GetFilesFromCompileDB(build_directory):
""" Gets the list of files mentioned in the compilation database.
Args:
build_directory: Directory that contains the compile database.
"""
compiledb_path = os.path.join(build_directory, 'compile_commands.json')
with open(compiledb_path, 'rb') as compiledb_file:
json_commands = json.load(compiledb_file)
return [os.path.join(entry['directory'], entry['file'])
for entry in json_commands]
def _ExtractEditsFromStdout(build_directory, stdout):
"""Extracts generated list of edits from the tool's stdout.
The expected format is documented at the top of this file.
Args:
build_directory: Directory that contains the compile database. Used to
normalize the filenames.
stdout: The stdout from running the clang tool.
Returns:
A dictionary mapping filenames to the associated edits.
"""
lines = stdout.splitlines()
start_index = lines.index('==== BEGIN EDITS ====')
end_index = lines.index('==== END EDITS ====')
edits = collections.defaultdict(list)
for line in lines[start_index + 1:end_index]:
try:
edit_type, path, offset, length, replacement = line.split(':::', 4)
replacement = replacement.replace("\0", "\n")
# Normalize the file path emitted by the clang tool.
path = os.path.realpath(os.path.join(build_directory, path))
edits[path].append(Edit(edit_type, int(offset), int(length), replacement))
except ValueError:
print 'Unable to parse edit: %s' % line
return edits
def _ExecuteTool(toolname, build_directory, filename):
"""Executes the tool.
This is defined outside the class so it can be pickled for the multiprocessing
module.
Args:
toolname: Path to the tool to execute.
build_directory: Directory that contains the compile database.
filename: The file to run the tool over.
Returns:
A dictionary that must contain the key "status" and a boolean value
associated with it.
If status is True, then the generated edits are stored with the key "edits"
in the dictionary.
Otherwise, the filename and the output from stderr are associated with the
keys "filename" and "stderr" respectively.
"""
command = subprocess.Popen((toolname, '-p', build_directory, filename),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = command.communicate()
if command.returncode != 0:
return {'status': False, 'filename': filename, 'stderr': stderr}
else:
return {'status': True,
'edits': _ExtractEditsFromStdout(build_directory, stdout)}
class _CompilerDispatcher(object):
"""Multiprocessing controller for running clang tools in parallel."""
def __init__(self, toolname, build_directory, filenames):
"""Initializer method.
Args:
toolname: Path to the tool to execute.
build_directory: Directory that contains the compile database.
filenames: The files to run the tool over.
"""
self.__toolname = toolname
self.__build_directory = build_directory
self.__filenames = filenames
self.__success_count = 0
self.__failed_count = 0
self.__edit_count = 0
self.__edits = collections.defaultdict(list)
@property
def edits(self):
return self.__edits
@property
def failed_count(self):
return self.__failed_count
def Run(self):
"""Does the grunt work."""
pool = multiprocessing.Pool()
result_iterator = pool.imap_unordered(
functools.partial(_ExecuteTool, self.__toolname,
self.__build_directory),
self.__filenames)
for result in result_iterator:
self.__ProcessResult(result)
sys.stdout.write('\n')
sys.stdout.flush()
def __ProcessResult(self, result):
"""Handles result processing.
Args:
result: The result dictionary returned by _ExecuteTool.
"""
if result['status']:
self.__success_count += 1
for k, v in result['edits'].iteritems():
self.__edits[k].extend(v)
self.__edit_count += len(v)
else:
self.__failed_count += 1
sys.stdout.write('\nFailed to process %s\n' % result['filename'])
sys.stdout.write(result['stderr'])
sys.stdout.write('\n')
percentage = (
float(self.__success_count + self.__failed_count) /
len(self.__filenames)) * 100
sys.stdout.write('Succeeded: %d, Failed: %d, Edits: %d [%.2f%%]\r' % (
self.__success_count, self.__failed_count, self.__edit_count,
percentage))
sys.stdout.flush()
def _ApplyEdits(edits, clang_format_diff_path):
"""Apply the generated edits.
Args:
edits: A dict mapping filenames to Edit instances that apply to that file.
clang_format_diff_path: Path to the clang-format-diff.py helper to help
automatically reformat diffs to avoid style violations. Pass None if the
clang-format step should be skipped.
"""
edit_count = 0
for k, v in edits.iteritems():
# Sort the edits and iterate through them in reverse order. Sorting allows
# duplicate edits to be quickly skipped, while reversing means that
# subsequent edits don't need to have their offsets updated with each edit
# applied.
v.sort()
last_edit = None
with open(k, 'rb+') as f:
contents = bytearray(f.read())
for edit in reversed(v):
if edit == last_edit:
continue
last_edit = edit
contents[edit.offset:edit.offset + edit.length] = edit.replacement
if not edit.replacement:
_ExtendDeletionIfElementIsInList(contents, edit.offset)
edit_count += 1
f.seek(0)
f.truncate()
f.write(contents)
if clang_format_diff_path:
# TODO(dcheng): python3.3 exposes this publicly as shlex.quote, but Chrome
# uses python2.7. Use the deprecated interface until Chrome uses a newer
# Python.
if subprocess.call('git diff -U0 %s | python %s -i -p1 -style=file ' % (
pipes.quote(k), clang_format_diff_path), shell=True) != 0:
print 'clang-format failed for %s' % k
print 'Applied %d edits to %d files' % (edit_count, len(edits))
_WHITESPACE_BYTES = frozenset((ord('\t'), ord('\n'), ord('\r'), ord(' ')))
def _ExtendDeletionIfElementIsInList(contents, offset):
"""Extends the range of a deletion if the deleted element was part of a list.
This rewriter helper makes it easy for refactoring tools to remove elements
from a list. Even if a matcher callback knows that it is removing an element
from a list, it may not have enough information to accurately remove the list
element; for example, another matcher callback may end up removing an adjacent
list element, or all the list elements may end up being removed.
With this helper, refactoring tools can simply remove the list element and not
worry about having to include the comma in the replacement.
Args:
contents: A bytearray with the deletion already applied.
offset: The offset in the bytearray where the deleted range used to be.
"""
char_before = char_after = None
left_trim_count = 0
for byte in reversed(contents[:offset]):
left_trim_count += 1
if byte in _WHITESPACE_BYTES:
continue
if byte in (ord(','), ord(':'), ord('('), ord('{')):
char_before = chr(byte)
break
right_trim_count = 0
for byte in contents[offset:]:
right_trim_count += 1
if byte in _WHITESPACE_BYTES:
continue
if byte == ord(','):
char_after = chr(byte)
break
if char_before:
if char_after:
del contents[offset:offset + right_trim_count]
elif char_before in (',', ':'):
del contents[offset - left_trim_count:offset]
def main(argv):
if len(argv) < 2:
print 'Usage: run_tool.py <clang tool> <compile DB> <path 1> <path 2> ...'
print ' <clang tool> is the clang tool that should be run.'
print ' <compile db> is the directory that contains the compile database'
print ' <path 1> <path2> ... can be used to filter what files are edited'
return 1
clang_format_diff_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'../../../third_party/llvm/tools/clang/tools/clang-format',
'clang-format-diff.py')
# TODO(dcheng): Allow this to be controlled with a flag as well.
# TODO(dcheng): Shell escaping of args to git diff to clang-format is broken
# on Windows.
if not os.path.isfile(clang_format_diff_path) or sys.platform == 'win32':
clang_format_diff_path = None
if len(argv) == 3 and argv[2] == '--all':
filenames = set(_GetFilesFromCompileDB(argv[1]))
source_filenames = filenames
else:
filenames = set(_GetFilesFromGit(argv[2:]))
# Filter out files that aren't C/C++/Obj-C/Obj-C++.
extensions = frozenset(('.c', '.cc', '.m', '.mm'))
source_filenames = [f for f in filenames
if os.path.splitext(f)[1] in extensions]
dispatcher = _CompilerDispatcher(argv[0], argv[1], source_filenames)
dispatcher.Run()
# Filter out edits to files that aren't in the git repository, since it's not
# useful to modify files that aren't under source control--typically, these
# are generated files or files in a git submodule that's not part of Chromium.
_ApplyEdits({k : v for k, v in dispatcher.edits.iteritems()
if os.path.realpath(k) in filenames},
clang_format_diff_path)
if dispatcher.failed_count != 0:
return 2
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
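To make the edit format concrete, here is a small illustrative call to _ExtractEditsFromStdout; the build directory, file path, offsets, and replacement text below are made up.

sample_stdout = '\n'.join([
    '==== BEGIN EDITS ====',
    'r:::foo/bar.cc:::12:::5:::newName',
    '==== END EDITS ====',
])
edits = _ExtractEditsFromStdout('/path/to/build', sample_stdout)
# edits now maps os.path.realpath('/path/to/build/foo/bar.cc') to
# [Edit(edit_type='r', offset=12, length=5, replacement='newName')]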
DVegaCapital/zipline | zipline/utils/control_flow.py | 25 | 1295 | """
Control flow utilities.
"""
from warnings import (
catch_warnings,
filterwarnings,
)
class nullctx(object):
"""
Null context manager. Useful for conditionally adding a contextmanager in
a single line, e.g.:
with SomeContextManager() if some_expr else nullctx():
do_stuff()
"""
def __enter__(self):
return self
def __exit__(*args):
return False
class WarningContext(object):
"""
Re-entrant contextmanager for contextually managing warnings.
"""
def __init__(self, *warning_specs):
self._warning_specs = warning_specs
self._catchers = []
def __enter__(self):
catcher = catch_warnings()
catcher.__enter__()
self._catchers.append(catcher)
for args, kwargs in self._warning_specs:
filterwarnings(*args, **kwargs)
return catcher
def __exit__(self, *exc_info):
catcher = self._catchers.pop()
return catcher.__exit__(*exc_info)
def ignore_nanwarnings():
"""
Helper for building a WarningContext that ignores warnings from numpy's
nanfunctions.
"""
return WarningContext(
(
('ignore',),
{'category': RuntimeWarning, 'module': 'numpy.lib.nanfunctions'},
)
)
| apache-2.0 |
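A brief usage sketch for the helpers above; the all-NaN input is illustrative.

import numpy as np

# Without the context, np.nanmean over an all-NaN slice emits a
# RuntimeWarning from numpy.lib.nanfunctions; here it is filtered out.
with ignore_nanwarnings():
    result = np.nanmean(np.array([np.nan, np.nan]))  # nan, no warning printed

# nullctx enables one-line conditional context management:
suppress = False
with ignore_nanwarnings() if suppress else nullctx():
    result = np.nanmean(np.array([1.0, np.nan]))  # 1.0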
pdellaert/ansible | lib/ansible/modules/cloud/amazon/aws_batch_compute_environment.py | 9 | 16634 | #!/usr/bin/python
# Copyright (c) 2017 Jon Meran <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aws_batch_compute_environment
short_description: Manage AWS Batch Compute Environments
description:
- This module allows the management of AWS Batch Compute Environments.
It is idempotent and supports "Check" mode. Use module M(aws_batch_compute_environment) to manage the compute
environment, M(aws_batch_job_queue) to manage job queues, M(aws_batch_job_definition) to manage job definitions.
version_added: "2.5"
author: Jon Meran (@jonmer85)
options:
compute_environment_name:
description:
- The name for your compute environment. Up to 128 letters (uppercase and lowercase), numbers, and underscores
are allowed.
required: true
type:
description:
- The type of the compute environment.
required: true
choices: ["MANAGED", "UNMANAGED"]
state:
description:
- Describes the desired state.
default: "present"
choices: ["present", "absent"]
compute_environment_state:
description:
- The state of the compute environment. If the state is ENABLED, then the compute environment accepts jobs
from a queue and can scale out automatically based on queues.
default: "ENABLED"
choices: ["ENABLED", "DISABLED"]
service_role:
description:
- The full Amazon Resource Name (ARN) of the IAM role that allows AWS Batch to make calls to other AWS
services on your behalf.
required: true
compute_resource_type:
description:
- The type of compute resource.
required: true
choices: ["EC2", "SPOT"]
minv_cpus:
description:
- The minimum number of EC2 vCPUs that an environment should maintain.
required: true
maxv_cpus:
description:
- The maximum number of EC2 vCPUs that an environment can reach.
required: true
desiredv_cpus:
description:
- The desired number of EC2 vCPUS in the compute environment.
instance_types:
description:
- The instance types that may be launched.
required: true
image_id:
description:
- The Amazon Machine Image (AMI) ID used for instances launched in the compute environment.
subnets:
description:
- The VPC subnets into which the compute resources are launched.
required: true
security_group_ids:
description:
- The EC2 security groups that are associated with instances launched in the compute environment.
required: true
ec2_key_pair:
description:
- The EC2 key pair that is used for instances launched in the compute environment.
instance_role:
description:
- The Amazon ECS instance role applied to Amazon EC2 instances in a compute environment.
required: true
tags:
description:
- Key-value pair tags to be applied to resources that are launched in the compute environment.
bid_percentage:
description:
- The minimum percentage that a Spot Instance price must be when compared with the On-Demand price for that
instance type before instances are launched. For example, if your bid percentage is 20%, then the Spot price
must be below 20% of the current On-Demand price for that EC2 instance.
spot_iam_fleet_role:
description:
- The Amazon Resource Name (ARN) of the Amazon EC2 Spot Fleet IAM role applied to a SPOT compute environment.
requirements:
- boto3
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
---
- hosts: localhost
gather_facts: no
vars:
state: present
tasks:
- name: My Batch Compute Environment
aws_batch_compute_environment:
compute_environment_name: computeEnvironmentName
state: present
region: us-east-1
compute_environment_state: ENABLED
type: MANAGED
compute_resource_type: EC2
minv_cpus: 0
maxv_cpus: 2
desiredv_cpus: 1
instance_types:
- optimal
subnets:
- my-subnet1
- my-subnet2
security_group_ids:
- my-sg1
- my-sg2
instance_role: arn:aws:iam::<account>:instance-profile/<role>
tags:
tag1: value1
tag2: value2
service_role: arn:aws:iam::<account>:role/service-role/<role>
- name: show results
debug: var=aws_batch_compute_environment_action
'''
RETURN = '''
---
output:
description: "returns what action was taken, whether something was changed, invocation and response"
returned: always
sample:
batch_compute_environment_action: none
changed: false
invocation:
module_args:
aws_access_key: ~
aws_secret_key: ~
bid_percentage: ~
compute_environment_name: <name>
compute_environment_state: ENABLED
compute_resource_type: EC2
desiredv_cpus: 0
ec2_key_pair: ~
ec2_url: ~
image_id: ~
instance_role: "arn:aws:iam::..."
instance_types:
- optimal
maxv_cpus: 8
minv_cpus: 0
profile: ~
region: us-east-1
security_group_ids:
- "*******"
security_token: ~
service_role: "arn:aws:iam::...."
spot_iam_fleet_role: ~
state: present
subnets:
- "******"
tags:
Environment: <name>
Name: <name>
type: MANAGED
validate_certs: true
response:
computeEnvironmentArn: "arn:aws:batch:...."
computeEnvironmentName: <name>
computeResources:
desiredvCpus: 0
instanceRole: "arn:aws:iam::..."
instanceTypes:
- optimal
maxvCpus: 8
minvCpus: 0
securityGroupIds:
- "******"
subnets:
- "*******"
tags:
Environment: <name>
Name: <name>
type: EC2
ecsClusterArn: "arn:aws:ecs:....."
serviceRole: "arn:aws:iam::..."
state: ENABLED
status: VALID
statusReason: "ComputeEnvironment Healthy"
type: MANAGED
type: dict
'''
from ansible.module_utils._text import to_native
from ansible.module_utils.aws.batch import AWSConnection
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, HAS_BOTO3
from ansible.module_utils.ec2 import snake_dict_to_camel_dict, camel_dict_to_snake_dict
import re
import traceback
try:
from botocore.exceptions import ClientError, ParamValidationError, MissingParametersError
except ImportError:
pass # Handled by HAS_BOTO3
# ---------------------------------------------------------------------------------------------------
#
# Helper Functions & classes
#
# ---------------------------------------------------------------------------------------------------
def set_api_params(module, module_params):
"""
Sets module parameters to those expected by the boto3 API.
:param module:
:param module_params:
:return:
"""
api_params = dict((k, v) for k, v in dict(module.params).items() if k in module_params and v is not None)
return snake_dict_to_camel_dict(api_params)
def validate_params(module, aws):
"""
Performs basic parameter validation.
:param module:
:param aws:
:return:
"""
compute_environment_name = module.params['compute_environment_name']
# validate compute environment name
if not re.search(r'^[\w\_:]+$', compute_environment_name):
module.fail_json(
msg="Function compute_environment_name {0} is invalid. Names must contain only alphanumeric characters "
"and underscores.".format(compute_environment_name)
)
if not compute_environment_name.startswith('arn:aws:batch:'):
if len(compute_environment_name) > 128:
module.fail_json(msg='compute_environment_name "{0}" exceeds 128 character limit'
.format(compute_environment_name))
return
# ---------------------------------------------------------------------------------------------------
#
# Batch Compute Environment functions
#
# ---------------------------------------------------------------------------------------------------
def get_current_compute_environment(module, connection):
try:
environments = connection.client().describe_compute_environments(
computeEnvironments=[module.params['compute_environment_name']]
)
if len(environments['computeEnvironments']) > 0:
return environments['computeEnvironments'][0]
else:
return None
except ClientError:
return None
def create_compute_environment(module, aws):
"""
Adds a Batch compute environment
:param module:
:param aws:
:return:
"""
client = aws.client('batch')
changed = False
# set API parameters
params = (
'compute_environment_name', 'type', 'service_role')
api_params = set_api_params(module, params)
if module.params['compute_environment_state'] is not None:
api_params['state'] = module.params['compute_environment_state']
compute_resources_param_list = ('minv_cpus', 'maxv_cpus', 'desiredv_cpus', 'instance_types', 'image_id', 'subnets',
'security_group_ids', 'ec2_key_pair', 'instance_role', 'tags', 'bid_percentage',
'spot_iam_fleet_role')
compute_resources_params = set_api_params(module, compute_resources_param_list)
if module.params['compute_resource_type'] is not None:
compute_resources_params['type'] = module.params['compute_resource_type']
# if module.params['minv_cpus'] is not None:
# compute_resources_params['minvCpus'] = module.params['minv_cpus']
api_params['computeResources'] = compute_resources_params
try:
if not module.check_mode:
client.create_compute_environment(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error creating compute environment: {0}'.format(to_native(e)),
exception=traceback.format_exc())
return changed
def remove_compute_environment(module, aws):
"""
Remove a Batch compute environment
:param module:
:param aws:
:return:
"""
client = aws.client('batch')
changed = False
# set API parameters
api_params = {'computeEnvironment': module.params['compute_environment_name']}
try:
if not module.check_mode:
client.delete_compute_environment(**api_params)
changed = True
except (ClientError, ParamValidationError, MissingParametersError) as e:
module.fail_json(msg='Error removing compute environment: {0}'.format(to_native(e)),
exception=traceback.format_exc())
return changed
def manage_state(module, aws):
changed = False
current_state = 'absent'
state = module.params['state']
compute_environment_state = module.params['compute_environment_state']
compute_environment_name = module.params['compute_environment_name']
service_role = module.params['service_role']
minv_cpus = module.params['minv_cpus']
maxv_cpus = module.params['maxv_cpus']
desiredv_cpus = module.params['desiredv_cpus']
action_taken = 'none'
update_env_response = ''
check_mode = module.check_mode
# check if the compute environment exists
current_compute_environment = get_current_compute_environment(module, aws)
response = current_compute_environment
if current_compute_environment:
current_state = 'present'
if state == 'present':
if current_state == 'present':
updates = False
# Update Batch Compute Environment configuration
compute_kwargs = {'computeEnvironment': compute_environment_name}
# Update configuration if needed
compute_resources = {}
if compute_environment_state and current_compute_environment['state'] != compute_environment_state:
compute_kwargs.update({'state': compute_environment_state})
updates = True
if service_role and current_compute_environment['serviceRole'] != service_role:
compute_kwargs.update({'serviceRole': service_role})
updates = True
if minv_cpus is not None and current_compute_environment['computeResources']['minvCpus'] != minv_cpus:
compute_resources['minvCpus'] = minv_cpus
if maxv_cpus is not None and current_compute_environment['computeResources']['maxvCpus'] != maxv_cpus:
compute_resources['maxvCpus'] = maxv_cpus
if desiredv_cpus is not None and current_compute_environment['computeResources']['desiredvCpus'] != desiredv_cpus:
compute_resources['desiredvCpus'] = desiredv_cpus
if len(compute_resources) > 0:
compute_kwargs['computeResources'] = compute_resources
updates = True
if updates:
try:
if not check_mode:
update_env_response = aws.client().update_compute_environment(**compute_kwargs)
if not update_env_response:
module.fail_json(msg='Unable to get compute environment information after updating')
changed = True
action_taken = "updated"
except (ParamValidationError, ClientError) as e:
module.fail_json(msg="Unable to update environment: {0}".format(to_native(e)),
exception=traceback.format_exc())
else:
# Create Batch Compute Environment
changed = create_compute_environment(module, aws)
# Describe compute environment
action_taken = 'added'
response = get_current_compute_environment(module, aws)
if not response:
module.fail_json(msg='Unable to get compute environment information after creating')
else:
if current_state == 'present':
# remove the compute environment
changed = remove_compute_environment(module, aws)
action_taken = 'deleted'
return dict(changed=changed, batch_compute_environment_action=action_taken, response=response)
# ---------------------------------------------------------------------------------------------------
#
# MAIN
#
# ---------------------------------------------------------------------------------------------------
def main():
"""
Main entry point.
:return dict: changed, batch_compute_environment_action, response
"""
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent']),
compute_environment_name=dict(required=True),
type=dict(required=True, choices=['MANAGED', 'UNMANAGED']),
compute_environment_state=dict(required=False, default='ENABLED', choices=['ENABLED', 'DISABLED']),
service_role=dict(required=True),
compute_resource_type=dict(required=True, choices=['EC2', 'SPOT']),
minv_cpus=dict(type='int', required=True),
maxv_cpus=dict(type='int', required=True),
desiredv_cpus=dict(type='int'),
instance_types=dict(type='list', required=True),
image_id=dict(),
subnets=dict(type='list', required=True),
security_group_ids=dict(type='list', required=True),
ec2_key_pair=dict(),
instance_role=dict(required=True),
tags=dict(type='dict'),
bid_percentage=dict(type='int'),
spot_iam_fleet_role=dict(),
region=dict(aliases=['aws_region', 'ec2_region'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
# validate dependencies
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required for this module.')
aws = AWSConnection(module, ['batch'])
validate_params(module, aws)
results = manage_state(module, aws)
module.exit_json(**camel_dict_to_snake_dict(results, ignore_list=['Tags']))
if __name__ == '__main__':
main()
| gpl-3.0 |
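As a quick illustration of the parameter translation that set_api_params performs for boto3 (the parameter values here are hypothetical):

from ansible.module_utils.ec2 import snake_dict_to_camel_dict

params = {'compute_environment_name': 'myEnv',            # hypothetical
          'service_role': 'arn:aws:iam::123:role/batch',  # hypothetical ARN
          'image_id': None}
wanted = ('compute_environment_name', 'service_role', 'image_id')
# The same filtering set_api_params applies: unset (None) keys are dropped
# before the snake_case -> camelCase conversion.
api_params = dict((k, v) for k, v in params.items()
                  if k in wanted and v is not None)
print(snake_dict_to_camel_dict(api_params))
# {'computeEnvironmentName': 'myEnv',
#  'serviceRole': 'arn:aws:iam::123:role/batch'}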
vsajip/django | tests/regressiontests/localflavor/generic/tests.py | 57 | 4361 | from __future__ import unicode_literals
import datetime
from django.contrib.localflavor.generic.forms import DateField, DateTimeField
from django.test import SimpleTestCase
class GenericLocalFlavorTests(SimpleTestCase):
def test_GenericDateField(self):
error_invalid = ['Enter a valid date.']
valid = {
datetime.date(2006, 10, 25): datetime.date(2006, 10, 25),
datetime.datetime(2006, 10, 25, 14, 30): datetime.date(2006, 10, 25),
datetime.datetime(2006, 10, 25, 14, 30, 59): datetime.date(2006, 10, 25),
datetime.datetime(2006, 10, 25, 14, 30, 59, 200): datetime.date(2006, 10, 25),
'2006-10-25': datetime.date(2006, 10, 25),
'25/10/2006': datetime.date(2006, 10, 25),
'25/10/06': datetime.date(2006, 10, 25),
'Oct 25 2006': datetime.date(2006, 10, 25),
'October 25 2006': datetime.date(2006, 10, 25),
'October 25, 2006': datetime.date(2006, 10, 25),
'25 October 2006': datetime.date(2006, 10, 25),
'25 October, 2006': datetime.date(2006, 10, 25),
}
invalid = {
'2006-4-31': error_invalid,
'200a-10-25': error_invalid,
'10/25/06': error_invalid,
}
self.assertFieldOutput(DateField, valid, invalid, empty_value=None)
# DateField with optional input_formats parameter
valid = {
datetime.date(2006, 10, 25): datetime.date(2006, 10, 25),
datetime.datetime(2006, 10, 25, 14, 30): datetime.date(2006, 10, 25),
'2006 10 25': datetime.date(2006, 10, 25),
}
invalid = {
'2006-10-25': error_invalid,
'25/10/2006': error_invalid,
'25/10/06': error_invalid,
}
kwargs = {'input_formats':['%Y %m %d'],}
self.assertFieldOutput(DateField,
valid, invalid, field_kwargs=kwargs, empty_value=None
)
def test_GenericDateTimeField(self):
error_invalid = ['Enter a valid date/time.']
valid = {
datetime.date(2006, 10, 25): datetime.datetime(2006, 10, 25, 0, 0),
datetime.datetime(2006, 10, 25, 14, 30): datetime.datetime(2006, 10, 25, 14, 30),
datetime.datetime(2006, 10, 25, 14, 30, 59): datetime.datetime(2006, 10, 25, 14, 30, 59),
datetime.datetime(2006, 10, 25, 14, 30, 59, 200): datetime.datetime(2006, 10, 25, 14, 30, 59, 200),
'2006-10-25 14:30:45': datetime.datetime(2006, 10, 25, 14, 30, 45),
'2006-10-25 14:30:00': datetime.datetime(2006, 10, 25, 14, 30),
'2006-10-25 14:30': datetime.datetime(2006, 10, 25, 14, 30),
'2006-10-25': datetime.datetime(2006, 10, 25, 0, 0),
'25/10/2006 14:30:45': datetime.datetime(2006, 10, 25, 14, 30, 45),
'25/10/2006 14:30:00': datetime.datetime(2006, 10, 25, 14, 30),
'25/10/2006 14:30': datetime.datetime(2006, 10, 25, 14, 30),
'25/10/2006': datetime.datetime(2006, 10, 25, 0, 0),
'25/10/06 14:30:45': datetime.datetime(2006, 10, 25, 14, 30, 45),
'25/10/06 14:30:00': datetime.datetime(2006, 10, 25, 14, 30),
'25/10/06 14:30': datetime.datetime(2006, 10, 25, 14, 30),
'25/10/06': datetime.datetime(2006, 10, 25, 0, 0),
}
invalid = {
'hello': error_invalid,
'2006-10-25 4:30 p.m.': error_invalid,
}
self.assertFieldOutput(DateTimeField, valid, invalid, empty_value=None)
# DateTimeField with optional input_formats parameter
valid = {
datetime.date(2006, 10, 25): datetime.datetime(2006, 10, 25, 0, 0),
datetime.datetime(2006, 10, 25, 14, 30): datetime.datetime(2006, 10, 25, 14, 30),
datetime.datetime(2006, 10, 25, 14, 30, 59): datetime.datetime(2006, 10, 25, 14, 30, 59),
datetime.datetime(2006, 10, 25, 14, 30, 59, 200): datetime.datetime(2006, 10, 25, 14, 30, 59, 200),
'2006 10 25 2:30 PM': datetime.datetime(2006, 10, 25, 14, 30),
}
invalid = {
'2006-10-25 14:30:45': error_invalid,
}
kwargs = {'input_formats':['%Y %m %d %I:%M %p'],}
self.assertFieldOutput(DateTimeField,
valid, invalid, field_kwargs=kwargs, empty_value=None
)
| bsd-3-clause |
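A short sketch of exercising the generic DateField directly, mirroring the valid/invalid tables in the test above.

import datetime
from django.contrib.localflavor.generic.forms import DateField
from django.core.exceptions import ValidationError

field = DateField(input_formats=['%Y %m %d'])
assert field.clean('2006 10 25') == datetime.date(2006, 10, 25)
try:
    field.clean('25/10/2006')  # not in input_formats, so it is rejected
except ValidationError as e:
    assert e.messages == ['Enter a valid date.']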
Andreasdahlberg/sillycat | scripts/check_style.py | 1 | 1234 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
def execute_format_command(cmd='scons -C firmware format'):
"""Execute the format command and return the result."""
output = subprocess.check_output(cmd, shell=True)
return output.decode('utf-8')
def check_command_output(output):
"""Check if the output contains 'Formatted'."""
if 'Formatted' in output:
print(output)
return 1
return 0
def main():
output = execute_format_command()
return check_command_output(output)
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
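A quick sanity check of check_command_output with canned strings; the output lines are made up.

# The checker returns 1 (a failure for CI purposes) whenever the format
# target reports that it had to rewrite a file.
assert check_command_output('scons: done building targets.') == 0
assert check_command_output('Formatted src/main.c') == 1  # hypothetical path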
ukgovdatascience/classifyintentsapp | app/auth/forms.py | 1 | 5291 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired, Length, Email, Regexp, EqualTo
from wtforms.widgets import PasswordInput
from wtforms import ValidationError
import safe
from ..models import User
class PasswordWidget(PasswordInput):
'''
Custom password field widget, but with autocomplete="off" by default.
'''
def __call__(self, field, **kwargs):
if "autocomplete" not in kwargs:
kwargs['autocomplete'] = 'off'
return super(PasswordWidget, self).__call__(field, **kwargs)
class LoginForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
password = PasswordField('Password', validators=[DataRequired()],
widget=PasswordWidget())
remember_me = BooleanField('Keep me logged in')
submit = SubmitField('Log In')
class PasswordStrength(object):
'''
Custom password strength validator using safe.
'''
def __init__(self, message=None):
if not message:
message = ('Your password is too easy to guess. Please try again '
'with a harder to guess password.')
self.message = message
def __call__(self, form, field):
password = field.data
strength = safe.check(password)
if strength.strength not in ['medium', 'strong']:
raise ValidationError(self.message)
class RegistrationForm(FlaskForm):
email = StringField(
'Email address',
validators=[
DataRequired(),
Length(1, 64),
Email(),
Regexp(
regex=r'.*\@digital\.cabinet\-office\.gov\.uk',
message='Must be a valid @digital.cabinet-office.gov.uk address')
]
)
username = StringField('Username', validators=[
DataRequired(),
Length(1, 64),
Regexp(
'^[A-Za-z][A-Za-z0-9_.]*$',
0,
'Usernames must have only letters, '
'numbers, dots or underscores')])
password = PasswordField('Password', validators=[
DataRequired(),
EqualTo('password2', message='Passwords must match.'),
Length(
min=8,
message='Password must be at least 8 characters in length.'),
PasswordStrength()],
widget=PasswordWidget())
password2 = PasswordField('Confirm password', validators=[DataRequired()],
widget=PasswordWidget())
submit = SubmitField('Register')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
if User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use.')
class ChangePasswordForm(FlaskForm):
old_password = PasswordField('Old password', validators=[DataRequired()],
widget=PasswordWidget())
password = PasswordField('New password',
widget=PasswordWidget(),
validators=[
DataRequired(),
EqualTo('password2', message='Passwords must match.'),
Length(
min=8, message='Password must be at least 8 characters in length.'),
PasswordStrength()
])
password2 = PasswordField(
'Confirm new password', validators=[DataRequired()],
widget=PasswordWidget())
submit = SubmitField('Update Password')
class PasswordResetRequestForm(FlaskForm):
email = StringField('Email', validators=[DataRequired(), Length(1, 64),
Email()])
submit = SubmitField('Reset Password')
class PasswordResetForm(FlaskForm):
email = StringField('Email', validators=[
DataRequired(), Length(1, 64), Email()])
password = PasswordField('New Password', widget=PasswordWidget(),
validators=[
DataRequired(), EqualTo('password2', message='Passwords must match'),
PasswordStrength()])
password2 = PasswordField('Confirm password', validators=[DataRequired()],
widget=PasswordWidget())
submit = SubmitField('Reset Password')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first() is None:
raise ValidationError('Unknown email address.')
class ChangeEmailForm(FlaskForm):
email = StringField(
'Email address',
validators=[
DataRequired(),
Length(1, 64),
Email(),
Regexp(regex=r'.*\@digital\.cabinet\-office\.gov\.uk',
message='Must be a valid @digital.cabinet-office.gov.uk address')
])
password = PasswordField('Password', validators=[DataRequired()],
widget=PasswordWidget())
submit = SubmitField('Update Email Address')
def validate_email(self, field):
if User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
| mit |
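For reference, a small sketch of the safe library check that PasswordStrength wraps; the example passwords and reported strengths are illustrative.

import safe

# PasswordStrength only accepts results whose .strength is 'medium' or
# 'strong'; short or common passwords report a weaker level and fail.
print(safe.check('12345678').strength)              # e.g. 'simple' -> rejected
print(safe.check('Tr1cky&longer-phrase').strength)  # e.g. 'strong' -> accepted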
mrnamingo/vix4-34-enigma2-bcm | lib/python/Plugins/SystemPlugins/CommonInterfaceAssignment/plugin.py | 20 | 26382 | from xml.etree.cElementTree import parse as ci_parse
from boxbranding import getMachineBrand, getMachineName
from os import path as os_path
from enigma import eDVBCI_UI, eDVBCIInterfaces
from Screens.ChannelSelection import *
from Components.ActionMap import ActionMap
from Components.Sources.StaticText import StaticText
from Components.config import ConfigNothing
from Components.ConfigList import ConfigList
from Components.SelectionList import SelectionList
from ServiceReference import ServiceReference
from Plugins.Plugin import PluginDescriptor
class CIselectMainMenu(Screen):
skin = """
<screen name="CIselectMainMenu" position="center,center" size="500,250" title="CI assignment" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="CiList" position="5,50" size="490,200" scrollbarMode="showOnDemand" />
</screen>"""
def __init__(self, session, args = 0):
Screen.__init__(self, session)
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("Edit"))
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"green": self.greenPressed,
"red": self.close,
"ok": self.greenPressed,
"cancel": self.close
}, -1)
NUM_CI=eDVBCIInterfaces.getInstance().getNumOfSlots()
print "[CI_Wizzard] FOUND %d CI Slots " % NUM_CI
self.state = { }
self.list = [ ]
if NUM_CI > 0:
for slot in range(NUM_CI):
state = eDVBCI_UI.getInstance().getState(slot)
if state != -1:
if state == 0:
appname = _("Slot %d") %(slot+1) + " - " + _("no module found")
elif state == 1:
appname = _("Slot %d") %(slot+1) + " - " + _("init modules")
elif state == 2:
appname = _("Slot %d") %(slot+1) + " - " + eDVBCI_UI.getInstance().getAppName(slot)
self.list.append( (appname, ConfigNothing(), 0, slot) )
else:
self.list.append( (_("Slot %d") %(slot+1) + " - " + _("no module found") , ConfigNothing(), 1, -1) )
else:
self.list.append( (_("no CI slots found") , ConfigNothing(), 1, -1) )
menuList = ConfigList(self.list)
menuList.list = self.list
menuList.l.setList(self.list)
self["CiList"] = menuList
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("CI assignment"))
def greenPressed(self):
cur = self["CiList"].getCurrent()
if cur and len(cur) > 2:
action = cur[2]
slot = cur[3]
if action == 1:
print "[CI_Wizzard] there is no CI Slot in your %s %s" % (getMachineBrand(), getMachineName())
else:
print "[CI_Wizzard] selected CI Slot : %d" % slot
if config.usage.setup_level.index > 1: # advanced
self.session.open(CIconfigMenu, slot)
else:
self.session.open(easyCIconfigMenu, slot)
# def yellowPressed(self): # unused
# NUM_CI=eDVBCIInterfaces.getInstance().getNumOfSlots()
# print "[CI_Check] FOUND %d CI Slots " % NUM_CI
# if NUM_CI > 0:
# for ci in range(NUM_CI):
# print eDVBCIInterfaces.getInstance().getDescrambleRules(ci)
class CIconfigMenu(Screen):
skin = """
<screen name="CIconfigMenu" position="center,center" size="560,440" title="CI assignment" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget source="CAidList_desc" render="Label" position="5,50" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget source="CAidList" render="Label" position="5,80" size="550,45" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<ePixmap pixmap="div-h.png" position="0,125" zPosition="1" size="560,2" />
<widget source="ServiceList_desc" render="Label" position="5,130" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget name="ServiceList" position="5,160" size="550,250" zPosition="1" scrollbarMode="showOnDemand" />
<widget source="ServiceList_info" render="Label" position="5,160" size="550,250" zPosition="2" font="Regular;20" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, ci_slot="9"):
Screen.__init__(self, session)
self.ci_slot=ci_slot
self.filename = eEnv.resolve("${sysconfdir}/enigma2/ci") + str(self.ci_slot) + ".xml"
self["key_red"] = StaticText(_("Delete"))
self["key_green"] = StaticText(_("Add service"))
self["key_yellow"] = StaticText(_("Add provider"))
self["key_blue"] = StaticText(_("Select CAId"))
self["CAidList_desc"] = StaticText(_("Assigned CAIds:"))
self["CAidList"] = StaticText()
self["ServiceList_desc"] = StaticText(_("Assigned services/provider:"))
self["ServiceList_info"] = StaticText()
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"green": self.greenPressed,
"red": self.redPressed,
"yellow": self.yellowPressed,
"blue": self.bluePressed,
"cancel": self.cancel
}, -1)
print "[CI_Wizzard_Config] Configuring CI Slots : %d " % self.ci_slot
i=0
self.caidlist=[]
print eDVBCIInterfaces.getInstance().readCICaIds(self.ci_slot)
for caid in eDVBCIInterfaces.getInstance().readCICaIds(self.ci_slot):
i+=1
self.caidlist.append((str(hex(int(caid))),str(caid),i))
# noinspection PyStringFormat
print "[CI_Wizzard_Config_CI%d] read following CAIds from CI: %s" %(self.ci_slot, self.caidlist)
self.selectedcaid = []
self.servicelist = []
self.caids = ""
serviceList = ConfigList(self.servicelist)
serviceList.list = self.servicelist
serviceList.l.setList(self.servicelist)
self["ServiceList"] = serviceList
self.loadXML()
# if config mode is not advanced, automatically select all CAIds
if config.usage.setup_level.index <= 1: # advanced
self.selectedcaid=self.caidlist
self.finishedCAidSelection(self.selectedcaid)
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("CI assignment"))
def redPressed(self):
self.delete()
def greenPressed(self):
self.session.openWithCallback( self.finishedChannelSelection, myChannelSelection, None)
def yellowPressed(self):
self.session.openWithCallback( self.finishedProviderSelection, myProviderSelection, None)
def bluePressed(self):
self.session.openWithCallback(self.finishedCAidSelection, CAidSelect, self.caidlist, self.selectedcaid)
def cancel(self):
self.saveXML()
activate_all(self)
self.close()
def setServiceListInfo(self):
if len(self.servicelist):
self["ServiceList_info"].setText("")
else:
self["ServiceList_info"].setText(_("No services/providers selected"))
def delete(self):
cur = self["ServiceList"].getCurrent()
if cur and len(cur) > 2:
self.servicelist.remove(cur)
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedChannelSelection(self, *args):
if len(args):
ref=args[0]
service_ref = ServiceReference(ref)
service_name = service_ref.getServiceName()
if not find_in_list(self.servicelist, service_name, 0):
split_ref=service_ref.ref.toString().split(":")
if split_ref[0] == "1":#== dvb service und nicht muell von None
self.servicelist.append( (service_name , ConfigNothing(), 0, service_ref.ref.toString()) )
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedProviderSelection(self, *args):
if len(args)>1: # if nothing is selected, only one arg (==None) comes back
name=args[0]
dvbnamespace=args[1]
if not find_in_list(self.servicelist, name, 0):
self.servicelist.append( (name , ConfigNothing(), 1, dvbnamespace) )
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
def finishedCAidSelection(self, *args):
if len(args):
self.selectedcaid=args[0]
self.caids=""
if len(self.selectedcaid):
for item in self.selectedcaid:
if len(self.caids):
self.caids+= ", " + item[0]
else:
self.caids=item[0]
else:
self.selectedcaid=[]
self.caids=_("no CAId selected")
else:
self.selectedcaid=[]
self.caids=_("no CAId selected")
self["CAidList"].setText(self.caids)
def saveXML(self):
try:
fp = open(self.filename, 'w')
fp.write("<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n")
fp.write("<ci>\n")
fp.write("\t<slot>\n")
fp.write("\t\t<id>%s</id>\n" % self.ci_slot)
for item in self.selectedcaid:
if len(self.selectedcaid):
fp.write("\t\t<caid id=\"%s\" />\n" % item[0])
for item in self.servicelist:
if len(self.servicelist):
if item[2]==1:
fp.write("\t\t<provider name=\"%s\" dvbnamespace=\"%s\" />\n" % (item[0], item[3]))
else:
fp.write("\t\t<service name=\"%s\" ref=\"%s\" />\n" % (item[0], item[3]))
fp.write("\t</slot>\n")
fp.write("</ci>\n")
fp.close()
except:
print "[CI_Config_CI%d] xml not written" %self.ci_slot
os.unlink(self.filename)
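# For reference, saveXML above emits roughly the following XML for one slot
# (the id, caid, provider and service values here are illustrative):
#
# <?xml version="1.0" encoding="utf-8" ?>
# <ci>
#     <slot>
#         <id>0</id>
#         <caid id="0x100" />
#         <provider name="ExampleProvider" dvbnamespace="0xc00000" />
#         <service name="Example TV" ref="1:0:1:1:1:1:C00000:0:0:0:" />
#     </slot>
# </ci>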
def loadXML(self):
if not os_path.exists(self.filename):
return
def getValue(definitions, default):
ret = ""
Len = len(definitions)
return Len > 0 and definitions[Len-1].text or default
self.read_services=[]
self.read_providers=[]
self.usingcaid=[]
self.ci_config=[]
try:
fp = open(self.filename, 'r')
tree = ci_parse(fp).getroot()
fp.close()
for slot in tree.findall("slot"):
read_slot = getValue(slot.findall("id"), False).encode("UTF-8")
print "ci " + read_slot
i=0
for caid in slot.findall("caid"):
read_caid = caid.get("id").encode("UTF-8")
self.selectedcaid.append((str(read_caid),str(read_caid),i))
self.usingcaid.append(long(read_caid,16))
i+=1
for service in slot.findall("service"):
read_service_name = service.get("name").encode("UTF-8")
read_service_ref = service.get("ref").encode("UTF-8")
self.read_services.append (read_service_ref)
for provider in slot.findall("provider"):
read_provider_name = provider.get("name").encode("UTF-8")
read_provider_dvbname = provider.get("dvbnamespace").encode("UTF-8")
self.read_providers.append((read_provider_name,read_provider_dvbname))
self.ci_config.append((int(read_slot), (self.read_services, self.read_providers, self.usingcaid)))
except:
print "[CI_Config_CI%d] error parsing xml..." %self.ci_slot
for item in self.read_services:
if len(item):
self.finishedChannelSelection(item)
for item in self.read_providers:
if len(item):
self.finishedProviderSelection(item[0],item[1])
print self.ci_config
self.finishedCAidSelection(self.selectedcaid)
self["ServiceList"].l.setList(self.servicelist)
self.setServiceListInfo()
class easyCIconfigMenu(CIconfigMenu):
skin = """
<screen name="easyCIconfigMenu" position="center,center" size="560,440" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="280,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="ServiceList_desc" render="Label" position="5,50" size="550,22" font="Regular;20" backgroundColor="#25062748" transparent="1" />
<widget name="ServiceList" position="5,80" size="550,300" zPosition="1" scrollbarMode="showOnDemand" />
<widget source="ServiceList_info" render="Label" position="5,80" size="550,300" zPosition="2" font="Regular;20" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, ci_slot="9"):
Screen.setTitle(self, _("CI assignment"))
ci=ci_slot
CIconfigMenu.__init__(self, session, ci_slot)
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"green": self.greenPressed,
"red": self.redPressed,
"yellow": self.yellowPressed,
"cancel": self.cancel
})
class CAidSelect(Screen):
skin = """
<screen name="CAidSelect" position="center,center" size="450,440" title="select CAId's" >
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget name="list" position="5,50" size="440,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="div-h.png" position="0,390" zPosition="1" size="450,2" />
<widget source="introduction" render="Label" position="0,400" size="450,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, list, selected_caids):
Screen.__init__(self, session)
self.list = SelectionList()
self["list"] = self.list
for listindex in range(len(list)):
if find_in_list(selected_caids,list[listindex][0],0):
self.list.addSelection(list[listindex][0], list[listindex][1], listindex, True)
else:
self.list.addSelection(list[listindex][0], list[listindex][1], listindex, False)
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("Save"))
self["introduction"] = StaticText(_("Press OK to select/deselect a CAId."))
self["actions"] = ActionMap(["ColorActions","SetupActions"],
{
"ok": self.list.toggleSelection,
"cancel": self.cancel,
"green": self.greenPressed,
"red": self.cancel
}, -1)
self.onShown.append(self.setWindowTitle)
def setWindowTitle(self):
self.setTitle(_("select CAId's"))
def greenPressed(self):
list = self.list.getSelectionsList()
print list
self.close(list)
def cancel(self):
self.close()
class myProviderSelection(ChannelSelectionBase):
skin = """
<screen name="myProviderSelection" position="center,center" size="560,440" title="Select provider to add...">
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="list" position="5,50" size="550,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="div-h.png" position="0,390" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="0,400" size="560,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, title):
ChannelSelectionBase.__init__(self, session)
self.onShown.append(self.__onExecCallback)
self.bouquet_mark_edit = EDIT_BOUQUET
self["actions"] = ActionMap(["OkCancelActions", "ChannelSelectBaseActions"],
{
"showFavourites": self.doNothing,
"showAllServices": self.cancel,
"showProviders": self.doNothing,
"showSatellites": self.doNothing,
"cancel": self.cancel,
"ok": self.channelSelected
})
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText()
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText()
self["introduction"] = StaticText(_("Press OK to select a provider."))
def doNothing(self):
pass
def __onExecCallback(self):
self.showSatellites()
self.setTitle(_("Select provider to add..."))
def channelSelected(self): # just return selected service
ref = self.getCurrentSelection()
splited_ref=ref.toString().split(":")
if ref.flags == 7 and splited_ref[6] != "0":
self.dvbnamespace=splited_ref[6]
self.enterPath(ref)
else:
self.close(ref.getName(), self.dvbnamespace)
def showSatellites(self):
if not self.pathChangeDisabled:
refstr = '%s FROM SATELLITES ORDER BY satellitePosition'% self.service_types
if not self.preEnterPath(refstr):
ref = eServiceReference(refstr)
justSet=False
prev = None
if self.isBasePathEqual(ref):
if self.isPrevPathEqual(ref):
justSet=True
prev = self.pathUp(justSet)
else:
currentRoot = self.getRoot()
if currentRoot is None or currentRoot != ref:
justSet=True
self.clearPath()
self.enterPath(ref, True)
if justSet:
serviceHandler = eServiceCenter.getInstance()
servicelist = serviceHandler.list(ref)
if not servicelist is None:
while True:
service = servicelist.getNext()
if not service.valid(): #check if end of list
break
unsigned_orbpos = service.getUnsignedData(4) >> 16
orbpos = service.getData(4) >> 16
if orbpos < 0:
orbpos += 3600
if service.getPath().find("FROM PROVIDER") != -1:
service_type = _("Providers")
try:
# why do we need this cast?
service_name = str(nimmanager.getSatDescription(orbpos))
except:
if unsigned_orbpos == 0xFFFF: #Cable
service_name = _("Cable")
elif unsigned_orbpos == 0xEEEE: #Terrestrial
service_name = _("Terrestrial")
else:
if orbpos > 1800: # west
orbpos = 3600 - orbpos
h = _("W")
else:
h = _("E")
service_name = ("%d.%d" + h) % (orbpos / 10, orbpos % 10)
service.setName("%s - %s" % (service_name, service_type))
self.servicelist.addService(service)
self.servicelist.finishFill()
if prev is not None:
self.setCurrentSelection(prev)
def cancel(self):
self.close(None)
class myChannelSelection(ChannelSelectionBase):
skin = """
<screen name="myChannelSelection" position="center,center" size="560,440" title="Select service to add...">
<ePixmap pixmap="buttons/red.png" position="0,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="140,0" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="420,0" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="0,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="140,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="280,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="420,0" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
<widget name="list" position="5,50" size="550,330" scrollbarMode="showOnDemand" />
<ePixmap pixmap="div-h.png" position="0,390" zPosition="1" size="560,2" />
<widget source="introduction" render="Label" position="0,400" size="560,40" zPosition="10" font="Regular;21" halign="center" valign="center" backgroundColor="#25062748" transparent="1" />
</screen>"""
def __init__(self, session, title):
ChannelSelectionBase.__init__(self, session)
self.onShown.append(self.__onExecCallback)
self.bouquet_mark_edit = OFF
self["actions"] = ActionMap(["OkCancelActions", "TvRadioActions", "ChannelSelectBaseActions"],
{
"showProviders": self.doNothing,
"showSatellites": self.showAllServices,
"showAllServices": self.cancel,
"cancel": self.cancel,
"ok": self.channelSelected,
"keyRadio": self.setModeRadio,
"keyTV": self.setModeTv
})
self["key_red"] = StaticText(_("Close"))
self["key_green"] = StaticText(_("All"))
self["key_yellow"] = StaticText()
self["key_blue"] = StaticText(_("Favourites"))
self["introduction"] = StaticText(_("Press OK to select a provider."))
def __onExecCallback(self):
self.setModeTv()
self.setTitle(_("Select service to add..."))
def doNothing(self):
pass
def channelSelected(self): # just return selected service
ref = self.getCurrentSelection()
if (ref.flags & 7) == 7:
self.enterPath(ref)
elif not (ref.flags & eServiceReference.isMarker):
ref = self.getCurrentSelection()
self.close(ref)
def setModeTv(self):
self.setTvMode()
self.showFavourites()
def setModeRadio(self):
self.setRadioMode()
self.showFavourites()
def cancel(self):
self.close(None)
def activate_all(session):
NUM_CI=eDVBCIInterfaces.getInstance().getNumOfSlots()
print "[CI_Activate] FOUND %d CI Slots " % NUM_CI
if NUM_CI > 0:
ci_config=[]
def getValue(definitions, default):
# Initialize Output
ret = ""
# How many definitions are present
Len = len(definitions)
return Len > 0 and definitions[Len-1].text or default
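		# Illustrative note (not in the original): getValue(slot.findall("id"), False)
		# returns the text of the last matching element, or the default
		# (here False) when no element is found.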
for ci in range(NUM_CI):
filename = eEnv.resolve("${sysconfdir}/enigma2/ci") + str(ci) + ".xml"
if not os_path.exists(filename):
print "[CI_Activate_Config_CI%d] no config file found" %ci
try:
				if not os_path.exists(filename):
return
fp = open(filename, 'r')
tree = ci_parse(fp).getroot()
fp.close()
read_services=[]
read_providers=[]
usingcaid=[]
for slot in tree.findall("slot"):
read_slot = getValue(slot.findall("id"), False).encode("UTF-8")
for caid in slot.findall("caid"):
read_caid = caid.get("id").encode("UTF-8")
usingcaid.append(long(read_caid,16))
for service in slot.findall("service"):
read_service_ref = service.get("ref").encode("UTF-8")
read_services.append (read_service_ref)
for provider in slot.findall("provider"):
read_provider_name = provider.get("name").encode("UTF-8")
read_provider_dvbname = provider.get("dvbnamespace").encode("UTF-8")
read_providers.append((read_provider_name,long(read_provider_dvbname,16)))
ci_config.append((int(read_slot), (read_services, read_providers, usingcaid)))
except:
print "[CI_Activate_Config_CI%d] error parsing xml..." %ci
for item in ci_config:
print "[CI_Activate] activate CI%d with following settings:" %item[0]
print item[0]
print item[1]
try:
eDVBCIInterfaces.getInstance().setDescrambleRules(item[0],item[1])
except:
print "[CI_Activate_Config_CI%d] error setting DescrambleRules..." %item[0]
def find_in_list(items, search, listpos=0):
	for item in items:
		if item[listpos] == search:
			return True
	return False
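# Illustrative examples (assumed data, not part of the original plugin):
#   find_in_list([("a", 1), ("b", 2)], "b")           -> True  (matches item[0])
#   find_in_list([("a", 1), ("b", 2)], 2, listpos=1)  -> True  (matches item[1])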
global_session = None
def sessionstart(reason, session):
global global_session
global_session = session
def autostart(reason, **kwargs):
global global_session
if reason == 0:
print "[CI_Assignment] activating ci configs:"
activate_all(global_session)
elif reason == 1:
global_session = None
def main(session, **kwargs):
session.open(CIselectMainMenu)
def menu(menuid, **kwargs):
if menuid == "cam" and eDVBCIInterfaces.getInstance().getNumOfSlots():
return [(_("Common Interface Assignment"), main, "ci_assign", 11)]
return [ ]
def Plugins(**kwargs):
if config.usage.setup_level.index > 1:
return [PluginDescriptor( where = PluginDescriptor.WHERE_SESSIONSTART, needsRestart = False, fnc = sessionstart ),
PluginDescriptor( where = PluginDescriptor.WHERE_AUTOSTART, needsRestart = False, fnc = autostart ),
PluginDescriptor( name = _("Common Interface assignment"), description = _("a gui to assign services/providers/caids to common interface modules"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = menu )]
else:
return [PluginDescriptor( where = PluginDescriptor.WHERE_SESSIONSTART, needsRestart = False, fnc = sessionstart ),
PluginDescriptor( where = PluginDescriptor.WHERE_AUTOSTART, needsRestart = False, fnc = autostart ),
PluginDescriptor( name = _("Common Interface assignment"), description = _("a gui to assign services/providers to common interface modules"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc = menu )]
| gpl-2.0 |
YanyangChen/OpenNI2 | Wrappers/java/OpenNI.jni/CreateMethods.py | 32 | 2398 | #/****************************************************************************
#* *
#* OpenNI 1.x Alpha *
#* Copyright (C) 2012 PrimeSense Ltd. *
#* *
#* This file is part of OpenNI. *
#* *
#* Licensed under the Apache License, Version 2.0 (the "License"); *
#* you may not use this file except in compliance with the License. *
#* You may obtain a copy of the License at *
#* *
#* http://www.apache.org/licenses/LICENSE-2.0 *
#* *
#* Unless required by applicable law or agreed to in writing, software *
#* distributed under the License is distributed on an "AS IS" BASIS, *
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
#* See the License for the specific language governing permissions and *
#* limitations under the License. *
#* *
#****************************************************************************/
import os
import re
# ----------------------- MAIN -------------------------
java_header = open("org_openni_NativeMethods.h")
cont = java_header.read()
java_header.close()
result = open("methods.inl", "w")
result.write("static JNINativeMethod methods[] = {\n")
while True:
match = re.search("Method:\s*(\w*)", cont)
if match is None:
break
method_name = match.group(1)
match = re.search("Signature:\s*([\w\(\)\[;/]*)", cont)
if match is None:
break
signature = match.group(1)
match = re.search("JNIEXPORT.*JNICALL (\w*)", cont)
if match is None:
break
method = match.group(1)
result.write('\t{ "' + method_name + '", "' + signature + '", (void*)&' + method + ' },\n')
    cont = cont[match.end():]
result.write('};\n')
result.close()
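# Illustrative sketch (assumed input, not part of the original script): a
# javah-generated header fragment such as
#
#  * Method:    initNative
#  * Signature: (I)V
#  JNIEXPORT void JNICALL Java_org_openni_NativeMethods_initNative
#
# would make the loop above emit the following line into methods.inl:
#
#   { "initNative", "(I)V", (void*)&Java_org_openni_NativeMethods_initNative },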
| apache-2.0 |
chitr/neutron | neutron/db/quota/models.py | 22 | 2526 | # Copyright (c) 2015 OpenStack Foundation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import sql
from neutron.db import model_base
from neutron.db import models_v2
class ResourceDelta(model_base.BASEV2):
resource = sa.Column(sa.String(255), primary_key=True)
reservation_id = sa.Column(sa.String(36),
sa.ForeignKey('reservations.id',
ondelete='CASCADE'),
primary_key=True,
nullable=False)
# Requested amount of resource
amount = sa.Column(sa.Integer)
class Reservation(model_base.BASEV2, models_v2.HasId):
tenant_id = sa.Column(sa.String(255))
expiration = sa.Column(sa.DateTime())
resource_deltas = orm.relationship(ResourceDelta,
backref='reservation',
lazy="joined",
cascade='all, delete-orphan')
class Quota(model_base.BASEV2, models_v2.HasId, models_v2.HasTenant):
"""Represent a single quota override for a tenant.
If there is no row for a given tenant id and resource, then the
default for the deployment is used.
"""
resource = sa.Column(sa.String(255))
limit = sa.Column(sa.Integer)
class QuotaUsage(model_base.BASEV2):
"""Represents the current usage for a given resource."""
resource = sa.Column(sa.String(255), nullable=False,
primary_key=True, index=True)
tenant_id = sa.Column(sa.String(255), nullable=False,
primary_key=True, index=True)
dirty = sa.Column(sa.Boolean, nullable=False, server_default=sql.false())
in_use = sa.Column(sa.Integer, nullable=False,
server_default="0")
reserved = sa.Column(sa.Integer, nullable=False,
server_default="0")
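# Illustrative usage sketch (assumes a configured SQLAlchemy session and a
# hypothetical 'limit' value; not part of the original module):
#
#     usage = (session.query(QuotaUsage)
#              .filter_by(tenant_id='tenant-1', resource='port')
#              .one_or_none())
#     if usage is not None:
#         available = limit - (usage.in_use + usage.reserved)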
| apache-2.0 |
richardcs/ansible | lib/ansible/modules/cloud/amazon/aws_ssm_parameter_store.py | 6 | 7683 | #!/usr/bin/python
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: aws_ssm_parameter_store
short_description: Manage key-value pairs in aws parameter store.
description:
- Manage key-value pairs in aws parameter store.
version_added: "2.5"
options:
name:
description:
- parameter key name.
required: true
description:
description:
- parameter key description.
required: false
value:
description:
- Parameter value.
required: false
state:
description:
- Creates or modifies an existing parameter
- Deletes a parameter
required: false
choices: ['present', 'absent']
default: present
string_type:
description:
- Parameter String type
required: false
choices: ['String', 'StringList', 'SecureString']
default: String
decryption:
description:
- Work with SecureString type to get plain text secrets
type: bool
required: false
default: True
key_id:
description:
- aws KMS key to decrypt the secrets.
required: false
    default: aws/ssm (this key is automatically generated when the first parameter is created).
overwrite_value:
description:
- Option to overwrite an existing value if it already exists.
required: false
version_added: "2.6"
choices: ['never', 'changed', 'always']
default: changed
region:
description:
- region.
required: false
author:
- Nathan Webster (@nathanwebsterdotme)
- Bill Wang (@ozbillwang) <[email protected]>
- Michael De La Rue (@mikedlr)
extends_documentation_fragment: aws
requirements: [ botocore, boto3 ]
'''
EXAMPLES = '''
- name: Create or update key/value pair in aws parameter store
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
value: "World"
- name: Delete the key
aws_ssm_parameter_store:
name: "Hello"
state: absent
- name: Create or update secure key/value pair with default kms key (aws/ssm)
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
string_type: "SecureString"
value: "World"
- name: Create or update secure key/value pair with nominated kms key
aws_ssm_parameter_store:
name: "Hello"
description: "This is your first key"
string_type: "SecureString"
key_id: "alias/demo"
value: "World"
- name: Always update a parameter store value and create a new version
aws_ssm_parameter_store:
name: "overwrite_example"
description: "This example will always overwrite the value"
string_type: "String"
value: "Test1234"
overwrite_value: "always"
- name: recommend to use with aws_ssm lookup plugin
debug: msg="{{ lookup('aws_ssm', 'hello') }}"
'''
RETURN = '''
put_parameter:
description: Add one or more parameters to the system.
returned: success
type: dictionary
delete_parameter:
description: Delete a parameter from the system.
returned: success
type: dictionary
'''
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import boto3_conn, get_aws_connection_info
try:
from botocore.exceptions import ClientError
except ImportError:
pass # will be captured by imported HAS_BOTO3
def update_parameter(client, module, args):
changed = False
response = {}
try:
response = client.put_parameter(**args)
changed = True
except ClientError as e:
module.fail_json_aws(e, msg="setting parameter")
return changed, response
def create_update_parameter(client, module):
changed = False
existing_parameter = None
response = {}
args = dict(
Name=module.params.get('name'),
Value=module.params.get('value'),
Type=module.params.get('string_type')
)
if (module.params.get('overwrite_value') in ("always", "changed")):
args.update(Overwrite=True)
else:
args.update(Overwrite=False)
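    # Overwrite semantics handled below: 'never' only creates the parameter
    # when it is absent, 'changed' updates it when the type, value, or
    # description differ, and 'always' writes a new version on every run.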
if module.params.get('description'):
args.update(Description=module.params.get('description'))
if module.params.get('string_type') == 'SecureString':
args.update(KeyId=module.params.get('key_id'))
try:
existing_parameter = client.get_parameter(Name=args['Name'], WithDecryption=True)
except:
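        # The parameter does not exist yet (or the lookup failed); fall
        # through and create it below.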
pass
if existing_parameter:
if (module.params.get('overwrite_value') == 'always'):
(changed, response) = update_parameter(client, module, args)
elif (module.params.get('overwrite_value') == 'changed'):
if existing_parameter['Parameter']['Type'] != args['Type']:
(changed, response) = update_parameter(client, module, args)
if existing_parameter['Parameter']['Value'] != args['Value']:
(changed, response) = update_parameter(client, module, args)
if args.get('Description'):
# Description field not available from get_parameter function so get it from describe_parameters
describe_existing_parameter = None
try:
describe_existing_parameter_paginator = client.get_paginator('describe_parameters')
describe_existing_parameter = describe_existing_parameter_paginator.paginate(
Filters=[{"Key": "Name", "Values": [args['Name']]}]).build_full_result()
except ClientError as e:
module.fail_json_aws(e, msg="getting description value")
if describe_existing_parameter['Parameters'][0]['Description'] != args['Description']:
(changed, response) = update_parameter(client, module, args)
else:
(changed, response) = update_parameter(client, module, args)
return changed, response
def delete_parameter(client, module):
response = {}
try:
response = client.delete_parameter(
Name=module.params.get('name')
)
except ClientError as e:
if e.response['Error']['Code'] == 'ParameterNotFound':
return False, {}
module.fail_json_aws(e, msg="deleting parameter")
return True, response
def setup_client(module):
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
connection = boto3_conn(module, conn_type='client', resource='ssm', region=region, endpoint=ec2_url, **aws_connect_params)
return connection
def setup_module_object():
argument_spec = dict(
name=dict(required=True),
description=dict(),
value=dict(required=False, no_log=True),
state=dict(default='present', choices=['present', 'absent']),
string_type=dict(default='String', choices=['String', 'StringList', 'SecureString']),
decryption=dict(default=True, type='bool'),
key_id=dict(default="alias/aws/ssm"),
overwrite_value=dict(default='changed', choices=['never', 'changed', 'always']),
region=dict(required=False),
)
return AnsibleAWSModule(
argument_spec=argument_spec,
)
def main():
module = setup_module_object()
state = module.params.get('state')
client = setup_client(module)
invocations = {
"present": create_update_parameter,
"absent": delete_parameter,
}
(changed, response) = invocations[state](client, module)
module.exit_json(changed=changed, response=response)
if __name__ == '__main__':
main()
| gpl-3.0 |
bswartz/cinder | cinder/volume/drivers/solidfire.py | 1 | 86895 | # All Rights Reserved.
# Copyright 2013 SolidFire Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import math
import random
import re
import socket
import string
import time
import warnings
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import timeutils
from oslo_utils import units
import requests
from requests.packages.urllib3 import exceptions
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.image import image_utils
from cinder import interface
from cinder.objects import fields
from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume.targets import iscsi as iscsi_driver
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
sf_opts = [
cfg.BoolOpt('sf_emulate_512',
default=True,
                help='Set 512 byte emulation on volume creation.'),
cfg.BoolOpt('sf_allow_tenant_qos',
default=False,
help='Allow tenants to specify QOS on create'),
cfg.StrOpt('sf_account_prefix',
help='Create SolidFire accounts with this prefix. Any string '
'can be used here, but the string \"hostname\" is special '
'and will create a prefix using the cinder node hostname '
'(previous default behavior). The default is NO prefix.'),
cfg.StrOpt('sf_volume_prefix',
default='UUID-',
help='Create SolidFire volumes with this prefix. Volume names '
'are of the form <sf_volume_prefix><cinder-volume-id>. '
'The default is to use a prefix of \'UUID-\'.'),
cfg.StrOpt('sf_template_account_name',
default='openstack-vtemplate',
help='Account name on the SolidFire Cluster to use as owner of '
'template/cache volumes (created if does not exist).'),
cfg.BoolOpt('sf_allow_template_caching',
default=True,
help='Create an internal cache of copy of images when '
'a bootable volume is created to eliminate fetch from '
'glance and qemu-conversion on subsequent calls.'),
cfg.StrOpt('sf_svip',
help='Overrides default cluster SVIP with the one specified. '
                    'This is required for deployments that have implemented '
'the use of VLANs for iSCSI networks in their cloud.'),
cfg.BoolOpt('sf_enable_volume_mapping',
default=True,
help='Create an internal mapping of volume IDs and account. '
'Optimizes lookups and performance at the expense of '
'memory, very large deployments may want to consider '
'setting to False.'),
cfg.PortOpt('sf_api_port',
default=443,
help='SolidFire API port. Useful if the device api is behind '
'a proxy on a different port.'),
cfg.BoolOpt('sf_enable_vag',
default=False,
help='Utilize volume access groups on a per-tenant basis.')]
CONF = cfg.CONF
CONF.register_opts(sf_opts)
# SolidFire API Error Constants
xExceededLimit = 'xExceededLimit'
xAlreadyInVolumeAccessGroup = 'xAlreadyInVolumeAccessGroup'
xVolumeAccessGroupIDDoesNotExist = 'xVolumeAccessGroupIDDoesNotExist'
xNotInVolumeAccessGroup = 'xNotInVolumeAccessGroup'
def retry(exc_tuple, tries=5, delay=1, backoff=2):
def retry_dec(f):
@six.wraps(f)
def func_retry(*args, **kwargs):
_tries, _delay = tries, delay
while _tries > 1:
try:
return f(*args, **kwargs)
except exc_tuple:
time.sleep(_delay)
_tries -= 1
_delay *= backoff
LOG.debug('Retrying %(args)s, %(tries)s attempts '
'remaining...',
{'args': args, 'tries': _tries})
# NOTE(jdg): Don't log the params passed here
# some cmds like createAccount will have sensitive
# info in the params, grab only the second tuple
# which should be the Method
msg = (_('Retry count exceeded for command: %s') %
(args[1],))
LOG.error(msg)
raise exception.SolidFireAPIException(message=msg)
return func_retry
return retry_dec
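# Illustrative usage of the decorator above (assumed exception tuple and
# function; not part of the original driver):
#
#     @retry((exception.SolidFireRetryableException,), tries=3, delay=1)
#     def ping_cluster(driver):
#         return driver._issue_api_request('GetClusterInfo', {})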
@interface.volumedriver
class SolidFireDriver(san.SanISCSIDriver):
"""OpenStack driver to enable SolidFire cluster.
Version history:
1.0 - Initial driver
1.1 - Refactor, clone support, qos by type and minor bug fixes
1.2 - Add xfr and retype support
1.2.1 - Add export/import support
1.2.2 - Catch VolumeNotFound on accept xfr
2.0.0 - Move from httplib to requests
2.0.1 - Implement SolidFire Snapshots
2.0.2 - Implement secondary account
2.0.3 - Implement cluster pairing
2.0.4 - Implement volume replication
2.0.5 - Try and deal with the stupid retry/clear issues from objects
and tflow
"""
    VERSION = '2.0.5'
sf_qos_dict = {'slow': {'minIOPS': 100,
'maxIOPS': 200,
'burstIOPS': 200},
'medium': {'minIOPS': 200,
'maxIOPS': 400,
'burstIOPS': 400},
'fast': {'minIOPS': 500,
'maxIOPS': 1000,
'burstIOPS': 1000},
'performant': {'minIOPS': 2000,
'maxIOPS': 4000,
'burstIOPS': 4000},
'off': None}
sf_qos_keys = ['minIOPS', 'maxIOPS', 'burstIOPS']
cluster_stats = {}
retry_exc_tuple = (exception.SolidFireRetryableException,
requests.exceptions.ConnectionError)
retryable_errors = ['xDBVersionMismatch',
'xMaxSnapshotsPerVolumeExceeded',
'xMaxClonesPerVolumeExceeded',
'xMaxSnapshotsPerNodeExceeded',
'xMaxClonesPerNodeExceeded',
'xNotReadyForIO']
def __init__(self, *args, **kwargs):
super(SolidFireDriver, self).__init__(*args, **kwargs)
self.failed_over_id = kwargs.get('active_backend_id', None)
self.active_cluster_info = {}
self.configuration.append_config_values(sf_opts)
self.template_account_id = None
self.max_volumes_per_account = 1990
self.volume_map = {}
self.cluster_pairs = []
self.replication_enabled = False
self.failed_over = False
self.target_driver = SolidFireISCSI(solidfire_driver=self,
configuration=self.configuration)
if self.failed_over_id:
remote_info = self._get_remote_info_by_id(self.failed_over_id)
if remote_info:
self._set_active_cluster_info(remote_info['endpoint'])
else:
LOG.error(_LE('Failed to initialize SolidFire driver to '
'a remote cluster specified at id: %s'),
self.failed_over_id)
else:
self._set_active_cluster_info()
try:
self._update_cluster_status()
except exception.SolidFireAPIException:
pass
if self.configuration.sf_allow_template_caching:
account = self.configuration.sf_template_account_name
self.template_account_id = self._create_template_account(account)
if not self.failed_over_id:
self._set_cluster_pairs()
def __getattr__(self, attr):
if hasattr(self.target_driver, attr):
return getattr(self.target_driver, attr)
else:
msg = _('Attribute: %s not found.') % attr
raise NotImplementedError(msg)
def _get_remote_info_by_id(self, backend_id):
remote_info = None
for rd in self.configuration.get('replication_device', []):
if rd.get('backend_id', None) == backend_id:
remote_endpoint = self._build_endpoint_info(**rd)
remote_info = self._get_remote_cluster_info(remote_endpoint)
remote_info['endpoint'] = remote_endpoint
if not remote_info['endpoint']['svip']:
remote_info['endpoint']['svip'] = (
remote_info['svip'] + ':3260')
return remote_info
def _create_remote_pairing(self, remote_device):
try:
pairing_info = self._issue_api_request('StartClusterPairing',
{}, version='8.0')['result']
pair_id = self._issue_api_request(
'CompleteClusterPairing',
{'clusterPairingKey': pairing_info['clusterPairingKey']},
version='8.0',
endpoint=remote_device['endpoint'])['result']['clusterPairID']
except exception.SolidFireAPIException as ex:
            if 'xPairingAlreadyExists' in ex.msg:
LOG.debug('Pairing already exists during init.')
else:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Cluster pairing failed: %s'), ex.msg)
LOG.debug(('Initialized Cluster pair with ID: %s'), pair_id)
remote_device['clusterPairID'] = pair_id
return pair_id
def _get_remote_cluster_info(self, remote_endpoint):
return self._issue_api_request(
'GetClusterInfo',
{},
endpoint=remote_endpoint)['result']['clusterInfo']
def _set_cluster_pairs(self):
if not self.configuration.get('replication_device', None):
self.replication = False
return
existing_pairs = self._issue_api_request(
'ListClusterPairs',
{},
version='8.0')['result']['clusterPairs']
remote_pair = {}
for rd in self.configuration.get('replication_device', []):
remote_endpoint = self._build_endpoint_info(**rd)
remote_info = self._get_remote_cluster_info(remote_endpoint)
remote_info['endpoint'] = remote_endpoint
if not remote_info['endpoint']['svip']:
remote_info['endpoint']['svip'] = remote_info['svip'] + ':3260'
for ep in existing_pairs:
if rd['backend_id'] == ep['mvip']:
remote_pair = ep
LOG.debug("Found remote pair: %s", remote_pair)
remote_info['clusterPairID'] = ep['clusterPairID']
break
if not remote_pair:
# NOTE(jdg): create_remote_pairing sets the
# clusterPairID in remote_info for us
self._create_remote_pairing(remote_info)
self.cluster_pairs.append(remote_info)
LOG.debug("Setting replication_enabled to True.")
self.replication_enabled = True
def _set_active_cluster_info(self, endpoint=None):
if not endpoint:
self.active_cluster_info['endpoint'] = self._build_endpoint_info()
else:
self.active_cluster_info['endpoint'] = endpoint
for k, v in self._issue_api_request(
'GetClusterInfo',
{})['result']['clusterInfo'].items():
self.active_cluster_info[k] = v
# Add a couple extra things that are handy for us
self.active_cluster_info['clusterAPIVersion'] = (
self._issue_api_request('GetClusterVersionInfo',
{})['result']['clusterAPIVersion'])
if self.configuration.get('sf_svip', None):
self.active_cluster_info['svip'] = (
self.configuration.get('sf_svip'))
def _create_provider_id_string(self,
resource_id,
account_or_vol_id):
# NOTE(jdg): We use the same format, but in the case
# of snapshots, we don't have an account id, we instead
# swap that with the parent volume id
return "%s %s %s" % (resource_id,
account_or_vol_id,
self.active_cluster_info['uuid'])
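    # Illustrative result (assumed values): SolidFire volume ID 342 under
    # account 25 on a cluster with uuid '53c8...' yields '342 25 53c8...'.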
def _init_snapshot_mappings(self, srefs):
updates = []
sf_snaps = self._issue_api_request(
'ListSnapshots', {}, version='6.0')['result']['snapshots']
for s in srefs:
seek_name = '%s%s' % (self.configuration.sf_volume_prefix, s['id'])
sfsnap = next(
(ss for ss in sf_snaps if ss['name'] == seek_name), None)
if sfsnap:
id_string = self._create_provider_id_string(
sfsnap['snapshotID'],
sfsnap['volumeID'])
if s.get('provider_id') != id_string:
updates.append(
{'id': s['id'],
'provider_id': id_string})
return updates
def _init_volume_mappings(self, vrefs):
updates = []
sf_vols = self._issue_api_request('ListActiveVolumes',
{})['result']['volumes']
self.volume_map = {}
for v in vrefs:
seek_name = '%s%s' % (self.configuration.sf_volume_prefix, v['id'])
sfvol = next(
(sv for sv in sf_vols if sv['name'] == seek_name), None)
if sfvol:
if v.get('provider_id', 'nil') != sfvol['volumeID']:
updates.append(
{'id': v['id'],
'provider_id': self._create_provider_id_string(
sfvol['volumeID'], sfvol['accountID'])})
return updates
def update_provider_info(self, vrefs, snaprefs):
volume_updates = self._init_volume_mappings(vrefs)
snapshot_updates = self._init_snapshot_mappings(snaprefs)
return (volume_updates, snapshot_updates)
def _create_template_account(self, account_name):
# We raise an API exception if the account doesn't exist
# We need to take account_prefix settings into consideration
# This just uses the same method to do template account create
# as we use for any other OpenStack account
account_name = self._get_sf_account_name(account_name)
try:
id = self._issue_api_request(
'GetAccountByName',
{'username': account_name})['result']['account']['accountID']
except exception.SolidFireAPIException:
chap_secret = self._generate_random_string(12)
params = {'username': account_name,
'initiatorSecret': chap_secret,
'targetSecret': chap_secret,
'attributes': {}}
id = self._issue_api_request('AddAccount',
params)['result']['accountID']
return id
def _build_endpoint_info(self, **kwargs):
endpoint = {}
endpoint['mvip'] = (
kwargs.get('mvip', self.configuration.san_ip))
endpoint['login'] = (
kwargs.get('login', self.configuration.san_login))
endpoint['passwd'] = (
kwargs.get('passwd', self.configuration.san_password))
endpoint['port'] = (
kwargs.get('port', self.configuration.sf_api_port))
endpoint['url'] = 'https://%s:%s' % (endpoint['mvip'],
endpoint['port'])
endpoint['svip'] = kwargs.get('svip', self.configuration.sf_svip)
if not endpoint.get('mvip', None) and kwargs.get('backend_id', None):
endpoint['mvip'] = kwargs.get('backend_id')
return endpoint
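    # Illustrative endpoint (assumed values):
    #   {'mvip': '10.10.10.10', 'login': 'admin', 'passwd': '***',
    #    'port': 443, 'url': 'https://10.10.10.10:443', 'svip': None}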
@retry(retry_exc_tuple, tries=6)
def _issue_api_request(self, method, params, version='1.0', endpoint=None):
if params is None:
params = {}
if endpoint is None:
endpoint = self.active_cluster_info['endpoint']
payload = {'method': method, 'params': params}
url = '%s/json-rpc/%s/' % (endpoint['url'], version)
with warnings.catch_warnings():
warnings.simplefilter("ignore", exceptions.InsecureRequestWarning)
req = requests.post(url,
data=json.dumps(payload),
auth=(endpoint['login'], endpoint['passwd']),
verify=False,
timeout=30)
response = req.json()
req.close()
if (('error' in response) and
(response['error']['name'] in self.retryable_errors)):
msg = ('Retryable error (%s) encountered during '
'SolidFire API call.' % response['error']['name'])
LOG.debug(msg)
raise exception.SolidFireRetryableException(message=msg)
if 'error' in response:
msg = _('API response: %s') % response
raise exception.SolidFireAPIException(msg)
return response
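    # Illustrative request/response shapes for the JSON-RPC call above
    # (values assumed):
    #   POST https://<mvip>:443/json-rpc/1.0/
    #   payload  -> {'method': 'GetClusterInfo', 'params': {}}
    #   response -> {'result': {...}} on success, {'error': {'name': ...}}
    #               on failure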
def _get_volumes_by_sfaccount(self, account_id, endpoint=None):
"""Get all volumes on cluster for specified account."""
params = {'accountID': account_id}
return self._issue_api_request(
'ListVolumesForAccount',
params,
endpoint=endpoint)['result']['volumes']
def _get_sfaccount_by_name(self, sf_account_name, endpoint=None):
"""Get SolidFire account object by name."""
sfaccount = None
params = {'username': sf_account_name}
try:
data = self._issue_api_request('GetAccountByName',
params,
endpoint=endpoint)
if 'result' in data and 'account' in data['result']:
LOG.debug('Found solidfire account: %s', sf_account_name)
sfaccount = data['result']['account']
except exception.SolidFireAPIException as ex:
if 'xUnknownAccount' in ex.msg:
return sfaccount
else:
raise
return sfaccount
def _get_sf_account_name(self, project_id):
"""Build the SolidFire account name to use."""
prefix = self.configuration.sf_account_prefix or ''
if prefix == 'hostname':
prefix = socket.gethostname()
return '%s%s%s' % (prefix, '-' if prefix else '', project_id)
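    # e.g. (illustrative values) with sf_account_prefix unset, project
    # 'abc123' maps to 'abc123'; with the special value 'hostname' on a host
    # named 'cinder01' it maps to 'cinder01-abc123'.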
def _get_sfaccount(self, project_id):
sf_account_name = self._get_sf_account_name(project_id)
sfaccount = self._get_sfaccount_by_name(sf_account_name)
if sfaccount is None:
raise exception.SolidFireAccountNotFound(
account_name=sf_account_name)
return sfaccount
def _create_sfaccount(self, project_id):
"""Create account on SolidFire device if it doesn't already exist.
We're first going to check if the account already exists, if it does
just return it. If not, then create it.
"""
sf_account_name = self._get_sf_account_name(project_id)
sfaccount = self._get_sfaccount_by_name(sf_account_name)
if sfaccount is None:
LOG.debug('solidfire account: %s does not exist, create it...',
sf_account_name)
chap_secret = self._generate_random_string(12)
params = {'username': sf_account_name,
'initiatorSecret': chap_secret,
'targetSecret': chap_secret,
'attributes': {}}
self._issue_api_request('AddAccount', params)
sfaccount = self._get_sfaccount_by_name(sf_account_name)
return sfaccount
def _generate_random_string(self, length):
"""Generates random_string to use for CHAP password."""
char_set = string.ascii_uppercase + string.digits
return ''.join(random.sample(char_set, length))
def _get_model_info(self, sfaccount, sf_volume_id, endpoint=None):
"""Gets the connection info for specified account and volume."""
if endpoint:
iscsi_portal = endpoint['svip']
else:
iscsi_portal = self.active_cluster_info['svip']
if ':' not in iscsi_portal:
iscsi_portal += ':3260'
chap_secret = sfaccount['targetSecret']
found_volume = False
iteration_count = 0
while not found_volume and iteration_count < 600:
volume_list = self._get_volumes_by_sfaccount(
sfaccount['accountID'], endpoint=endpoint)
iqn = None
for v in volume_list:
if v['volumeID'] == sf_volume_id:
iqn = v['iqn']
found_volume = True
break
if not found_volume:
time.sleep(2)
iteration_count += 1
if not found_volume:
LOG.error(_LE('Failed to retrieve volume SolidFire-'
'ID: %s in get_by_account!'), sf_volume_id)
raise exception.VolumeNotFound(volume_id=sf_volume_id)
model_update = {}
# NOTE(john-griffith): SF volumes are always at lun 0
model_update['provider_location'] = ('%s %s %s'
% (iscsi_portal, iqn, 0))
model_update['provider_auth'] = ('CHAP %s %s'
% (sfaccount['username'],
chap_secret))
if not self.configuration.sf_emulate_512:
model_update['provider_geometry'] = ('%s %s' % (4096, 4096))
model_update['provider_id'] = (
self._create_provider_id_string(sf_volume_id,
sfaccount['accountID']))
return model_update
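    # Illustrative shape of the returned model_update (values assumed):
    # provider_location is '<svip>:3260 <iqn> 0' and provider_auth is
    # 'CHAP <account-username> <chap-secret>'.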
def _snapshot_discovery(self, src_uuid, params, vref):
# NOTE(jdg): First check the SF snapshots
# if we don't find a snap by the given name, just move on to check
# volumes. This may be a running system that was updated from
# before we did snapshots, so need to check both
is_clone = False
sf_vol = None
snap_name = '%s%s' % (self.configuration.sf_volume_prefix, src_uuid)
snaps = self._get_sf_snapshots()
snap = next((s for s in snaps if s["name"] == snap_name), None)
if snap:
params['snapshotID'] = int(snap['snapshotID'])
params['volumeID'] = int(snap['volumeID'])
params['newSize'] = int(vref['size'] * units.Gi)
else:
sf_vol = self._get_sf_volume(src_uuid)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=src_uuid)
params['volumeID'] = int(sf_vol['volumeID'])
params['newSize'] = int(vref['size'] * units.Gi)
is_clone = True
return params, is_clone, sf_vol
def _do_clone_volume(self, src_uuid,
vref, sf_src_snap=None):
"""Create a clone of an existing volume or snapshot."""
attributes = {}
sf_account = self._get_create_account(vref['project_id'])
params = {'name': '%(prefix)s%(id)s' %
{'prefix': self.configuration.sf_volume_prefix,
'id': vref['id']},
'newAccountID': sf_account['accountID']}
is_clone = False
sf_vol = None
if sf_src_snap:
# In some scenarios we are passed the snapshot information that we
# are supposed to clone.
params['snapshotID'] = sf_src_snap['snapshotID']
params['volumeID'] = sf_src_snap['volumeID']
params['newSize'] = int(vref['size'] * units.Gi)
else:
params, is_clone, sf_vol = self._snapshot_discovery(src_uuid,
params,
vref)
data = self._issue_api_request('CloneVolume', params, version='6.0')
if (('result' not in data) or ('volumeID' not in data['result'])):
msg = _("API response: %s") % data
raise exception.SolidFireAPIException(msg)
sf_volume_id = data['result']['volumeID']
# NOTE(jdg): all attributes are copied via clone, need to do an update
# to set any that were provided
qos = self._retrieve_qos_setting(vref)
params = {'volumeID': sf_volume_id}
if qos:
params['qos'] = qos
create_time = vref['created_at'].isoformat()
attributes = {'uuid': vref['id'],
'is_clone': 'True',
'src_uuid': src_uuid,
'created_at': create_time}
params['attributes'] = attributes
data = self._issue_api_request('ModifyVolume', params)
model_update = self._get_model_info(sf_account, sf_volume_id)
if model_update is None:
mesg = _('Failed to get model update from clone')
raise exception.SolidFireAPIException(mesg)
# Increment the usage count, just for data collection
# We're only doing this for clones, not create_from snaps
if is_clone:
data = self._update_attributes(sf_vol)
return (data, sf_account, model_update)
def _update_attributes(self, sf_vol):
cloned_count = sf_vol['attributes'].get('cloned_count', 0)
cloned_count += 1
attributes = sf_vol['attributes']
attributes['cloned_count'] = cloned_count
params = {'volumeID': int(sf_vol['volumeID'])}
params['attributes'] = attributes
return self._issue_api_request('ModifyVolume', params)
def _do_volume_create(self, sf_account, params, endpoint=None):
params['accountID'] = sf_account['accountID']
sf_volid = self._issue_api_request(
'CreateVolume', params, endpoint=endpoint)['result']['volumeID']
return self._get_model_info(sf_account, sf_volid, endpoint=endpoint)
def _do_snapshot_create(self, params):
model_update = {}
snapshot_id = self._issue_api_request(
'CreateSnapshot', params, version='6.0')['result']['snapshotID']
snaps = self._get_sf_snapshots()
snap = (
next((s for s in snaps if int(s["snapshotID"]) ==
int(snapshot_id)), None))
model_update['provider_id'] = (
self._create_provider_id_string(snap['snapshotID'],
snap['volumeID']))
return model_update
def _set_qos_presets(self, volume):
qos = {}
valid_presets = self.sf_qos_dict.keys()
# First look to see if they included a preset
presets = [i.value for i in volume.get('volume_metadata')
if i.key == 'sf-qos' and i.value in valid_presets]
if len(presets) > 0:
if len(presets) > 1:
LOG.warning(_LW('More than one valid preset was '
'detected, using %s'), presets[0])
qos = self.sf_qos_dict[presets[0]]
else:
# look for explicit settings
for i in volume.get('volume_metadata'):
if i.key in self.sf_qos_keys:
qos[i.key] = int(i.value)
return qos
def _set_qos_by_volume_type(self, ctxt, type_id):
qos = {}
volume_type = volume_types.get_volume_type(ctxt, type_id)
qos_specs_id = volume_type.get('qos_specs_id')
specs = volume_type.get('extra_specs')
# NOTE(jdg): We prefer the qos_specs association
# and over-ride any existing
# extra-specs settings if present
if qos_specs_id is not None:
kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
else:
kvs = specs
for key, value in kvs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
if key in self.sf_qos_keys:
qos[key] = int(value)
return qos
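    # Illustrative example (assumed specs): extra specs of
    # {'qos:minIOPS': '100', 'qos:maxIOPS': '200'} yield the QoS dict
    # {'minIOPS': 100, 'maxIOPS': 200} once the scoped keys are split.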
def _get_sf_volume(self, uuid, params=None):
if params:
vols = self._issue_api_request(
'ListVolumesForAccount', params)['result']['volumes']
else:
vols = self._issue_api_request(
'ListActiveVolumes', params)['result']['volumes']
found_count = 0
sf_volref = None
for v in vols:
# NOTE(jdg): In the case of "name" we can't
# update that on manage/import, so we use
# the uuid attribute
meta = v.get('attributes')
alt_id = ''
if meta:
alt_id = meta.get('uuid', '')
if uuid in v['name'] or uuid in alt_id:
found_count += 1
sf_volref = v
LOG.debug("Mapped SolidFire volumeID %(volume_id)s "
"to cinder ID %(uuid)s.",
{'volume_id': v['volumeID'], 'uuid': uuid})
if found_count == 0:
# NOTE(jdg): Previously we would raise here, but there are cases
# where this might be a cleanup for a failed delete.
# Until we get better states we'll just log an error
LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid)
if found_count > 1:
LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s."),
{'count': found_count,
'uuid': uuid})
raise exception.DuplicateSfVolumeNames(vol_name=uuid)
return sf_volref
def _get_sf_snapshots(self, sf_volid=None):
params = {}
if sf_volid:
params = {'volumeID': sf_volid}
return self._issue_api_request(
'ListSnapshots', params, version='6.0')['result']['snapshots']
def _create_image_volume(self, context,
image_meta, image_service,
image_id):
with image_utils.TemporaryImages.fetch(image_service,
context,
image_id) as tmp_image:
data = image_utils.qemu_img_info(tmp_image)
fmt = data.file_format
if fmt is None:
raise exception.ImageUnacceptable(
reason=_("'qemu-img info' parsing failed."),
image_id=image_id)
backing_file = data.backing_file
if backing_file is not None:
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("fmt=%(fmt)s backed by:%(backing_file)s")
% {'fmt': fmt, 'backing_file': backing_file, })
virtual_size = int(math.ceil(float(data.virtual_size) / units.Gi))
attributes = {}
attributes['image_info'] = {}
attributes['image_info']['image_updated_at'] = (
image_meta['updated_at'].isoformat())
attributes['image_info']['image_name'] = (
image_meta['name'])
attributes['image_info']['image_created_at'] = (
image_meta['created_at'].isoformat())
attributes['image_info']['image_id'] = image_meta['id']
params = {'name': 'OpenStackIMG-%s' % image_id,
'accountID': self.template_account_id,
'sliceCount': 1,
'totalSize': int(virtual_size * units.Gi),
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': {}}
sf_account = self._issue_api_request(
'GetAccountByID',
{'accountID': self.template_account_id})['result']['account']
template_vol = self._do_volume_create(sf_account, params)
tvol = {}
tvol['id'] = image_id
tvol['provider_location'] = template_vol['provider_location']
tvol['provider_auth'] = template_vol['provider_auth']
connector = {'multipath': False}
conn = self.initialize_connection(tvol, connector)
attach_info = super(SolidFireDriver, self)._connect_device(conn)
properties = 'na'
try:
image_utils.convert_image(tmp_image,
attach_info['device']['path'],
'raw',
run_as_root=True)
data = image_utils.qemu_img_info(attach_info['device']['path'],
run_as_root=True)
if data.file_format != 'raw':
raise exception.ImageUnacceptable(
image_id=image_id,
reason=_("Converted to %(vol_format)s, but format is "
"now %(file_format)s") % {'vol_format': 'raw',
'file_format': data.
file_format})
except Exception as exc:
vol = self._get_sf_volume(image_id)
LOG.error(_LE('Failed image conversion during '
'cache creation: %s'),
exc)
LOG.debug('Removing SolidFire Cache Volume (SF ID): %s',
vol['volumeID'])
self._detach_volume(context, attach_info, tvol, properties)
self._issue_api_request('DeleteVolume', params)
return
self._detach_volume(context, attach_info, tvol, properties)
sf_vol = self._get_sf_volume(image_id, params)
LOG.debug('Successfully created SolidFire Image Template '
'for image-id: %s', image_id)
return sf_vol
def _verify_image_volume(self, context, image_meta, image_service):
# This method just verifies that IF we have a cache volume that
# it's still up to date and current WRT the image in Glance
# ie an image-update hasn't occurred since we grabbed it
# If it's out of date, just delete it and we'll create a new one
# Any other case we don't care and just return without doing anything
params = {'accountID': self.template_account_id}
sf_vol = self._get_sf_volume(image_meta['id'], params)
if sf_vol is None:
return
# Check updated_at field, delete copy and update if needed
if sf_vol['attributes']['image_info']['image_updated_at'] == (
image_meta['updated_at'].isoformat()):
return
else:
# Bummer, it's been updated, delete it
params = {'accountID': self.template_account_id}
params['volumeID'] = sf_vol['volumeID']
self._issue_api_request('DeleteVolume', params)
if not self._create_image_volume(context,
image_meta,
image_service,
image_meta['id']):
msg = _("Failed to create SolidFire Image-Volume")
raise exception.SolidFireAPIException(msg)
def _get_sfaccounts_for_tenant(self, cinder_project_id):
accounts = self._issue_api_request(
'ListAccounts', {})['result']['accounts']
# Note(jdg): On SF we map account-name to OpenStack's tenant ID
# we use tenantID in here to get secondaries that might exist
# Also: we expect this to be sorted, so we get the primary first
# in the list
return sorted([acc for acc in accounts if
cinder_project_id in acc['username']])
def _get_all_active_volumes(self, cinder_uuid=None):
params = {}
volumes = self._issue_api_request('ListActiveVolumes',
params)['result']['volumes']
if cinder_uuid:
vols = ([v for v in volumes if
                     cinder_uuid in v['name']])
else:
vols = [v for v in volumes]
return vols
def _get_all_deleted_volumes(self, cinder_uuid=None):
params = {}
vols = self._issue_api_request('ListDeletedVolumes',
params)['result']['volumes']
if cinder_uuid:
deleted_vols = ([v for v in vols if
cinder_uuid in v['name']])
else:
deleted_vols = [v for v in vols]
return deleted_vols
def _get_account_create_availability(self, accounts):
# we'll check both the primary and the secondary
# if it exists and return whichever one has count
# available.
for acc in accounts:
            if len(self._get_volumes_for_account(
                    acc['accountID'])) < self.max_volumes_per_account:
                return acc
if len(accounts) == 1:
            sfaccount = self._create_sfaccount(accounts[0]['username'] + '_')
return sfaccount
return None
def _get_create_account(self, proj_id):
# Retrieve SolidFire accountID to be used for creating volumes.
sf_accounts = self._get_sfaccounts_for_tenant(proj_id)
if not sf_accounts:
sf_account = self._create_sfaccount(proj_id)
else:
# Check availability for creates
sf_account = self._get_account_create_availability(sf_accounts)
if not sf_account:
msg = _('Volumes/account exceeded on both primary and '
'secondary SolidFire accounts.')
raise exception.SolidFireDriverException(msg)
return sf_account
def _get_volumes_for_account(self, sf_account_id, cinder_uuid=None):
# ListVolumesForAccount gives both Active and Deleted
# we require the solidfire accountID, uuid of volume
# is optional
params = {'accountID': sf_account_id}
vols = self._issue_api_request('ListVolumesForAccount',
params)['result']['volumes']
if cinder_uuid:
vlist = [v for v in vols if
cinder_uuid in v['name']]
else:
vlist = [v for v in vols]
vlist = sorted(vlist, key=lambda k: k['volumeID'])
return vlist
def _create_vag(self, iqn, vol_id=None):
"""Create a volume access group(vag).
Returns the vag_id.
"""
vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn)
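        # e.g. (illustrative value) 'iqn.1993-08.org.debian:01:abcd' becomes
        # 'iqn-1993-08-org-debian-01-abcd'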
params = {'name': vag_name,
'initiators': [iqn],
'volumes': [vol_id],
'attributes': {'openstack': True}}
try:
result = self._issue_api_request('CreateVolumeAccessGroup',
params,
version='7.0')
return result['result']['volumeAccessGroupID']
except exception.SolidFireAPIException as error:
if xExceededLimit in error.msg:
if iqn in error.msg:
# Initiator double registered.
return self._safe_create_vag(iqn, vol_id)
else:
# VAG limit reached. Purge and start over.
self._purge_vags()
return self._safe_create_vag(iqn, vol_id)
else:
raise
def _safe_create_vag(self, iqn, vol_id=None):
# Potential race condition with simultaneous volume attaches to the
# same host. To help avoid this, VAG creation makes a best attempt at
# finding and using an existing VAG.
vags = self._get_vags_by_name(iqn)
if vags:
# Filter through the vags and find the one with matching initiator
vag = next((v for v in vags if iqn in v['initiators']), None)
if vag:
return vag['volumeAccessGroupID']
else:
# No matches, use the first result, add initiator IQN.
vag_id = vags[0]['volumeAccessGroupID']
return self._add_initiator_to_vag(iqn, vag_id)
return self._create_vag(iqn, vol_id)
def _base_get_vags(self):
params = {}
vags = self._issue_api_request(
'ListVolumeAccessGroups',
params,
version='7.0')['result']['volumeAccessGroups']
return vags
def _get_vags_by_name(self, iqn):
"""Retrieve SolidFire volume access group objects by name.
Returns an array of vags with a matching name value.
Returns an empty array if there are no matches.
"""
vags = self._base_get_vags()
vag_name = re.sub('[^0-9a-zA-Z]+', '-', iqn)
matching_vags = [vag for vag in vags if vag['name'] == vag_name]
return matching_vags
def _add_initiator_to_vag(self, iqn, vag_id):
# Added a vag_id return as there is a chance that we might have to
# create a new VAG if our target VAG is deleted underneath us.
params = {"initiators": [iqn],
"volumeAccessGroupID": vag_id}
try:
self._issue_api_request('AddInitiatorsToVolumeAccessGroup',
params,
version='7.0')
return vag_id
except exception.SolidFireAPIException as error:
if xAlreadyInVolumeAccessGroup in error.msg:
return vag_id
elif xVolumeAccessGroupIDDoesNotExist in error.msg:
# No locking means sometimes a VAG can be removed by a parallel
# volume detach against the same host.
return self._safe_create_vag(iqn)
else:
raise
def _add_volume_to_vag(self, vol_id, iqn, vag_id):
# Added a vag_id return to be consistent with add_initiator_to_vag. It
# isn't necessary but may be helpful in the future.
params = {"volumeAccessGroupID": vag_id,
"volumes": [vol_id]}
try:
self._issue_api_request('AddVolumesToVolumeAccessGroup',
params,
version='7.0')
return vag_id
except exception.SolidFireAPIException as error:
if xAlreadyInVolumeAccessGroup in error.msg:
return vag_id
elif xVolumeAccessGroupIDDoesNotExist in error.msg:
return self._safe_create_vag(iqn, vol_id)
else:
raise
def _remove_volume_from_vag(self, vol_id, vag_id):
params = {"volumeAccessGroupID": vag_id,
"volumes": [vol_id]}
try:
self._issue_api_request('RemoveVolumesFromVolumeAccessGroup',
params,
version='7.0')
except exception.SolidFireAPIException as error:
if xNotInVolumeAccessGroup in error.msg:
pass
elif xVolumeAccessGroupIDDoesNotExist in error.msg:
pass
else:
raise
def _remove_volume_from_vags(self, vol_id):
# Due to all sorts of uncertainty around multiattach, on volume
# deletion we make a best attempt at removing the vol_id from VAGs.
vags = self._base_get_vags()
targets = [v for v in vags if vol_id in v['volumes']]
for vag in targets:
self._remove_volume_from_vag(vol_id, vag['volumeAccessGroupID'])
def _remove_vag(self, vag_id):
params = {"volumeAccessGroupID": vag_id}
try:
self._issue_api_request('DeleteVolumeAccessGroup',
params,
version='7.0')
except exception.SolidFireAPIException as error:
if xVolumeAccessGroupIDDoesNotExist not in error.msg:
raise
def _purge_vags(self, limit=10):
# Purge up to limit number of VAGs that have no active volumes,
# initiators, and an OpenStack attribute. Purge oldest VAGs first.
vags = self._base_get_vags()
targets = [v for v in vags if v['volumes'] == [] and
v['initiators'] == [] and
v['deletedVolumes'] == [] and
v['attributes'].get('openstack')]
sorted_targets = sorted(targets,
key=lambda k: k['volumeAccessGroupID'])
for vag in sorted_targets[:limit]:
self._remove_vag(vag['volumeAccessGroupID'])
def clone_image(self, context,
volume, image_location,
image_meta, image_service):
public = False
# Check out pre-requisites:
# Is template caching enabled?
if not self.configuration.sf_allow_template_caching:
return None, False
# NOTE(jdg): Glance V2 moved from is_public to visibility
# so we check both, as we don't necessarily know or want
# to care which we're using. Will need to look at
# future handling of things like shared and community
# but for now, it's owner or public and that's it
visibility = image_meta.get('visibility', None)
if visibility and visibility == 'public':
public = True
elif image_meta.get('is_public', False):
public = True
else:
if image_meta['owner'] == volume['project_id']:
public = True
if not public:
LOG.warning(_LW("Requested image is not "
"accessible by current Tenant."))
return None, False
try:
self._verify_image_volume(context,
image_meta,
image_service)
except exception.SolidFireAPIException:
return None, False
try:
(data, sfaccount, model) = self._do_clone_volume(image_meta['id'],
volume)
except exception.VolumeNotFound:
if self._create_image_volume(context,
image_meta,
image_service,
image_meta['id']) is None:
# We failed, dump out
return None, False
# Ok, should be good to go now, try it again
(data, sfaccount, model) = self._do_clone_volume(image_meta['id'],
volume)
return model, True
def _retrieve_qos_setting(self, volume):
qos = {}
if (self.configuration.sf_allow_tenant_qos and
                volume.get('volume_metadata') is not None):
qos = self._set_qos_presets(volume)
ctxt = context.get_admin_context()
type_id = volume.get('volume_type_id', None)
if type_id is not None:
qos = self._set_qos_by_volume_type(ctxt, type_id)
return qos
def create_volume(self, volume):
"""Create volume on SolidFire device.
The account is where CHAP settings are derived from, volume is
created and exported. Note that the new volume is immediately ready
for use.
One caveat here is that an existing user account must be specified
in the API call to create a new volume. We use a set algorithm to
determine account info based on passed in cinder volume object. First
we check to see if the account already exists (and use it), or if it
does not already exist, we'll go ahead and create it.
"""
slice_count = 1
attributes = {}
sf_account = self._get_create_account(volume['project_id'])
qos = self._retrieve_qos_setting(volume)
create_time = volume['created_at'].isoformat()
attributes = {'uuid': volume['id'],
'is_clone': 'False',
'created_at': create_time}
vname = '%s%s' % (self.configuration.sf_volume_prefix, volume['id'])
params = {'name': vname,
'accountID': sf_account['accountID'],
'sliceCount': slice_count,
'totalSize': int(volume['size'] * units.Gi),
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': qos}
# NOTE(jdg): Check if we're a migration tgt, if so
# use the old volume-id here for the SF Name
migration_status = volume.get('migration_status', None)
if migration_status and 'target' in migration_status:
k, v = migration_status.split(':')
vname = '%s%s' % (self.configuration.sf_volume_prefix, v)
params['name'] = vname
params['attributes']['migration_uuid'] = volume['id']
params['attributes']['uuid'] = v
model_update = self._do_volume_create(sf_account, params)
try:
rep_settings = self._retrieve_replication_settings(volume)
if self.replication_enabled and rep_settings:
volume['volumeID'] = (
int(model_update['provider_id'].split()[0]))
self._replicate_volume(volume, params,
sf_account, rep_settings)
except exception.SolidFireAPIException:
# NOTE(jdg): Something went wrong after the source create, due to
# the way TFLOW works and it's insistence on retrying the same
# command over and over coupled with the fact that the introduction
# of objects now sets host to None on failures we'll end up with an
# orphaned volume on the backend for every one of these segments
# that fail, for n-retries. Sad Sad Panda!! We'll just do it
# ourselves until we can get a general fix in Cinder further up the
# line
with excutils.save_and_reraise_exception():
sf_volid = int(model_update['provider_id'].split()[0])
self._issue_api_request('DeleteVolume', {'volumeID': sf_volid})
return model_update
def _retrieve_replication_settings(self, volume):
rep_data = {}
ctxt = context.get_admin_context()
type_id = volume.get('volume_type_id', None)
if type_id is not None:
rep_data = self._set_rep_by_volume_type(ctxt, type_id)
return rep_data
def _set_rep_by_volume_type(self, ctxt, type_id):
rep_opts = {}
type_ref = volume_types.get_volume_type(ctxt, type_id)
specs = type_ref.get('extra_specs')
if specs.get('replication', 'disabled').lower() == 'enabled':
rep_opts['targets'] = specs.get(
'solidfire:replication_targets', self.cluster_pairs[0])
return rep_opts
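    # Illustrative example (assumed specs): a volume type whose extra specs
    # include {'replication': 'Enabled'} (optionally with
    # 'solidfire:replication_targets') enables pairing against the first
    # configured cluster pair.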
def _replicate_volume(self, volume, src_params,
parent_sfaccount, rep_info):
params = {}
# TODO(jdg): Right now we just go to first pair,
# need to add parsing of rep_info eventually
# in other words "rep_info" is not used yet!
tgt_endpoint = self.cluster_pairs[0]['endpoint']
LOG.debug("Replicating volume on remote cluster: %s", tgt_endpoint)
params['attributes'] = src_params['attributes']
params['username'] = self._get_sf_account_name(volume['project_id'])
try:
params['initiatorSecret'] = parent_sfaccount['initiatorSecret']
params['targetSecret'] = parent_sfaccount['targetSecret']
self._issue_api_request(
'AddAccount',
params,
endpoint=tgt_endpoint)['result']['accountID']
except exception.SolidFireAPIException as ex:
if 'xDuplicateUsername' not in ex.msg:
raise
remote_account = (
self._get_sfaccount_by_name(params['username'],
endpoint=tgt_endpoint))
# Create the volume on the remote cluster w/same params as original
params = src_params
params['accountID'] = remote_account['accountID']
LOG.debug("Create remote volume on: %(endpoint)s with account: "
"%(account)s",
{'endpoint': tgt_endpoint['url'], 'account': remote_account})
model_update = self._do_volume_create(
remote_account, params, endpoint=tgt_endpoint)
tgt_sfid = int(model_update['provider_id'].split()[0])
params = {'volumeID': tgt_sfid, 'access': 'replicationTarget'}
self._issue_api_request('ModifyVolume',
params,
'8.0',
endpoint=tgt_endpoint)
# Enable volume pairing
LOG.debug("Start volume pairing on volume ID: %s",
volume['volumeID'])
params = {'volumeID': volume['volumeID']}
rep_key = self._issue_api_request('StartVolumePairing',
params,
'8.0')['result']['volumePairingKey']
params = {'volumeID': tgt_sfid,
'volumePairingKey': rep_key}
LOG.debug("Issue CompleteVolumePairing request on remote: "
"%(endpoint)s, %(parameters)s",
{'endpoint': tgt_endpoint['url'], 'parameters': params})
self._issue_api_request('CompleteVolumePairing',
params,
'8.0',
endpoint=tgt_endpoint)
LOG.debug("Completed volume pairing.")
return model_update
def create_cloned_volume(self, volume, src_vref):
"""Create a clone of an existing volume."""
(_data, _sfaccount, model) = self._do_clone_volume(
src_vref['id'],
volume)
return model
def delete_volume(self, volume):
"""Delete SolidFire Volume from device.
SolidFire allows multiple volumes with same name,
volumeID is what's guaranteed unique.
"""
sf_vol = None
accounts = self._get_sfaccounts_for_tenant(volume['project_id'])
if accounts is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"delete_volume operation!"), volume['id'])
LOG.error(_LE("This usually means the volume was never "
"successfully created."))
return
for acc in accounts:
vols = self._get_volumes_for_account(acc['accountID'],
volume['id'])
if vols:
sf_vol = vols[0]
break
if sf_vol is not None:
for vp in sf_vol.get('volumePairs', []):
LOG.debug("Deleting paired volume on remote cluster...")
pair_id = vp['clusterPairID']
for cluster in self.cluster_pairs:
if cluster['clusterPairID'] == pair_id:
params = {'volumeID': vp['remoteVolumeID']}
LOG.debug("Issue Delete request on cluster: "
"%(remote)s with params: %(parameters)s",
{'remote': cluster['endpoint']['url'],
'parameters': params})
self._issue_api_request('DeleteVolume', params,
endpoint=cluster['endpoint'])
if sf_vol['status'] == 'active':
params = {'volumeID': sf_vol['volumeID']}
self._issue_api_request('DeleteVolume', params)
if volume.get('multiattach'):
self._remove_volume_from_vags(sf_vol['volumeID'])
else:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"delete_volume operation!"), volume['id'])
def delete_snapshot(self, snapshot):
"""Delete the specified snapshot from the SolidFire cluster."""
sf_snap_name = '%s%s' % (self.configuration.sf_volume_prefix,
snapshot['id'])
accounts = self._get_sfaccounts_for_tenant(snapshot['project_id'])
snap = None
for acct in accounts:
params = {'accountID': acct['accountID']}
sf_vol = self._get_sf_volume(snapshot['volume_id'], params)
if sf_vol:
sf_snaps = self._get_sf_snapshots(sf_vol['volumeID'])
snap = next((s for s in sf_snaps if s["name"] == sf_snap_name),
None)
if snap:
params = {'snapshotID': snap['snapshotID']}
self._issue_api_request('DeleteSnapshot',
params,
version='6.0')
return
# Make sure it's not "old style" using clones as snaps
LOG.debug("Snapshot not found, checking old style clones.")
self.delete_volume(snapshot)
def create_snapshot(self, snapshot):
sfaccount = self._get_sfaccount(snapshot['project_id'])
if sfaccount is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"create_snapshot operation!"), snapshot['volume_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(snapshot['volume_id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=snapshot['volume_id'])
params = {'volumeID': sf_vol['volumeID'],
'name': '%s%s' % (self.configuration.sf_volume_prefix,
snapshot['id'])}
return self._do_snapshot_create(params)
def create_volume_from_snapshot(self, volume, snapshot):
"""Create a volume from the specified snapshot."""
(_data, _sfaccount, model) = self._do_clone_volume(
snapshot['id'],
volume)
return model
# Consistency group helpers
def _create_group_snapshot(self, name, sf_volumes):
# Group snapshot is our version of a consistency group snapshot.
vol_ids = [vol['volumeID'] for vol in sf_volumes]
params = {'name': name,
'volumes': vol_ids}
snapshot_id = self._issue_api_request('CreateGroupSnapshot',
params,
version='7.0')
return snapshot_id['result']
def _group_snapshot_creator(self, gsnap_name, src_vol_ids):
# Common helper that takes in an array of OpenStack Volume UUIDs and
# creates a SolidFire group snapshot with them.
vol_names = [self.configuration.sf_volume_prefix + vol_id
for vol_id in src_vol_ids]
active_sf_vols = self._get_all_active_volumes()
target_vols = [vol for vol in active_sf_vols
if vol['name'] in vol_names]
if len(src_vol_ids) != len(target_vols):
msg = (_("Retrieved a different amount of SolidFire volumes for "
"the provided Cinder volumes. Retrieved: %(ret)s "
"Desired: %(des)s") % {"ret": len(target_vols),
"des": len(src_vol_ids)})
raise exception.SolidFireDriverException(msg)
result = self._create_group_snapshot(gsnap_name, target_vols)
return result
def _create_temp_group_snapshot(self, source_cg, source_vols):
# Take a temporary snapshot to create the volumes for a new
# consistency group.
gsnap_name = ("%(prefix)s%(id)s-tmp" %
{"prefix": self.configuration.sf_volume_prefix,
"id": source_cg['id']})
vol_ids = [vol['id'] for vol in source_vols]
self._group_snapshot_creator(gsnap_name, vol_ids)
return gsnap_name
def _list_group_snapshots(self):
result = self._issue_api_request('ListGroupSnapshots',
{},
version='7.0')
return result['result']['groupSnapshots']
def _get_group_snapshot_by_name(self, name):
target_snaps = self._list_group_snapshots()
target = next((snap for snap in target_snaps
if snap['name'] == name), None)
return target
def _delete_group_snapshot(self, gsnapid):
params = {'groupSnapshotID': gsnapid}
self._issue_api_request('DeleteGroupSnapshot',
params,
version='7.0')
def _delete_cgsnapshot_by_name(self, snap_name):
# Common function used to find and delete a snapshot.
target = self._get_group_snapshot_by_name(snap_name)
if not target:
msg = _("Failed to find group snapshot named: %s") % snap_name
raise exception.SolidFireDriverException(msg)
self._delete_group_snapshot(target['groupSnapshotID'])
def _find_linked_snapshot(self, target_uuid, group_snap):
        # Because group snapshots give each individual snapshot the group
        # snapshot's name, we have to trawl through the SolidFire snapshots
        # to find the snapshot from the group that is linked to the
        # SolidFire volumeID of the Cinder snapshot's source volume.
source_vol = self._get_sf_volume(target_uuid)
target_snap = next((sn for sn in group_snap['members']
if sn['volumeID'] == source_vol['volumeID']), None)
return target_snap
def _create_clone_from_sf_snapshot(self, target_uuid, src_uuid,
sf_group_snap, vol):
# Find the correct SolidFire backing snapshot.
sf_src_snap = self._find_linked_snapshot(target_uuid,
sf_group_snap)
_data, _sfaccount, model = self._do_clone_volume(src_uuid,
vol,
sf_src_snap)
model['id'] = vol['id']
model['status'] = 'available'
return model
def _map_sf_volumes(self, cinder_volumes, endpoint=None):
"""Get a list of SolidFire volumes.
        Creates a list of SolidFire volumes by matching
        a list of Cinder volume IDs, and also adds a
        'cinder_id' key to each matched volume.
"""
vols = self._issue_api_request(
'ListActiveVolumes', {},
endpoint=endpoint)['result']['volumes']
vlist = (
[sfvol for sfvol in vols for cv in cinder_volumes if cv['id'] in
sfvol['name']])
for v in vlist:
v['cinder_id'] = v['name'].split(
self.configuration.sf_volume_prefix)[1]
return vlist
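    # Illustrative sketch (hypothetical IDs): with sf_volume_prefix 'UUID-',
    # a SolidFire volume named 'UUID-1a2b3c' matches the Cinder volume whose
    # id is '1a2b3c', and the split above sets
    #     v['cinder_id'] = 'UUID-1a2b3c'.split('UUID-')[1]  # -> '1a2b3c'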
# Required consistency group functions
def create_consistencygroup(self, ctxt, group):
# SolidFire does not have a viable means for storing consistency group
# volume associations. So, we're just going to play along with the
# consistency group song and dance. There will be a lot of no-ops
# because of this.
return {'status': fields.ConsistencyGroupStatus.AVAILABLE}
def create_consistencygroup_from_src(self, ctxt, group, volumes,
cgsnapshot, snapshots,
source_cg, source_vols):
if cgsnapshot and snapshots:
sf_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
sf_group_snap = self._get_group_snapshot_by_name(sf_name)
# Go about creating volumes from provided snaps.
vol_models = []
for vol, snap in zip(volumes, snapshots):
vol_models.append(self._create_clone_from_sf_snapshot(
snap['volume_id'],
snap['id'],
sf_group_snap,
vol))
return ({'status': fields.ConsistencyGroupStatus.AVAILABLE},
vol_models)
elif source_cg and source_vols:
# Create temporary group snapshot.
gsnap_name = self._create_temp_group_snapshot(source_cg,
source_vols)
try:
sf_group_snap = self._get_group_snapshot_by_name(gsnap_name)
# For each temporary snapshot clone the volume.
vol_models = []
for vol in volumes:
vol_models.append(self._create_clone_from_sf_snapshot(
vol['source_volid'],
vol['source_volid'],
sf_group_snap,
vol))
finally:
self._delete_cgsnapshot_by_name(gsnap_name)
return {'status': 'available'}, vol_models
def create_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
vol_ids = [snapshot['volume_id'] for snapshot in snapshots]
vol_names = [self.configuration.sf_volume_prefix + vol_id
for vol_id in vol_ids]
active_sf_vols = self._get_all_active_volumes()
target_vols = [vol for vol in active_sf_vols
if vol['name'] in vol_names]
if len(snapshots) != len(target_vols):
msg = (_("Retrieved a different amount of SolidFire volumes for "
"the provided Cinder snapshots. Retrieved: %(ret)s "
"Desired: %(des)s") % {"ret": len(target_vols),
"des": len(snapshots)})
raise exception.SolidFireDriverException(msg)
snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
self._create_group_snapshot(snap_name, target_vols)
return None, None
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
# Similar to create_consistencygroup, SolidFire's lack of a consistency
# group object means there is nothing to update on the cluster.
return None, None, None
def delete_cgsnapshot(self, ctxt, cgsnapshot, snapshots):
snap_name = self.configuration.sf_volume_prefix + cgsnapshot['id']
self._delete_cgsnapshot_by_name(snap_name)
return None, None
def delete_consistencygroup(self, ctxt, group, volumes):
for vol in volumes:
self.delete_volume(vol)
return None, None
def get_volume_stats(self, refresh=False):
"""Get volume status.
If 'refresh' is True, run update first.
        The name is a bit misleading, as the majority
        of the data here is cluster data.
"""
if refresh:
try:
self._update_cluster_status()
except exception.SolidFireAPIException:
pass
return self.cluster_stats
def extend_volume(self, volume, new_size):
"""Extend an existing volume."""
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"extend_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
params = {
'volumeID': sf_vol['volumeID'],
'totalSize': int(new_size * units.Gi)
}
self._issue_api_request('ModifyVolume',
params, version='5.0')
def _update_cluster_status(self):
"""Retrieve status info for the Cluster."""
params = {}
# NOTE(jdg): The SF api provides an UNBELIEVABLE amount
        # of stats data; this is just one of the calls.
results = self._issue_api_request('GetClusterCapacity', params)
results = results['result']['clusterCapacity']
free_capacity = (
results['maxProvisionedSpace'] - results['usedSpace'])
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data["volume_backend_name"] = backend_name or self.__class__.__name__
data["vendor_name"] = 'SolidFire Inc'
data["driver_version"] = self.VERSION
data["storage_protocol"] = 'iSCSI'
data['consistencygroup_support'] = True
# TODO(jdg): should we have a "replication_status" that includes
# enabled, disabled, failed-over, error ?
data['replication_enabled'] = self.replication_enabled
if self.replication_enabled:
data['replication'] = 'enabled'
data['active_cluster_mvip'] = self.active_cluster_info['mvip']
data['total_capacity_gb'] = (
float(results['maxProvisionedSpace'] / units.Gi))
data['free_capacity_gb'] = float(free_capacity / units.Gi)
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = True
data['compression_percent'] = (
results['compressionPercent'])
data['deduplicaton_percent'] = (
results['deDuplicationPercent'])
data['thin_provision_percent'] = (
results['thinProvisioningPercent'])
self.cluster_stats = data
def initialize_connection(self, volume, connector):
"""Initialize the connection and return connection info.
Optionally checks and utilizes volume access groups.
"""
properties = self._sf_initialize_connection(volume, connector)
properties['data']['discard'] = True
return properties
def attach_volume(self, context, volume,
instance_uuid, host_name,
mountpoint):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"attach_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
attributes['attach_time'] = volume.get('attach_time', None)
attributes['attached_to'] = instance_uuid
params = {
'volumeID': sf_vol['volumeID'],
'attributes': attributes
}
self._issue_api_request('ModifyVolume', params)
def terminate_connection(self, volume, properties, force):
return self._sf_terminate_connection(volume,
properties,
force)
def detach_volume(self, context, volume, attachment=None):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"detach_volume operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
attributes['attach_time'] = None
attributes['attached_to'] = None
params = {
'volumeID': sf_vol['volumeID'],
'attributes': attributes
}
self._issue_api_request('ModifyVolume', params)
def accept_transfer(self, context, volume,
new_user, new_project):
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
LOG.error(_LE("Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"accept_transfer operation!"), volume['id'])
raise exception.VolumeNotFound(volume_id=volume['id'])
if new_project != volume['project_id']:
# do a create_sfaccount here as this tenant
# may not exist on the cluster yet
sfaccount = self._create_sfaccount(new_project)
params = {
'volumeID': sf_vol['volumeID'],
'accountID': sfaccount['accountID']
}
self._issue_api_request('ModifyVolume',
params, version='5.0')
volume['project_id'] = new_project
volume['user_id'] = new_user
return self.target_driver.ensure_export(context, volume, None)
def retype(self, ctxt, volume, new_type, diff, host):
"""Convert the volume to be of the new type.
Returns a boolean indicating whether the retype occurred.
:param ctxt: Context
:param volume: A dictionary describing the volume to migrate
:param new_type: A dictionary describing the volume type to convert to
:param diff: A dictionary with the difference between the two types
:param host: A dictionary describing the host to migrate to, where
host['host'] is its name, and host['capabilities'] is a
dictionary of its reported capabilities (Not Used).
"""
qos = {}
attributes = {}
sfaccount = self._get_sfaccount(volume['project_id'])
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=volume['id'])
attributes = sf_vol['attributes']
attributes['retyped_at'] = timeutils.utcnow().isoformat()
params = {'volumeID': sf_vol['volumeID']}
qos = self._set_qos_by_volume_type(ctxt, new_type['id'])
if qos:
params['qos'] = qos
self._issue_api_request('ModifyVolume', params)
return True
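    # Illustrative sketch (hypothetical QoS specs): if the new volume type
    # maps to QoS settings such as
    #     {'minIOPS': 100, 'maxIOPS': 200, 'burstIOPS': 300}
    # they are passed through unchanged as the 'qos' key of the
    # ModifyVolume request issued above.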
def manage_existing(self, volume, external_ref):
"""Manages an existing SolidFire Volume (import to Cinder).
Renames the Volume to match the expected name for the volume.
        We also need to consider things like QoS, emulation, and
        account/tenant.
"""
sfid = external_ref.get('source-id', None)
sfname = external_ref.get('name', None)
if sfid is None:
raise exception.SolidFireAPIException(_("Manage existing volume "
"requires 'source-id'."))
# First get the volume on the SF cluster (MUST be active)
params = {'startVolumeID': sfid,
'limit': 1}
vols = self._issue_api_request(
'ListActiveVolumes', params)['result']['volumes']
sf_ref = vols[0]
sfaccount = self._create_sfaccount(volume['project_id'])
attributes = {}
qos = self._retrieve_qos_setting(volume)
import_time = volume['created_at'].isoformat()
attributes = {'uuid': volume['id'],
'is_clone': 'False',
'os_imported_at': import_time,
'old_name': sfname}
params = {'name': volume['name'],
'volumeID': sf_ref['volumeID'],
'accountID': sfaccount['accountID'],
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': qos}
self._issue_api_request('ModifyVolume',
params, version='5.0')
return self._get_model_info(sfaccount, sf_ref['volumeID'])
def manage_existing_get_size(self, volume, external_ref):
"""Return size of an existing LV for manage_existing.
existing_ref is a dictionary of the form:
{'name': <name of existing volume on SF Cluster>}
"""
sfid = external_ref.get('source-id', None)
if sfid is None:
            raise exception.SolidFireAPIException(_("Manage existing get size "
                                                    "requires 'source-id'."))
params = {'startVolumeID': int(sfid),
'limit': 1}
vols = self._issue_api_request(
'ListActiveVolumes', params)['result']['volumes']
return int(math.ceil(float(vols[0]['totalSize']) / units.Gi))
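    # Illustrative sketch (assuming units.Gi == 2 ** 30): a backend volume
    # of 11274289152 bytes (10.5 GiB) is reported to Cinder as 11 GB:
    #     int(math.ceil(float(11274289152) / units.Gi))  # -> 11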
def unmanage(self, volume):
"""Mark SolidFire Volume as unmanaged (export from Cinder)."""
sfaccount = self._get_sfaccount(volume['project_id'])
if sfaccount is None:
LOG.error(_LE("Account for Volume ID %s was not found on "
"the SolidFire Cluster while attempting "
"unmanage operation!"), volume['id'])
raise exception.SolidFireAPIException(_("Failed to find account "
"for volume."))
params = {'accountID': sfaccount['accountID']}
sf_vol = self._get_sf_volume(volume['id'], params)
if sf_vol is None:
raise exception.VolumeNotFound(volume_id=volume['id'])
export_time = timeutils.utcnow().isoformat()
attributes = sf_vol['attributes']
attributes['os_exported_at'] = export_time
params = {'volumeID': int(sf_vol['volumeID']),
'attributes': attributes}
self._issue_api_request('ModifyVolume',
params, version='5.0')
def _failover_volume(self, remote_vol, remote):
"""Modify remote volume to R/W mode."""
self._issue_api_request(
'RemoveVolumePair',
{'volumeID': remote_vol['volumeID']},
endpoint=remote['endpoint'], version='7.0')
params = {'volumeID': remote_vol['volumeID'],
'access': 'readWrite'}
self._issue_api_request('ModifyVolume', params,
endpoint=remote['endpoint'])
def failover_host(self, context, volumes, secondary_id=None):
"""Failover to replication target."""
volume_updates = []
remote = None
if secondary_id:
for rc in self.cluster_pairs:
if rc['mvip'] == secondary_id:
remote = rc
break
if not remote:
LOG.error(_LE("SolidFire driver received failover_host "
"but was unable to find specified replication "
"pair with id: %s."), secondary_id)
raise exception.InvalidReplicationTarget
else:
remote = self.cluster_pairs[0]
if not remote or not self.replication_enabled:
LOG.error(_LE("SolidFire driver received failover_host "
"request, however replication is NOT "
"enabled, or there are no available "
"targets to fail-over to."))
raise exception.UnableToFailOver(reason=_("Failover requested "
"on non replicated "
"backend."))
remote_vols = self._map_sf_volumes(volumes,
endpoint=remote['endpoint'])
primary_vols = self._map_sf_volumes(volumes)
for v in volumes:
remote_vlist = filter(lambda sfv: sfv['cinder_id'] == v['id'],
remote_vols)
if len(remote_vlist) > 0:
remote_vol = remote_vlist[0]
self._failover_volume(remote_vol, remote)
primary_vol = filter(lambda sfv: sfv['cinder_id'] == v['id'],
primary_vols)[0]
if len(primary_vol['volumePairs']) > 0:
self._issue_api_request(
'RemoveVolumePair',
{'volumeID': primary_vol['volumeID']},
version='7.0')
iqn = remote_vol['iqn']
volume_updates.append(
{'volume_id': v['id'],
'updates': {
'provider_location': ('%s %s %s' %
(remote['endpoint']['svip'],
iqn,
0)),
'replication_status': 'failed-over'}})
else:
volume_updates.append({'volume_id': v['id'],
'updates': {'status': 'error', }})
        # FIXME(jdg): This introduces a problem for us; up until now our
        # driver has been pretty much stateless and has allowed customers to
        # run active/active HA c-vol services with SolidFire. The
        # introduction of the active_cluster and failed_over attributes is
        # going to break that, but for now that's the trade-off of using
        # replication.
self.active_cluster_info = remote
self.failed_over = True
return remote['mvip'], volume_updates
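    # Illustrative sketch (hypothetical values): the provider_location
    # written for each failed-over volume is '<svip> <iqn> <lun>', e.g.
    #     '10.10.10.10:3260 iqn.2010-01.com.solidfire:abcd.volume.42 0'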
def freeze_backend(self, context):
"""Freeze backend notification."""
pass
def thaw_backend(self, context):
"""Thaw backend notification."""
pass
class SolidFireISCSI(iscsi_driver.SanISCSITarget):
def __init__(self, *args, **kwargs):
super(SolidFireISCSI, self).__init__(*args, **kwargs)
self.sf_driver = kwargs.get('solidfire_driver')
def __getattr__(self, attr):
if hasattr(self.sf_driver, attr):
return getattr(self.sf_driver, attr)
else:
msg = _('Attribute: %s not found.') % attr
raise NotImplementedError(msg)
def _do_iscsi_export(self, volume):
sfaccount = self._get_sfaccount(volume['project_id'])
model_update = {}
model_update['provider_auth'] = ('CHAP %s %s'
% (sfaccount['username'],
sfaccount['targetSecret']))
return model_update
def create_export(self, context, volume, volume_path):
return self._do_iscsi_export(volume)
def ensure_export(self, context, volume, volume_path):
try:
return self._do_iscsi_export(volume)
except exception.SolidFireAPIException:
return None
# Following are abc's that we make sure are caught and
# paid attention to. In our case we don't use them
# so just stub them out here.
def remove_export(self, context, volume):
pass
def terminate_connection(self, volume, connector, **kwargs):
pass
def _sf_initialize_connection(self, volume, connector):
"""Initialize the connection and return connection info.
Optionally checks and utilizes volume access groups.
"""
if self.configuration.sf_enable_vag:
iqn = connector['initiator']
provider_id = volume['provider_id']
vol_id = int(provider_id.split()[0])
# safe_create_vag may opt to reuse vs create a vag, so we need to
# add our vol_id.
vag_id = self._safe_create_vag(iqn, vol_id)
self._add_volume_to_vag(vol_id, iqn, vag_id)
# Continue along with default behavior
return super(SolidFireISCSI, self).initialize_connection(volume,
connector)
def _sf_terminate_connection(self, volume, properties, force):
"""Terminate the volume connection.
Optionally remove volume from volume access group.
If the VAG is empty then the VAG is also removed.
"""
if self.configuration.sf_enable_vag:
iqn = properties['initiator']
vag = self._get_vags_by_name(iqn)
provider_id = volume['provider_id']
vol_id = int(provider_id.split()[0])
if vag and not volume['multiattach']:
# Multiattach causes problems with removing volumes from VAGs.
# Compromise solution for now is to remove multiattach volumes
# from VAGs during volume deletion.
vag = vag[0]
vag_id = vag['volumeAccessGroupID']
if [vol_id] == vag['volumes']:
self._remove_vag(vag_id)
elif vol_id in vag['volumes']:
self._remove_volume_from_vag(vol_id, vag_id)
return super(SolidFireISCSI, self).terminate_connection(volume,
properties,
force=force)
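    # Illustrative sketch (hypothetical IDs): if the VAG's volume list is
    # exactly [vol_id], the whole group is removed above; if it is, say,
    # [vol_id, 7, 9], only vol_id is removed and the group is kept.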
| apache-2.0 |
josschne/BabyWomp | cocos2d/tools/cocos2d-console/console/cocos2d_jscompile.py | 17 | 11265 | #!/usr/bin/python
# ----------------------------------------------------------------------------
# cocos2d "jscompile" plugin
#
# Copyright 2013 (C) Intel
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"jscompile" plugin for cocos2d command line tool
'''
__docformat__ = 'restructuredtext'
import sys
import subprocess
import os
import json
import inspect
import cocos2d
class CCPluginJSCompile(cocos2d.CCPlugin):
"""
compiles (encodes) and minifies JS files
"""
@staticmethod
def brief_description():
# returns a short description of this module
return "jscompile\tminifies and/or compiles js files"
    # This is not the constructor, just an initializer
def init(self, options, workingdir):
"""
Arguments:
- `options`:
"""
self._current_src_dir = None
self._src_dir_arr = self.normalize_path_in_list(options.src_dir_arr)
self._dst_dir = options.dst_dir
self._use_closure_compiler = options.use_closure_compiler
self._config = None
self._workingdir = workingdir
        if options.compiler_config is not None:
f = open(options.compiler_config)
self._config = json.load(f)
f.close()
self.normalize_path_in_list(self._config["pre_order"])
self.normalize_path_in_list(self._config["post_order"])
self.normalize_path_in_list(self._config["skip"])
self._success = []
self._failure = []
self._js_files = {}
self._compressed_js_path = os.path.join(self._dst_dir, options.compressed_filename)
self._compressed_jsc_path = os.path.join(self._dst_dir, options.compressed_filename+"c")
    def normalize_path_in_list(self, path_list):
        for i in path_list:
            tmp = os.path.normpath(i)
            path_list[path_list.index(i)] = tmp
        return path_list
def get_relative_path(self, jsfile):
try:
# print "current src dir: "+self._current_src_dir
pos = jsfile.index(self._current_src_dir)
if pos != 0:
raise Exception("cannot find src directory in file path.")
# print "origin js path: "+ jsfile
# print "relative path: "+jsfile[len(self._current_src_dir)+1:]
return jsfile[len(self._current_src_dir)+1:]
except ValueError:
raise Exception("cannot find src directory in file path.")
def get_output_file_path(self, jsfile):
"""
Gets output file path by source js file
"""
# create folder for generated file
jsc_filepath = ""
relative_path = self.get_relative_path(jsfile)+"c"
jsc_filepath = os.path.join(self._dst_dir, relative_path)
dst_rootpath = os.path.split(jsc_filepath)[0]
try:
# print "creating dir (%s)" % (dst_rootpath)
os.makedirs(dst_rootpath)
except OSError:
            if not os.path.exists(dst_rootpath):
# There was an error on creation, so make sure we know about it
raise Exception("Error: cannot create folder in "+dst_rootpath)
# print "return jsc path: "+jsc_filepath
return jsc_filepath
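    # Illustrative sketch (hypothetical paths): continuing the example
    # above with _dst_dir 'out', 'src/app/main.js' maps to the bytecode
    # path 'out/app/main.jsc', and 'out/app' is created if missing.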
def compile_js(self, jsfile, output_file):
"""
Compiles js file
"""
print "compiling js (%s) to bytecode..." % (jsfile)
        jsbcc_exe_path = os.path.join(self._workingdir, "bin", "jsbcc")
ret = subprocess.call(jsbcc_exe_path + " " + jsfile+" "+output_file, shell=True)
if ret == 0:
self._success.append(jsfile)
else:
self._failure.append(jsfile)
print "----------------------------------------"
def compress_js(self):
"""
Compress all js files into one big file.
"""
jsfiles = ""
for src_dir in self._src_dir_arr:
# print "\n----------src:"+src_dir
jsfiles = jsfiles + " --js ".join(self._js_files[src_dir]) + " "
compiler_jar_path = os.path.join(self._workingdir, "bin", "compiler.jar")
command = "java -jar %s --js %s --js_output_file %s" % (compiler_jar_path, jsfiles, self._compressed_js_path)
print "\ncommand:"+command+"\n"
ret = subprocess.call(command, shell=True)
if ret == 0:
print "js files were compressed successfully..."
else:
print "js files were compressed unsuccessfully..."
def deep_iterate_dir(self, rootDir):
for lists in os.listdir(rootDir):
path = os.path.join(rootDir, lists)
if os.path.isdir(path):
self.deep_iterate_dir(path)
elif os.path.isfile(path):
if os.path.splitext(path)[1] == ".js":
self._js_files[self._current_src_dir].append(path)
def index_in_list(self, jsfile, l):
"""
Arguments:
- `self`:
- `jsfile`:
- `l`:
"""
index = -1
for el in l:
if jsfile.rfind(el) != -1:
# print "index:"+str(index+1)+", el:"+el
return index+1
index = index + 1
return -1
def js_filename_pre_order_compare(self, a, b):
"""
"""
pre_order = self._config["pre_order"]
index_a = self.index_in_list(a, pre_order)
index_b = self.index_in_list(b, pre_order)
is_a_in_list = index_a != -1
is_b_in_list = index_b != -1
if is_a_in_list and not is_b_in_list:
return -1
elif not is_a_in_list and is_b_in_list:
return 1
elif is_a_in_list and is_b_in_list:
if index_a > index_b:
return 1
elif index_a < index_b:
return -1
else:
return 0
else:
return 0
def js_filename_post_order_compare(self, a, b):
"""
"""
post_order = self._config["post_order"]
index_a = self.index_in_list(a, post_order)
index_b = self.index_in_list(b, post_order)
is_a_in_list = index_a != -1
is_b_in_list = index_b != -1
if is_a_in_list and not is_b_in_list:
return 1
elif not is_a_in_list and is_b_in_list:
return -1
elif is_a_in_list and is_b_in_list:
if index_a > index_b:
return 1
elif index_a < index_b:
return -1
else:
return 0
else:
return 0
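    # Illustrative sketch (hypothetical config): with
    #     pre_order  == ['cocos2d.js']
    #     post_order == ['main.js']
    # the two comparators above sort cocos2d.js to the front and main.js
    # to the back, leaving the other files' relative order untouched.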
def reorder_js_files(self):
        if self._config is None:
return
# print "before:"+str(self._js_files)
for src_dir in self._js_files:
# Remove file in exclude list
need_remove_arr = []
for jsfile in self._js_files[src_dir]:
for exclude_file in self._config["skip"]:
if jsfile.rfind(exclude_file) != -1:
# print "remove:" + jsfile
need_remove_arr.append(jsfile)
for need_remove in need_remove_arr:
self._js_files[src_dir].remove(need_remove)
            if self._config is not None:
pre_order = self._config["pre_order"]
self._js_files[src_dir].sort(cmp=self.js_filename_pre_order_compare)
self._js_files[src_dir].sort(cmp=self.js_filename_post_order_compare)
# print '-------------------'
# print "after:" + str(self._js_files)
def handle_all_js_files(self):
"""
Arguments:
- `self`:
"""
        if self._use_closure_compiler:
self.compress_js()
self.compile_js(self._compressed_js_path, self._compressed_jsc_path)
# remove tmp compressed file
os.remove(self._compressed_js_path)
else:
for src_dir in self._src_dir_arr:
for jsfile in self._js_files[src_dir]:
self._current_src_dir = src_dir
self.compile_js(jsfile, self.get_output_file_path(jsfile))
# will be called from the cocos2d.py script
def run(self, argv):
"""
"""
self.parse_args(argv)
# create output directory
try:
os.makedirs(self._dst_dir)
except OSError:
            if not os.path.exists(self._dst_dir):
raise Exception("Error: cannot create folder in "+self._dst_dir)
# deep iterate the src directory
for src_dir in self._src_dir_arr:
self._current_src_dir = src_dir
self._js_files[self._current_src_dir] = []
self.deep_iterate_dir(src_dir)
self.reorder_js_files()
self.handle_all_js_files()
print "\nCompilation finished, (%d) files succeed, (%d) files fail." % (len(self._success), len(self._failure))
if len(self._failure) > 0:
print "Failure files are:"
print self._failure
print "------------------------------"
def parse_args(self, argv):
"""
"""
from optparse import OptionParser
parser = OptionParser("usage: %prog jscompile -s src_dir -d dst_dir [-c -o COMPRESSED_FILENAME -j COMPILER_CONFIG]")
parser.add_option("-s", "--src",
action="append", type="string", dest="src_dir_arr",
help="source directory of js files needed to be compiled, supports mutiple source directory")
parser.add_option("-d", "--dst",
action="store", type="string", dest="dst_dir",
help="destination directory of js bytecode files to be stored")
parser.add_option("-c", "--use_closure_compiler",
action="store_true", dest="use_closure_compiler", default=False,
help="Whether to use closure compiler to compress all js files into just a big file")
parser.add_option("-o", "--output_compressed_filename",
action="store", dest="compressed_filename", default="game.min.js",
help="Only available when '-c' option was True")
parser.add_option("-j", "--compiler_config",
action="store", dest="compiler_config",
help="The configuration for closure compiler by using JSON, please refer to compiler_config_sample.json")
(options, args) = parser.parse_args(argv)
# print options
        if options.src_dir_arr is None:
raise Exception("Please set source folder by \"-s\" or \"-src\", run ./jscompile.py -h for the usage ")
        elif options.dst_dir is None:
raise Exception("Please set destination folder by \"-d\" or \"-dst\", run ./jscompile.py -h for the usage ")
else:
for src_dir in options.src_dir_arr:
                if not os.path.exists(src_dir):
raise Exception("Error: dir (%s) doesn't exist..." % (src_dir))
# script directory
workingdir = os.path.dirname(inspect.getfile(inspect.currentframe()))
self.init(options, workingdir)
| mit |
Manexware/medical | oemedical_data/__openerp__.py | 1 | 1120 | {
'name': 'OeMedical : Module Data',
'version': '1.0',
'author': "OeMEdical Team",
'category': 'Generic Modules/Others',
'depends': ['oemedical'],
'application': True,
'description': """
About OeMedical Data
---------------------
    Core data for oemedical is kept as a separate module to make it
    easier to localize the core data.
""",
"website": "http://launchpad.net/oemedical",
"licence": "AGPL v3",
"data": [
'data/recreational_drugs.xml',
'data/disease_genes.xml',
'data/medicament_categories.xml',
'data/WHO_products.xml',
'data/WHO_list_of_essential_medicines.xml',
'data/health_specialties.xml',
'data/ethnic_groups.xml',
'data/occupations.xml',
'data/dose_units.xml',
'data/drug_routes.xml',
'data/medicament_form.xml',
'data/medication_frequencies.xml',
'data/disease_categories.xml',
'data/diseases.xml',
],
"demo": [
],
'test':[
],
'css': [
],
'js': [
],
'qweb': [
],
"active": False
}
| gpl-2.0 |
CiscoSystems/tempest | tempest/stress/driver.py | 3 | 8594 | # Copyright 2013 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
import signal
import time
from six import moves
from tempest import auth
from tempest import clients
from tempest.common import ssh
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import importutils
from tempest.openstack.common import log as logging
from tempest.stress import cleanup
CONF = config.CONF
LOG = logging.getLogger(__name__)
processes = []
def do_ssh(command, host, ssh_user, ssh_key=None):
ssh_client = ssh.Client(host, ssh_user, key_filename=ssh_key)
try:
return ssh_client.exec_command(command)
except exceptions.SSHExecCommandFailed:
        LOG.error('do_ssh raised an exception. command: %s, host: %s.'
% (command, host))
return None
def _get_compute_nodes(controller, ssh_user, ssh_key=None):
"""
Returns a list of active compute nodes. List is generated by running
nova-manage on the controller.
"""
nodes = []
cmd = "nova-manage service list | grep ^nova-compute"
output = do_ssh(cmd, controller, ssh_user, ssh_key)
if not output:
return nodes
# For example: nova-compute xg11eth0 nova enabled :-) 2011-10-31 18:57:46
# This is fragile but there is, at present, no other way to get this info.
for line in output.split('\n'):
words = line.split()
if len(words) > 0 and words[4] == ":-)":
nodes.append(words[1])
return nodes
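# Illustrative sketch (hypothetical output line): for
#     'nova-compute xg11eth0 nova enabled :-) 2011-10-31 18:57:46'
# words[1] == 'xg11eth0' and words[4] == ':-)', so 'xg11eth0' is added
# to the list of active compute nodes.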
def _has_error_in_logs(logfiles, nodes, ssh_user, ssh_key=None,
stop_on_error=False):
"""
Detect errors in the nova log files on the controller and compute nodes.
"""
grep = 'egrep "ERROR|TRACE" %s' % logfiles
ret = False
for node in nodes:
errors = do_ssh(grep, node, ssh_user, ssh_key)
        if errors:
LOG.error('%s: %s' % (node, errors))
ret = True
if stop_on_error:
break
return ret
def sigchld_handler(signalnum, frame):
"""
Signal handler (only active if stop_on_error is True).
"""
for process in processes:
if (not process['process'].is_alive() and
process['process'].exitcode != 0):
signal.signal(signalnum, signal.SIG_DFL)
terminate_all_processes()
break
def terminate_all_processes(check_interval=20):
"""
Goes through the process list and terminates all child processes.
"""
LOG.info("Stopping all processes.")
for process in processes:
if process['process'].is_alive():
try:
process['process'].terminate()
except Exception:
pass
time.sleep(check_interval)
for process in processes:
if process['process'].is_alive():
try:
pid = process['process'].pid
LOG.warn("Process %d hangs. Send SIGKILL." % pid)
os.kill(pid, signal.SIGKILL)
except Exception:
pass
process['process'].join()
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
"""
Workload driver. Executes an action function against a nova-cluster.
"""
admin_manager = clients.AdminManager()
ssh_user = CONF.stress.target_ssh_user
ssh_key = CONF.stress.target_private_key_path
logfiles = CONF.stress.target_logfiles
log_check_interval = int(CONF.stress.log_check_interval)
default_thread_num = int(CONF.stress.default_thread_number_per_action)
if logfiles:
controller = CONF.stress.target_controller
computes = _get_compute_nodes(controller, ssh_user, ssh_key)
for node in computes:
do_ssh("rm -f %s" % logfiles, node, ssh_user, ssh_key)
for test in tests:
if test.get('use_admin', False):
manager = admin_manager
else:
manager = clients.Manager()
for p_number in moves.xrange(test.get('threads', default_thread_num)):
if test.get('use_isolated_tenants', False):
username = data_utils.rand_name("stress_user")
tenant_name = data_utils.rand_name("stress_tenant")
password = "pass"
identity_client = admin_manager.identity_client
tenant = identity_client.create_tenant(name=tenant_name)
identity_client.create_user(username,
password,
tenant['id'],
"email")
creds = auth.get_credentials(username=username,
password=password,
tenant_name=tenant_name)
manager = clients.Manager(credentials=creds)
test_obj = importutils.import_class(test['action'])
test_run = test_obj(manager, max_runs, stop_on_error)
kwargs = test.get('kwargs', {})
            test_run.setUp(**kwargs)
LOG.debug("calling Target Object %s" %
test_run.__class__.__name__)
mp_manager = multiprocessing.Manager()
shared_statistic = mp_manager.dict()
shared_statistic['runs'] = 0
shared_statistic['fails'] = 0
p = multiprocessing.Process(target=test_run.execute,
args=(shared_statistic,))
process = {'process': p,
'p_number': p_number,
'action': test_run.action,
'statistic': shared_statistic}
processes.append(process)
p.start()
if stop_on_error:
# NOTE(mkoderer): only the parent should register the handler
signal.signal(signal.SIGCHLD, sigchld_handler)
end_time = time.time() + duration
had_errors = False
try:
while True:
if max_runs is None:
remaining = end_time - time.time()
if remaining <= 0:
break
else:
remaining = log_check_interval
all_proc_term = True
for process in processes:
if process['process'].is_alive():
all_proc_term = False
break
if all_proc_term:
break
time.sleep(min(remaining, log_check_interval))
if stop_on_error:
if any([True for proc in processes
if proc['statistic']['fails'] > 0]):
break
if not logfiles:
continue
if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
stop_on_error):
had_errors = True
break
except KeyboardInterrupt:
LOG.warning("Interrupted, going to print statistics and exit ...")
if stop_on_error:
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
terminate_all_processes()
sum_fails = 0
sum_runs = 0
LOG.info("Statistics (per process):")
for process in processes:
if process['statistic']['fails'] > 0:
had_errors = True
sum_runs += process['statistic']['runs']
sum_fails += process['statistic']['fails']
LOG.info(" Process %d (%s): Run %d actions (%d failed)" %
(process['p_number'],
process['action'],
process['statistic']['runs'],
process['statistic']['fails']))
LOG.info("Summary:")
LOG.info("Run %d actions (%d failed)" %
(sum_runs, sum_fails))
if not had_errors and CONF.stress.full_clean_stack:
LOG.info("cleaning up")
cleanup.cleanup()
if had_errors:
return 1
else:
return 0
| apache-2.0 |
vipulroxx/sympy | sympy/plotting/experimental_lambdify.py | 22 | 25077 | """ rewrite of lambdify - This stuff is not stable at all.
It is for internal use in the new plotting module.
It may (will! see the Q'n'A in the source) be rewritten.
It's completely self contained. Especially it does not use lambdarepr.
It does not aim to replace the current lambdify. Most importantly it will never
ever support anything other than sympy expressions (no Matrices, dictionaries
and so on).
"""
from __future__ import print_function, division
import re
from sympy import Symbol, NumberSymbol, I, zoo, oo
from sympy.core.compatibility import exec_
from sympy.utilities.iterables import numbered_symbols
# We parse the expression string into a tree that identifies functions. Then
# we translate the names of the functions and we translate also some strings
# that are not names of functions (all this according to translation
# dictionaries).
# If the translation goes to another module (like numpy) the
# module is imported and 'func' is translated to 'module.func'.
# If a function can not be translated, the inner nodes of that part of the
# tree are not translated. So if we have Integral(sqrt(x)), sqrt is not
# translated to np.sqrt and the Integral does not crash.
# A namespace for all this is generated by crawling the (func, args) tree of
# the expression. The creation of this namespace involves many ugly
# workarounds.
# The namespace consists of all the names needed for the sympy expression and
# all the name of modules used for translation. Those modules are imported only
# as a name (import numpy as np) in order to keep the namespace small and
# manageable.
# Please, if there is a bug, do not try to fix it here! Rewrite this by using
# the method proposed in the last Q'n'A below. That way the new function will
# work just as well, be just as simple, but it won't need any new workarounds.
# If you insist on fixing it here, look at the workarounds in the function
# sympy_expression_namespace and in lambdify.
# Q: Why are you not using python abstract syntax tree?
# A: Because it is more complicated and not much more powerful in this case.
# Q: What if I have Symbol('sin') or g=Function('f')?
# A: You will break the algorithm. We should use srepr to defend against this?
# The problem with Symbol('sin') is that it will be printed as 'sin'. The
# parser will distinguish it from the function 'sin' because functions are
# detected thanks to the opening parenthesis, but the lambda expression won't
# understand the difference if we have also the sin function.
# The solution (complicated) is to use srepr and maybe ast.
# The problem with the g=Function('f') is that it will be printed as 'f' but in
# the global namespace we have only 'g'. But as the same printer is used in the
# constructor of the namespace there will be no problem.
# Q: What if some of the printers are not printing as expected?
# A: The algorithm won't work. You must use srepr for those cases. But even
# srepr may not print well. All problems with printers should be considered
# bugs.
# Q: What about _imp_ functions?
# A: Those are taken care for by evalf. A special case treatment will work
# faster but it's not worth the code complexity.
# Q: Will ast fix all possible problems?
# A: No. You will always have to use some printer. Even srepr may not work in
# some cases. But if the printer does not work, that should be considered a
# bug.
# Q: Is there some way to fix all possible problems?
# A: Probably by constructing our strings ourselves by traversing the (func,
# args) tree and creating the namespace at the same time. That actually sounds
# good.
from sympy.external import import_module
import warnings
# TODO: debugging output
class vectorized_lambdify(object):
""" Return a sufficiently smart, vectorized and lambdified function.
Returns only reals.
    This function uses experimental_lambdify to create a lambdified
expression ready to be used with numpy. Many of the functions in sympy
are not implemented in numpy so in some cases we resort to python cmath or
even to evalf.
The following translations are tried:
only numpy complex
- on errors raised by sympy trying to work with ndarray:
only python cmath and then vectorize complex128
When using python cmath there is no need for evalf or float/complex
because python cmath calls those.
This function never tries to mix numpy directly with evalf because numpy
does not understand sympy Float. If this is needed one can use the
float_wrap_evalf/complex_wrap_evalf options of experimental_lambdify or
better one can be explicit about the dtypes that numpy works with.
Check numpy bug http://projects.scipy.org/numpy/ticket/1013 to know what
types of errors to expect.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.lambda_func = experimental_lambdify(args, expr, use_np=True)
self.vector_func = self.lambda_func
self.failure = False
def __call__(self, *args):
np = import_module('numpy')
np_old_err = np.seterr(invalid='raise')
try:
temp_args = (np.array(a, dtype=np.complex) for a in args)
results = self.vector_func(*temp_args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
except Exception as e:
#DEBUG: print 'Error', type(e), e
if ((isinstance(e, TypeError)
and 'unhashable type: \'numpy.ndarray\'' in str(e))
or
(isinstance(e, ValueError)
and ('Invalid limits given:' in str(e)
or 'negative dimensions are not allowed' in str(e) # XXX
or 'sequence too large; must be smaller than 32' in str(e)))): # XXX
# Almost all functions were translated to numpy, but some were
                # left as sympy functions. They received an ndarray as an
# argument and failed.
# sin(ndarray(...)) raises "unhashable type"
# Integral(x, (x, 0, ndarray(...))) raises "Invalid limits"
# other ugly exceptions that are not well understood (marked with XXX)
# TODO: Cleanup the ugly special cases marked with xxx above.
# Solution: use cmath and vectorize the final lambda.
self.lambda_func = experimental_lambdify(
self.args, self.expr, use_python_cmath=True)
self.vector_func = np.vectorize(
self.lambda_func, otypes=[np.complex])
results = self.vector_func(*args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
else:
# Complete failure. One last try with no translations, only
# wrapping in complex((...).evalf()) and returning the real
# part.
if self.failure:
raise e
else:
self.failure = True
self.lambda_func = experimental_lambdify(
self.args, self.expr, use_evalf=True,
complex_wrap_evalf=True)
self.vector_func = np.vectorize(
self.lambda_func, otypes=[np.complex])
results = self.vector_func(*args)
results = np.ma.masked_where(
np.abs(results.imag) > 1e-7 * np.abs(results),
results.real, copy=False)
warnings.warn('The evaluation of the expression is'
                                  ' problematic. We are trying a fallback method'
' that may still work. Please report this as a bug.')
finally:
np.seterr(**np_old_err)
return results
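    # Illustrative sketch (hypothetical values): with the masking above, a
    # result of 1.0 + 1e-12j survives as 1.0, while 1.0 + 0.5j is masked
    # out, because abs(imag) > 1e-7 * abs(result) holds only for the latter.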
class lambdify(object):
"""Returns the lambdified function.
This function uses experimental_lambdify to create a lambdified
expression. It uses cmath to lambdify the expression. If the function
is not implemented in python cmath, python cmath calls evalf on those
functions.
"""
def __init__(self, args, expr):
self.args = args
self.expr = expr
self.lambda_func = experimental_lambdify(args, expr, use_evalf=True,
use_python_cmath=True)
self.failure = False
def __call__(self, args):
args = complex(args)
try:
#The result can be sympy.Float. Hence wrap it with complex type.
result = complex(self.lambda_func(args))
if abs(result.imag) > 1e-7 * abs(result):
return None
else:
return result.real
except Exception as e:
            # The exceptions raised by sympy and cmath are not consistent and
            # hence it is not possible to specify all the exceptions that
            # are to be caught. Presently there are no cases for which the code
            # reaches this block other than ZeroDivisionError and complex
            # comparison. Also the exception is caught only once. If the
# exception repeats itself,
# then it is not caught and the corresponding error is raised.
# XXX: Remove catching all exceptions once the plotting module
# is heavily tested.
if isinstance(e, ZeroDivisionError):
return None
elif isinstance(e, TypeError) and ('no ordering relation is'
' defined for complex numbers'
in str(e)):
self.lambda_func = experimental_lambdify(self.args, self.expr,
use_evalf=True,
use_python_math=True)
result = self.lambda_func(args.real)
return result
else:
if self.failure:
raise e
#Failure
#Try wrapping it with complex(..).evalf()
self.failure = True
self.lambda_func = experimental_lambdify(self.args, self.expr,
use_evalf=True,
complex_wrap_evalf=True)
result = self.lambda_func(args)
warnings.warn('The evaluation of the expression is'
                          ' problematic. We are trying a fallback method'
' that may still work. Please report this as a bug.')
if abs(result.imag) > 1e-7 * abs(result):
return None
else:
return result.real
def experimental_lambdify(*args, **kwargs):
l = Lambdifier(*args, **kwargs)
return l.lambda_func
class Lambdifier(object):
def __init__(self, args, expr, print_lambda=False, use_evalf=False,
float_wrap_evalf=False, complex_wrap_evalf=False,
use_np=False, use_python_math=False, use_python_cmath=False,
use_interval=False):
self.print_lambda = print_lambda
self.use_evalf = use_evalf
self.float_wrap_evalf = float_wrap_evalf
self.complex_wrap_evalf = complex_wrap_evalf
self.use_np = use_np
self.use_python_math = use_python_math
self.use_python_cmath = use_python_cmath
self.use_interval = use_interval
# Constructing the argument string
# - check
if not all([isinstance(a, Symbol) for a in args]):
raise ValueError('The arguments must be Symbols.')
# - use numbered symbols
syms = numbered_symbols(exclude=expr.free_symbols)
newargs = [next(syms) for i in args]
expr = expr.xreplace(dict(zip(args, newargs)))
argstr = ', '.join([str(a) for a in newargs])
del syms, newargs, args
# Constructing the translation dictionaries and making the translation
self.dict_str = self.get_dict_str()
self.dict_fun = self.get_dict_fun()
exprstr = str(expr)
newexpr = self.tree2str_translate(self.str2tree(exprstr))
# Constructing the namespaces
namespace = {}
namespace.update(self.sympy_atoms_namespace(expr))
namespace.update(self.sympy_expression_namespace(expr))
# XXX Workaround
# Ugly workaround because Pow(a,Half) prints as sqrt(a)
# and sympy_expression_namespace can not catch it.
from sympy import sqrt
namespace.update({'sqrt': sqrt})
# End workaround.
if use_python_math:
namespace.update({'math': __import__('math')})
if use_python_cmath:
namespace.update({'cmath': __import__('cmath')})
if use_np:
try:
namespace.update({'np': __import__('numpy')})
except ImportError:
raise ImportError(
'experimental_lambdify failed to import numpy.')
if use_interval:
namespace.update({'imath': __import__(
'sympy.plotting.intervalmath', fromlist=['intervalmath'])})
namespace.update({'math': __import__('math')})
# Construct the lambda
if self.print_lambda:
print(newexpr)
eval_str = 'lambda %s : ( %s )' % (argstr, newexpr)
exec_("from __future__ import division; MYNEWLAMBDA = %s" % eval_str, namespace)
self.lambda_func = namespace['MYNEWLAMBDA']
##############################################################################
# Dicts for translating from sympy to other modules
##############################################################################
###
# builtins
###
# Functions with different names in builtins
builtin_functions_different = {
'Min': 'min',
'Max': 'max',
'Abs': 'abs',
}
# Strings that should be translated
builtin_not_functions = {
'I': '1j',
'oo': '1e400',
}
###
# numpy
###
# Functions that are the same in numpy
numpy_functions_same = [
'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'exp', 'log',
'sqrt', 'floor', 'conjugate',
]
# Functions with different names in numpy
numpy_functions_different = {
"acos": "arccos",
"acosh": "arccosh",
"arg": "angle",
"asin": "arcsin",
"asinh": "arcsinh",
"atan": "arctan",
"atan2": "arctan2",
"atanh": "arctanh",
"ceiling": "ceil",
"im": "imag",
"ln": "log",
"Max": "amax",
"Min": "amin",
"re": "real",
"Abs": "abs",
}
# Strings that should be translated
numpy_not_functions = {
'pi': 'np.pi',
'oo': 'np.inf',
'E': 'np.e',
}
###
# python math
###
# Functions that are the same in math
math_functions_same = [
'sin', 'cos', 'tan', 'asin', 'acos', 'atan', 'atan2',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'exp', 'log', 'erf', 'sqrt', 'floor', 'factorial', 'gamma',
]
# Functions with different names in math
math_functions_different = {
'ceiling': 'ceil',
'ln': 'log',
'loggamma': 'lgamma'
}
# Strings that should be translated
math_not_functions = {
'pi': 'math.pi',
'E': 'math.e',
}
###
# python cmath
###
# Functions that are the same in cmath
cmath_functions_same = [
'sin', 'cos', 'tan', 'asin', 'acos', 'atan',
'sinh', 'cosh', 'tanh', 'asinh', 'acosh', 'atanh',
'exp', 'log', 'sqrt',
]
# Functions with different names in cmath
cmath_functions_different = {
'ln': 'log',
'arg': 'phase',
}
# Strings that should be translated
cmath_not_functions = {
'pi': 'cmath.pi',
'E': 'cmath.e',
}
###
# intervalmath
###
interval_not_functions = {
'pi': 'math.pi',
'E': 'math.e'
}
interval_functions_same = [
'sin', 'cos', 'exp', 'tan', 'atan', 'log',
'sqrt', 'cosh', 'sinh', 'tanh', 'floor',
'acos', 'asin', 'acosh', 'asinh', 'atanh',
'Abs', 'And', 'Or'
]
interval_functions_different = {
'Min': 'imin',
'Max': 'imax',
'ceiling': 'ceil',
}
###
# mpmath, etc
###
#TODO
###
# Create the final ordered tuples of dictionaries
###
# For strings
def get_dict_str(self):
dict_str = dict(self.builtin_not_functions)
if self.use_np:
dict_str.update(self.numpy_not_functions)
if self.use_python_math:
dict_str.update(self.math_not_functions)
if self.use_python_cmath:
dict_str.update(self.cmath_not_functions)
if self.use_interval:
dict_str.update(self.interval_not_functions)
return dict_str
# For functions
def get_dict_fun(self):
dict_fun = dict(self.builtin_functions_different)
if self.use_np:
for s in self.numpy_functions_same:
dict_fun[s] = 'np.' + s
for k, v in self.numpy_functions_different.items():
dict_fun[k] = 'np.' + v
if self.use_python_math:
for s in self.math_functions_same:
dict_fun[s] = 'math.' + s
for k, v in self.math_functions_different.items():
dict_fun[k] = 'math.' + v
if self.use_python_cmath:
for s in self.cmath_functions_same:
dict_fun[s] = 'cmath.' + s
for k, v in self.cmath_functions_different.items():
dict_fun[k] = 'cmath.' + v
if self.use_interval:
for s in self.interval_functions_same:
dict_fun[s] = 'imath.' + s
for k, v in self.interval_functions_different.items():
dict_fun[k] = 'imath.' + v
return dict_fun
##############################################################################
# The translator functions, tree parsers, etc.
##############################################################################
def str2tree(self, exprstr):
"""Converts an expression string to a tree.
Functions are represented by ('func_name(', tree_of_arguments).
Other expressions are (head_string, mid_tree, tail_str).
Expressions that do not contain functions are directly returned.
Examples:
>>> from sympy.abc import x, y, z
>>> from sympy import Integral, sin
>>> from sympy.plotting.experimental_lambdify import Lambdifier
>>> str2tree = Lambdifier([x], x).str2tree
>>> str2tree(str(Integral(x, (x, 1, y))))
('', ('Integral(', 'x, (x, 1, y)'), ')')
>>> str2tree(str(x+y))
'x + y'
>>> str2tree(str(x+y*sin(z)+1))
('x + y*', ('sin(', 'z'), ') + 1')
>>> str2tree('sin(y*(y + 1.1) + (sin(y)))')
('', ('sin(', ('y*(y + 1.1) + (', ('sin(', 'y'), '))')), ')')
"""
#matches the first 'function_name('
first_par = re.search(r'(\w+\()', exprstr)
if first_par is None:
return exprstr
else:
start = first_par.start()
end = first_par.end()
head = exprstr[:start]
func = exprstr[start:end]
tail = exprstr[end:]
count = 0
for i, c in enumerate(tail):
if c == '(':
count += 1
elif c == ')':
count -= 1
if count == -1:
break
func_tail = self.str2tree(tail[:i])
tail = self.str2tree(tail[i:])
return (head, (func, func_tail), tail)
@classmethod
def tree2str(cls, tree):
"""Converts a tree to string without translations.
Examples:
>>> from sympy.abc import x, y, z
>>> from sympy import Integral, sin
>>> from sympy.plotting.experimental_lambdify import Lambdifier
>>> str2tree = Lambdifier([x], x).str2tree
>>> tree2str = Lambdifier([x], x).tree2str
>>> tree2str(str2tree(str(x+y*sin(z)+1)))
'x + y*sin(z) + 1'
"""
if isinstance(tree, str):
return tree
else:
return ''.join(map(cls.tree2str, tree))
def tree2str_translate(self, tree):
"""Converts a tree to string with translations.
Function names are translated by translate_func.
Other strings are translated by translate_str.
"""
if isinstance(tree, str):
return self.translate_str(tree)
elif isinstance(tree, tuple) and len(tree) == 2:
return self.translate_func(tree[0][:-1], tree[1])
else:
return ''.join([self.tree2str_translate(t) for t in tree])
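    # Illustrative sketch (assuming use_np=True): for the tree built from
    # 'sin(x) + pi', translate_func rewrites 'sin(' to 'np.sin(' and
    # translate_str rewrites 'pi' to 'np.pi', giving 'np.sin(x) + np.pi'.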
def translate_str(self, estr):
"""Translate substrings of estr using in order the dictionaries in
dict_tuple_str."""
for pattern, repl in self.dict_str.items():
estr = re.sub(pattern, repl, estr)
return estr
def translate_func(self, func_name, argtree):
"""Translate function names and the tree of arguments.
If the function name is not in the dictionaries of dict_tuple_fun then the
function is surrounded by a float((...).evalf()).
The use of float is necessary as np.<function>(sympy.Float(..)) raises an
error."""
if func_name in self.dict_fun:
new_name = self.dict_fun[func_name]
argstr = self.tree2str_translate(argtree)
return new_name + '(' + argstr
else:
template = '(%s(%s)).evalf(' if self.use_evalf else '%s(%s'
if self.float_wrap_evalf:
template = 'float(%s)' % template
elif self.complex_wrap_evalf:
template = 'complex(%s)' % template
return template % (func_name, self.tree2str(argtree))
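    # Illustrative sketch (with use_evalf=True and float_wrap_evalf=True):
    # an untranslatable function such as Integral is wrapped so that numpy
    # never sees a sympy Float; 'Integral(x, (x, 1, 2))' translates to
    #     'float((Integral(x, (x, 1, 2))).evalf())'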
##############################################################################
# The namespace constructors
##############################################################################
@classmethod
def sympy_expression_namespace(cls, expr):
"""Traverses the (func, args) tree of an expression and creates a sympy
namespace. All other modules are imported only as a module name. That way
the namespace is not polluted and stays quite small. It probably causes many
more variable lookups and so it takes more time, but there are no tests on
that for the moment."""
if expr is None:
return {}
else:
funcname = str(expr.func)
# XXX Workaround
# Here we add an ugly workaround because str(func(x))
# is not always the same as str(func). Eg
# >>> str(Integral(x))
# "Integral(x)"
# >>> str(Integral)
# "<class 'sympy.integrals.integrals.Integral'>"
# >>> str(sqrt(x))
# "sqrt(x)"
# >>> str(sqrt)
# "<function sqrt at 0x3d92de8>"
# >>> str(sin(x))
# "sin(x)"
# >>> str(sin)
# "sin"
# Either one of those can be used but not all at the same time.
# The code considers the sin example as the right one.
regexlist = [
r'<class \'sympy[\w.]*?.([\w]*)\'>$',
# the example Integral
r'<function ([\w]*) at 0x[\w]*>$', # the example sqrt
]
for r in regexlist:
m = re.match(r, funcname)
if m is not None:
funcname = m.groups()[0]
# End of the workaround
# XXX debug: print funcname
args_dict = {}
for a in expr.args:
if (isinstance(a, Symbol) or
isinstance(a, NumberSymbol) or
a in [I, zoo, oo]):
continue
else:
args_dict.update(cls.sympy_expression_namespace(a))
args_dict.update({funcname: expr.func})
return args_dict
@staticmethod
def sympy_atoms_namespace(expr):
"""For no real reason this function is separated from
sympy_expression_namespace. It can be moved to it."""
atoms = expr.atoms(Symbol, NumberSymbol, I, zoo, oo)
d = {}
for a in atoms:
# XXX debug: print 'atom:' + str(a)
d[str(a)] = a
return d
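# A small end-to-end sketch of the translation pipeline above (illustrative
# only; it mirrors the doctests in str2tree/tree2str and assumes the default
# Lambdifier settings):
def _translate_pipeline_demo():
    from sympy.abc import x
    from sympy import sin
    lam = Lambdifier([x], sin(x) + 1)
    tree = lam.str2tree(str(sin(x) + 1))
    # Returns the translated lambda body, with 'sin' mapped to whichever
    # math module the Lambdifier was configured to use.
    return lam.tree2str_translate(tree)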
| bsd-3-clause |
glwu/python-for-android | python3-alpha/extra_modules/pyxmpp2/utils.py | 46 | 3459 | #
# (C) Copyright 2003-2011 Jacek Konieczny <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License Version
# 2.1 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""Utility functions for the pyxmpp package."""
__docformat__ = "restructuredtext en"
def xml_elements_equal(element1, element2, ignore_level1_cdata = False):
"""Check if two XML elements are equal.
:Parameters:
- `element1`: the first element to compare
- `element2`: the other element to compare
- `ignore_level1_cdata`: if direct text children of the elements
should be ignored for the comparison
:Types:
- `element1`: :etree:`ElementTree.Element`
- `element2`: :etree:`ElementTree.Element`
- `ignore_level1_cdata`: `bool`
:Returntype: `bool`
"""
# pylint: disable-msg=R0911
if None in (element1, element2) or element1.tag != element2.tag:
return False
attrs1 = list(element1.items())
attrs1.sort()
attrs2 = list(element2.items())
attrs2.sort()
if not ignore_level1_cdata:
if element1.text != element2.text:
return False
if attrs1 != attrs2:
return False
if len(element1) != len(element2):
return False
for child1, child2 in zip(element1, element2):
if child1.tag != child2.tag:
return False
if not ignore_level1_cdata:
if child1.text != child2.text:  # compare the children's text, not the parents'
return False
if not xml_elements_equal(child1, child2):
return False
return True
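# A minimal usage sketch (the XML snippets are made up and only illustrate
# the comparison semantics: attribute order does not matter, and direct text
# can be ignored with ignore_level1_cdata):
def _xml_elements_equal_demo():
    from xml.etree import ElementTree
    e1 = ElementTree.fromstring('<a x="1" y="2">hi<b/></a>')
    e2 = ElementTree.fromstring('<a y="2" x="1">hi<b/></a>')
    e3 = ElementTree.fromstring('<a x="1" y="2">bye<b/></a>')
    assert xml_elements_equal(e1, e2)
    assert not xml_elements_equal(e1, e3)
    assert xml_elements_equal(e1, e3, ignore_level1_cdata=True)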
import time
import datetime
_MINUTE = datetime.timedelta(minutes = 1)
_NULLDELTA = datetime.timedelta()
def datetime_utc_to_local(utc):
"""
An ugly hack to convert naive :std:`datetime.datetime` object containing
UTC time to a naive :std:`datetime.datetime` object with local time.
It seems standard Python 2.3 library doesn't provide any better way to
do that.
"""
# pylint: disable-msg=C0103
ts = time.time()
cur = datetime.datetime.fromtimestamp(ts)
cur_utc = datetime.datetime.utcfromtimestamp(ts)
offset = cur - cur_utc
t = utc
d = datetime.timedelta(hours = 2)
while d > _MINUTE:
local = t + offset
tm = local.timetuple()
tm = tm[0:8] + (0, )
ts = time.mktime(tm)
u = datetime.datetime.utcfromtimestamp(ts)
diff = u - utc
if diff < _MINUTE and diff > -_MINUTE:
break
if diff > _NULLDELTA:
offset -= d
else:
offset += d
d //= 2
return local
def datetime_local_to_utc(local):
"""
Simple function to convert naive :std:`datetime.datetime` object containing
local time to a naive :std:`datetime.datetime` object with UTC time.
"""
timestamp = time.mktime(local.timetuple())
return datetime.datetime.utcfromtimestamp(timestamp)
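# A quick round-trip sketch (illustrative; the exact values depend on the
# local timezone, and equality may drift near DST transitions):
def _utc_local_roundtrip_demo():
    now = datetime.datetime.now().replace(microsecond=0)
    utc = datetime_local_to_utc(now)
    print(now, '->', utc, '->', datetime_utc_to_local(utc))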
# vi: sts=4 et sw=4
| apache-2.0 |
Lambdanaut/crits | crits/raw_data/api.py | 21 | 4479 | from django.core.urlresolvers import reverse
from tastypie import authorization
from tastypie.authentication import MultiAuthentication
from crits.raw_data.raw_data import RawData
from crits.raw_data.handlers import handle_raw_data_file
from crits.core.api import CRITsApiKeyAuthentication, CRITsSessionAuthentication
from crits.core.api import CRITsSerializer, CRITsAPIResource
class RawDataResource(CRITsAPIResource):
"""
Class to handle everything related to the RawData API.
Currently supports GET, POST, and PATCH.
"""
class Meta:
object_class = RawData
allowed_methods = ('get', 'post', 'patch')
resource_name = "raw_data"
authentication = MultiAuthentication(CRITsApiKeyAuthentication(),
CRITsSessionAuthentication())
authorization = authorization.Authorization()
serializer = CRITsSerializer()
def get_object_list(self, request):
"""
Use the CRITsAPIResource to get our objects but provide the class to get
the objects from.
:param request: The incoming request.
:type request: :class:`django.http.HttpRequest`
:returns: Resulting objects in the specified format (JSON by default).
"""
return super(RawDataResource, self).get_object_list(request, RawData)
def obj_create(self, bundle, **kwargs):
"""
Handles creating RawData through the API.
:param bundle: Bundle containing the information to create the RawData.
:type bundle: Tastypie Bundle object.
:returns: HttpResponse.
"""
analyst = bundle.request.user.username
type_ = bundle.data.get('upload_type', None)
content = {'return_code': 1,
'type': 'RawData'}
if not type_:
content['message'] = 'Must provide an upload type.'
self.crits_response(content)
if type_ not in ('metadata', 'file'):
content['message'] = 'Not a valid upload type.'
self.crits_response(content)
if type_ == 'metadata':
data = bundle.data.get('data', None)
elif type_ == 'file':
file_ = bundle.data.get('filedata', None)
if not file_:
content['message'] = "Upload type of 'file' but no file uploaded."
self.crits_response(content)
data = file_.read()
source = bundle.data.get('source', None)
description = bundle.data.get('description', '')
title = bundle.data.get('title', None)
data_type = bundle.data.get('data_type', None)
tool_name = bundle.data.get('tool_name', '')
tool_version = bundle.data.get('tool_version', '')
tool_details = bundle.data.get('tool_details', '')
link_id = bundle.data.get('link_id', None)
copy_rels = bundle.data.get('copy_relationships', False)
method = bundle.data.get('method', None) or 'Upload'
reference = bundle.data.get('reference', None)
bucket_list = bundle.data.get('bucket_list', None)
ticket = bundle.data.get('ticket', None)
if not title:
content['message'] = "Must provide a title."
self.crits_response(content)
if not data_type:
content['message'] = "Must provide a data type."
self.crits_response(content)
result = handle_raw_data_file(data, source, analyst,
description, title, data_type,
tool_name, tool_version, tool_details,
link_id,
method=method,
reference=reference,
copy_rels=copy_rels,
bucket_list=bucket_list,
ticket=ticket)
if result.get('message'):
content['message'] = result.get('message')
if result.get('_id'):
url = reverse('api_dispatch_detail',
kwargs={'resource_name': 'raw_data',
'api_name': 'v1',
'pk': str(result.get('_id'))})
content['url'] = url
content['id'] = str(result.get('_id'))
if result['success']:
content['return_code'] = 0
self.crits_response(content)
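# Hypothetical client-side sketch of hitting this endpoint (the URL and
# credentials are made up; field names mirror obj_create above, and
# authentication follows the usual tastypie api_key convention):
def _upload_raw_data_example():
    import requests
    payload = {
        'api_key': 'SECRET', 'username': 'analyst1',
        'upload_type': 'metadata',
        'data': 'raw text to store',
        'source': 'example_source',
        'title': 'example title',
        'data_type': 'Text',
    }
    return requests.post('http://localhost:8080/api/v1/raw_data/', data=payload)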
| mit |
jmhsi/justin_tinker | data_science/courses/learning_dl_packages/models/research/attention_ocr/python/utils.py | 16 | 2879 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to support building models for StreetView text transcription."""
import tensorflow as tf
from tensorflow.contrib import slim
def logits_to_log_prob(logits):
"""Computes log probabilities using numerically stable trick.
This uses two numerical stability tricks:
1) softmax(x) = softmax(x - c) where c is a constant applied to all
arguments. If we set c = max(x) then the softmax is more numerically
stable.
2) log softmax(x) is not numerically stable, but we can stabilize it
by using the identity log softmax(x) = x - log sum exp(x)
Args:
logits: Tensor of arbitrary shape whose last dimension contains logits.
Returns:
A tensor of the same shape as the input, but with corresponding log
probabilities.
"""
with tf.variable_scope('log_probabilities'):
reduction_indices = len(logits.shape.as_list()) - 1
max_logits = tf.reduce_max(
logits, reduction_indices=reduction_indices, keep_dims=True)
safe_logits = tf.subtract(logits, max_logits)
sum_exp = tf.reduce_sum(
tf.exp(safe_logits),
reduction_indices=reduction_indices,
keep_dims=True)
log_probs = tf.subtract(safe_logits, tf.log(sum_exp))
return log_probs
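# A small NumPy sanity check of the same max-subtraction trick (an
# illustrative sketch, not part of the model code): the stabilized log
# softmax matches the naive computation on values where the naive
# version does not overflow.
def _log_prob_numpy_check():
    import numpy as np
    logits = np.array([[1.0, 2.0, 3.0]])
    naive = np.log(np.exp(logits) / np.exp(logits).sum(axis=-1, keepdims=True))
    m = logits.max(axis=-1, keepdims=True)
    stable = (logits - m) - np.log(np.exp(logits - m).sum(axis=-1, keepdims=True))
    assert np.allclose(naive, stable)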
def variables_to_restore(scope=None, strip_scope=False):
"""Returns a list of variables to restore for the specified list of methods.
It is supposed that variable name starts with the method's scope (a prefix
returned by _method_scope function).
Args:
methods_names: a list of names of configurable methods.
strip_scope: if True will return variable names without method's scope.
If methods_names is None will return names unchanged.
model_scope: a scope for a whole model.
Returns:
a dictionary mapping variable names to variables for restore.
"""
if scope:
variable_map = {}
method_variables = slim.get_variables_to_restore(include=[scope])
for var in method_variables:
if strip_scope:
var_name = var.op.name[len(scope) + 1:]
else:
var_name = var.op.name
variable_map[var_name] = var
return variable_map
else:
return {v.op.name: v for v in slim.get_variables_to_restore()}
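# Hypothetical usage sketch (the scope name and checkpoint path are made up;
# assumes the TF1-style Saver/Session API used elsewhere in this file):
def _restore_scope_demo(checkpoint_path):
    variable_map = variables_to_restore(scope='conv_tower', strip_scope=True)
    saver = tf.train.Saver(var_list=variable_map)
    sess = tf.Session()
    saver.restore(sess, checkpoint_path)
    return sess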
| apache-2.0 |
madphysicist/numpy | numpy/core/_add_newdocs.py | 2 | 184288 | """
This is only meant to add docs to objects defined in C-extension modules.
The purpose is to allow easier editing of the docstrings without
requiring a re-compile.
NOTE: Many of the methods of ndarray have corresponding functions.
If you update these docstrings, please keep also the ones in
core/fromnumeric.py, core/defmatrix.py up-to-date.
"""
from numpy.core.function_base import add_newdoc
from numpy.core.overrides import array_function_like_doc
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################
add_newdoc('numpy.core', 'flatiter',
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
It allows iterating over the array as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
ndarray.flat : Return a flat iterator over an array.
ndarray.flatten : Returns a flattened copy of an array.
Notes
-----
A `flatiter` iterator can not be constructed directly from Python code
by calling the `flatiter` constructor.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
<class 'numpy.flatiter'>
>>> for item in fl:
... print(item)
...
0
1
2
3
4
5
>>> fl[2:4]
array([2, 3])
""")
# flatiter attributes
add_newdoc('numpy.core', 'flatiter', ('base',
"""
A reference to the array that is iterated over.
Examples
--------
>>> x = np.arange(5)
>>> fl = x.flat
>>> fl.base is x
True
"""))
add_newdoc('numpy.core', 'flatiter', ('coords',
"""
An N-dimensional tuple of current coordinates.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.coords
(0, 0)
>>> next(fl)
0
>>> fl.coords
(0, 1)
"""))
add_newdoc('numpy.core', 'flatiter', ('index',
"""
Current flat index into the array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl.index
0
>>> next(fl)
0
>>> fl.index
1
"""))
# flatiter functions
add_newdoc('numpy.core', 'flatiter', ('__array__',
"""__array__(type=None) Get array from iterator
"""))
add_newdoc('numpy.core', 'flatiter', ('copy',
"""
copy()
Get a copy of the iterator as a 1-D array.
Examples
--------
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> fl = x.flat
>>> fl.copy()
array([0, 1, 2, 3, 4, 5])
"""))
###############################################################################
#
# nditer
#
###############################################################################
add_newdoc('numpy.core', 'nditer',
"""
nditer(op, flags=None, op_flags=None, op_dtypes=None, order='K', casting='safe', op_axes=None, itershape=None, buffersize=0)
Efficient multi-dimensional iterator object to iterate over arrays.
To get started using this object, see the
:ref:`introductory guide to array iteration <arrays.nditer>`.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
flags : sequence of str, optional
Flags to control the behavior of the iterator.
* ``buffered`` enables buffering when required.
* ``c_index`` causes a C-order index to be tracked.
* ``f_index`` causes a Fortran-order index to be tracked.
* ``multi_index`` causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
* ``common_dtype`` causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
* ``copy_if_overlap`` causes the iterator to determine if read
operands have overlap with write operands, and make temporary
copies as necessary to avoid overlap. False positives (needless
copying) are possible in some cases.
* ``delay_bufalloc`` delays allocation of the buffers until
a reset() call is made. Allows ``allocate`` operands to
be initialized before their values are copied into the buffers.
* ``external_loop`` causes the ``values`` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
* ``grow_inner`` allows the ``value`` array sizes to be made
larger than the buffer size when both ``buffered`` and
``external_loop`` is used.
* ``ranged`` allows the iterator to be restricted to a sub-range
of the iterindex values.
* ``refs_ok`` enables iteration of reference types, such as
object arrays.
* ``reduce_ok`` enables iteration of ``readwrite`` operands
which are broadcasted, also known as reduction operands.
* ``zerosize_ok`` allows `itersize` to be zero.
op_flags : list of list of str, optional
This is a list of flags for each operand. At minimum, one of
``readonly``, ``readwrite``, or ``writeonly`` must be specified.
* ``readonly`` indicates the operand will only be read from.
* ``readwrite`` indicates the operand will be read from and written to.
* ``writeonly`` indicates the operand will only be written to.
* ``no_broadcast`` prevents the operand from being broadcasted.
* ``contig`` forces the operand data to be contiguous.
* ``aligned`` forces the operand data to be aligned.
* ``nbo`` forces the operand data to be in native byte order.
* ``copy`` allows a temporary read-only copy if required.
* ``updateifcopy`` allows a temporary read-write copy if required.
* ``allocate`` causes the array to be allocated if it is None
in the ``op`` parameter.
* ``no_subtype`` prevents an ``allocate`` operand from using a subtype.
* ``arraymask`` indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
* ``writemasked`` indicates that only elements where the chosen
``arraymask`` operand is True will be written to.
* ``overlap_assume_elementwise`` can be used to mark operands that are
accessed only in the iterator order, to allow less conservative
copying when ``copy_if_overlap`` is present.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
order : {'C', 'F', 'A', 'K'}, optional
Controls the iteration order. 'C' means C order, 'F' means
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
affects the element memory order of ``allocate`` operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur when making a copy
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
If provided, is a list of ints or None for each operands.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
treated as `newaxis`.
itershape : tuple of ints, optional
The desired shape of the iterator. This allows ``allocate`` operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
buffersize : int, optional
When buffering is enabled, controls the size of the temporary
buffers. Set to 0 for the default value.
Attributes
----------
dtypes : tuple of dtype(s)
The data types of the values provided in `value`. This may be
different from the operand data types if buffering is enabled.
Valid only before the iterator is closed.
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
If True, the iterator was created with the ``delay_bufalloc`` flag,
and no reset() function was called on it yet.
has_index : bool
If True, the iterator was created with either the ``c_index`` or
the ``f_index`` flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
If True, the iterator was created with the ``multi_index`` flag,
and the property `multi_index` can be used to retrieve it.
index
When the ``c_index`` or ``f_index`` flag was used, this property
provides access to the index. Raises a ValueError if accessed
and ``has_index`` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
iterindex : int
An index which matches the order of iteration.
itersize : int
Size of the iterator.
itviews
Structured view(s) of `operands` in memory, matching the reordered
and optimized iterator access pattern. Valid only before the iterator
is closed.
multi_index
When the ``multi_index`` flag was used, this property
provides access to the index. Raises a ValueError if accessed
and ``has_multi_index`` is False.
ndim : int
The dimensions of the iterator.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
The array(s) to be iterated over. Valid only before the iterator is
closed.
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
Value of ``operands`` at current iteration. Normally, this is a
tuple of array scalars, but if the flag ``external_loop`` is used,
it is a tuple of one dimensional arrays.
Notes
-----
`nditer` supersedes `flatiter`. The iterator implementation behind
`nditer` is also exposed by the NumPy C API.
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
if you need the coordinates or index of an iterator, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
Python iterator protocol:
>>> def iter_add_py(x, y, out=None):
... addop = np.add
... it = np.nditer([x, y, out], [],
... [['readonly'], ['readonly'], ['writeonly','allocate']])
... with it:
... for (a, b, c) in it:
... addop(a, b, out=c)
... return it.operands[2]
Here is the same function, but following the C-style pattern:
>>> def iter_add(x, y, out=None):
... addop = np.add
... it = np.nditer([x, y, out], [],
... [['readonly'], ['readonly'], ['writeonly','allocate']])
... with it:
... while not it.finished:
... addop(it[0], it[1], out=it[2])
... it.iternext()
... return it.operands[2]
Here is an example outer product function:
>>> def outer_it(x, y, out=None):
... mulop = np.multiply
... it = np.nditer([x, y, out], ['external_loop'],
... [['readonly'], ['readonly'], ['writeonly', 'allocate']],
... op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
... [-1] * x.ndim + list(range(y.ndim)),
... None])
... with it:
... for (a, b, c) in it:
... mulop(a, b, out=c)
... return it.operands[2]
>>> a = np.arange(2)+1
>>> b = np.arange(3)+1
>>> outer_it(a,b)
array([[1, 2, 3],
[2, 4, 6]])
Here is an example function which operates like a "lambda" ufunc:
>>> def luf(lambdaexpr, *args, **kwargs):
... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)'''
... nargs = len(args)
... op = (kwargs.get('out',None),) + args
... it = np.nditer(op, ['buffered','external_loop'],
... [['writeonly','allocate','no_broadcast']] +
... [['readonly','nbo','aligned']]*nargs,
... order=kwargs.get('order','K'),
... casting=kwargs.get('casting','safe'),
... buffersize=kwargs.get('buffersize',0))
... while not it.finished:
... it[0] = lambdaexpr(*it[1:])
... it.iternext()
... return it.operands[0]
>>> a = np.arange(5)
>>> b = np.ones(5)
>>> luf(lambda i,j:i*i + j/2, a, b)
array([ 0.5, 1.5, 4.5, 9.5, 16.5])
If operand flags `"writeonly"` or `"readwrite"` are used the
operands may be views into the original data with the
`WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a
context manager or the `nditer.close` method must be called before
using the result. The temporary data will be written back to the
original data when the `__exit__` function is called but not before:
>>> a = np.arange(6, dtype='i4')[::-2]
>>> with np.nditer(a, [],
... [['writeonly', 'updateifcopy']],
... casting='unsafe',
... op_dtypes=[np.dtype('f4')]) as i:
... x = i.operands[0]
... x[:] = [-1, -2, -3]
... # a still unchanged here
>>> a, x
(array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
It is important to note that once the iterator is exited, dangling
references (like `x` in the example) may or may not share data with
the original data `a`. If writeback semantics were active, i.e. if
`x.base.flags.writebackifcopy` is `True`, then exiting the iterator
will sever the connection between `x` and `a`, writing to `x` will
no longer write to `a`. If writeback semantics are not active, then
`x.data` will still point at some part of `a.data`, and writing to
one will affect the other.
Context management and the `close` method appeared in version 1.15.0.
""")
# nditer methods
add_newdoc('numpy.core', 'nditer', ('copy',
"""
copy()
Get a copy of the iterator in its current state.
Examples
--------
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
>>> next(it)
(array(0), array(1))
>>> it2 = it.copy()
>>> next(it2)
(array(1), array(2))
"""))
add_newdoc('numpy.core', 'nditer', ('operands',
"""
operands[`Slice`]
The array(s) to be iterated over. Valid only before the iterator is closed.
"""))
add_newdoc('numpy.core', 'nditer', ('debug_print',
"""
debug_print()
Print the current state of the `nditer` instance and debug info to stdout.
"""))
add_newdoc('numpy.core', 'nditer', ('enable_external_loop',
"""
enable_external_loop()
When the "external_loop" was not used during construction, but
is desired, this modifies the iterator to behave as if the flag
was specified.
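Examples
--------
An illustrative sketch: with the external loop enabled, a contiguous
array is delivered as one-dimensional chunks instead of
zero-dimensional scalars.
>>> it = np.nditer(np.arange(6).reshape(2, 3))
>>> it.enable_external_loop()
>>> next(it)
array([0, 1, 2, 3, 4, 5])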
"""))
add_newdoc('numpy.core', 'nditer', ('iternext',
"""
iternext()
Check whether iterations are left, and perform a single internal iteration
without returning the result. Used in the C-style do-while pattern.
For an example, see `nditer`.
Returns
-------
iternext : bool
Whether or not there are iterations left.
"""))
add_newdoc('numpy.core', 'nditer', ('remove_axis',
"""
remove_axis(i)
Removes axis `i` from the iterator. Requires that the flag "multi_index"
be enabled.
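Examples
--------
A small sketch (dropping axis 0 of a (2, 3) iteration leaves 3 steps):
>>> it = np.nditer(np.arange(6).reshape(2, 3), flags=['multi_index'])
>>> it.remove_axis(0)
>>> it.itersize
3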
"""))
add_newdoc('numpy.core', 'nditer', ('remove_multi_index',
"""
remove_multi_index()
When the "multi_index" flag was specified, this removes it, allowing
the internal iteration structure to be optimized further.
"""))
add_newdoc('numpy.core', 'nditer', ('reset',
"""
reset()
Reset the iterator to its initial state.
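Examples
--------
A minimal sketch of re-running an exhausted iterator:
>>> it = np.nditer(np.arange(3))
>>> [int(x) for x in it]
[0, 1, 2]
>>> it.reset()
>>> [int(x) for x in it]
[0, 1, 2]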
"""))
add_newdoc('numpy.core', 'nested_iters',
"""
Create nditers for use in nested loops
Create a tuple of `nditer` objects which iterate in nested loops over
different axes of the op argument. The first iterator is used in the
outermost loop, the last in the innermost loop. Advancing one will change
the subsequent iterators to point at its new element.
Parameters
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
axes : list of list of int
Each item is used as an "op_axes" argument to an nditer
flags, op_flags, op_dtypes, order, casting, buffersize (optional)
See `nditer` parameters of the same name
Returns
-------
iters : tuple of nditer
An nditer for each item in `axes`, outermost first
See Also
--------
nditer
Examples
--------
Basic usage. Note how y is the "flattened" version of
[a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified
the first iter's axes as [1]
>>> a = np.arange(12).reshape(2, 3, 2)
>>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"])
>>> for x in i:
... print(i.multi_index)
... for y in j:
... print('', j.multi_index, y)
(0,)
(0, 0) 0
(0, 1) 1
(1, 0) 6
(1, 1) 7
(1,)
(0, 0) 2
(0, 1) 3
(1, 0) 8
(1, 1) 9
(2,)
(0, 0) 4
(0, 1) 5
(1, 0) 10
(1, 1) 11
""")
add_newdoc('numpy.core', 'nditer', ('close',
"""
close()
Resolve all writeback semantics in writeable operands.
.. versionadded:: 1.15.0
See Also
--------
:ref:`nditer-context-manager`
"""))
###############################################################################
#
# broadcast
#
###############################################################################
add_newdoc('numpy.core', 'broadcast',
"""
Produce an object that mimics broadcasting.
Parameters
----------
in1, in2, ... : array_like
Input parameters.
Returns
-------
b : broadcast object
Broadcast the input parameters against one another, and
return an object that encapsulates the result.
Amongst others, it has ``shape`` and ``nd`` properties, and
may be used as an iterator.
See Also
--------
broadcast_arrays
broadcast_to
broadcast_shapes
Examples
--------
Manually adding two vectors, using broadcasting:
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
array([[5., 6., 7.],
[6., 7., 8.],
[7., 8., 9.]])
Compare against built-in broadcasting:
>>> x + y
array([[5, 6, 7],
[6, 7, 8],
[7, 8, 9]])
""")
# attributes
add_newdoc('numpy.core', 'broadcast', ('index',
"""
current index in broadcasted result
Examples
--------
>>> x = np.array([[1], [2], [3]])
>>> y = np.array([4, 5, 6])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> next(b), next(b), next(b)
((1, 4), (1, 5), (1, 6))
>>> b.index
3
"""))
add_newdoc('numpy.core', 'broadcast', ('iters',
"""
tuple of iterators along ``self``'s "components."
Returns a tuple of `numpy.flatiter` objects, one for each "component"
of ``self``.
See Also
--------
numpy.flatiter
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
>>> next(row), next(col)
(1, 4)
"""))
add_newdoc('numpy.core', 'broadcast', ('ndim',
"""
Number of dimensions of broadcasted result. Alias for `nd`.
.. versionadded:: 1.12.0
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.ndim
2
"""))
add_newdoc('numpy.core', 'broadcast', ('nd',
"""
Number of dimensions of broadcasted result. For code intended for NumPy
1.12.0 and later the more consistent `ndim` is preferred.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.nd
2
"""))
add_newdoc('numpy.core', 'broadcast', ('numiter',
"""
Number of iterators possessed by the broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.numiter
2
"""))
add_newdoc('numpy.core', 'broadcast', ('shape',
"""
Shape of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.shape
(3, 3)
"""))
add_newdoc('numpy.core', 'broadcast', ('size',
"""
Total size of broadcasted result.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.size
9
"""))
add_newdoc('numpy.core', 'broadcast', ('reset',
"""
reset()
Reset the broadcasted result's iterator(s).
Parameters
----------
None
Returns
-------
None
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
>>> next(b), next(b), next(b)
((1, 4), (2, 4), (3, 4))
>>> b.index
3
>>> b.reset()
>>> b.index
0
"""))
###############################################################################
#
# numpy functions
#
###############################################################################
add_newdoc('numpy.core.multiarray', 'array',
"""
array(object, dtype=None, *, copy=True, order='K', subok=False, ndmin=0,
like=None)
Create an array.
Parameters
----------
object : array_like
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
sequence.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
or if a copy is needed to satisfy any of the other requirements
(`dtype`, `order`, etc.).
order : {'K', 'A', 'C', 'F'}, optional
Specify the memory layout of the array. If object is not an array, the
newly created array will be in C order (row major) unless 'F' is
specified, in which case it will be in Fortran order (column major).
If object is an array the following holds.
===== ========= ===================================================
order no copy copy=True
===== ========= ===================================================
'K' unchanged F & C order preserved, otherwise most similar order
'A' unchanged F order if input is F and not C, otherwise C order
'C' C order C order
'F' F order F order
===== ========= ===================================================
When ``copy=False`` and a copy is made for other reasons, the result is
the same as if ``copy=True``, with some exceptions for 'A', see the
Notes section. The default order is 'K'.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
ndmin : int, optional
Specifies the minimum number of dimensions that the resulting
array should have. Ones will be pre-pended to the shape as
needed to meet this requirement.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
When order is 'A' and `object` is an array in neither 'C' nor 'F' order,
and a copy is forced by a change in dtype, then the order of the result is
not necessarily 'C' as expected. This is likely a bug.
Examples
--------
>>> np.array([1, 2, 3])
array([1, 2, 3])
Upcasting:
>>> np.array([1, 2, 3.0])
array([ 1., 2., 3.])
More than one dimension:
>>> np.array([[1, 2], [3, 4]])
array([[1, 2],
[3, 4]])
Minimum dimensions 2:
>>> np.array([1, 2, 3], ndmin=2)
array([[1, 2, 3]])
Type provided:
>>> np.array([1, 2, 3], dtype=complex)
array([ 1.+0.j, 2.+0.j, 3.+0.j])
Data-type consisting of more than one element:
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')])
>>> x['a']
array([1, 3])
Creating an array from sub-classes:
>>> np.array(np.mat('1 2; 3 4'))
array([[1, 2],
[3, 4]])
>>> np.array(np.mat('1 2; 3 4'), subok=True)
matrix([[1, 2],
[3, 4]])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'empty',
"""
empty(shape, dtype=float, order='C', *, like=None)
Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
Desired output data-type for the array, e.g, `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and
order. Object arrays will be initialized to None.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Notes
-----
`empty`, unlike `zeros`, does not set the array values to zero,
and may therefore be marginally faster. On the other hand, it requires
the user to manually set all the values in the array, and should be
used with caution.
Examples
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
[ 2.13182611e-314, 3.06959433e-309]]) #uninitialized
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
[ 496041986, 19249760]]) #uninitialized
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
Return a new scalar array of the given type initialized with obj.
This function is meant mainly for pickle support. `dtype` must be a
valid data-type descriptor. If `dtype` corresponds to an object
descriptor, then `obj` can be any object, otherwise `obj` must be a
string. If `obj` is not given, it will be interpreted as None for object
type and as zeros for all other types.
""")
add_newdoc('numpy.core.multiarray', 'zeros',
"""
zeros(shape, dtype=float, order='C', *, like=None)
Return a new array of given shape and type, filled with zeros.
Parameters
----------
shape : int or tuple of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: 'C'
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and order.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
full : Return a new array of given shape filled with value.
Examples
--------
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
array([[ 0.],
[ 0.]])
>>> s = (2,2)
>>> np.zeros(s)
array([[ 0., 0.],
[ 0., 0.]])
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype
array([(0, 0), (0, 0)],
dtype=[('x', '<i4'), ('y', '<i4')])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'set_typeDict',
"""set_typeDict(dict)
Set the internal dictionary that can look up an array type using a
registered code.
""")
add_newdoc('numpy.core.multiarray', 'fromstring',
"""
fromstring(string, dtype=float, count=-1, sep='', *, like=None)
A new 1-D array initialized from text data in a string.
Parameters
----------
string : str
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
the data must be in exactly this format. Most builtin numeric types are
supported and extension types may be supported.
.. versionadded:: 1.18.0
Complex dtypes.
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
length of the data.
sep : str, optional
The string separating numbers in the data; extra whitespace between
elements is also ignored.
.. deprecated:: 1.14
Passing ``sep=''``, the default, is deprecated since it will
trigger the deprecated binary mode of this function. This mode
interprets `string` as binary bytes, rather than ASCII text with
decimal numbers, an operation which is better spelt
``frombuffer(string, dtype, count)``. If `string` contains unicode
text, the binary mode of `fromstring` will first encode it into
bytes using either utf-8 (python 3) or the default encoding
(python 2), neither of which produce sane results.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
arr : ndarray
The constructed array.
Raises
------
ValueError
If the string is not the correct size to satisfy the requested
`dtype` and `count`.
See Also
--------
frombuffer, fromfile, fromiter
Examples
--------
>>> np.fromstring('1 2', dtype=int, sep=' ')
array([1, 2])
>>> np.fromstring('1, 2', dtype=int, sep=',')
array([1, 2])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'compare_chararrays',
"""
compare_chararrays(a, b, cmp_op, rstrip)
Performs element-wise comparison of two string arrays using the
comparison operator specified by `cmp_op`.
Parameters
----------
a, b : array_like
Arrays to be compared.
cmp_op : {"<", "<=", "==", ">=", ">", "!="}
Type of comparison.
rstrip : Boolean
If True, the spaces at the end of Strings are removed before the comparison.
Returns
-------
out : ndarray
The output array of type Boolean with the same shape as a and b.
Raises
------
ValueError
If `cmp_op` is not valid.
TypeError
If at least one of `a` or `b` is a non-string array
Examples
--------
>>> a = np.array(["a", "b", "cde"])
>>> b = np.array(["a", "a", "dec"])
>>> np.compare_chararrays(a, b, ">", True)
array([False, True, False])
""")
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iter, dtype, count=-1, *, like=None)
Create a new 1-dimensional array from an iterable object.
Parameters
----------
iter : iterable object
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
The output array.
Notes
-----
Specify `count` to improve performance. It allows ``fromiter`` to
pre-allocate the output array, instead of resizing it on demand.
Examples
--------
>>> iterable = (x*x for x in range(5))
>>> np.fromiter(iterable, float)
array([ 0., 1., 4., 9., 16.])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
fromfile(file, dtype=float, count=-1, sep='', offset=0, *, like=None)
Construct an array from data in a text or binary file.
A highly efficient way of reading binary data with a known data-type,
as well as parsing simply formatted text files. Data written using the
`tofile` method can be read using this function.
Parameters
----------
file : file or str or Path
Open file object or filename.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
Most builtin numeric types are supported and extension types may be supported.
.. versionadded:: 1.18.0
Complex dtypes.
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
sep : str
Separator between items if file is a text file.
Empty ("") separator means the file should be treated as binary.
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
offset : int
The offset (in bytes) from the file's current position. Defaults to 0.
Only permitted for binary files.
.. versionadded:: 1.17.0
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
See also
--------
load, save
ndarray.tofile
loadtxt : More flexible way of loading data from a text file.
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
Examples
--------
Construct an ndarray:
>>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
Save the raw data to disk:
>>> import tempfile
>>> fname = tempfile.mkstemp()[1]
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', 'frombuffer',
"""
frombuffer(buffer, dtype=float, count=-1, offset=0, *, like=None)
Interpret a buffer as a 1-dimensional array.
Parameters
----------
buffer : buffer_like
An object that exposes the buffer interface.
dtype : data-type, optional
Data-type of the returned array; default: float.
count : int, optional
Number of items to read. ``-1`` means all data in the buffer.
offset : int, optional
Start reading the buffer from this offset (in bytes); default: 0.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Notes
-----
If the buffer has data that is not in machine byte-order, this should
be specified as part of the data-type, e.g.::
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
>>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
>>> s = b'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1')
>>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
>>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3)
array([1, 2, 3], dtype=uint8)
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
add_newdoc('numpy.core.multiarray', 'correlate',
"""cross_correlate(a,v, mode=0)""")
add_newdoc('numpy.core.multiarray', 'arange',
"""
arange([start,] stop[, step,], dtype=None, *, like=None)
Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
When using a non-integer step, such as 0.1, the results will often not
be consistent. It is better to use `numpy.linspace` for these cases.
Parameters
----------
start : integer or real, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : integer or real
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : integer or real, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified as a positional argument,
`start` must also be given.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
See Also
--------
numpy.linspace : Evenly spaced numbers with careful handling of endpoints.
numpy.ogrid: Arrays of evenly spaced numbers in N-dimensions.
numpy.mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions.
Examples
--------
>>> np.arange(3)
array([0, 1, 2])
>>> np.arange(3.0)
array([ 0., 1., 2.])
>>> np.arange(3,7)
array([3, 4, 5, 6])
>>> np.arange(3,7,2)
array([3, 5])
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
))
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number.
""")
add_newdoc('numpy.core.multiarray', '_reconstruct',
"""_reconstruct(subtype, shape, dtype)
Construct an empty array. Used by Pickles.
""")
add_newdoc('numpy.core.multiarray', 'set_string_function',
"""
set_string_function(f, repr=1)
Internal method to set a function to be used when pretty printing arrays.
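Examples
--------
A hedged sketch of typical usage (pass None to restore the default
formatter):
>>> np.set_string_function(lambda a: 'shape=' + str(a.shape))  # doctest: +SKIP
>>> np.set_string_function(None)  # doctest: +SKIP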
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
"""
set_numeric_ops(op1=func1, op2=func2, ...)
Set numerical operators for array objects.
.. deprecated:: 1.16
For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`.
For ndarray subclasses, define the ``__array_ufunc__`` method and
override the relevant ufunc.
Parameters
----------
op1, op2, ... : callable
Each ``op = func`` pair describes an operator to be replaced.
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace
addition by modulus 5 addition.
Returns
-------
saved_ops : list of callables
A list of all operators, stored before making replacements.
Notes
-----
.. WARNING::
Use with care! Incorrect usage may lead to memory errors.
A function replacing an operator cannot make use of that operator.
For example, when replacing add, you may not use ``+``. Instead,
directly call ufuncs.
Examples
--------
>>> def add_mod5(x, y):
... return np.add(x, y) % 5
...
>>> old_funcs = np.set_numeric_ops(add=add_mod5)
>>> x = np.arange(12).reshape((3, 4))
>>> x + x
array([[0, 2, 4, 1],
[3, 0, 2, 4],
[1, 3, 0, 2]])
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators
""")
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
Returns the data type with the smallest size and smallest scalar
kind to which both ``type1`` and ``type2`` may be safely cast.
The returned data type is always in native byte order.
This function is symmetric, but rarely associative.
Parameters
----------
type1 : dtype or dtype specifier
First data type.
type2 : dtype or dtype specifier
Second data type.
Returns
-------
out : dtype
The promoted data type.
Notes
-----
.. versionadded:: 1.6.0
Starting in NumPy 1.9, the promote_types function returns a valid string
length when given an integer or float dtype as one argument and a string
dtype as another argument. Previously it always returned the input string
dtype, even if it wasn't long enough to store the max integer/float value
converted to a string.
See Also
--------
result_type, dtype, can_cast
Examples
--------
>>> np.promote_types('f4', 'f8')
dtype('float64')
>>> np.promote_types('i8', 'f4')
dtype('float64')
>>> np.promote_types('>i8', '<c8')
dtype('complex128')
>>> np.promote_types('i4', 'S8')
dtype('S11')
An example of a non-associative case:
>>> p = np.promote_types
>>> p('S', p('i1', 'u1'))
dtype('S6')
>>> p(p('S', 'i1'), 'u1')
dtype('S4')
""")
add_newdoc('numpy.core.multiarray', 'c_einsum',
"""
c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe')
*This documentation shadows that of the native python implementation of the `einsum` function,
except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.*
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of array_like
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`numpy.trace`.
* Return a diagonal, :py:func:`numpy.diag`.
* Array axis summations, :py:func:`numpy.sum`.
* Transpositions and permutations, :py:func:`numpy.transpose`.
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
* Tensor contractions, :py:func:`numpy.tensordot`.
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <numpy.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view (changed in version 1.10.0).
`einsum` also provides an alternative way to provide the subscripts
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
If the output subscripts are not provided in this format, `einsum` is
computed in implicit mode; otherwise it is performed explicitly.
The examples below have corresponding `einsum` calls with the two
parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [0,1], [0])
array([ 10, 35, 60, 85, 110])
>>> np.sum(a, axis=1)
array([ 10, 35, 60, 85, 110])
For higher dimensional arrays, summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10, 35, 60, 85, 110])
>>> np.einsum(a, [Ellipsis,1], [Ellipsis])
array([ 10, 35, 60, 85, 110])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('ij->ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.transpose(c)
array([[0, 3],
[1, 4],
[2, 5]])
Vector inner products:
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(',ij', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
Writeable returned arrays (since version 1.10.0):
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
""")
##############################################################################
#
# Documentation for ndarray attributes and methods
#
##############################################################################
##############################################################################
#
# ndarray object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray',
"""
ndarray(shape, dtype=float, buffer=None, offset=0,
strides=None, order=None)
An array object represents a multidimensional, homogeneous array
of fixed-size items. An associated data-type object describes the
format of each element in the array (its byte-order, how many bytes it
occupies in memory, whether it is an integer, a floating point number,
or something else, etc.)
Arrays should be constructed using `array`, `zeros` or `empty` (refer
to the See Also section below). The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `numpy` module and examine the
methods and attributes of an array.
Parameters
----------
(for the __new__ method; see Notes below)
shape : tuple of ints
Shape of created array.
dtype : data-type, optional
Any object that can be interpreted as a numpy data type.
buffer : object exposing buffer interface, optional
Used to fill the array with data.
offset : int, optional
Offset of array data in buffer.
strides : tuple of ints, optional
Strides of data in memory.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Attributes
----------
T : ndarray
Transpose of the array.
data : buffer
The array's elements, in memory.
dtype : dtype object
Describes the format of the elements in the array.
flags : dict
Dictionary containing information related to memory use, e.g.,
'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
flat : numpy.flatiter object
Flattened version of the array as an iterator. The iterator
allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
assignment examples).
imag : ndarray
Imaginary part of the array.
real : ndarray
Real part of the array.
size : int
Number of elements in the array.
itemsize : int
The memory use of each array element in bytes.
nbytes : int
The total number of bytes required to store the array data,
i.e., ``itemsize * size``.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
strides : tuple of ints
The step-size required to move from one element to the next in
memory. For example, a contiguous ``(3, 4)`` array of type
``int16`` in C-order has strides ``(8, 2)``. This implies that
moving from element to element in memory requires jumps of 2 bytes.
To move from row-to-row, one needs to jump 8 bytes at a time
(``2 * 4``).
ctypes : ctypes object
Class containing properties of the array needed for interaction
with ctypes.
base : ndarray
If the array is a view into another array, that array is its `base`
(unless that array is also a view). The `base` array is where the
array data is actually stored.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
dtype : Create a data-type.
Notes
-----
There are two modes of creating an array using ``__new__``:
1. If `buffer` is None, then only `shape`, `dtype`, and `order`
are used.
2. If `buffer` is an object exposing the buffer interface, then
all keywords are interpreted.
No ``__init__`` method is needed because the array is fully initialized
after the ``__new__`` method.
Examples
--------
These examples illustrate the low-level `ndarray` constructor. Refer
to the `See Also` section above for easier ways of constructing an
ndarray.
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
array([[0.0e+000, 0.0e+000], # random
[ nan, 2.5e-323]])
Second mode:
>>> np.ndarray((2,), buffer=np.array([1,2,3]),
... offset=np.int_().itemsize,
... dtype=int) # offset = 1*itemsize, i.e. skip first element
array([2, 3])
""")
##############################################################################
#
# ndarray attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
"""Array protocol: Python side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
"""None."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
"""Array priority."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
Examples
--------
The base of an array that owns its memory is None:
>>> x = np.array([1,2,3,4])
>>> x.base is None
True
Slicing creates a view, whose memory is shared with x:
>>> y = x[2:]
>>> y.base is x
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
"""
An object to simplify the interaction of the array with the ctypes
module.
This attribute creates an object that makes it easier to use arrays
when calling shared libraries with the ctypes module. The returned
object has, among others, data, shape, and strides attributes (see
Notes below) which themselves return ctypes objects that can be used
as arguments to a shared library.
Parameters
----------
None
Returns
-------
c : Python object
Possessing attributes data, shape, strides, etc.
See Also
--------
numpy.ctypeslib
Notes
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
as well as documented private attributes):
.. autoattribute:: numpy.core._internal._ctypes.data
:noindex:
.. autoattribute:: numpy.core._internal._ctypes.shape
:noindex:
.. autoattribute:: numpy.core._internal._ctypes.strides
:noindex:
.. automethod:: numpy.core._internal._ctypes.data_as
:noindex:
.. automethod:: numpy.core._internal._ctypes.shape_as
:noindex:
.. automethod:: numpy.core._internal._ctypes.strides_as
:noindex:
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
the object will still have the ``as_parameter`` attribute which will
return an integer equal to the data attribute.
Examples
--------
>>> import ctypes
>>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
>>> x
array([[0, 1],
[2, 3]], dtype=int32)
>>> x.ctypes.data
31962608 # may vary
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32))
<__main__.LP_c_uint object at 0x7ff2fc1fc200> # may vary
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint32)).contents
c_uint(0)
>>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_uint64)).contents
c_ulong(4294967296)
>>> x.ctypes.shape
<numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1fce60> # may vary
>>> x.ctypes.strides
<numpy.core._internal.c_long_Array_2 object at 0x7ff2fc1ff320> # may vary
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
"""Python buffer object pointing to the start of the array's data."""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
"""
Data-type of the array's elements.
Parameters
----------
None
Returns
-------
d : numpy dtype object
See Also
--------
numpy.dtype
Examples
--------
>>> x = np.array([[0, 1], [2, 3]], dtype=np.int32)
>>> x.dtype
dtype('int32')
>>> type(x.dtype)
<class 'numpy.dtype'>
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
"""
The imaginary part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.imag
array([ 0. , 0.70710678])
>>> x.imag.dtype
dtype('float64')
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
"""
Length of one array element in bytes.
Examples
--------
>>> x = np.array([1,2,3], dtype=np.float64)
>>> x.itemsize
8
>>> x = np.array([1,2,3], dtype=np.complex128)
>>> x.itemsize
16
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
"""
Information about the memory layout of the array.
Attributes
----------
C_CONTIGUOUS (C)
The data is in a single, C-style contiguous segment.
F_CONTIGUOUS (F)
The data is in a single, Fortran-style contiguous segment.
OWNDATA (O)
The array owns the memory it uses or borrows it from another object.
WRITEABLE (W)
The data area can be written to. Setting this to False locks
the data, making it read-only. A view (slice, etc.) inherits WRITEABLE
from its base array at creation time, but a view of a writeable
array may be subsequently locked while the base array remains writeable.
(The opposite is not true, in that a view of a locked array may not
be made writeable. However, currently, locking a base object does not
lock any views that already reference it, so under that circumstance it
is possible to alter the contents of a locked array via a previously
created writeable view onto it.) Attempting to change a non-writeable
array raises a RuntimeError exception.
ALIGNED (A)
The data and all elements are aligned appropriately for the hardware.
WRITEBACKIFCOPY (X)
This array is a copy of some other array. The C-API function
PyArray_ResolveWritebackIfCopy must be called before deallocating
to the base array will be updated with the contents of this array.
UPDATEIFCOPY (U)
(Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array.
When this array is
deallocated, the base array will be updated with the contents of
this array.
FNC
F_CONTIGUOUS and not C_CONTIGUOUS.
FORC
F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
BEHAVED (B)
ALIGNED and WRITEABLE.
CARRAY (CA)
BEHAVED and C_CONTIGUOUS.
FARRAY (FA)
BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.
Notes
-----
The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
names are only supported in dictionary access.
Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be
changed by the user, via direct assignment to the attribute or dictionary
entry, or by calling `ndarray.setflags`.
The array flags cannot be set arbitrarily:
- UPDATEIFCOPY can only be set ``False``.
- WRITEBACKIFCOPY can only be set ``False``.
- ALIGNED can only be set ``True`` if the data is truly aligned.
- WRITEABLE can only be set ``True`` if the array owns its own memory
or the ultimate owner of the memory exposes a writeable buffer
interface or is a string.
Arrays can be both C-style and Fortran-style contiguous simultaneously.
This is clear for 1-dimensional arrays, but can also be true for higher
dimensional arrays.
Even for contiguous arrays a stride for a given dimension
``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
or the array has no elements.
It does *not* generally hold that ``self.strides[-1] == self.itemsize``
for C-style contiguous arrays, or that ``self.strides[0] == self.itemsize``
for Fortran-style contiguous arrays.
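Examples
--------
A small sketch of the two access styles and of locking a view (the
array here is illustrative):
>>> a = np.zeros(3)
>>> a.flags['C_CONTIGUOUS']
True
>>> a.flags.writeable
True
>>> v = a[:2]
>>> v.flags.writeable = False # lock the view; the base stays writeable
>>> a.flags.writeable
True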
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
"""
A 1-D iterator over the array.
This is a `numpy.flatiter` instance, which acts similarly to, but is not
a subclass of, Python's built-in iterator object.
See Also
--------
flatten : Return a copy of the array collapsed into one dimension.
flatiter
Examples
--------
>>> x = np.arange(1, 7).reshape(2, 3)
>>> x
array([[1, 2, 3],
[4, 5, 6]])
>>> x.flat[3]
4
>>> x.T
array([[1, 4],
[2, 5],
[3, 6]])
>>> x.T.flat[3]
5
>>> type(x.flat)
<class 'numpy.flatiter'>
An assignment example:
>>> x.flat = 3; x
array([[3, 3, 3],
[3, 3, 3]])
>>> x.flat[[1,4]] = 1; x
array([[3, 1, 3],
[3, 1, 3]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
"""
Total bytes consumed by the elements of the array.
Notes
-----
Does not include memory consumed by non-element attributes of the
array object.
Examples
--------
>>> x = np.zeros((3,5,2), dtype=np.complex128)
>>> x.nbytes
480
>>> np.prod(x.shape) * x.itemsize
480
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
"""
Number of array dimensions.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> x.ndim
1
>>> y = np.zeros((2, 3, 4))
>>> y.ndim
3
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
"""
The real part of the array.
Examples
--------
>>> x = np.sqrt([1+0j, 0+1j])
>>> x.real
array([ 1. , 0.70710678])
>>> x.real.dtype
dtype('float64')
See Also
--------
numpy.real : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
"""
Tuple of array dimensions.
The shape property is usually used to get the current shape of an array,
but may also be used to reshape the array in-place by assigning a tuple of
array dimensions to it. As with `numpy.reshape`, one of the new shape
dimensions can be -1, in which case its value is inferred from the size of
the array and the remaining dimensions. Reshaping an array in-place will
fail if a copy is required.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> y.shape = (3, 8)
>>> y
array([[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0.]])
>>> y.shape = (3, 6)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: total size of new array must be unchanged
>>> np.zeros((4,2))[::2].shape = (-1,)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: Incompatible shape for in-place modification. Use
`.reshape()` to make a copy with the desired shape.
See Also
--------
numpy.reshape : similar function
ndarray.reshape : similar method
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
"""
Number of elements in the array.
Equal to ``np.prod(a.shape)``, i.e., the product of the array's
dimensions.
Notes
-----
`a.size` returns a standard arbitrary precision Python integer. This
may not be the case with other methods of obtaining the same value
(like the suggested ``np.prod(a.shape)``, which returns an instance
of ``np.int_``), and may be relevant if the value is used further in
calculations that may overflow a fixed size integer type.
Examples
--------
>>> x = np.zeros((3, 5, 2), dtype=np.complex128)
>>> x.size
30
>>> np.prod(x.shape)
30
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
"""
Tuple of bytes to step in each dimension when traversing an array.
The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
is::
offset = sum(np.array(i) * a.strides)
A more detailed explanation of strides can be found in the
"ndarray.rst" file in the NumPy reference guide.
Notes
-----
Imagine an array of 32-bit integers (each 4 bytes)::
x = np.array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]], dtype=np.int32)
This array is stored in memory as 40 bytes, one after the other
(known as a contiguous block of memory). The strides of an array tell
us how many bytes we have to skip in memory to move to the next position
along a certain axis. For example, we have to skip 4 bytes (1 value) to
move to the next column, but 20 bytes (5 values) to get to the same
position in the next row. As such, the strides for the array `x` will be
``(20, 4)``.
See Also
--------
numpy.lib.stride_tricks.as_strided
Examples
--------
>>> y = np.reshape(np.arange(2*3*4), (2,3,4))
>>> y
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]]])
>>> y.strides
(48, 16, 4)
>>> y[1,1,1]
17
>>> offset=sum(y.strides * np.array((1,1,1)))
>>> offset/y.itemsize
17
>>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
>>> x.strides
(32, 4, 224, 1344)
>>> i = np.array([3,5,2,2])
>>> offset = sum(i * x.strides)
>>> x[3,5,2,2]
813
>>> offset / x.itemsize
813
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
The transposed array.
Same as ``self.transpose()``.
Examples
--------
>>> x = np.array([[1.,2.],[3.,4.]])
>>> x
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T
array([[ 1., 3.],
[ 2., 4.]])
>>> x = np.array([1.,2.,3.,4.])
>>> x
array([ 1., 2., 3., 4.])
>>> x.T
array([ 1., 2., 3., 4.])
See Also
--------
transpose
"""))
##############################################################################
#
# ndarray methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
""" a.__array__([dtype], /) -> reference if type unchanged, copy otherwise.
Returns either a new reference to self if dtype is not given or a new array
of provided data type if dtype is different from the current dtype of the
array.
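Examples
--------
A minimal sketch of the behavior described above (the array is
illustrative):
>>> a = np.arange(3)
>>> a.__array__() is a # no dtype given: a new reference to self
True
>>> a.__array__(np.float64) # different dtype: a new array
array([0., 1., 2.])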
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
"""a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
"""a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
"""a.__copy__()
Used if :func:`copy.copy` is called on an array. Returns a copy of the array.
Equivalent to ``a.copy(order='K')``.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
"""a.__deepcopy__(memo, /) -> Deep copy of array.
Used if :func:`copy.deepcopy` is called on an array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
"""a.__reduce__()
For pickling.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
"""a.__setstate__(state, /)
For unpickling.
The `state` argument must be a sequence that contains the following
elements:
Parameters
----------
version : int
optional pickle version. If omitted defaults to 0.
shape : tuple
dtype : data-type
isFortran : bool
rawdata : string or list
a binary string with the data (or a list if 'a' is an object array)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
"""
a.all(axis=None, out=None, keepdims=False, *, where=True)
Returns True if all elements evaluate to True.
Refer to `numpy.all` for full documentation.
See Also
--------
numpy.all : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
"""
a.any(axis=None, out=None, keepdims=False, *, where=True)
Returns True if any of the elements of `a` evaluate to True.
Refer to `numpy.any` for full documentation.
See Also
--------
numpy.any : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
"""
a.argmax(axis=None, out=None)
Return indices of the maximum values along the given axis.
Refer to `numpy.argmax` for full documentation.
See Also
--------
numpy.argmax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
"""
a.argmin(axis=None, out=None)
Return indices of the minimum values along the given axis.
Refer to `numpy.argmin` for detailed documentation.
See Also
--------
numpy.argmin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
a.argsort(axis=-1, kind=None, order=None)
Returns the indices that would sort this array.
Refer to `numpy.argsort` for full documentation.
See Also
--------
numpy.argsort : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
"""
a.argpartition(kth, axis=-1, kind='introselect', order=None)
Returns the indices that would partition this array.
Refer to `numpy.argpartition` for full documentation.
.. versionadded:: 1.8.0
See Also
--------
numpy.argpartition : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
"""
a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
By default, astype always returns a newly allocated array. If this
is set to false, and the `dtype`, `order`, and `subok`
requirements are satisfied, the input array is returned instead
of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array, with dtype, order
given by `dtype`, `order`.
Notes
-----
.. versionchanged:: 1.17.0
Casting between a simple data type and a structured one is possible only
for "unsafe" casting. Casting to multiple fields is allowed, but
casting from multiple fields is not.
.. versionchanged:: 1.9.0
Casting from numeric to string types in 'safe' casting mode requires
that the string dtype length is long enough to store the max
integer/float value converted.
Raises
------
ComplexWarning
When casting from complex to float or int. To avoid this,
one should use ``a.real.astype(t)``.
Examples
--------
>>> x = np.array([1, 2, 2.5])
>>> x
array([1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
"""
a.byteswap(inplace=False)
Swap the bytes of the array elements
Toggle between little-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
Arrays of byte-strings are not swapped. The real and imaginary
parts of a complex number are swapped individually.
Parameters
----------
inplace : bool, optional
If ``True``, swap bytes in-place, default is ``False``.
Returns
-------
out : ndarray
The byteswapped array. If `inplace` is ``True``, this is
a view to self.
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
>>> list(map(hex, A))
['0x1', '0x100', '0x2233']
>>> A.byteswap(inplace=True)
array([ 256, 1, 13090], dtype=int16)
>>> list(map(hex, A))
['0x100', '0x1', '0x3322']
Arrays of byte-strings are not swapped
>>> A = np.array([b'ceg', b'fac'])
>>> A.byteswap()
array([b'ceg', b'fac'], dtype='|S3')
``A.newbyteorder().byteswap()`` produces an array with the same values
but different representation in memory
>>> A = np.array([1, 2, 3])
>>> A.view(np.uint8)
array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
0, 0], dtype=uint8)
>>> A.newbyteorder().byteswap(inplace=True)
array([1, 2, 3])
>>> A.view(np.uint8)
array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0,
0, 3], dtype=uint8)
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
"""
a.choose(choices, out=None, mode='raise')
Use an index array to construct a new array from a set of choices.
Refer to `numpy.choose` for full documentation.
See Also
--------
numpy.choose : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
a.clip(min=None, max=None, out=None, **kwargs)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
Refer to `numpy.clip` for full documentation.
See Also
--------
numpy.clip : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
"""
a.compress(condition, axis=None, out=None)
Return selected slices of this array along given axis.
Refer to `numpy.compress` for full documentation.
See Also
--------
numpy.compress : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
"""
a.conj()
Complex-conjugate all elements.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
"""
a.conjugate()
Return the complex conjugate, element-wise.
Refer to `numpy.conjugate` for full documentation.
See Also
--------
numpy.conjugate : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
"""
a.copy(order='C')
Return a copy of the array.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the copy. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible. (Note that this function and :func:`numpy.copy` are very
similar, but have different default values for their order=
arguments.)
See also
--------
numpy.copy
numpy.copyto
Examples
--------
>>> x = np.array([[1,2,3],[4,5,6]], order='F')
>>> y = x.copy()
>>> x.fill(0)
>>> x
array([[0, 0, 0],
[0, 0, 0]])
>>> y
array([[1, 2, 3],
[4, 5, 6]])
>>> y.flags['C_CONTIGUOUS']
True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
"""
a.cumprod(axis=None, dtype=None, out=None)
Return the cumulative product of the elements along the given axis.
Refer to `numpy.cumprod` for full documentation.
See Also
--------
numpy.cumprod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
"""
a.cumsum(axis=None, dtype=None, out=None)
Return the cumulative sum of the elements along the given axis.
Refer to `numpy.cumsum` for full documentation.
See Also
--------
numpy.cumsum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
"""
a.diagonal(offset=0, axis1=0, axis2=1)
Return specified diagonals. In NumPy 1.9 the returned array is a
read-only view instead of a copy as in previous NumPy versions. In
a future version the read-only restriction will be removed.
Refer to :func:`numpy.diagonal` for full documentation.
See Also
--------
numpy.diagonal : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
"""
a.dot(b, out=None)
Dot product of two arrays.
Refer to `numpy.dot` for full documentation.
See Also
--------
numpy.dot : equivalent function
Examples
--------
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
array([[2., 2.],
[2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
array([[8., 8.],
[8., 8.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
"""a.dump(file)
Dump a pickle of the array to the specified file.
The array can be read back with pickle.load or numpy.load.
Parameters
----------
file : str or Path
A string naming the dump file.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
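Examples
--------
A minimal sketch of the round trip (the file name is illustrative, and
reading a pickled file back with `numpy.load` needs ``allow_pickle=True``):
>>> a = np.array([1, 2, 3])
>>> a.dump('a_dump.pkl') # doctest: +SKIP
>>> np.load('a_dump.pkl', allow_pickle=True) # doctest: +SKIP
array([1, 2, 3])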
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
"""
a.dumps()
Returns the pickle of the array as a string.
pickle.loads will convert the string back to an array.
Parameters
----------
None
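Examples
--------
A minimal sketch of the pickle round trip:
>>> import pickle
>>> a = np.array([1, 2, 3])
>>> pickle.loads(a.dumps())
array([1, 2, 3])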
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
"""
a.fill(value)
Fill the array with a scalar value.
Parameters
----------
value : scalar
All elements of `a` will be assigned this value.
Examples
--------
>>> a = np.array([1, 2])
>>> a.fill(0)
>>> a
array([0, 0])
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
array([1., 1.])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
"""
a.flatten(order='C')
Return a copy of the array collapsed into one dimension.
Parameters
----------
order : {'C', 'F', 'A', 'K'}, optional
'C' means to flatten in row-major (C-style) order.
'F' means to flatten in column-major (Fortran-
style) order. 'A' means to flatten in column-major
order if `a` is Fortran *contiguous* in memory,
row-major order otherwise. 'K' means to flatten
`a` in the order the elements occur in memory.
The default is 'C'.
Returns
-------
y : ndarray
A copy of the input array, flattened to one dimension.
See Also
--------
ravel : Return a flattened array.
flat : A 1-D flat iterator over the array.
Examples
--------
>>> a = np.array([[1,2], [3,4]])
>>> a.flatten()
array([1, 2, 3, 4])
>>> a.flatten('F')
array([1, 3, 2, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
"""
a.getfield(dtype, offset=0)
Returns a field of the given array as a certain type.
A field is a view of the array data with a given data-type. The values in
the view are determined by the given type and the offset into the current
array in bytes. The offset needs to be such that the view dtype fits in the
array dtype; for example an array of dtype complex128 has 16-byte elements.
If taking a view with a 32-bit integer (4 bytes), the offset needs to be
between 0 and 12 bytes.
Parameters
----------
dtype : str or dtype
The data type of the view. The dtype size of the view can not be larger
than that of the array itself.
offset : int
Number of bytes to skip before beginning the element view.
Examples
--------
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
array([[1.+1.j, 0.+0.j],
[0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
array([[1., 0.],
[0., 2.]])
By choosing an offset of 8 bytes we can select the complex part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
array([[1., 0.],
[0., 4.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
"""
a.item(*args)
Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
\\*args : Arguments (variable number and type)
* none: in this case, the method only works for arrays
with one element (`a.size == 1`), which element is
copied into a standard Python scalar object and returned.
* int_type: this argument is interpreted as a flat index into
the array, specifying which element to copy and return.
* tuple of int_types: functions as does a single int_type argument,
except that the argument is interpreted as an nd-index into the
array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable
Python scalar
Notes
-----
When the data type of `a` is longdouble or clongdouble, item() returns
a scalar array object because there is no available Python scalar that
would not lose information. Void arrays return a buffer object for item(),
unless fields are defined, in which case a tuple is returned.
`item` is very similar to a[args], except, instead of an array scalar,
a standard Python scalar is returned. This can be useful for speeding up
access to elements of the array and doing arithmetic on elements of the
array using Python's optimized math.
Examples
--------
>>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[2, 2, 6],
[1, 3, 6],
[1, 0, 1]])
>>> x.item(3)
1
>>> x.item(7)
0
>>> x.item((0, 1))
2
>>> x.item((2, 2))
1
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
"""
a.itemset(*args)
Insert scalar into an array (scalar is cast to array's dtype, if possible)
There must be at least 1 argument, and the last argument is defined
as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
than ``a[args] = item``. The item should be a scalar value and `args`
must select a single item in the array `a`.
Parameters
----------
\\*args : Arguments
If one argument: a scalar, only used in case `a` is of size 1.
If two arguments: the last argument is the value to be set
and must be a scalar, the first argument specifies a single array
element location. It is either an int or a tuple.
Notes
-----
Compared to indexing syntax, `itemset` provides some speed increase
for placing a scalar into a particular location in an `ndarray`,
if you must do this. However, generally this is discouraged:
among other problems, it complicates the appearance of the code.
Also, when using `itemset` (and `item`) inside a loop, be sure
to assign the methods to a local variable to avoid the attribute
look-up at each loop iteration.
Examples
--------
>>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
array([[2, 2, 6],
[1, 3, 6],
[1, 0, 1]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
array([[2, 2, 6],
[1, 0, 6],
[1, 0, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the maximum along a given axis.
Refer to `numpy.amax` for full documentation.
See Also
--------
numpy.amax : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
"""
a.mean(axis=None, dtype=None, out=None, keepdims=False, *, where=True)
Returns the average of the array elements along given axis.
Refer to `numpy.mean` for full documentation.
See Also
--------
numpy.mean : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the minimum along a given axis.
Refer to `numpy.amin` for full documentation.
See Also
--------
numpy.amin : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S', /)
Return the array with the same data viewed with a different byte order.
Equivalent to::
arr.view(arr.dtype.newbyteorder(new_order))
Changes are also made in all fields and sub-arrays of the array data
type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'little'} - little endian
* {'>', 'big'} - big endian
* '=' - native order, equivalent to `sys.byteorder`
* {'|', 'I'} - ignore (no change to byte order)
The default value ('S') results in swapping the current
byte order.
Returns
-------
new_arr : array
New array object with the dtype reflecting given change to the
byte order.
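Examples
--------
A small sketch: the underlying bytes are untouched, only their
interpretation changes (the result is independent of the native order):
>>> A = np.array([1, 256], dtype=np.int16)
>>> B = A.newbyteorder()
>>> int(B[0]), int(B[1])
(256, 1)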
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
"""
a.nonzero()
Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True)
Return the product of the array elements over the given axis
Refer to `numpy.prod` for full documentation.
See Also
--------
numpy.prod : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
"""
a.ptp(axis=None, out=None, keepdims=False)
Peak to peak (maximum - minimum) value along a given axis.
Refer to `numpy.ptp` for full documentation.
See Also
--------
numpy.ptp : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""
a.put(indices, values, mode='raise')
Set ``a.flat[n] = values[n]`` for all `n` in indices.
Refer to `numpy.put` for full documentation.
See Also
--------
numpy.put : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
a.ravel([order])
Return a flattened array.
Refer to `numpy.ravel` for full documentation.
See Also
--------
numpy.ravel : equivalent function
ndarray.flat : a flat iterator on the array.
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
"""
a.repeat(repeats, axis=None)
Repeat elements of an array.
Refer to `numpy.repeat` for full documentation.
See Also
--------
numpy.repeat : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
"""
a.reshape(shape, order='C')
Returns an array containing the same data with a new shape.
Refer to `numpy.reshape` for full documentation.
See Also
--------
numpy.reshape : equivalent function
Notes
-----
Unlike the free function `numpy.reshape`, this method on `ndarray` allows
the elements of the shape parameter to be passed in as separate arguments.
For example, ``a.reshape(10, 11)`` is equivalent to
``a.reshape((10, 11))``.
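Examples
--------
A minimal illustration of the two equivalent calling styles:
>>> a = np.arange(6)
>>> a.reshape((2, 3)).shape # shape passed as a tuple
(2, 3)
>>> a.reshape(2, 3).shape # shape passed as separate arguments
(2, 3)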
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
"""
a.resize(new_shape, refcheck=True)
Change shape and size of array in-place.
Parameters
----------
new_shape : tuple of ints, or `n` ints
Shape of resized array.
refcheck : bool, optional
If False, reference count will not be checked. Default is True.
Returns
-------
None
Raises
------
ValueError
If `a` does not own its own data, or if references or views to it exist,
and the data memory must be changed.
PyPy only: will always raise if the data memory must be changed, since
there is no reliable way to determine if references or views to it
exist.
SystemError
If the `order` keyword argument is specified. This behaviour is a
bug in NumPy.
See Also
--------
resize : Return a new array with the specified shape.
Notes
-----
This reallocates space for the data area if necessary.
Only contiguous arrays (data elements consecutive in memory) can be
resized.
The purpose of the reference count check is to make sure you
do not use this array as a buffer for another Python object and then
reallocate the memory. However, reference counts can increase in
other ways so if you are sure that you have not shared the memory
for this array with another Python object, then you may safely set
`refcheck` to False.
Examples
--------
Shrinking an array: array is flattened (in the order that the data are
stored in memory), resized, and reshaped:
>>> a = np.array([[0, 1], [2, 3]], order='C')
>>> a.resize((2, 1))
>>> a
array([[0],
[1]])
>>> a = np.array([[0, 1], [2, 3]], order='F')
>>> a.resize((2, 1))
>>> a
array([[0],
[2]])
Enlarging an array: as above, but missing entries are filled with zeros:
>>> b = np.array([[0, 1], [2, 3]])
>>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
>>> b
array([[0, 1, 2],
[3, 0, 0]])
Referencing an array prevents resizing...
>>> c = a
>>> a.resize((1, 1))
Traceback (most recent call last):
...
ValueError: cannot resize an array that references or is referenced ...
Unless `refcheck` is False:
>>> a.resize((1, 1), refcheck=False)
>>> a
array([[0]])
>>> c
array([[0]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
"""
a.round(decimals=0, out=None)
Return `a` with each element rounded to the given number of decimals.
Refer to `numpy.around` for full documentation.
See Also
--------
numpy.around : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
"""
a.searchsorted(v, side='left', sorter=None)
Find indices where elements of v should be inserted in a to maintain order.
For full documentation, see `numpy.searchsorted`
See Also
--------
numpy.searchsorted : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
"""
a.setfield(val, dtype, offset=0)
Put a value into a specified place in a field defined by a data-type.
Place `val` into `a`'s field defined by `dtype` and beginning `offset`
bytes into the field.
Parameters
----------
val : object
Value to be placed in field.
dtype : dtype object
Data-type of the field in which to place `val`.
offset : int, optional
The number of bytes into the field at which to place `val`.
Returns
-------
None
See Also
--------
getfield
Examples
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
[3, 3, 3]], dtype=int32)
>>> x
array([[1.0e+000, 1.5e-323, 1.5e-323],
[1.5e-323, 1.0e+000, 1.5e-323],
[1.5e-323, 1.5e-323, 1.0e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
"""
a.setflags(write=None, align=None, uic=None)
Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY),
respectively.
These Boolean-valued flags affect how numpy interprets the memory
area used by `a` (see Notes below). The ALIGNED flag can only
be set to True if the data is actually aligned according to the type.
The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set
to True. The flag WRITEABLE can only be set to True if the array owns its
own memory, or the ultimate owner of the memory exposes a writeable buffer
interface, or is a string. (The exception for string is made so that
unpickling can be done without copying memory.)
Parameters
----------
write : bool, optional
Describes whether or not `a` can be written to.
align : bool, optional
Describes whether or not `a` is aligned properly for its type.
uic : bool, optional
Describes whether or not `a` is a copy of another "base" array.
Notes
-----
Array flags provide information about how the memory area used
for the array is to be interpreted. There are 7 Boolean flags
in use, only four of which can be changed by the user:
WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED.
WRITEABLE (W) the data area can be written to;
ALIGNED (A) the data and strides are aligned appropriately for the hardware
(as determined by the compiler);
UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY;
WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
called, the base array will be updated with the contents of this array.
All flags can be accessed using the single (upper case) letter as well
as the full name.
Examples
--------
>>> y = np.array([[3, 1, 7],
... [2, 0, 0],
... [8, 5, 9]])
>>> y
array([[3, 1, 7],
[2, 0, 0],
[8, 5, 9]])
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : True
ALIGNED : True
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
>>> y.setflags(write=0, align=0)
>>> y.flags
C_CONTIGUOUS : True
F_CONTIGUOUS : False
OWNDATA : True
WRITEABLE : False
ALIGNED : False
WRITEBACKIFCOPY : False
UPDATEIFCOPY : False
>>> y.setflags(uic=1)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ValueError: cannot set WRITEBACKIFCOPY flag to True
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
a.sort(axis=-1, kind=None, order=None)
Sort an array in-place. Refer to `numpy.sort` for full documentation.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
and 'mergesort' use timsort under the covers and, in general, the
actual implementation will vary with datatype. The 'mergesort' option
is retained for backwards compatibility.
.. versionchanged:: 1.15.0
The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.sort : Return a sorted copy of an array.
numpy.argsort : Indirect sort.
numpy.lexsort : Indirect stable sort on multiple keys.
numpy.searchsorted : Find elements in sorted array.
numpy.partition : Partial sort.
Notes
-----
See `numpy.sort` for notes on the different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
array([(b'c', 1), (b'a', 2)],
dtype=[('x', 'S1'), ('y', '<i8')])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
Rearranges the elements in the array in such a way that the value of the
element in kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
the two partitions is undefined.
.. versionadded:: 1.8.0
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need to be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.partition : Return a partitioned copy of an array.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``np.partition`` for notes on the different algorithms.
Examples
--------
>>> a = np.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a
array([1, 2, 3, 4])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
"""
a.squeeze(axis=None)
Remove axes of length one from `a`.
Refer to `numpy.squeeze` for full documentation.
See Also
--------
numpy.squeeze : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
"""
a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
Returns the standard deviation of the array elements along given axis.
Refer to `numpy.std` for full documentation.
See Also
--------
numpy.std : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
Return the sum of the array elements over the given axis.
Refer to `numpy.sum` for full documentation.
See Also
--------
numpy.sum : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
"""
a.swapaxes(axis1, axis2)
Return a view of the array with `axis1` and `axis2` interchanged.
Refer to `numpy.swapaxes` for full documentation.
See Also
--------
numpy.swapaxes : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
"""
a.take(indices, axis=None, out=None, mode='raise')
Return an array formed from the elements of `a` at the given indices.
Refer to `numpy.take` for full documentation.
See Also
--------
numpy.take : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
"""
a.tofile(fid, sep="", format="%s")
Write array to a file as text or binary (default).
Data is always written in 'C' order, independent of the order of `a`.
The data produced by this method can be recovered using the function
fromfile().
Parameters
----------
fid : file or str or Path
An open file object, or a string containing a filename.
.. versionchanged:: 1.17.0
`pathlib.Path` objects are now accepted.
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
``file.write(a.tobytes())``.
format : str
Format string for text file output.
Each entry in the array is formatted to text by first converting
it to the closest Python type, and then using "format" % item.
Notes
-----
This is a convenience function for quick storage of array data.
Information on endianness and precision is lost, so this method is not a
good choice for files intended to archive data or transport data between
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
When fid is a file object, array contents are directly written to the
file, bypassing the file object's ``write`` method. As a result, tofile
cannot be used with file objects supporting compression (e.g., GzipFile)
or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
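Examples
--------
A minimal sketch of the binary round trip (the file name is
illustrative; shape and dtype must be supplied again on read):
>>> a = np.array([[1, 2], [3, 4]], dtype=np.int32)
>>> a.tofile('data.bin') # doctest: +SKIP
>>> np.fromfile('data.bin', dtype=np.int32).reshape(2, 2) # doctest: +SKIP
array([[1, 2],
[3, 4]], dtype=int32)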
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
Return a copy of the array data as a (nested) Python list.
Data items are converted to the nearest compatible builtin Python type, via
the `~numpy.ndarray.item` function.
If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
not be a list at all, but a simple Python scalar.
Parameters
----------
none
Returns
-------
y : object, or list of object, or list of list of object, or ...
The possibly nested list of array elements.
Notes
-----
The array may be recreated via ``a = np.array(a.tolist())``, although this
may sometimes lose precision.
Examples
--------
For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
except that ``tolist`` changes numpy scalars to Python scalars:
>>> a = np.uint32([1, 2])
>>> a_list = list(a)
>>> a_list
[1, 2]
>>> type(a_list[0])
<class 'numpy.uint32'>
>>> a_tolist = a.tolist()
>>> a_tolist
[1, 2]
>>> type(a_tolist[0])
<class 'int'>
Additionally, for a 2D array, ``tolist`` applies recursively:
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
The base case for this recursion is a 0D array:
>>> a = np.array(1)
>>> list(a)
Traceback (most recent call last):
...
TypeError: iteration over a 0-d array
>>> a.tolist()
1
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tobytes', """
a.tobytes(order='C')
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
data memory. The bytes object is produced in C-order by default.
This behavior is controlled by the ``order`` parameter.
.. versionadded:: 1.9.0
Parameters
----------
order : {'C', 'F', 'A'}, optional
Controls the memory layout of the bytes object. 'C' means C-order,
'F' means F-order, 'A' (short for *Any*) means 'F' if `a` is
Fortran contiguous, 'C' otherwise. Default is 'C'.
Returns
-------
s : bytes
Python bytes exhibiting a copy of `a`'s raw data.
Examples
--------
>>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
>>> x.tobytes()
b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tostring', r"""
a.tostring(order='C')
A compatibility alias for `tobytes`, with exactly the same behavior.
Despite its name, it returns `bytes` not `str`\ s.
.. deprecated:: 1.19.0
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
"""
a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)
Return the sum along diagonals of the array.
Refer to `numpy.trace` for full documentation.
See Also
--------
numpy.trace : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
"""
a.transpose(*axes)
Returns a view of the array with axes transposed.
For a 1-D array this has no effect, as a transposed vector is simply the
same vector. To convert a 1-D array into a 2D column vector, an additional
dimension must be added. `np.atleast_2d(a).T` achieves this, as does
`a[:, np.newaxis]`.
For a 2-D array, this is a standard matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : ndarray
View of `a`, with axes suitably permuted.
See Also
--------
ndarray.T : Array property returning the array transposed.
ndarray.reshape : Give a new shape to an array without changing its data.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> a
array([[1, 2],
[3, 4]])
>>> a.transpose()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0)
array([[1, 3],
[2, 4]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
"""
a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False, *, where=True)
Returns the variance of the array elements, along given axis.
Refer to `numpy.var` for full documentation.
See Also
--------
numpy.var : equivalent function
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
a.view([dtype][, type])
New view of array with the same data.
.. note::
Passing None for ``dtype`` is different from omitting the parameter,
since the former invokes ``dtype(None)`` which is an alias for
``dtype('float_')``.
Parameters
----------
dtype : data-type or ndarray sub-class, optional
Data-type descriptor of the returned view, e.g., float32 or int16.
Omitting it results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
Type of the returned view, e.g., ndarray or matrix. Again, omission
of the parameter results in type preservation.
Notes
-----
``a.view()`` is used two different ways:
``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
of the array's memory with a different data-type. This can cause a
reinterpretation of the bytes of memory.
``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
returns an instance of `ndarray_subclass` that looks at the same array
(same shape, dtype, etc.) This does not cause a reinterpretation of the
memory.
For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
bytes per entry than the previous dtype (for example, converting a
regular array to a structured array), then the behavior of the view
cannot be predicted just from the superficial appearance of ``a`` (shown
by ``print(a)``). It also depends on exactly how ``a`` is stored in
memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
defined as a slice or transpose, etc., the view may give different
results.
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
>>> y
matrix([[513]], dtype=int16)
>>> print(type(y))
<class 'numpy.matrix'>
Creating a view on a structured array so it can be used in calculations
>>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
>>> xv = x.view(dtype=np.int8).reshape(-1,2)
>>> xv
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
array([2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
>>> x
array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')])
Using a view to convert an array to a recarray:
>>> z = x.view(np.recarray)
>>> z.a
array([1, 3], dtype=int8)
Views share data:
>>> x[0] = (9, 10)
>>> z[0]
(9, 10)
Views that change the dtype size (bytes per entry) should normally be
avoided on arrays defined by slices, transposes, fortran-ordering, etc.:
>>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
>>> y = x[:, 0:2]
>>> y
array([[1, 2],
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
...
ValueError: To change to a dtype of a different size, the array must be C-contiguous
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
[(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])
"""))
##############################################################################
#
# umath functions
#
##############################################################################
add_newdoc('numpy.core.umath', 'frompyfunc',
"""
frompyfunc(func, nin, nout, *[, identity])
Takes an arbitrary Python function and returns a NumPy ufunc.
Can be used, for example, to add broadcasting to a built-in Python
function (see Examples section).
Parameters
----------
func : Python function object
An arbitrary Python function.
nin : int
The number of input arguments.
nout : int
The number of objects returned by `func`.
identity : object, optional
The value to use for the `~numpy.ufunc.identity` attribute of the resulting
object. If specified, this is equivalent to setting the underlying
C ``identity`` field to ``PyUFunc_IdentityValue``.
If omitted, the identity is set to ``PyUFunc_None``. Note that this is
_not_ equivalent to setting the identity to ``None``, which implies the
operation is reorderable.
Returns
-------
out : ufunc
Returns a NumPy universal function (``ufunc``) object.
See Also
--------
vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy.
Notes
-----
The returned ufunc always returns PyObject arrays.
Examples
--------
Use frompyfunc to add broadcasting to the Python function ``oct``:
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
array(['0o12', '0o36', '0o144'], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
array(['0o12', '0o36', '0o144'], dtype='<U5')
""")
add_newdoc('numpy.core.umath', 'geterrobj',
"""
geterrobj()
Return the current object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `geterrobj` is used internally by the other
functions that get and set error handling behavior (`geterr`, `seterr`,
`geterrcall`, `seterrcall`).
Returns
-------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
seterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> np.geterrobj() # first get the defaults
[8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> old_bufsize = np.setbufsize(20000)
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
[8192, 521, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
... invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
""")
add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
Set the object that defines floating-point error handling.
The error object contains all information that defines the error handling
behavior in NumPy. `seterrobj` is used internally by the other
functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
The error object, a list containing three elements:
[internal numpy buffer size, error mask, error callback function].
The error mask is a single integer that holds the treatment information
on all four floating point errors. The information for each error type
is contained in three bits of the integer. If we print it in base 8, we
can see what treatment is set for "invalid", "under", "over", and
"divide" (in that order). The printed string can be interpreted with
* 0 : 'ignore'
* 1 : 'warn'
* 2 : 'raise'
* 3 : 'call'
* 4 : 'print'
* 5 : 'log'
See Also
--------
geterrobj, seterr, geterr, seterrcall, geterrcall
getbufsize, setbufsize
Notes
-----
For complete documentation of the types of floating-point exceptions and
treatment options, see `seterr`.
Examples
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
[8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
...
>>> new_errobj = [20000, 12, err_handler]
>>> np.seterrobj(new_errobj)
>>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
'14'
>>> np.geterr()
{'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
>>> np.geterrcall() is err_handler
True
""")
##############################################################################
#
# compiled_base functions
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
Add a docstring to a built-in obj if possible.
If the obj already has a docstring, raise a RuntimeError.
If this routine does not know how to add a docstring to the object,
raise a TypeError.
""")
add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
"""
add_ufunc_docstring(ufunc, new_docstring)
Replace the docstring for a ufunc with new_docstring.
This method will only work if the current docstring for
the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)
Parameters
----------
ufunc : numpy.ufunc
A ufunc whose current doc is NULL.
new_docstring : string
The new docstring for the ufunc.
Notes
-----
This method allocates memory for new_docstring on
the heap. Technically this creates a memory leak, since this
memory will not be reclaimed until the end of the program
even if the ufunc itself is removed. However this will only
be a problem if the user is repeatedly creating ufuncs with
no documentation, adding documentation via add_newdoc_ufunc,
and then throwing away the ufunc.
""")
add_newdoc('numpy.core.multiarray', '_set_madvise_hugepage',
"""
_set_madvise_hugepage(enabled: bool) -> bool
Set or unset use of ``madvise (2)`` MADV_HUGEPAGE support when
allocating the array data. Returns the previously set value.
See `global_state` for more information.
""")
add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
"""
format_float_OSprintf_g(val, precision)
Print a floating point scalar using the system's printf function,
equivalent to:
printf("%.*g", precision, val);
for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
method is designed to help cross-validate the format_float_* methods.
Parameters
----------
val : python float or numpy floating scalar
Value to format.
precision : non-negative integer, optional
Precision given to printf.
Returns
-------
rep : string
The string representation of the floating point value
See Also
--------
format_float_scientific
format_float_positional
""")
##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################
##############################################################################
#
# ufunc object
#
##############################################################################
add_newdoc('numpy.core', 'ufunc',
"""
Functions that operate element by element on whole arrays.
To see the documentation for a specific ufunc, use `info`. For
example, ``np.info(np.sin)``. Because ufuncs are written in C
(for speed) and linked into Python with NumPy's ufunc facility,
Python's help() function finds this page whenever help() is called
on a ufunc.
A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`.
**Calling ufuncs:** ``op(*x[, out], where=True, **kwargs)``
Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.
The broadcasting rules are:
* Dimensions of length 1 may be prepended to either array.
* Arrays may be repeated along dimensions of length 1.
Parameters
----------
*x : array_like
Input arrays.
out : ndarray, None, or tuple of ndarray and None, optional
Alternate array object(s) in which to put the result; if provided, it
must have a shape that the inputs broadcast to. A tuple of arrays
(possible only as a keyword argument) must have length equal to the
number of outputs; use None for uninitialized outputs to be
allocated by the ufunc.
where : array_like, optional
This condition is broadcast over the input. At locations where the
condition is True, the `out` array will be set to the ufunc result.
Elsewhere, the `out` array will retain its original value.
Note that if an uninitialized `out` array is created via the default
``out=None``, locations within it where the condition is False will
remain uninitialized.
**kwargs
For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
Returns
-------
r : ndarray or tuple of ndarray
`r` will have the shape that the arrays in `x` broadcast to; if `out` is
provided, it will be returned. If not, `r` will be allocated and
may contain uninitialized values. If the function has more than one
output, then the result will be a tuple of arrays.
""")
##############################################################################
#
# ufunc attributes
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('identity',
"""
The identity value.
Data attribute containing the identity element for the ufunc, if it has one.
If it does not, the attribute value is None.
Examples
--------
>>> np.add.identity
0
>>> np.multiply.identity
1
>>> np.power.identity
1
>>> print(np.exp.identity)
None
"""))
add_newdoc('numpy.core', 'ufunc', ('nargs',
"""
The number of arguments.
Data attribute containing the number of arguments the ufunc takes, including
optional ones.
Notes
-----
Typically this value will be one more than what you might expect because all
ufuncs take the optional "out" argument.
Examples
--------
>>> np.add.nargs
3
>>> np.multiply.nargs
3
>>> np.power.nargs
3
>>> np.exp.nargs
2
"""))
add_newdoc('numpy.core', 'ufunc', ('nin',
"""
The number of inputs.
Data attribute containing the number of arguments the ufunc treats as input.
Examples
--------
>>> np.add.nin
2
>>> np.multiply.nin
2
>>> np.power.nin
2
>>> np.exp.nin
1
"""))
add_newdoc('numpy.core', 'ufunc', ('nout',
"""
The number of outputs.
Data attribute containing the number of arguments the ufunc treats as output.
Notes
-----
Since all ufuncs can take output arguments, this will always be (at least) 1.
Examples
--------
>>> np.add.nout
1
>>> np.multiply.nout
1
>>> np.power.nout
1
>>> np.exp.nout
1
"""))
add_newdoc('numpy.core', 'ufunc', ('ntypes',
"""
The number of types.
The number of numerical NumPy types - of which there are 18 total - on which
the ufunc can operate.
See Also
--------
numpy.ufunc.types
Examples
--------
>>> np.add.ntypes
18
>>> np.multiply.ntypes
18
>>> np.power.ntypes
17
>>> np.exp.ntypes
7
>>> np.remainder.ntypes
14
"""))
add_newdoc('numpy.core', 'ufunc', ('types',
"""
Returns a list with types grouped input->output.
Data attribute listing the data-type "Domain-Range" groupings the ufunc can
deliver. The data-types are given using the character codes.
See Also
--------
numpy.ufunc.ntypes
Examples
--------
>>> np.add.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.multiply.types
['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
'GG->G', 'OO->O']
>>> np.power.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
'OO->O']
>>> np.exp.types
['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']
>>> np.remainder.types
['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']
"""))
add_newdoc('numpy.core', 'ufunc', ('signature',
"""
Definition of the core elements a generalized ufunc operates on.
The signature determines how the dimensions of each input/output array
are split into core and loop dimensions:
1. Each dimension in the signature is matched to a dimension of the
corresponding passed-in array, starting from the end of the shape tuple.
2. Core dimensions assigned to the same label in the signature must have
exactly matching sizes, no broadcasting is performed.
3. The core dimensions are removed from all inputs and the remaining
dimensions are broadcast together, defining the loop dimensions.
Notes
-----
Generalized ufuncs are used internally in many linalg functions, and in
the testing suite; the examples below are taken from these.
For ufuncs that operate on scalars, the signature is None, which is
equivalent to '()' for every argument.
Examples
--------
>>> np.core.umath_tests.matrix_multiply.signature
'(m,n),(n,p)->(m,p)'
>>> np.linalg._umath_linalg.det.signature
'(m,m)->()'
>>> np.add.signature is None
True # equivalent to '(),()->()'
"""))
##############################################################################
#
# ufunc methods
#
##############################################################################
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
reduce(array, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
Reduces `array`'s dimension by one, by applying ufunc along one axis.
Let :math:`array.shape = (N_0, ..., N_i, ..., N_{M-1})`. Then
:math:`ufunc.reduce(array, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
ufunc to each :math:`array[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
For a one-dimensional array, reduce produces results equivalent to:
::
r = op.identity # op = ufunc
for i in range(len(A)):
r = op(r, A[i])
return r
For example, add.reduce() is equivalent to sum().
Parameters
----------
array : array_like
The array to act on.
axis : None or int or tuple of ints, optional
Axis or axes along which a reduction is performed.
The default (`axis` = 0) is to perform a reduction over the first
dimension of the input array. `axis` may be negative, in
which case it counts from the last to the first axis.
.. versionadded:: 1.7.0
If this is None, a reduction is performed over all the axes.
If this is a tuple of ints, a reduction is performed on multiple
axes, instead of a single axis or all the axes as before.
For operations which are either not commutative or not associative,
doing a reduction over multiple axes is not well-defined. The
ufuncs do not currently raise an exception in this case, but will
likely do so in the future.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data-type of the output array if this is provided, or
the data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `array`.
.. versionadded:: 1.7.0
initial : scalar, optional
The value with which to start the reduction.
If the ufunc has no identity or the dtype is object, this defaults
to None - otherwise it defaults to ufunc.identity.
If ``None`` is given, the first element of the reduction is used,
and an error is thrown if the reduction is empty.
.. versionadded:: 1.15.0
where : array_like of bool, optional
A boolean array which is broadcasted to match the dimensions
of `array`, and selects elements to include in the reduction. Note
that for ufuncs like ``minimum`` that do not have an identity
defined, one has to pass in also ``initial``.
.. versionadded:: 1.17.0
Returns
-------
r : ndarray
The reduced array. If `out` was supplied, `r` is a reference to it.
Examples
--------
>>> np.multiply.reduce([2,3,5])
30
A multi-dimensional array example:
>>> X = np.arange(8).reshape((2,2,2))
>>> X
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.add.reduce(X, 0)
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X) # confirm: default axis value is 0
array([[ 4, 6],
[ 8, 10]])
>>> np.add.reduce(X, 1)
array([[ 2, 4],
[10, 12]])
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
You can use the ``initial`` keyword argument to initialize the reduction
with a different value, and ``where`` to select specific elements to include:
>>> np.add.reduce([10], initial=5)
15
>>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
array([14., 14.])
>>> a = np.array([10., np.nan, 10])
>>> np.add.reduce(a, where=~np.isnan(a))
20.0
Allows reductions of empty arrays where they would normally fail, i.e.
for ufuncs without an identity.
>>> np.minimum.reduce([], initial=np.inf)
inf
>>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False])
array([ 1., 10.])
>>> np.minimum.reduce([])
Traceback (most recent call last):
...
ValueError: zero-size array to reduction operation minimum which has no identity
"""))
add_newdoc('numpy.core', 'ufunc', ('accumulate',
"""
accumulate(array, axis=0, dtype=None, out=None)
Accumulate the result of applying the operator to all elements.
For a one-dimensional array, accumulate produces results equivalent to::
r = np.empty(len(A))
t = op.identity # op = the ufunc being applied to A's elements
for i in range(len(A)):
t = op(t, A[i])
r[i] = t
return r
For example, add.accumulate() is equivalent to np.cumsum().
For a multi-dimensional array, accumulate is applied along only one
axis (axis zero by default; see Examples below) so repeated use is
necessary if one wants to accumulate over multiple axes.
Parameters
----------
array : array_like
The array to act on.
axis : int, optional
The axis along which to apply the accumulation; default is zero.
dtype : data-type code, optional
The data-type used to represent the intermediate results. Defaults
to the data-type of the output array if one is provided, or the
data-type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
Returns
-------
r : ndarray
The accumulated values. If `out` was supplied, `r` is a reference to
`out`.
Examples
--------
1-D array examples:
>>> np.add.accumulate([2, 3, 5])
array([ 2, 5, 10])
>>> np.multiply.accumulate([2, 3, 5])
array([ 2, 6, 30])
2-D array examples:
>>> I = np.eye(2)
>>> I
array([[1., 0.],
[0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
array([[1., 0.],
[1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
array([[1., 0.],
[1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
array([[1., 1.],
[0., 1.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('reduceat',
"""
reduceat(array, indices, axis=0, dtype=None, out=None)
Performs a (local) reduce with specified slices over a single axis.
For i in ``range(len(indices))``, `reduceat` computes
``ufunc.reduce(array[indices[i]:indices[i+1]])``, which becomes the i-th
generalized "row" parallel to `axis` in the final result (i.e., in a
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if
`axis = 1`, it becomes the i-th column). There are three exceptions to this:
* when ``i = len(indices) - 1`` (so for the last index),
``indices[i+1] = array.shape[axis]``.
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is
simply ``array[indices[i]]``.
* if ``indices[i] >= len(array)`` or ``indices[i] < 0``, an error is raised.
The shape of the output depends on the size of `indices`, and may be
larger than `array` (this happens if ``len(indices) > array.shape[axis]``).
Parameters
----------
array : array_like
The array to act on.
indices : array_like
Paired indices, comma separated (not colon), specifying slices to
reduce.
axis : int, optional
The axis along which to apply the reduceat.
dtype : data-type code, optional
The type used to represent the intermediate results. Defaults
to the data type of the output array if this is provided, or
the data type of the input array if no output array is provided.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or None,
a freshly-allocated array is returned. For consistency with
``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
Tuples are allowed for keyword argument.
Returns
-------
r : ndarray
The reduced values. If `out` was supplied, `r` is a reference to
`out`.
Notes
-----
A descriptive example:
If `array` is 1-D, the function `ufunc.accumulate(array)` is the same as
``ufunc.reduceat(array, indices)[::2]`` where `indices` is
``range(len(array) - 1)`` with a zero placed
in every other element:
``indices = zeros(2 * len(array) - 1)``,
``indices[1::2] = range(1, len(array))``.
Don't be fooled by this attribute's name: `reduceat(array)` is not
necessarily smaller than `array`.
Examples
--------
To take the running sum of four successive values:
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2]
array([ 6, 10, 14, 18])
A 2-D example:
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
::
# reduce such that the result has the following five rows:
# [row1 + row2 + row3]
# [row4]
# [row2]
# [row3]
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
array([[12., 15., 18., 21.],
[12., 13., 14., 15.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[24., 28., 32., 36.]])
::
# reduce such that result has the following two columns:
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
array([[ 0., 3.],
[ 120., 7.],
[ 720., 11.],
[2184., 15.]])
"""))
add_newdoc('numpy.core', 'ufunc', ('outer',
r"""
outer(A, B, /, **kwargs)
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`.
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of
``op.outer(A, B)`` is an array of dimension M + N such that:
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] =
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}])
For `A` and `B` one-dimensional, this is equivalent to::
r = empty((len(A), len(B)))
for i in range(len(A)):
for j in range(len(B)):
r[i,j] = op(A[i], B[j]) # op = ufunc in question
Parameters
----------
A : array_like
First array
B : array_like
Second array
kwargs : any
Arguments to pass on to the ufunc. Typically `dtype` or `out`.
Returns
-------
r : ndarray
Output array
See Also
--------
numpy.outer : A less powerful version of ``np.multiply.outer``
that `ravel`\ s all inputs to 1D. This exists
primarily for compatibility with old code.
tensordot : ``np.tensordot(a, b, axes=((), ()))`` and
``np.multiply.outer(a, b)`` behave same for all
dimensions of a and b.
Examples
--------
>>> np.multiply.outer([1, 2, 3], [4, 5, 6])
array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]])
A multi-dimensional example:
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
>>> A.shape
(2, 3)
>>> B = np.array([[1, 2, 3, 4]])
>>> B.shape
(1, 4)
>>> C = np.multiply.outer(A, B)
>>> C.shape; C
(2, 3, 1, 4)
array([[[[ 1, 2, 3, 4]],
[[ 2, 4, 6, 8]],
[[ 3, 6, 9, 12]]],
[[[ 4, 8, 12, 16]],
[[ 5, 10, 15, 20]],
[[ 6, 12, 18, 24]]]])
"""))
add_newdoc('numpy.core', 'ufunc', ('at',
"""
at(a, indices, b=None, /)
Performs unbuffered in place operation on operand 'a' for elements
specified by 'indices'. For addition ufunc, this method is equivalent to
``a[indices] += b``, except that results are accumulated for elements that
are indexed more than once. For example, ``a[[0,0]] += 1`` will only
increment the first element once because of buffering, whereas
``add.at(a, [0,0], 1)`` will increment the first element twice.
.. versionadded:: 1.8.0
Parameters
----------
a : array_like
The array to perform in place operation on.
indices : array_like or tuple
Array like index object or slice object for indexing into first
operand. If first operand has multiple dimensions, indices can be a
tuple of array like index objects or slice objects.
b : array_like
Second operand for ufuncs requiring two operands. Operand must be
broadcastable over first operand after indexing or slicing.
Examples
--------
Set items 0 and 1 to their negative values:
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
>>> a
array([-1, -2, 3, 4])
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
>>> a
array([2, 3, 5, 4])
Add items 0 and 1 in first array to second array,
and store results in first array:
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
>>> a
array([2, 4, 3, 4])
"""))
##############################################################################
#
# Documentation for dtype attributes and methods
#
##############################################################################
##############################################################################
#
# dtype object
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype',
"""
dtype(dtype, align=False, copy=False)
Create a data type object.
A numpy array is homogeneous, and contains elements described by a
dtype object. A dtype object can be constructed from different
combinations of fundamental numeric types.
Parameters
----------
dtype
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
for a similar C-struct. Can be ``True`` only if `dtype` is a dictionary
or a comma-separated string. If a struct dtype is being created,
this also sets a sticky alignment flag ``isalignedstruct``.
copy : bool, optional
Make a new copy of the data-type object. If ``False``, the result
may just be a reference to a built-in data-type object.
See also
--------
result_type
Examples
--------
Using array-scalar type:
>>> np.dtype(np.int16)
dtype('int16')
Structured type, one field name 'f1', containing int16:
>>> np.dtype([('f1', np.int16)])
dtype([('f1', '<i2')])
Structured type, one field named 'f1', in itself containing a structured
type with one field:
>>> np.dtype([('f1', [('f1', np.int16)])])
dtype([('f1', [('f1', '<i2')])])
Structured type, two fields: the first field contains an unsigned int, the
second an int32:
>>> np.dtype([('f1', np.uint64), ('f2', np.int32)])
dtype([('f1', '<u8'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
dtype([('a', '<f8'), ('b', 'S10')])
Using comma-separated field formats. The shape is (2,3):
>>> np.dtype("i4, (2,3)f8")
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))])
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
>>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)])
dtype([('hello', '<i8', (3,)), ('world', 'V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
dtype([('gender', 'S1'), ('age', 'u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
dtype([('surname', 'S25'), ('age', 'u1')])
""")
##############################################################################
#
# dtype attributes
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
"""
The required alignment (bytes) of this data-type according to the compiler.
More information is available in the C-API section of the manual.
Examples
--------
>>> x = np.dtype('i4')
>>> x.alignment
4
>>> x = np.dtype(float)
>>> x.alignment
8
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""
A character indicating the byte-order of this data-type object.
One of:
=== ==============
'=' native
'<' little-endian
'>' big-endian
'|' not applicable
=== ==============
All built-in data-type objects have byteorder either '=' or '|'.
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.byteorder
'='
>>> # endianness is not relevant for 8-bit numbers
>>> np.dtype('i1').byteorder
'|'
>>> # or ASCII strings
>>> np.dtype('S2').byteorder
'|'
>>> # Even if specific code is given, and it is native
>>> # '=' is the byteorder
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> dt = np.dtype(native_code + 'i2')
>>> dt.byteorder
'='
>>> # Swapped code shows up as itself
>>> dt = np.dtype(swapped_code + 'i2')
>>> dt.byteorder == swapped_code
True
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
"""A unique character code for each of the 21 different built-in types.
Examples
--------
>>> x = np.dtype(float)
>>> x.char
'd'
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
`__array_interface__` description of the data-type.
The format is that required by the 'descr' key in the
`__array_interface__` attribute.
Warning: This attribute exists specifically for `__array_interface__`,
and passing it directly to `np.dtype` will not accurately reconstruct
some dtypes (e.g., scalar and subarray dtypes).
Examples
--------
>>> x = np.dtype(float)
>>> x.descr
[('', '<f8')]
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.descr
[('name', '<U16'), ('grades', '<f8', (2,))]
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
"""
Dictionary of named fields defined for this data type, or ``None``.
The dictionary is indexed by keys that are the names of the fields.
Each entry in the dictionary is a tuple fully describing the field::
(dtype, offset[, title])
Offset is limited to C int, which is signed and usually 32 bits.
If present, the optional title can be any object (if it is a string
or unicode then it will also be a key in the fields dictionary,
otherwise it's meta-data). Notice also that the first two elements
of the tuple can be passed directly as arguments to the ``ndarray.getfield``
and ``ndarray.setfield`` methods.
See Also
--------
ndarray.getfield, ndarray.setfield
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> print(dt.fields)
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('|S16'), 0)}
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
"""
Bit-flags describing how this data type is to be interpreted.
Bit-masks are in `numpy.core.multiarray` as the constants
`ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
`NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
The following example demonstrates that operations on this particular
dtype requires Python C-API.
Examples
--------
>>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
>>> x.flags
16
>>> np.core.multiarray.NEEDS_PYAPI
16
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
"""
Boolean indicating whether this dtype contains any reference-counted
objects in any fields or sub-dtypes.
Recall that what is actually in the ndarray memory representing
the Python object is the memory address of that object (a pointer).
Special handling may be required, and this attribute is useful for
distinguishing data types that may contain arbitrary Python objects
and data-types that won't.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
Read-only.
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
2 if the dtype is for a user-defined numpy type
A user-defined type uses the numpy C-API machinery to extend
numpy to handle a new array type. See
:ref:`user.user-defined-data-types` in the NumPy manual.
= ========================================================================
Examples
--------
>>> dt = np.dtype('i2')
>>> dt.isbuiltin
1
>>> dt = np.dtype('f8')
>>> dt.isbuiltin
1
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.isbuiltin
0
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
"""
Boolean indicating whether the byte order of this dtype is native
to the platform.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
"""
Boolean indicating whether the dtype is a struct which maintains
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
"""
The element size of this data-type object.
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
Examples
--------
>>> arr = np.array([[1, 2], [3, 4]])
>>> arr.dtype
dtype('int64')
>>> arr.itemsize
8
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.itemsize
80
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
A character code (one of 'biufcmMOSUV') identifying the general kind of data.
= ======================
b boolean
i signed integer
u unsigned integer
f floating-point
c complex floating-point
m timedelta
M datetime
O object
S (byte-)string
U Unicode
V void
= ======================
Examples
--------
>>> dt = np.dtype('i4')
>>> dt.kind
'i'
>>> dt = np.dtype('f8')
>>> dt.kind
'f'
>>> dt = np.dtype([('field1', 'f8')])
>>> dt.kind
'V'
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('metadata',
"""
Either ``None`` or a readonly dictionary of metadata (mappingproxy).
The metadata field can be set using any dictionary at data-type
creation. NumPy currently has no uniform approach to propagating
metadata; although some array operations preserve it, there is no
guarantee that others will.
.. warning::
Although used in certain projects, this feature was long undocumented
and is not well supported. Some aspects of metadata propagation
are expected to change in the future.
Examples
--------
>>> dt = np.dtype(float, metadata={"key": "value"})
>>> dt.metadata["key"]
'value'
>>> arr = np.array([1, 2, 3], dtype=dt)
>>> arr.dtype.metadata
mappingproxy({'key': 'value'})
Adding arrays with identical datatypes currently preserves the metadata:
>>> (arr + arr).dtype.metadata
mappingproxy({'key': 'value'})
But if the arrays have different dtype metadata, the metadata may be
dropped:
>>> dt2 = np.dtype(float, metadata={"key2": "value2"})
>>> arr2 = np.array([3, 2, 1], dtype=dt2)
>>> (arr + arr2).dtype.metadata is None
True # The metadata field is cleared so None is returned
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
"""
A bit-width name for this data-type.
Un-sized flexible data-type objects do not have this attribute.
Examples
--------
>>> x = np.dtype(float)
>>> x.name
'float64'
>>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
>>> x.name
'void640'
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
"""
Ordered list of field names, or ``None`` if there are no fields.
The names are ordered according to increasing byte offset. This can be
used, for example, to walk through all of the named fields in offset order.
Examples
--------
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
>>> dt.names
('name', 'grades')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
"""
A unique number for each of the 21 different built-in types.
These are roughly ordered from least-to-most precision.
Examples
--------
>>> dt = np.dtype(str)
>>> dt.num
19
>>> dt = np.dtype(float)
>>> dt.num
12
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
"""
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
Examples
--------
>>> dt = np.dtype(('i4', 4))
>>> dt.shape
(4,)
>>> dt = np.dtype(('i4', (2, 3)))
>>> dt.shape
(2, 3)
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
"""
Number of dimensions of the sub-array if this data type describes a
sub-array, and ``0`` otherwise.
.. versionadded:: 1.13.0
Examples
--------
>>> x = np.dtype(float)
>>> x.ndim
0
>>> x = np.dtype((float, 8))
>>> x.ndim
1
>>> x = np.dtype(('i4', (3, 4)))
>>> x.ndim
2
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
"""The array-protocol typestring of this data-type object."""))
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
"""
Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
None otherwise.
The *shape* is the fixed shape of the sub-array described by this
data type, and *item_dtype* the data type of the array.
If a field whose dtype object has this attribute is retrieved,
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
See Also
--------
dtype.base
Examples
--------
>>> x = np.dtype('8f')
>>> x.subdtype
(dtype('float32'), (8,))
>>> x = np.dtype('i2')
>>> x.subdtype
>>>
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('base',
"""
Returns dtype for the base element of the subarrays,
regardless of their dimension or shape.
See Also
--------
dtype.subdtype
Examples
--------
>>> x = np.dtype('8f')
>>> x.base
dtype('float32')
>>> x = np.dtype('i2')
>>> x.base
dtype('int16')
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
"""The type object used to instantiate a scalar of this data-type."""))
##############################################################################
#
# dtype methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder',
"""
newbyteorder(new_order='S', /)
Return a new dtype with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
Parameters
----------
new_order : string, optional
Byte order to force; a value from the byte order specifications
below. The default value ('S') results in swapping the current
byte order. `new_order` codes can be any of:
* 'S' - swap dtype from current to opposite endian
* {'<', 'little'} - little endian
* {'>', 'big'} - big endian
* '=' - native order
* {'|', 'I'} - ignore (no change to byte order)
Returns
-------
new_dtype : dtype
New dtype object with the given change to the byte order.
Notes
-----
Changes are also made in all fields and sub-arrays of the data type.
Examples
--------
>>> import sys
>>> sys_is_le = sys.byteorder == 'little'
>>> native_code = sys_is_le and '<' or '>'
>>> swapped_code = sys_is_le and '>' or '<'
>>> native_dt = np.dtype(native_code+'i2')
>>> swapped_dt = np.dtype(swapped_code+'i2')
>>> native_dt.newbyteorder('S') == swapped_dt
True
>>> native_dt.newbyteorder() == swapped_dt
True
>>> native_dt == swapped_dt.newbyteorder('S')
True
>>> native_dt == swapped_dt.newbyteorder('=')
True
>>> native_dt == swapped_dt.newbyteorder('N')
True
>>> native_dt == native_dt.newbyteorder('|')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('<')
True
>>> np.dtype('<i2') == native_dt.newbyteorder('L')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('>')
True
>>> np.dtype('>i2') == native_dt.newbyteorder('B')
True
"""))
##############################################################################
#
# Datetime-related Methods
#
##############################################################################
add_newdoc('numpy.core.multiarray', 'busdaycalendar',
"""
busdaycalendar(weekmask='1111100', holidays=None)
A business day calendar object that efficiently stores information
defining valid days for the busday family of functions.
The default valid days are Monday through Friday ("business days").
A busdaycalendar object can be specified with any set of weekly
valid days, plus an optional set of "holiday" dates that will always be invalid.
Once a busdaycalendar object is created, the weekmask and holidays
cannot be modified.
.. versionadded:: 1.7.0
Parameters
----------
weekmask : str or array_like of bool, optional
A seven-element array indicating which of Monday through Sunday are
valid days. May be specified as a length-seven list or array, like
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
weekdays, optionally separated by white space. Valid abbreviations
are: Mon Tue Wed Thu Fri Sat Sun
holidays : array_like of datetime64[D], optional
An array of dates to consider as invalid dates, no matter which
weekday they fall upon. Holiday dates may be specified in any
order, and NaT (not-a-time) dates are ignored. This list is
saved in a normalized form that is suited for fast calculations
of valid days.
Returns
-------
out : busdaycalendar
A business day calendar object containing the specified
weekmask and holidays values.
See Also
--------
is_busday : Returns a boolean array indicating valid days.
busday_offset : Applies an offset counted in valid days.
busday_count : Counts how many valid days are in a half-open date range.
Attributes
----------
Note: once a busdaycalendar object is created, you cannot modify the
weekmask or holidays. The attributes return copies of internal data.
weekmask : (copy) seven-element array of bool
holidays : (copy) sorted array of datetime64[D]
Examples
--------
>>> # Some important days in July
... bdd = np.busdaycalendar(
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
array([ True, True, True, True, True, False, False])
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
""")
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
"""A copy of the seven-element boolean mask indicating valid days."""))
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
"""
normalize_axis_index(axis, ndim, msg_prefix=None)
Normalizes an axis index, `axis`, such that it is a valid positive index into
the shape of array with `ndim` dimensions. Raises an AxisError with an
appropriate message if this is not possible.
Used internally by all axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int
The un-normalized index of the axis. Can be negative
ndim : int
The number of dimensions of the array that `axis` should be normalized
against
msg_prefix : str
A prefix to put before the message, typically the name of the argument
Returns
-------
normalized_axis : int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If the axis index is invalid, when `-ndim <= axis < ndim` is false.
Examples
--------
>>> normalize_axis_index(0, ndim=3)
0
>>> normalize_axis_index(1, ndim=3)
1
>>> normalize_axis_index(-1, ndim=3)
2
>>> normalize_axis_index(3, ndim=3)
Traceback (most recent call last):
...
AxisError: axis 3 is out of bounds for array of dimension 3
>>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg')
Traceback (most recent call last):
...
AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
""")
add_newdoc('numpy.core.multiarray', 'datetime_data',
"""
datetime_data(dtype, /)
Get information about the step size of a date or time type.
The returned tuple can be passed as the second argument of `numpy.datetime64` and
`numpy.timedelta64`.
Parameters
----------
dtype : dtype
The dtype object, which must be a `datetime64` or `timedelta64` type.
Returns
-------
unit : str
The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype
is based.
count : int
The number of base units in a step.
Examples
--------
>>> dt_25s = np.dtype('timedelta64[25s]')
>>> np.datetime_data(dt_25s)
('s', 25)
>>> np.array(10, dt_25s).astype('timedelta64[s]')
array(250, dtype='timedelta64[s]')
The result can be used to construct a datetime that uses the same units
as a timedelta
>>> np.datetime64('2010', np.datetime_data(dt_25s))
numpy.datetime64('2010-01-01T00:00:00','25s')
""")
##############################################################################
#
# Documentation for `generic` attributes and methods
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'generic',
"""
Base class for numpy scalar types.
Class from which most (all?) numpy scalar types are derived. For
consistency, exposes the same API as `ndarray`, despite many
consequent attributes being either "get-only," or completely irrelevant.
This is the class from which it is strongly suggested users should derive
custom scalar types.
""")
# Attributes
def refer_to_array_attribute(attr, method=True):
docstring = """
Scalar {} identical to the corresponding array attribute.
Please see `ndarray.{}`.
"""
return attr, docstring.format("method" if method else "attribute", attr)
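# Illustrative sketch (not part of the upstream file): what the helper above
# produces -- a (name, docstring) tuple in the shape add_newdoc expects. The
# _demo_* name is a hypothetical helper.
def _demo_refer_to_array_attribute():
    attr, doc = refer_to_array_attribute('mean')
    assert attr == 'mean'
    assert 'Scalar method' in doc and 'ndarray.mean' in doc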
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('T', method=False))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('base', method=False))
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
"""Pointer to start of data."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
"""Get array data-descriptor."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
"""The integer value of flags."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
"""A 1-D view of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
"""The imaginary part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
"""The length of one element in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
"""The length of the scalar in bytes."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
"""The number of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('real',
"""The real part of the scalar."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
"""Tuple of array dimensions."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('size',
"""The number of elements in the gentype."""))
add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
"""Tuple of bytes steps in each dimension."""))
# Methods
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('all'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('any'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('argmax'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('argmin'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('argsort'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('astype'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('byteswap'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('choose'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('clip'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('compress'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('conjugate'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('copy'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('cumprod'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('cumsum'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('diagonal'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('dump'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('dumps'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('fill'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('flatten'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('getfield'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('item'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('itemset'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('max'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('mean'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('min'))
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
"""
newbyteorder(new_order='S', /)
Return a new `dtype` with a different byte order.
Changes are also made in all fields and sub-arrays of the data type.
The `new_order` code can be any from the following:
* 'S' - swap dtype from current to opposite endian
* {'<', 'little'} - little endian
* {'>', 'big'} - big endian
* '=' - native order
* {'|', 'I'} - ignore (no change to byte order)
Parameters
----------
new_order : str, optional
Byte order to force; a value from the byte order specifications
above. The default value ('S') results in swapping the current
byte order.
Returns
-------
new_dtype : dtype
New `dtype` object with the given change to the byte order.
"""))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('nonzero'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('prod'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('ptp'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('put'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('ravel'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('repeat'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('reshape'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('resize'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('round'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('searchsorted'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('setfield'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('setflags'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('sort'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('squeeze'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('std'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('sum'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('swapaxes'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('take'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('tofile'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('tolist'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('tostring'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('trace'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('transpose'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('var'))
add_newdoc('numpy.core.numerictypes', 'generic',
refer_to_array_attribute('view'))
##############################################################################
#
# Documentation for scalar type abstract base classes in type hierarchy
#
##############################################################################
add_newdoc('numpy.core.numerictypes', 'number',
"""
Abstract base class of all numeric scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'integer',
"""
Abstract base class of all integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'signedinteger',
"""
Abstract base class of all signed integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'unsignedinteger',
"""
Abstract base class of all unsigned integer scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'inexact',
"""
Abstract base class of all numeric scalar types with a (potentially)
inexact representation of the values in its range, such as
floating-point numbers.
""")
add_newdoc('numpy.core.numerictypes', 'floating',
"""
Abstract base class of all floating-point scalar types.
""")
add_newdoc('numpy.core.numerictypes', 'complexfloating',
"""
Abstract base class of all complex number scalar types that are made up of
floating-point numbers.
""")
add_newdoc('numpy.core.numerictypes', 'flexible',
"""
Abstract base class of all scalar types without predefined length.
The actual size of these types depends on the specific `np.dtype`
instantiation.
""")
add_newdoc('numpy.core.numerictypes', 'character',
"""
Abstract base class of all character string scalar types.
""")
| bsd-3-clause |
agry/NGECore2 | scripts/mobiles/corellia/corsec_special_ops_inspector.py | 2 | 2171 | import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('corsec_inspector_sergeant_aggro')
mobileTemplate.setLevel(81)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("aggro corsec")
mobileTemplate.setAssistRange(10)
mobileTemplate.setStalker(True)
mobileTemplate.setOptionsBitmask(Options.AGGRESSIVE | Options.ATTACKABLE)
templates = Vector()
templates.add('object/mobile/shared_dressed_corsec_captain_human_female_01.iff')
templates.add('object/mobile/shared_dressed_corsec_captain_human_male_01.iff')
templates.add('object/mobile/shared_dressed_corsec_captain_human_male_02.iff')
templates.add('object/mobile/shared_dressed_corsec_captain_human_male_03.iff')
templates.add('object/mobile/shared_dressed_corsec_officer_human_female_01.iff')
templates.add('object/mobile/shared_dressed_corsec_officer_human_male_01.iff')
templates.add('object/mobile/shared_dressed_corsec_detective_human_female_01.iff')
templates.add('object/mobile/shared_dressed_corsec_detective_human_male_01.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_cdef.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
lootPoolNames_1 = ['Junk']
lootPoolChances_1 = [100]
lootGroupChance_1 = 100
mobileTemplate.addToLootGroups(lootPoolNames_1,lootPoolChances_1,lootGroupChance_1)
core.spawnService.addMobileTemplate('corsec_special_ops_inspector', mobileTemplate)
return | lgpl-3.0 |
darren-wang/gl | glance/tests/unit/test_cached_images.py | 20 | 4719 | # Copyright (C) 2013 Yahoo! Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
import webob
from glance.api import cached_images
from glance.api import policy
from glance.common import exception
from glance import image_cache
class FakePolicyEnforcer(policy.Enforcer):
def __init__(self):
self.default_rule = ''
self.policy_path = ''
self.policy_file_mtime = None
self.policy_file_contents = None
def enforce(self, context, action, target):
return 'pass'
def check(rule, target, creds, exc=None, *args, **kwargs):
return 'pass'
def _check(self, context, rule, target, *args, **kwargs):
return 'pass'
class FakeCache(image_cache.ImageCache):
def __init__(self):
self.init_driver()
self.deleted_images = []
def init_driver(self):
pass
def get_cached_images(self):
return {'id': 'test'}
def delete_cached_image(self, image_id):
self.deleted_images.append(image_id)
def delete_all_cached_images(self):
self.delete_cached_image(self.get_cached_images().get('id'))
return 1
def get_queued_images(self):
return {'test': 'passed'}
def queue_image(self, image_id):
return 'pass'
def delete_queued_image(self, image_id):
self.deleted_images.append(image_id)
def delete_all_queued_images(self):
self.delete_queued_image('deleted_img')
return 1
class FakeController(cached_images.Controller):
def __init__(self):
self.cache = FakeCache()
self.policy = FakePolicyEnforcer()
class TestController(testtools.TestCase):
def test_initialization_without_conf(self):
self.assertRaises(exception.BadDriverConfiguration,
cached_images.Controller)
class TestCachedImages(testtools.TestCase):
def setUp(self):
super(TestCachedImages, self).setUp()
test_controller = FakeController()
self.controller = test_controller
def test_get_cached_images(self):
req = webob.Request.blank('')
req.context = 'test'
result = self.controller.get_cached_images(req)
self.assertEqual({'cached_images': {'id': 'test'}}, result)
def test_delete_cached_image(self):
req = webob.Request.blank('')
req.context = 'test'
self.controller.delete_cached_image(req, image_id='test')
self.assertEqual(['test'], self.controller.cache.deleted_images)
def test_delete_cached_images(self):
req = webob.Request.blank('')
req.context = 'test'
self.assertEqual({'num_deleted': 1},
self.controller.delete_cached_images(req))
self.assertEqual(['test'], self.controller.cache.deleted_images)
def test_policy_enforce_forbidden(self):
def fake_enforce(context, action, target):
raise exception.Forbidden()
self.controller.policy.enforce = fake_enforce
req = webob.Request.blank('')
req.context = 'test'
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.get_cached_images, req)
def test_get_queued_images(self):
req = webob.Request.blank('')
req.context = 'test'
result = self.controller.get_queued_images(req)
self.assertEqual({'queued_images': {'test': 'passed'}}, result)
def test_queue_image(self):
req = webob.Request.blank('')
req.context = 'test'
self.controller.queue_image(req, image_id='test1')
def test_delete_queued_image(self):
req = webob.Request.blank('')
req.context = 'test'
self.controller.delete_queued_image(req, 'deleted_img')
self.assertEqual(['deleted_img'],
self.controller.cache.deleted_images)
def test_delete_queued_images(self):
req = webob.Request.blank('')
req.context = 'test'
self.assertEqual({'num_deleted': 1},
self.controller.delete_queued_images(req))
self.assertEqual(['deleted_img'],
self.controller.cache.deleted_images)
| apache-2.0 |
azunite/gyp_20150930 | test/actions-multiple-outputs-with-dependencies/gyptest-action.py | 34 | 1172 | #!/usr/bin/env python
# Copyright (c) 2015 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies actions with multiple outputs & dependencies will correctly rebuild.
This is a regression test for crrev.com/1177163002.
"""
import TestGyp
import os
import sys
import time
if sys.platform in ('darwin', 'win32'):
print "This test is currently disabled: https://crbug.com/483696."
sys.exit(0)
test = TestGyp.TestGyp()
TESTDIR='relocate/src'
test.run_gyp('action.gyp', chdir='src')
test.relocate('src', TESTDIR)
def build_and_check(content):
test.write(TESTDIR + '/input.txt', content)
test.build('action.gyp', 'upper', chdir=TESTDIR)
test.built_file_must_match('result.txt', content, chdir=TESTDIR)
build_and_check('Content for first build.')
# Ninja works with timestamps and the test above is fast enough that the
# 'updated' file may end up with the same timestamp as the original, meaning
# that ninja may not always recognize the input file has changed.
if test.format == 'ninja':
time.sleep(1)
build_and_check('An updated input file.')
test.pass_test()
| bsd-3-clause |
vikatory/kbengine | kbe/res/scripts/common/Lib/test/test_codecencodings_jp.py | 88 | 4981 | #
# test_codecencodings_jp.py
# Codec encoding tests for Japanese encodings.
#
from test import support
from test import multibytecodec_support
import unittest
class Test_CP932(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'cp932'
tstring = multibytecodec_support.load_teststring('shift_jis')
codectests = (
# invalid bytes
(b"abc\x81\x00\x81\x00\x82\x84", "strict", None),
(b"abc\xf8", "strict", None),
(b"abc\x81\x00\x82\x84", "replace", "abc\ufffd\x00\uff44"),
(b"abc\x81\x00\x82\x84\x88", "replace", "abc\ufffd\x00\uff44\ufffd"),
(b"abc\x81\x00\x82\x84", "ignore", "abc\x00\uff44"),
(b"ab\xEBxy", "replace", "ab\uFFFDxy"),
(b"ab\xF0\x39xy", "replace", "ab\uFFFD9xy"),
(b"ab\xEA\xF0xy", "replace", 'ab\ufffd\ue038y'),
# sjis vs cp932
(b"\\\x7e", "replace", "\\\x7e"),
(b"\x81\x5f\x81\x61\x81\x7c", "replace", "\uff3c\u2225\uff0d"),
)
euc_commontests = (
# invalid bytes
(b"abc\x80\x80\xc1\xc4", "strict", None),
(b"abc\x80\x80\xc1\xc4", "replace", "abc\ufffd\ufffd\u7956"),
(b"abc\x80\x80\xc1\xc4\xc8", "replace", "abc\ufffd\ufffd\u7956\ufffd"),
(b"abc\x80\x80\xc1\xc4", "ignore", "abc\u7956"),
(b"abc\xc8", "strict", None),
(b"abc\x8f\x83\x83", "replace", "abc\ufffd\ufffd\ufffd"),
(b"\x82\xFCxy", "replace", "\ufffd\ufffdxy"),
(b"\xc1\x64", "strict", None),
(b"\xa1\xc0", "strict", "\uff3c"),
(b"\xa1\xc0\\", "strict", "\uff3c\\"),
(b"\x8eXY", "replace", "\ufffdXY"),
)
class Test_EUC_JIS_2004(multibytecodec_support.TestBase,
unittest.TestCase):
encoding = 'euc_jis_2004'
tstring = multibytecodec_support.load_teststring('euc_jisx0213')
codectests = euc_commontests
xmlcharnametest = (
"\xab\u211c\xbb = \u2329\u1234\u232a",
b"\xa9\xa8ℜ\xa9\xb2 = ⟨ሴ⟩"
)
class Test_EUC_JISX0213(multibytecodec_support.TestBase,
unittest.TestCase):
encoding = 'euc_jisx0213'
tstring = multibytecodec_support.load_teststring('euc_jisx0213')
codectests = euc_commontests
xmlcharnametest = (
"\xab\u211c\xbb = \u2329\u1234\u232a",
b"\xa9\xa8ℜ\xa9\xb2 = ⟨ሴ⟩"
)
class Test_EUC_JP_COMPAT(multibytecodec_support.TestBase,
unittest.TestCase):
encoding = 'euc_jp'
tstring = multibytecodec_support.load_teststring('euc_jp')
codectests = euc_commontests + (
("\xa5", "strict", b"\x5c"),
("\u203e", "strict", b"\x7e"),
)
shiftjis_commonenctests = (
(b"abc\x80\x80\x82\x84", "strict", None),
(b"abc\xf8", "strict", None),
(b"abc\x80\x80\x82\x84def", "ignore", "abc\uff44def"),
)
class Test_SJIS_COMPAT(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'shift_jis'
tstring = multibytecodec_support.load_teststring('shift_jis')
codectests = shiftjis_commonenctests + (
(b"abc\x80\x80\x82\x84", "replace", "abc\ufffd\ufffd\uff44"),
(b"abc\x80\x80\x82\x84\x88", "replace", "abc\ufffd\ufffd\uff44\ufffd"),
(b"\\\x7e", "strict", "\\\x7e"),
(b"\x81\x5f\x81\x61\x81\x7c", "strict", "\uff3c\u2016\u2212"),
(b"abc\x81\x39", "replace", "abc\ufffd9"),
(b"abc\xEA\xFC", "replace", "abc\ufffd\ufffd"),
(b"abc\xFF\x58", "replace", "abc\ufffdX"),
)
class Test_SJIS_2004(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'shift_jis_2004'
tstring = multibytecodec_support.load_teststring('shift_jis')
codectests = shiftjis_commonenctests + (
(b"\\\x7e", "strict", "\xa5\u203e"),
(b"\x81\x5f\x81\x61\x81\x7c", "strict", "\\\u2016\u2212"),
(b"abc\xEA\xFC", "strict", "abc\u64bf"),
(b"\x81\x39xy", "replace", "\ufffd9xy"),
(b"\xFF\x58xy", "replace", "\ufffdXxy"),
(b"\x80\x80\x82\x84xy", "replace", "\ufffd\ufffd\uff44xy"),
(b"\x80\x80\x82\x84\x88xy", "replace", "\ufffd\ufffd\uff44\u5864y"),
(b"\xFC\xFBxy", "replace", '\ufffd\u95b4y'),
)
xmlcharnametest = (
"\xab\u211c\xbb = \u2329\u1234\u232a",
b"\x85Gℜ\x85Q = ⟨ሴ⟩"
)
class Test_SJISX0213(multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'shift_jisx0213'
tstring = multibytecodec_support.load_teststring('shift_jisx0213')
codectests = shiftjis_commonenctests + (
(b"abc\x80\x80\x82\x84", "replace", "abc\ufffd\ufffd\uff44"),
(b"abc\x80\x80\x82\x84\x88", "replace", "abc\ufffd\ufffd\uff44\ufffd"),
# sjis vs cp932
(b"\\\x7e", "replace", "\xa5\u203e"),
(b"\x81\x5f\x81\x61\x81\x7c", "replace", "\x5c\u2016\u2212"),
)
xmlcharnametest = (
"\xab\u211c\xbb = \u2329\u1234\u232a",
b"\x85Gℜ\x85Q = ⟨ሴ⟩"
)
def test_main():
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
flavoi/diventi | diventi/ebooks/views.py | 1 | 3556 | from django.shortcuts import get_object_or_404
from django.views.generic.detail import DetailView
from django.views import View
from django.utils.translation import (
get_language,
gettext_lazy as _
)
from django.contrib.auth.mixins import (
LoginRequiredMixin,
UserPassesTestMixin
)
from django.http import Http404
from django.conf import settings
from diventi.products.models import Product
from .models import Book
from .utils import (
parse_dropbox_paper_soup,
make_paper_toc,
)
class UserHasProductMixin(UserPassesTestMixin):
"""
This view checks if the user has bought the product
related to the requested book.
    It assumes the slug of the book object is available in the
    book_slug URL keyword argument.
"""
permission_denied_message = _('This book is not in your collection, please check your profile.')
def test_func(self):
book_slug = self.kwargs.get('book_slug', None)
book = get_object_or_404(Book, slug=book_slug)
product = book.book_product
user_has_bought_test = product.user_has_already_bought(self.request.user) or product.user_has_authored(self.request.user)
if not user_has_bought_test:
self.permission_denied_message = _('This book is not in your collection, please check your profile.')
book_is_published_test = book.is_published()
if not book_is_published_test:
self.permission_denied_message = _('This book is not yet available, please check back later.')
return user_has_bought_test and book_is_published_test
class EbookView(View):
"""
Generic view that manages context data for ebooks.
    It assumes the slug of the book object is available in the
    book_slug URL keyword argument.
"""
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
book_slug = self.kwargs.get('book_slug', None)
book = get_object_or_404(Book, slug=book_slug)
context['book'] = book
return context
class BookDetailView(LoginRequiredMixin, UserHasProductMixin,
EbookView, DetailView):
""" Returns the digital content of a product. """
model = Book
slug_url_kwarg = 'book_slug'
def get_queryset(self, **kwargs):
queryset = super().get_queryset(**kwargs)
return queryset.published().product()
def get_template_names(self):
return ['ebooks/book_detail_%s.html' % self.object.template]
from django.utils.safestring import mark_safe
class PaperEbookView(BookDetailView):
""" Renders an ebook from a paper document """
def get_object(self, queryset=None):
obj = super(PaperEbookView, self).get_object(queryset)
if not obj.paper_id:
raise Http404(_('This book is not linked to a paper, please contact the authors.'))
return obj
def get_template_names(self):
return ['ebooks/book_detail_quick.html', ]
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
current_lan = get_language()
paper_filename = 'ebooks/partials/book_paper_{}_{}.html'.format(self.object.id, current_lan)
paper_soup = parse_dropbox_paper_soup(paper_filename)
# context['paper_title'] = paper_soup.select_one('.ace-line').extract().get_text()
context['paper_title'] = self.object.title
context['paper_toc'] = make_paper_toc(paper_soup)
context['book_paper'] = paper_filename
return context | apache-2.0 |
GitHublong/hue | desktop/core/ext-py/django-extensions-1.5.0/django_extensions/tests/uuid_field.py | 42 | 2384 | import re
import uuid
import six
from django_extensions.db.fields import PostgreSQLUUIDField
from django_extensions.tests.fields import FieldTestCase
from django_extensions.tests.testapp.models import UUIDTestModel_field, UUIDTestModel_pk, UUIDTestAgregateModel, UUIDTestManyToManyModel
class UUIDFieldTest(FieldTestCase):
def testUUIDFieldCreate(self):
j = UUIDTestModel_field.objects.create(a=6, uuid_field=six.u('550e8400-e29b-41d4-a716-446655440000'))
self.assertEqual(j.uuid_field, six.u('550e8400-e29b-41d4-a716-446655440000'))
def testUUIDField_pkCreate(self):
j = UUIDTestModel_pk.objects.create(uuid_field=six.u('550e8400-e29b-41d4-a716-446655440000'))
self.assertEqual(j.uuid_field, six.u('550e8400-e29b-41d4-a716-446655440000'))
self.assertEqual(j.pk, six.u('550e8400-e29b-41d4-a716-446655440000'))
def testUUIDField_pkAgregateCreate(self):
j = UUIDTestAgregateModel.objects.create(a=6, uuid_field=six.u('550e8400-e29b-41d4-a716-446655440001'))
self.assertEqual(j.a, 6)
self.assertIsInstance(j.pk, six.string_types)
self.assertEqual(len(j.pk), 36)
def testUUIDFieldManyToManyCreate(self):
j = UUIDTestManyToManyModel.objects.create(uuid_field=six.u('550e8400-e29b-41d4-a716-446655440010'))
self.assertEqual(j.uuid_field, six.u('550e8400-e29b-41d4-a716-446655440010'))
self.assertEqual(j.pk, six.u('550e8400-e29b-41d4-a716-446655440010'))
class PostgreSQLUUIDFieldTest(FieldTestCase):
def test_uuid_casting(self):
        # As explained by the postgres documentation
        # http://www.postgresql.org/docs/9.1/static/datatype-uuid.html
        # a uuid needs to be a sequence of lower-case hexadecimal digits, in
# several groups separated by hyphens, specifically a group of 8 digits
# followed by three groups of 4 digits followed by a group of 12 digits
matcher = re.compile('^[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}'
'-[\da-f]{12}$')
field = PostgreSQLUUIDField()
for value in (str(uuid.uuid4()), uuid.uuid4().urn, uuid.uuid4().hex,
uuid.uuid4().int, uuid.uuid4().bytes):
prepared_value = field.get_db_prep_value(value, None)
self.assertTrue(matcher.match(prepared_value) is not None,
prepared_value)
| apache-2.0 |
40023256/2015cdag1man | static/Brython3.1.0-20150301-090019/Lib/unittest/test/support.py | 770 | 3379 | import unittest
class TestEquality(object):
"""Used as a mixin for TestCase"""
# Check for a valid __eq__ implementation
def test_eq(self):
for obj_1, obj_2 in self.eq_pairs:
self.assertEqual(obj_1, obj_2)
self.assertEqual(obj_2, obj_1)
# Check for a valid __ne__ implementation
def test_ne(self):
for obj_1, obj_2 in self.ne_pairs:
self.assertNotEqual(obj_1, obj_2)
self.assertNotEqual(obj_2, obj_1)
class TestHashing(object):
"""Used as a mixin for TestCase"""
# Check for a valid __hash__ implementation
def test_hash(self):
for obj_1, obj_2 in self.eq_pairs:
try:
if not hash(obj_1) == hash(obj_2):
self.fail("%r and %r do not hash equal" % (obj_1, obj_2))
except KeyboardInterrupt:
raise
except Exception as e:
self.fail("Problem hashing %r and %r: %s" % (obj_1, obj_2, e))
for obj_1, obj_2 in self.ne_pairs:
try:
if hash(obj_1) == hash(obj_2):
self.fail("%s and %s hash equal, but shouldn't" %
(obj_1, obj_2))
except KeyboardInterrupt:
raise
except Exception as e:
self.fail("Problem hashing %s and %s: %s" % (obj_1, obj_2, e))
class LoggingResult(unittest.TestResult):
def __init__(self, log):
self._events = log
super().__init__()
def startTest(self, test):
self._events.append('startTest')
super().startTest(test)
def startTestRun(self):
self._events.append('startTestRun')
super(LoggingResult, self).startTestRun()
def stopTest(self, test):
self._events.append('stopTest')
super().stopTest(test)
def stopTestRun(self):
self._events.append('stopTestRun')
super(LoggingResult, self).stopTestRun()
def addFailure(self, *args):
self._events.append('addFailure')
super().addFailure(*args)
def addSuccess(self, *args):
self._events.append('addSuccess')
super(LoggingResult, self).addSuccess(*args)
def addError(self, *args):
self._events.append('addError')
super().addError(*args)
def addSkip(self, *args):
self._events.append('addSkip')
super(LoggingResult, self).addSkip(*args)
def addExpectedFailure(self, *args):
self._events.append('addExpectedFailure')
super(LoggingResult, self).addExpectedFailure(*args)
def addUnexpectedSuccess(self, *args):
self._events.append('addUnexpectedSuccess')
super(LoggingResult, self).addUnexpectedSuccess(*args)
class ResultWithNoStartTestRunStopTestRun(object):
"""An object honouring TestResult before startTestRun/stopTestRun."""
def __init__(self):
self.failures = []
self.errors = []
self.testsRun = 0
self.skipped = []
self.expectedFailures = []
self.unexpectedSuccesses = []
self.shouldStop = False
def startTest(self, test):
pass
def stopTest(self, test):
pass
def addError(self, test):
pass
def addFailure(self, test):
pass
def addSuccess(self, test):
pass
def wasSuccessful(self):
return True
| gpl-3.0 |
mahak/cinder | cinder/volume/drivers/datera/datera_common.py | 2 | 11881 | # Copyright 2020 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import random
import re
import string
import time
import types
import uuid
from glanceclient import exc as glance_exc
from oslo_log import log as logging
from oslo_utils import importutils
from cinder import context
from cinder import exception
from cinder.i18n import _
from cinder.image import glance
from cinder.volume import qos_specs
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
dfs_sdk = importutils.try_import('dfs_sdk')
OS_PREFIX = "OS"
UNMANAGE_PREFIX = "UNMANAGED"
# Taken from this SO post :
# http://stackoverflow.com/a/18516125
# Using old-style string formatting because the regex's {n} quantifiers
# would clash with new-style curly-brace formatting
UUID4_STR_RE = ("%s.*([a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab]"
"[a-f0-9]{3}-?[a-f0-9]{12})")
UUID4_RE = re.compile(UUID4_STR_RE % OS_PREFIX)
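# Illustrative sketch (assumed values, not part of the driver): UUID4_RE
# recovers the Cinder UUID embedded in a Datera resource name, e.g.
#   m = UUID4_RE.match("OS-vol-3f9b1e6a-0d44-4f0e-9be0-1a2b3c4d5e6f")
#   cinder_id = m.group(1) if m else None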
SNAP_RE = re.compile(r"\d{10,}\.\d+")
# Recursive dict to assemble basic url structure for the most common
# API URL endpoints. Most others are constructed from these
DEFAULT_SI_SLEEP = 1
DEFAULT_SI_SLEEP_API_2 = 5
DEFAULT_SNAP_SLEEP = 1
API_VERSIONS = ["2.1", "2.2"]
API_TIMEOUT = 20
VALID_CHARS = set(string.ascii_letters + string.digits + "-_.")
class DateraAPIException(exception.VolumeBackendAPIException):
message = _("Bad response from Datera API")
def get_name(resource):
dn = resource.get('display_name')
cid = resource.get('id')
if dn:
dn = filter_chars(dn)
# Check to ensure the name is short enough to fit. Prioritize
# the prefix and Cinder ID, strip all invalid characters
nl = len(OS_PREFIX) + len(dn) + len(cid) + 2
if nl >= 64:
dn = dn[:-(nl - 63)]
return "-".join((OS_PREFIX, dn, cid))
return "-".join((OS_PREFIX, cid))
def get_unmanaged(name):
return "-".join((UNMANAGE_PREFIX, name))
def filter_chars(s):
if s:
return ''.join([c for c in s if c in VALID_CHARS])
return s
def lookup(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
obj = args[0]
name = "_" + func.__name__ + "_" + obj.apiv.replace(".", "_")
LOG.debug("Trying method: %s", name)
call_id = uuid.uuid4()
if obj.do_profile:
LOG.debug("Profiling method: %s, id %s", name, call_id)
t1 = time.time()
obj.thread_local.trace_id = call_id
result = getattr(obj, name)(*args[1:], **kwargs)
if obj.do_profile:
t2 = time.time()
timedelta = round(t2 - t1, 3)
LOG.debug("Profile for method %s, id %s: %ss",
name, call_id, timedelta)
return result
return wrapper
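# Dispatch sketch for the decorator above (assumed method name): a driver
# method decorated as
#   @lookup
#   def create_volume(self, ...): ...
# resolves at call time to self._create_volume_2_1 or self._create_volume_2_2,
# depending on obj.apiv.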
def _parse_vol_ref(ref):
if ref.count(":") not in (2, 3):
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format: "
"tenant:app_inst_name:storage_inst_name:vol_name or "
"app_inst_name:storage_inst_name:vol_name"))
try:
(tenant, app_inst_name, storage_inst_name,
vol_name) = ref.split(":")
if tenant == "root":
tenant = None
except (TypeError, ValueError):
app_inst_name, storage_inst_name, vol_name = ref.split(
":")
tenant = None
return app_inst_name, storage_inst_name, vol_name, tenant
def _check_snap_ref(ref):
if not SNAP_RE.match(ref):
raise exception.ManageExistingInvalidReference(
_("existing_ref argument must be of this format: "
"1234567890.12345678"))
return True
def _get_size(app_inst):
"""Helper method for getting the size of a backend object
    Parses the provided app_inst dict directly to get the size
    instead of making a separate http request
"""
if 'data' in app_inst:
app_inst = app_inst['data']
sis = app_inst['storage_instances']
found_si = sis[0]
found_vol = found_si['volumes'][0]
return found_vol['size']
def _get_volume_type_obj(driver, resource):
type_id = resource.get('volume_type_id', None)
# Handle case of volume with no type. We still want the
# specified defaults from above
if type_id:
ctxt = context.get_admin_context()
volume_type = volume_types.get_volume_type(ctxt, type_id)
else:
volume_type = None
return volume_type
def _get_policies_for_resource(driver, resource):
volume_type = driver._get_volume_type_obj(resource)
return driver._get_policies_for_volume_type(volume_type)
def _get_policies_for_volume_type(driver, volume_type):
"""Get extra_specs and qos_specs of a volume_type.
This fetches the scoped keys from the volume type. Anything set from
qos_specs will override key/values set from extra_specs.
"""
# Handle case of volume with no type. We still want the
# specified defaults from above
if volume_type:
specs = volume_type.get('extra_specs', {})
else:
specs = {}
# Set defaults:
policies = {k.lstrip('DF:'): str(v['default']) for (k, v)
in driver._init_vendor_properties()[0].items()}
if volume_type:
qos_specs_id = volume_type.get('qos_specs_id')
if qos_specs_id is not None:
ctxt = context.get_admin_context()
qos_kvs = qos_specs.get_qos_specs(ctxt, qos_specs_id)['specs']
if qos_kvs:
specs.update(qos_kvs)
# Populate updated value
for key, value in specs.items():
if ':' in key:
fields = key.split(':')
key = fields[1]
policies[key] = value
# Cast everything except booleans int that can be cast
for k, v in policies.items():
# Handle String Boolean case
if v == 'True' or v == 'False':
policies[k] = policies[k] == 'True'
continue
# Int cast
try:
policies[k] = int(v)
except ValueError:
pass
return policies
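# Worked example for the scoped-key handling above (illustrative values):
# an extra_spec of {'DF:replica_count': '3'} ends up as
# policies['replica_count'] = 3, and {'DF:acl_allow_all': 'True'} becomes
# the boolean True.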
def _image_accessible(driver, context, volume, image_meta):
# Determine if image is accessible by current project
pid = volume.get('project_id', '')
public = False
visibility = image_meta.get('visibility', None)
LOG.debug("Image %(image)s visibility: %(vis)s",
{"image": image_meta['id'], "vis": visibility})
if visibility and visibility in ['public', 'community']:
public = True
elif visibility and visibility in ['shared', 'private']:
# Do membership check. Newton and before didn't have a 'shared'
# visibility option, so we have to do this check for 'private'
# as well
gclient = glance.get_default_image_service()
members = []
# list_members is only available in Rocky+
try:
members = gclient.list_members(context, image_meta['id'])
except AttributeError:
# This is the fallback method for the same query
try:
members = gclient._client.call(context,
'list',
controller='image_members',
image_id=image_meta['id'])
except glance_exc.HTTPForbidden as e:
LOG.warning(e)
except glance_exc.HTTPForbidden as e:
LOG.warning(e)
members = list(members)
LOG.debug("Shared image %(image)s members: %(members)s",
{"image": image_meta['id'], "members": members})
for member in members:
if (member['member_id'] == pid and
member['status'] == 'accepted'):
public = True
break
if image_meta.get('is_public', False):
public = True
else:
if image_meta.get('owner', '') == pid:
public = True
if not public:
LOG.warning("Requested image is not "
"accessible by current Project.")
return public
def _format_tenant(tenant):
if tenant == "all" or (tenant and ('/root' in tenant or 'root' in tenant)):
return '/root'
elif tenant and ('/root' not in tenant and 'root' not in tenant):
return "/" + "/".join(('root', tenant)).strip('/')
return tenant
def get_ip_pool(policies):
ip_pool = policies['ip_pool']
if ',' in ip_pool:
ip_pools = ip_pool.split(',')
ip_pool = random.choice(ip_pools)
return ip_pool
def create_tenant(driver, project_id):
if driver.tenant_id.lower() == 'map':
name = get_name({'id': project_id})
elif driver.tenant_id:
name = driver.tenant_id.replace('root', '').strip('/')
else:
name = 'root'
if name:
try:
driver.api.tenants.create(name=name)
except dfs_sdk.exceptions.ApiConflictError:
LOG.debug("Tenant %s already exists", name)
return _format_tenant(name)
def get_tenant(driver, project_id):
if driver.tenant_id.lower() == 'map':
return _format_tenant(get_name({'id': project_id}))
elif not driver.tenant_id:
return _format_tenant('root')
return _format_tenant(driver.tenant_id)
def cvol_to_ai(driver, resource, tenant=None):
if not tenant:
tenant = get_tenant(driver, resource['project_id'])
try:
# api.tenants.get needs a non '/'-prefixed tenant id
driver.api.tenants.get(tenant.strip('/'))
except dfs_sdk.exceptions.ApiNotFoundError:
create_tenant(driver, resource['project_id'])
cid = resource.get('id', None)
if not cid:
raise ValueError('Unsure what id key to use for object', resource)
ais = driver.api.app_instances.list(
filter='match(name,.*{}.*)'.format(cid),
tenant=tenant)
if not ais:
raise exception.VolumeNotFound(volume_id=cid)
return ais[0]
def cvol_to_dvol(driver, resource, tenant=None):
if not tenant:
tenant = get_tenant(driver, resource['project_id'])
ai = cvol_to_ai(driver, resource, tenant=tenant)
si = ai.storage_instances.list(tenant=tenant)[0]
vol = si.volumes.list(tenant=tenant)[0]
return vol
def _version_to_int(ver):
# Using a factor of 100 per digit so up to 100 versions are supported
# per major/minor/patch/subpatch digit in this calculation
# Example:
# In [2]: _version_to_int("3.3.0.0")
# Out[2]: 303000000
# In [3]: _version_to_int("2.2.7.1")
# Out[3]: 202070100
VERSION_DIGITS = 4
factor = pow(10, VERSION_DIGITS * 2)
div = pow(10, 2)
val = 0
for c in ver.split("."):
val += int(int(c) * factor)
factor /= div
return val
def dat_version_gte(version_a, version_b):
return _version_to_int(version_a) >= _version_to_int(version_b)
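# e.g. dat_version_gte("3.3.0.0", "2.2.7.1") is True, since the versions
# convert to 303000000 and 202070100 respectively (see _version_to_int).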
def register_driver(driver):
for func in [_get_volume_type_obj,
_get_policies_for_resource,
_get_policies_for_volume_type,
_image_accessible,
get_tenant,
create_tenant,
cvol_to_ai,
cvol_to_dvol]:
f = types.MethodType(func, driver)
setattr(driver, func.__name__, f)
| apache-2.0 |
mgax/aleph | aleph/model/selector.py | 3 | 1189 | import logging
from datetime import datetime
from sqlalchemy.ext.hybrid import hybrid_property
from normality import normalize
from aleph.core import db
log = logging.getLogger(__name__)
class Selector(db.Model):
id = db.Column(db.Integer, primary_key=True)
_text = db.Column('text', db.Unicode, index=True)
normalized = db.Column(db.Unicode, index=True)
created_at = db.Column(db.DateTime, default=datetime.utcnow)
updated_at = db.Column(db.DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow)
entity_id = db.Column(db.Unicode(50), db.ForeignKey('entity.id'))
entity = db.relationship('Entity', backref=db.backref('selectors',
lazy='dynamic', cascade='all, delete-orphan')) # noqa
@hybrid_property
def text(self):
return self._text
@text.setter
def text(self, text):
self._text = text
self.normalized = self.normalize(text)
@classmethod
def normalize(cls, text):
return normalize(text)
def __repr__(self):
return '<Selector(%r, %r)>' % (self.entity_id, self.text)
def __unicode__(self):
return self.text
| mit |
edc-ecuador/edc | qa/rpc-tests/test_framework/netutil.py | 328 | 4562 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Linux network utilities
import sys
import socket
import fcntl
import struct
import array
import os
import binascii
# Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
STATE_ESTABLISHED = '01'
STATE_SYN_SENT = '02'
STATE_SYN_RECV = '03'
STATE_FIN_WAIT1 = '04'
STATE_FIN_WAIT2 = '05'
STATE_TIME_WAIT = '06'
STATE_CLOSE = '07'
STATE_CLOSE_WAIT = '08'
STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = binascii.unhexlify(host)
host_out = ''
for x in range(0, len(host)/4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
'''
Function to return a list with status of tcp connections at linux systems
To get pid of all network process running on system, you must run this script
as superuser
'''
with open('/proc/net/'+typ,'r') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces.
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9]) # Need the inode to match with process pid.
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
'''
Get bind addresses as (host,port) tuples for process pid.
'''
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
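# Usage sketch (illustrative; reading another process's fds needs privileges):
#   addrs = get_bind_addrs(os.getpid())
#   # e.g. [('00000000', 8332)] - hosts come back in the kernel's hex form,
#   # comparable against addr_to_hex() below.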
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
'''
Return all interfaces that are up
'''
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array.array('B', '\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tostring()
return [(namestr[i:i+16].split('\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
'''
Convert string IPv4 or IPv6 address to binary address as returned by
get_bind_addrs.
Very naive implementation that certainly doesn't work for all IPv6 variants.
'''
if '.' in addr: # IPv4
addr = [int(x) for x in addr.split('.')]
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return binascii.hexlify(bytearray(addr))
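# Worked examples for addr_to_hex (illustrative):
#   addr_to_hex('127.0.0.1') -> '7f000001'
#   addr_to_hex('::1')       -> '00000000000000000000000000000001'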
| mit |
LinuxEmbed/LinuxEmbed-RPI-Linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
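# Note on add_stats above: avg is a running smoothed value, recomputed as
# (avg + value) / 2, so it is biased toward recent samples rather than being
# a true arithmetic mean over all count samples.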
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
AthinaB/synnefo | snf-admin-app/synnefo_admin/admin/resources/networks/actions.py | 7 | 3433 | # Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
from collections import OrderedDict
from synnefo.logic.networks import validate_network_action
from synnefo.logic import networks
from synnefo_admin.admin.actions import AdminAction, noop
from synnefo_admin.admin.utils import update_actions_rbac, send_admin_email
class NetworkAction(AdminAction):
"""Class for actions on networks. Derived from AdminAction.
Pre-determined Attributes:
target: network
"""
def __init__(self, name, f, **kwargs):
"""Initialize the class with provided values."""
AdminAction.__init__(self, name=name, target='network', f=f, **kwargs)
def drain_network(network):
network.drained = True
network.save()
def undrain_network(network):
network.drained = False
network.save()
def check_network_action(action):
if action == "CONTACT":
# Contact action is allowed only on private networks. However, this
# function may get called with an AstakosUser as a target. In this
# case, we always confirm the action.
return lambda n: not getattr(n, 'public', False)
elif action == "DRAIN":
return lambda n: not n.drained and not n.deleted and n.public
elif action == "UNDRAIN":
return lambda n: n.drained and not n.deleted and n.public
else:
return lambda n: validate_network_action(n, action)
def generate_actions():
"""Create a list of actions on networks."""
actions = OrderedDict()
actions['drain'] = NetworkAction(name='Drain', f=drain_network,
c=check_network_action('DRAIN'),
caution_level=True,)
actions['undrain'] = NetworkAction(name='Undrain', f=undrain_network,
c=check_network_action('UNDRAIN'),
karma='neutral',)
actions['destroy'] = NetworkAction(name='Destroy', f=networks.delete,
c=check_network_action('DESTROY'),
karma='bad', caution_level='dangerous',)
actions['reassign'] = NetworkAction(name='Reassign to project', f=noop,
karma='neutral',
caution_level='dangerous',)
actions['change_owner'] = NetworkAction(name='Change owner', f=noop,
karma='neutral',
caution_level='dangerous',)
actions['contact'] = NetworkAction(name='Send e-mail', f=send_admin_email,
c=check_network_action("CONTACT"),)
update_actions_rbac(actions)
return actions
cached_actions = generate_actions()
| gpl-3.0 |
Altaf-Mahdi/android_kernel_oneplus_msm8974 | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
xapi-project/sm | drivers/LVHDoISCSISR.py | 1 | 28450 | #!/usr/bin/python
#
# Copyright (C) Citrix Systems Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; version 2.1 only.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# LVHDoISCSISR: LVHD over ISCSI software initiator SR driver
#
from __future__ import print_function
import SR
import LVHDSR
import BaseISCSI
import SRCommand
import util
import scsiutil
import lvutil
import time
import os
import sys
import xs_errors
import xmlrpclib
import mpath_cli
import iscsilib
import glob
import copy
import scsiutil
import xml.dom.minidom
CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_METADATA", "SR_TRIM",
"VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH",
"VDI_GENERATE_CONFIG", "VDI_CLONE", "VDI_SNAPSHOT",
"VDI_RESIZE", "ATOMIC_PAUSE", "VDI_RESET_ON_BOOT/2",
"VDI_UPDATE", "VDI_MIRROR", "VDI_CONFIG_CBT",
"VDI_ACTIVATE", "VDI_DEACTIVATE"]
CONFIGURATION = [['SCSIid', 'The scsi_id of the destination LUN'], \
['target', 'IP address or hostname of the iSCSI target'], \
['targetIQN', 'The IQN of the target LUN group to be attached'], \
['chapuser', 'The username to be used during CHAP authentication'], \
['chappassword', 'The password to be used during CHAP authentication'], \
['incoming_chapuser', 'The incoming username to be used during bi-directional CHAP authentication (optional)'], \
['incoming_chappassword', 'The incoming password to be used during bi-directional CHAP authentication (optional)'], \
['port', 'The network port number on which to query the target'], \
['multihomed', 'Enable multi-homing to this target, true or false (optional, defaults to same value as host.other_config:multipathing)'], \
['usediscoverynumber', 'The specific iscsi record index to use. (optional)'], \
['allocation', 'Valid values are thick or thin (optional, defaults to thick)']]
DRIVER_INFO = {
'name': 'LVHD over iSCSI',
'description': 'SR plugin which represents disks as Logical Volumes within a Volume Group created on an iSCSI LUN',
'vendor': 'Citrix Systems Inc',
'copyright': '(C) 2008 Citrix Systems Inc',
'driver_version': '1.0',
'required_api_version': '1.0',
'capabilities': CAPABILITIES,
'configuration': CONFIGURATION
}
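# Illustrative device-config for this driver (assumed values, not a real
# deployment):
#   {'target': '10.1.1.10', 'targetIQN': 'iqn.2009-01.example:storage',
#    'SCSIid': '360a98000503...', 'port': '3260'}
# When multi-homed, the PBD additionally records
# device_config['multiSession'] as 'ip,port,iqn|ip,port,iqn|...'; the order
# is preserved deliberately for dual-controller targets.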
class LVHDoISCSISR(LVHDSR.LVHDSR):
"""LVHD over ISCSI storage repository"""
def handles(type):
if __name__ == '__main__':
name = sys.argv[0]
else:
name = __name__
if name.endswith("LVMoISCSISR"):
return type == "lvmoiscsi"
if type == "lvhdoiscsi":
return True
return False
handles = staticmethod(handles)
def load(self, sr_uuid):
if not sr_uuid:
# This is a probe call, generate a temp sr_uuid
sr_uuid = util.gen_uuid()
# If this is a vdi command, don't initialise SR
if util.isVDICommand(self.original_srcmd.cmd):
self.SCSIid = self.dconf['SCSIid']
else:
if 'target' in self.original_srcmd.dconf:
self.original_srcmd.dconf['targetlist'] = self.original_srcmd.dconf['target']
iscsi = BaseISCSI.BaseISCSISR(self.original_srcmd, sr_uuid)
self.iscsiSRs = []
self.iscsiSRs.append(iscsi)
saved_exc = None
if self.dconf['target'].find(',') == 0 or self.dconf['targetIQN'] == "*":
# Instantiate multiple sessions
self.iscsiSRs = []
if self.dconf['targetIQN'] == "*":
IQN = "any"
else:
IQN = self.dconf['targetIQN']
dict = {}
IQNstring = ""
IQNs = []
try:
if 'multiSession' in self.dconf:
IQNs = self.dconf['multiSession'].split("|")
for IQN in IQNs:
if IQN:
dict[IQN] = ""
else:
try:
IQNs.remove(IQN)
except:
# Exceptions are not expected but just in case
pass
# Order in multiSession must be preserved. It is important for dual-controllers.
# IQNstring cannot be built with a dictionary iteration because of this
IQNstring = self.dconf['multiSession']
else:
for tgt in self.dconf['target'].split(','):
try:
tgt_ip = util._convertDNS(tgt)
except:
raise xs_errors.XenError('DNSError')
iscsilib.ensure_daemon_running_ok(iscsi.localIQN)
map = iscsilib.discovery(tgt_ip, iscsi.port, iscsi.chapuser, iscsi.chappassword, targetIQN=IQN)
util.SMlog("Discovery for IP %s returned %s" % (tgt, map))
for i in range(0, len(map)):
(portal, tpgt, iqn) = map[i]
(ipaddr, port) = iscsilib.parse_IP_port(portal)
try:
util._testHost(ipaddr, long(port), 'ISCSITarget')
except:
util.SMlog("Target Not reachable: (%s:%s)" % (ipaddr, port))
continue
key = "%s,%s,%s" % (ipaddr, port, iqn)
dict[key] = ""
# Again, do not mess up with IQNs order. Dual controllers will benefit from that
if IQNstring == "":
# Compose the IQNstring first
for key in dict.iterkeys():
IQNstring += "%s|" % key
# Reinitialize and store iterator
key_iterator = dict.iterkeys()
else:
key_iterator = IQNs
# Now load the individual iSCSI base classes
for key in key_iterator:
(ipaddr, port, iqn) = key.split(',')
srcmd_copy = copy.deepcopy(self.original_srcmd)
srcmd_copy.dconf['target'] = ipaddr
srcmd_copy.dconf['targetIQN'] = iqn
srcmd_copy.dconf['multiSession'] = IQNstring
util.SMlog("Setting targetlist: %s" % srcmd_copy.dconf['targetlist'])
self.iscsiSRs.append(BaseISCSI.BaseISCSISR(srcmd_copy, sr_uuid))
pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
if pbd != None and 'multiSession' not in self.dconf:
dconf = self.session.xenapi.PBD.get_device_config(pbd)
dconf['multiSession'] = IQNstring
self.session.xenapi.PBD.set_device_config(pbd, dconf)
except Exception as exc:
util.logException("LVHDoISCSISR.load")
saved_exc = exc
try:
self.iscsi = self.iscsiSRs[0]
except IndexError as exc:
if isinstance(saved_exc, SR.SROSError):
raise saved_exc # pylint: disable-msg=E0702
elif isinstance(saved_exc, Exception):
raise xs_errors.XenError('SMGeneral', str(saved_exc))
else:
raise xs_errors.XenError('SMGeneral', str(exc))
# Be extremely careful not to throw exceptions here since this function
# is the main one used by all operations including probing and creating
pbd = None
try:
pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
except:
pass
# Apart from the upgrade case, user must specify a SCSIid
if 'SCSIid' not in self.dconf:
# Dual controller issue
self.LUNs = {} # Dict for LUNs from all the iscsi objects
for ii in range(0, len(self.iscsiSRs)):
self.iscsi = self.iscsiSRs[ii]
self._LUNprint(sr_uuid)
for key in self.iscsi.LUNs:
self.LUNs[key] = self.iscsi.LUNs[key]
self.print_LUNs_XML()
self.iscsi = self.iscsiSRs[0] # back to original value
raise xs_errors.XenError('ConfigSCSIid')
self.SCSIid = self.dconf['SCSIid']
# This block checks if the first iscsi target contains the right SCSIid.
# If not it scans the other iscsi targets because chances are that more
# than one controller is present
dev_match = False
forced_login = False
# No need to check if only one iscsi target is present
if len(self.iscsiSRs) == 1:
pass
else:
target_success = False
attempt_discovery = False
for iii in range(0, len(self.iscsiSRs)):
# Check we didn't leave any iscsi session open
# If exceptions happened before, the cleanup function has worked on the right target.
if forced_login == True:
try:
iscsilib.ensure_daemon_running_ok(self.iscsi.localIQN)
iscsilib.logout(self.iscsi.target, self.iscsi.targetIQN)
forced_login = False
except:
raise xs_errors.XenError('ISCSILogout')
self.iscsi = self.iscsiSRs[iii]
util.SMlog("path %s" % self.iscsi.path)
util.SMlog("iscsci data: targetIQN %s, portal %s" % (self.iscsi.targetIQN, self.iscsi.target))
iscsilib.ensure_daemon_running_ok(self.iscsi.localIQN)
if not iscsilib._checkTGT(self.iscsi.targetIQN):
attempt_discovery = True
try:
# Ensure iscsi db has been populated
map = iscsilib.discovery(
self.iscsi.target,
self.iscsi.port,
self.iscsi.chapuser,
self.iscsi.chappassword,
targetIQN=self.iscsi.targetIQN)
if len(map) == 0:
util.SMlog("Discovery for iscsi data targetIQN %s,"
" portal %s returned empty list"
" Trying another path if available" %
(self.iscsi.targetIQN,
self.iscsi.target))
continue
except:
util.SMlog("Discovery failed for iscsi data targetIQN"
" %s, portal %s. Trying another path if"
" available" %
(self.iscsi.targetIQN, self.iscsi.target))
continue
try:
iscsilib.login(self.iscsi.target,
self.iscsi.targetIQN,
self.iscsi.chapuser,
self.iscsi.chappassword,
self.iscsi.incoming_chapuser,
self.iscsi.incoming_chappassword,
self.mpath == "true")
except:
util.SMlog("Login failed for iscsi data targetIQN %s,"
" portal %s. Trying another path"
" if available" %
(self.iscsi.targetIQN, self.iscsi.target))
continue
target_success = True
forced_login = True
# A session should be active.
if not util.wait_for_path(self.iscsi.path, BaseISCSI.MAX_TIMEOUT):
util.SMlog("%s has no associated LUNs" % self.iscsi.targetIQN)
continue
scsiid_path = "/dev/disk/by-id/scsi-" + self.SCSIid
if not util.wait_for_path(scsiid_path, BaseISCSI.MAX_TIMEOUT):
util.SMlog("%s not found" % scsiid_path)
continue
for file in filter(self.iscsi.match_lun, util.listdir(self.iscsi.path)):
lun_path = os.path.join(self.iscsi.path, file)
lun_dev = scsiutil.getdev(lun_path)
try:
lun_scsiid = scsiutil.getSCSIid(lun_dev)
except:
util.SMlog("getSCSIid failed on %s in iscsi %s: LUN"
" offline or iscsi path down" %
(lun_dev, self.iscsi.path))
continue
util.SMlog("dev from lun %s %s" % (lun_dev, lun_scsiid))
if lun_scsiid == self.SCSIid:
util.SMlog("lun match in %s" % self.iscsi.path)
dev_match = True
# No more need to raise ISCSITarget exception.
# Resetting attempt_discovery
attempt_discovery = False
break
if dev_match:
if iii == 0:
break
util.SMlog("IQN reordering needed")
new_iscsiSRs = []
IQNs = {}
IQNstring = ""
# iscsiSRs can be seen as a circular buffer: the head now is the matching one
for kkk in range(iii, len(self.iscsiSRs)) + range(0, iii):
new_iscsiSRs.append(self.iscsiSRs[kkk])
ipaddr = self.iscsiSRs[kkk].target
port = self.iscsiSRs[kkk].port
iqn = self.iscsiSRs[kkk].targetIQN
key = "%s,%s,%s" % (ipaddr, port, iqn)
# The final string must preserve the order without repetition
if key not in IQNs:
IQNs[key] = ""
IQNstring += "%s|" % key
util.SMlog("IQNstring is now %s" % IQNstring)
self.iscsiSRs = new_iscsiSRs
util.SMlog("iqn %s is leading now" % self.iscsiSRs[0].targetIQN)
# Updating pbd entry, if any
try:
pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
if pbd != None and 'multiSession' in self.dconf:
util.SMlog("Updating multiSession in PBD")
dconf = self.session.xenapi.PBD.get_device_config(pbd)
dconf['multiSession'] = IQNstring
self.session.xenapi.PBD.set_device_config(pbd, dconf)
except:
pass
break
if not target_success and attempt_discovery:
raise xs_errors.XenError('ISCSITarget')
# Check for any unneeded open iscsi sessions
if forced_login == True:
try:
iscsilib.ensure_daemon_running_ok(self.iscsi.localIQN)
iscsilib.logout(self.iscsi.target, self.iscsi.targetIQN)
forced_login = False
except:
raise xs_errors.XenError('ISCSILogout')
LVHDSR.LVHDSR.load(self, sr_uuid)
def print_LUNs_XML(self):
dom = xml.dom.minidom.Document()
element = dom.createElement("iscsi-target")
dom.appendChild(element)
for uuid in self.LUNs:
val = self.LUNs[uuid]
entry = dom.createElement('LUN')
element.appendChild(entry)
for attr in ('vendor', 'serial', 'LUNid', \
'size', 'SCSIid'):
try:
aval = getattr(val, attr)
except AttributeError:
continue
if aval:
subentry = dom.createElement(attr)
entry.appendChild(subentry)
textnode = dom.createTextNode(str(aval))
subentry.appendChild(textnode)
print(dom.toprettyxml(), file=sys.stderr)
def _getSCSIid_from_LUN(self, sr_uuid):
was_attached = True
self.iscsi.attach(sr_uuid)
dev = self.dconf['LUNid'].split(',')
if len(dev) > 1:
raise xs_errors.XenError('LVMOneLUN')
path = os.path.join(self.iscsi.path, "LUN%s" % dev[0])
if not util.wait_for_path(path, BaseISCSI.MAX_TIMEOUT):
util.SMlog("Unable to detect LUN attached to host [%s]" % path)
try:
SCSIid = scsiutil.getSCSIid(path)
except:
raise xs_errors.XenError('InvalidDev')
self.iscsi.detach(sr_uuid)
return SCSIid
def _LUNprint(self, sr_uuid):
if self.iscsi.attached:
# Force a rescan on the bus.
self.iscsi.refresh()
# time.sleep(5)
# Now call attach (handles the refcounting + session activa)
self.iscsi.attach(sr_uuid)
util.SMlog("LUNprint: waiting for path: %s" % self.iscsi.path)
if util.wait_for_path("%s/LUN*" % self.iscsi.path, BaseISCSI.MAX_TIMEOUT):
try:
adapter = self.iscsi.adapter[self.iscsi.address]
util.SMlog("adapter=%s" % adapter)
# find a scsi device on which to issue a report luns command:
devs = glob.glob("%s/LUN*" % self.iscsi.path)
sgdevs = []
for i in devs:
sgdevs.append(int(i.split("LUN")[1]))
sgdevs.sort()
sgdev = "%s/LUN%d" % (self.iscsi.path, sgdevs[0])
# issue a report luns:
luns = util.pread2(["/usr/bin/sg_luns", "-q", sgdev]).split('\n')
nluns = len(luns) - 1 # remove the line relating to the final \n
# check if the LUNs are MPP-RDAC Luns
scsi_id = scsiutil.getSCSIid(sgdev)
# make sure we've got that many sg devices present
for i in range(0, 30):
luns = scsiutil._dosgscan()
sgdevs = filter(lambda r: r[1] == adapter, luns)
if len(sgdevs) >= nluns:
util.SMlog("Got all %d sg devices" % nluns)
break
else:
util.SMlog("Got %d sg devices - expecting %d" % (len(sgdevs), nluns))
time.sleep(1)
if os.path.exists("/sbin/udevsettle"):
util.pread2(["/sbin/udevsettle"])
else:
util.pread2(["/sbin/udevadm", "settle"])
except:
util.SMlog("Generic exception caught. Pass")
pass # Make sure we don't break the probe...
self.iscsi.print_LUNs()
self.iscsi.detach(sr_uuid)
def create(self, sr_uuid, size):
# Check SCSIid not already in use by other PBDs
if util.test_SCSIid(self.session, sr_uuid, self.SCSIid):
raise xs_errors.XenError('SRInUse')
self.iscsi.attach(sr_uuid)
try:
if not self.iscsi._attach_LUN_bySCSIid(self.SCSIid):
# UPGRADE FROM GEORGE: take care of ill-formed SCSIid
upgraded = False
matchSCSIid = False
for file in filter(self.iscsi.match_lun, util.listdir(self.iscsi.path)):
path = os.path.join(self.iscsi.path, file)
if not util.wait_for_path(path, BaseISCSI.MAX_TIMEOUT):
util.SMlog("Unable to detect LUN attached to host [%s]" % path)
continue
try:
SCSIid = scsiutil.getSCSIid(path)
except:
continue
try:
matchSCSIid = scsiutil.compareSCSIid_2_6_18(self.SCSIid, path)
except:
continue
if (matchSCSIid):
util.SMlog("Performing upgrade from George")
try:
pbd = util.find_my_pbd(self.session, self.host_ref, self.sr_ref)
device_config = self.session.xenapi.PBD.get_device_config(pbd)
device_config['SCSIid'] = SCSIid
self.session.xenapi.PBD.set_device_config(pbd, device_config)
self.dconf['SCSIid'] = SCSIid
self.SCSIid = self.dconf['SCSIid']
except:
continue
if not self.iscsi._attach_LUN_bySCSIid(self.SCSIid):
raise xs_errors.XenError('InvalidDev')
else:
upgraded = True
break
else:
util.SMlog("Not a matching LUN, skip ... scsi_id is: %s" % SCSIid)
continue
if not upgraded:
raise xs_errors.XenError('InvalidDev')
self._pathrefresh(LVHDoISCSISR)
LVHDSR.LVHDSR.create(self, sr_uuid, size)
except Exception as inst:
self.iscsi.detach(sr_uuid)
raise xs_errors.XenError("SRUnavailable", opterr=inst)
self.iscsi.detach(sr_uuid)
def delete(self, sr_uuid):
self._pathrefresh(LVHDoISCSISR)
LVHDSR.LVHDSR.delete(self, sr_uuid)
for i in self.iscsiSRs:
i.detach(sr_uuid)
def attach(self, sr_uuid):
try:
connected = False
for i in self.iscsiSRs:
try:
i.attach(sr_uuid)
except SR.SROSError as inst:
# Some iscsi objects can fail login but not all. Storing exception
if inst.errno == 141:
util.SMlog("Connection failed for target %s, continuing.." % i.target)
stored_exception = inst
continue
else:
raise
else:
connected = True
if not i._attach_LUN_bySCSIid(self.SCSIid):
raise xs_errors.XenError('InvalidDev')
# Check if at least one iscsi succeeded
if not connected:
raise stored_exception
if 'multiSession' in self.dconf:
# Force a manual bus refresh
for a in self.iscsi.adapter:
scsiutil.rescan([self.iscsi.adapter[a]])
self._pathrefresh(LVHDoISCSISR)
LVHDSR.LVHDSR.attach(self, sr_uuid)
except Exception as inst:
for i in self.iscsiSRs:
i.detach(sr_uuid)
raise xs_errors.XenError("SRUnavailable", opterr=inst)
self._setMultipathableFlag(SCSIid=self.SCSIid)
def detach(self, sr_uuid):
LVHDSR.LVHDSR.detach(self, sr_uuid)
for i in self.iscsiSRs:
i.detach(sr_uuid)
def scan(self, sr_uuid):
self._pathrefresh(LVHDoISCSISR)
if self.mpath == "true":
for i in self.iscsiSRs:
try:
i.attach(sr_uuid)
except SR.SROSError:
util.SMlog("Connection failed for target %s, continuing.." % i.target)
LVHDSR.LVHDSR.scan(self, sr_uuid)
def probe(self):
self.uuid = util.gen_uuid()
        # When multipathing is enabled, we do not refcount the multipath maps,
        # so we must not attempt the iscsi.attach/detach when the map is
        # already present: doing so would remove a map that may well be in use.
if self.mpath == 'true' and 'SCSIid' in self.dconf:
maps = []
try:
maps = mpath_cli.list_maps()
except:
pass
if self.dconf['SCSIid'] in maps:
raise xs_errors.XenError('SRInUse')
self.iscsi.attach(self.uuid)
if not self.iscsi._attach_LUN_bySCSIid(self.SCSIid):
util.SMlog("Unable to detect LUN")
raise xs_errors.XenError('InvalidDev')
self._pathrefresh(LVHDoISCSISR)
out = LVHDSR.LVHDSR.probe(self)
self.iscsi.detach(self.uuid)
return out
def vdi(self, uuid):
return LVHDoISCSIVDI(self, uuid)
class LVHDoISCSIVDI(LVHDSR.LVHDVDI):
def generate_config(self, sr_uuid, vdi_uuid):
util.SMlog("LVHDoISCSIVDI.generate_config")
if not lvutil._checkLV(self.path):
raise xs_errors.XenError('VDIUnavailable')
dict = {}
self.sr.dconf['localIQN'] = self.sr.iscsi.localIQN
self.sr.dconf['multipathing'] = self.sr.mpath
self.sr.dconf['multipathhandle'] = self.sr.mpathhandle
dict['device_config'] = self.sr.dconf
if 'chappassword_secret' in dict['device_config']:
s = util.get_secret(self.session, dict['device_config']['chappassword_secret'])
del dict['device_config']['chappassword_secret']
dict['device_config']['chappassword'] = s
dict['sr_uuid'] = sr_uuid
dict['vdi_uuid'] = vdi_uuid
dict['command'] = 'vdi_attach_from_config'
# Return the 'config' encoded within a normal XMLRPC response so that
# we can use the regular response/error parsing code.
config = xmlrpclib.dumps(tuple([dict]), "vdi_attach_from_config")
return xmlrpclib.dumps((config, ), "", True)
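    # Hedged illustration (added; not part of the original driver): a caller
    # can unwrap the double encoding above with two xmlrpclib.loads() calls:
    #
    #   (config,), _ = xmlrpclib.loads(returned_string)
    #   (params,), method = xmlrpclib.loads(config)
    #   # method == 'vdi_attach_from_config'; params is the dict built above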
def attach_from_config(self, sr_uuid, vdi_uuid):
util.SMlog("LVHDoISCSIVDI.attach_from_config")
try:
self.sr.iscsi.attach(sr_uuid)
if not self.sr.iscsi._attach_LUN_bySCSIid(self.sr.SCSIid):
raise xs_errors.XenError('InvalidDev')
return LVHDSR.LVHDVDI.attach(self, sr_uuid, vdi_uuid)
except:
util.logException("LVHDoISCSIVDI.attach_from_config")
raise xs_errors.XenError('SRUnavailable', \
opterr='Unable to attach the heartbeat disk')
if __name__ == '__main__':
SRCommand.run(LVHDoISCSISR, DRIVER_INFO)
else:
SR.registerSR(LVHDoISCSISR)
| lgpl-2.1 |
adaxi/couchpotato | libs/requests/packages/chardet/charsetgroupprober.py | 2929 | 3791 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mActiveNum = 0
self._mProbers = []
self._mBestGuessProber = None
def reset(self):
CharSetProber.reset(self)
self._mActiveNum = 0
for prober in self._mProbers:
if prober:
prober.reset()
prober.active = True
self._mActiveNum += 1
self._mBestGuessProber = None
def get_charset_name(self):
if not self._mBestGuessProber:
self.get_confidence()
if not self._mBestGuessProber:
return None
# self._mBestGuessProber = self._mProbers[0]
return self._mBestGuessProber.get_charset_name()
def feed(self, aBuf):
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
continue
st = prober.feed(aBuf)
if not st:
continue
if st == constants.eFoundIt:
self._mBestGuessProber = prober
return self.get_state()
elif st == constants.eNotMe:
prober.active = False
self._mActiveNum -= 1
if self._mActiveNum <= 0:
self._mState = constants.eNotMe
return self.get_state()
return self.get_state()
def get_confidence(self):
st = self.get_state()
if st == constants.eFoundIt:
return 0.99
elif st == constants.eNotMe:
return 0.01
bestConf = 0.0
self._mBestGuessProber = None
for prober in self._mProbers:
if not prober:
continue
if not prober.active:
if constants._debug:
sys.stderr.write(prober.get_charset_name()
+ ' not active\n')
continue
cf = prober.get_confidence()
if constants._debug:
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(), cf))
if bestConf < cf:
bestConf = cf
self._mBestGuessProber = prober
if not self._mBestGuessProber:
return 0.0
return bestConf
# else:
# self._mBestGuessProber = self._mProbers[0]
# return self._mBestGuessProber.get_confidence()
| gpl-3.0 |
porduna/weblabdeusto | server/src/test/unit/weblab/core/test_data_retriever.py | 3 | 14161 | #!/usr/bin/env python
#-*-*- encoding: utf-8 -*-*-
#
# Copyright (C) 2005 onwards University of Deusto
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# This software consists of contributions made by many individuals,
# listed below:
#
# Author: Pablo Orduña <[email protected]>
#
from __future__ import print_function, unicode_literals
import unittest
import time
import datetime
from voodoo.override import Override
from voodoo.gen import CoordAddress
from weblab.data.experiments import ExperimentId
from weblab.data.command import Command
import weblab.core.data_retriever as TemporalInformationRetriever
import weblab.core.coordinator.store as TemporalInformationStore
from weblab.core.db import DatabaseGateway
import test.unit.configuration as configuration
import voodoo.configuration as ConfigurationManager
RESERVATION1 = 'reservation_id1'
RESERVATION2 = 'reservation_id2'
RESERVATION3 = 'reservation_id3'
RESERVATION4 = 'reservation_id4'
DATA1 = "{'data' : 1 }"
DATA2 = "{'data' : 2 }"
DATA3 = "{'data' : 3 }"
DATA4 = "{'data' : 4 }"
DATA_REQUEST1 = "{'foo' : 1}"
DATA_REQUEST2 = "{'foo' : 1}"
DATA_REQUEST3 = "{'foo' : 1}"
DATA_REQUEST4 = "{'foo' : 1}"
def wait_for(retriever, iterations = 5, max_wait = 10):
initial_time = time.time()
initial_iterations = retriever.iterations
while retriever.iterations - initial_iterations < iterations:
time.sleep(0.01)
if time.time() - initial_time >= max_wait:
raise AssertionError("Maximum time waiting reached")
def coord_addr(coord_addr_str):
return CoordAddress.translate( coord_addr_str )
class TemporalInformationRetrieverTestCase(unittest.TestCase):
def setUp(self):
cfg_manager = ConfigurationManager.ConfigurationManager()
cfg_manager.append_module(configuration)
self.dbmanager = DatabaseGateway(cfg_manager)
self.dbmanager._delete_all_uses()
session = self.dbmanager.Session()
try:
student1 = self.dbmanager._get_user(session, 'student1')
finally:
session.close()
self.initial_store = TemporalInformationStore.InitialTemporalInformationStore()
self.finished_store = TemporalInformationStore.FinishTemporalInformationStore()
self.commands_store = TemporalInformationStore.CommandsTemporalInformationStore()
self.completed_store = TemporalInformationStore.CompletedInformationStore()
self.retriever = TemporalInformationRetriever.TemporalInformationRetriever(cfg_manager, self.initial_store, self.finished_store, self.commands_store, self.completed_store, self.dbmanager)
self.retriever.timeout = 0.001 # Be quicker instead of waiting for half a second
self.initial_time = self.end_time = datetime.datetime.now()
self.initial_timestamp = self.end_timestamp = time.time()
request_info = {'username':'student1','role':'student','permission_scope' : 'user', 'permission_id' : student1.id}
exp_id = ExperimentId('ud-dummy','Dummy Experiments')
self.entry1 = TemporalInformationStore.InitialInformationEntry(
RESERVATION1, exp_id, coord_addr('ser:inst@mach'),
DATA1, self.initial_time, self.end_time, request_info.copy(), DATA_REQUEST1)
self.entry2 = TemporalInformationStore.InitialInformationEntry(
RESERVATION2, exp_id, coord_addr('ser:inst@mach'),
DATA2, self.initial_time, self.end_time, request_info.copy(), DATA_REQUEST2)
self.entry3 = TemporalInformationStore.InitialInformationEntry(
RESERVATION3, exp_id, coord_addr('ser:inst@mach'),
DATA3, self.initial_time, self.end_time, request_info.copy(), DATA_REQUEST3)
self.entry4 = TemporalInformationStore.InitialInformationEntry(
RESERVATION4, exp_id, coord_addr('ser:inst@mach'),
DATA4, self.initial_time, self.end_time, request_info.copy(), DATA_REQUEST3)
def test_initial_finish(self):
self.retriever.start()
try:
usages = self.dbmanager.list_usages_per_user('student1')
self.assertEquals(0, len(usages))
self.initial_store.put(self.entry1)
self.initial_store.put(self.entry2)
self.initial_store.put(self.entry3)
self.finished_store.put(RESERVATION4, DATA4, self.initial_time, self.end_time)
# Wait and then populate the RESERVATION3 (the last one in the queue)
wait_for(self.retriever)
usages = self.dbmanager.list_usages_per_user('student1')
# There are 3, and RESERVATION4 is waiting
self.assertEquals(3, len(usages))
# Check that it has been stored
full_usage1 = self.dbmanager.retrieve_usage(usages[0].experiment_use_id)
self.assertEquals("@@@initial::request@@@", full_usage1.commands[-2].command.commandstring)
self.assertEquals(DATA_REQUEST1, full_usage1.commands[-2].response.commandstring)
self.assertEquals("@@@initial::response@@@", full_usage1.commands[-1].command.commandstring)
self.assertEquals(DATA1, full_usage1.commands[-1].response.commandstring)
self.assertEquals(None, full_usage1.end_date)
full_usage2 = self.dbmanager.retrieve_usage(usages[1].experiment_use_id)
self.assertEquals(DATA2, full_usage2.commands[-1].response.commandstring)
self.assertEquals(None, full_usage2.end_date)
full_usage3 = self.dbmanager.retrieve_usage(usages[2].experiment_use_id)
self.assertEquals(DATA3, full_usage3.commands[-1].response.commandstring)
self.assertEquals(None, full_usage3.end_date)
wait_for(self.retriever)
self.initial_store.put(self.entry4)
wait_for(self.retriever)
usages = self.dbmanager.list_usages_per_user('student1')
# RESERVATION4 achieved
self.assertEquals(4, len(usages))
# And end_date is filled for 4
full_usage4 = self.dbmanager.retrieve_usage(usages[3].experiment_use_id)
self.assertEquals(DATA4, full_usage4.commands[-1].response.commandstring)
self.assertNotEqual(None, full_usage4.end_date)
# While in the rest it's not yet filled
full_usage1 = self.dbmanager.retrieve_usage(usages[0].experiment_use_id)
self.assertEquals(DATA1, full_usage1.commands[-1].response.commandstring)
self.assertEquals(None, full_usage1.end_date)
full_usage2 = self.dbmanager.retrieve_usage(usages[1].experiment_use_id)
self.assertEquals(DATA2, full_usage2.commands[-1].response.commandstring)
self.assertEquals(None, full_usage2.end_date)
full_usage3 = self.dbmanager.retrieve_usage(usages[2].experiment_use_id)
self.assertEquals(DATA3, full_usage3.commands[-1].response.commandstring)
self.assertEquals(None, full_usage3.end_date)
# But if we add to the finish store, and we wait:
self.finished_store.put(RESERVATION1, DATA1, self.initial_time, self.end_time)
wait_for(self.retriever)
# Then it is filled
full_usage1 = self.dbmanager.retrieve_usage(usages[0].experiment_use_id)
self.assertEquals(DATA1, full_usage1.commands[-1].response.commandstring)
self.assertNotEqual(None, full_usage1.end_date)
finally:
self.retriever.stop()
self.retriever.join(1)
self.assertFalse(self.retriever.isAlive())
def test_commands(self):
self.retriever.start()
try:
usages = self.dbmanager.list_usages_per_user('student1')
self.assertEquals(0, len(usages))
self.initial_store.put(self.entry1)
wait_for(self.retriever)
usages = self.dbmanager.list_usages_per_user('student1')
self.assertEquals(1, len(usages))
entry_id1 = 58131
entry_id2 = 14214
entry_id3 = 84123
pre_command1 = TemporalInformationStore.CommandOrFileInformationEntry(RESERVATION1, True, True, entry_id1, Command(DATA_REQUEST1), self.initial_timestamp)
post_command1 = TemporalInformationStore.CommandOrFileInformationEntry(RESERVATION1, False, True, entry_id1, Command(DATA1), self.initial_timestamp)
pre_command2 = TemporalInformationStore.CommandOrFileInformationEntry(RESERVATION2, True, True, entry_id2, Command(DATA_REQUEST2), self.initial_timestamp)
post_command2 = TemporalInformationStore.CommandOrFileInformationEntry(RESERVATION2, False, True, entry_id2, Command(DATA2), self.initial_timestamp)
pre_command3 = TemporalInformationStore.CommandOrFileInformationEntry(RESERVATION3, True, True, entry_id3, Command(DATA_REQUEST3), self.initial_timestamp)
post_command3 = TemporalInformationStore.CommandOrFileInformationEntry(RESERVATION3, False, True, entry_id3, Command(DATA3), self.initial_timestamp)
# The reservation is stored, therefore this command will
# also be stored
self.commands_store.put(pre_command1)
# This reservation has not been stored, therefore this command
# will not be stored yet
self.commands_store.put(pre_command2)
            # Neither this reservation nor pre_command3 has been stored,
            # therefore this command will not be stored
self.commands_store.put(post_command3)
wait_for(self.retriever)
usages = self.dbmanager.list_usages_per_user('student1')
self.assertEquals(1, len(usages))
full_usage1 = self.dbmanager.retrieve_usage(usages[0].experiment_use_id)
self.assertEquals(DATA_REQUEST1, full_usage1.commands[-1].command.commandstring)
self.assertEquals(None, full_usage1.commands[-1].response.commandstring)
# So we add the post_command1, to avoid the "None"
self.commands_store.put(post_command1)
# And the pre_command3, to see if it is correctly enqueued
self.commands_store.put(pre_command3)
# And the entry 2, to let pre_command2 enter
self.initial_store.put(self.entry2)
wait_for(self.retriever)
usages = self.dbmanager.list_usages_per_user('student1')
self.assertEquals(2, len(usages))
full_usage1 = self.dbmanager.retrieve_usage(usages[0].experiment_use_id)
self.assertEquals(DATA_REQUEST1, full_usage1.commands[-1].command.commandstring)
self.assertEquals(DATA1, full_usage1.commands[-1].response.commandstring)
full_usage2 = self.dbmanager.retrieve_usage(usages[1].experiment_use_id)
self.assertEquals(DATA_REQUEST2, full_usage2.commands[-1].command.commandstring)
self.assertEquals(None, full_usage2.commands[-1].response.commandstring)
# So now we add the rest
self.commands_store.put(post_command2)
self.initial_store.put(self.entry3)
wait_for(self.retriever)
usages = self.dbmanager.list_usages_per_user('student1')
self.assertEquals(3, len(usages))
full_usage1 = self.dbmanager.retrieve_usage(usages[0].experiment_use_id)
self.assertEquals(DATA_REQUEST1, full_usage1.commands[-1].command.commandstring)
self.assertEquals(DATA1, full_usage1.commands[-1].response.commandstring)
full_usage2 = self.dbmanager.retrieve_usage(usages[1].experiment_use_id)
self.assertEquals(DATA_REQUEST2, full_usage2.commands[-1].command.commandstring)
self.assertEquals(DATA2, full_usage2.commands[-1].response.commandstring)
full_usage3 = self.dbmanager.retrieve_usage(usages[2].experiment_use_id)
self.assertEquals(DATA_REQUEST3, full_usage3.commands[-1].command.commandstring)
self.assertEquals(DATA3, full_usage3.commands[-1].response.commandstring)
finally:
self.retriever.stop()
self.retriever.join(1)
self.assertFalse(self.retriever.isAlive())
class FakeTemporalInformationRetriever(TemporalInformationRetriever.TemporalInformationRetriever):
PRINT_ERRORS = False
@Override(TemporalInformationRetriever.TemporalInformationRetriever)
def iterate(self):
failures = getattr(self, 'failures', 0)
self.failures = failures + 1
return 10 / 0 # cause an error
class IterationFailerTemporalInformationRetrieverTestCase(unittest.TestCase):
def test_fail(self):
initial_store = TemporalInformationStore.InitialTemporalInformationStore()
finished_store = TemporalInformationStore.FinishTemporalInformationStore()
commands_store = TemporalInformationStore.CommandsTemporalInformationStore()
completed_store = TemporalInformationStore.CompletedInformationStore()
cfg_manager = ConfigurationManager.ConfigurationManager()
cfg_manager.append_module(configuration)
fake = FakeTemporalInformationRetriever(cfg_manager, initial_store, finished_store, commands_store, completed_store, None)
fake.start()
try:
initial_time = time.time()
while not hasattr( fake, 'failures') or fake.failures < 1:
time.sleep(0.01)
if time.time() - initial_time > 5:
raise AssertionError("Too long time passed waiting for failures to increase")
finally:
fake.stop()
fake.join(1)
self.assertFalse( fake.isAlive() )
def suite():
return unittest.TestSuite((
unittest.makeSuite(TemporalInformationRetrieverTestCase),
unittest.makeSuite(IterationFailerTemporalInformationRetrieverTestCase),
))
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
switchboardOp/ansible | lib/ansible/modules/network/avi/avi_cloud.py | 46 | 9205 | #!/usr/bin/python
#
# Created on Aug 25, 2016
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloud
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of Cloud Avi RESTful Object
description:
    - This module is used to configure the Cloud object.
    - More examples at U(https://github.com/avinetworks/devops).
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent","present"]
apic_configuration:
description:
- Apicconfiguration settings for cloud.
apic_mode:
description:
- Boolean flag to set apic_mode.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
aws_configuration:
description:
- Awsconfiguration settings for cloud.
cloudstack_configuration:
description:
- Cloudstackconfiguration settings for cloud.
dhcp_enabled:
description:
- Select the ip address management scheme.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
dns_provider_ref:
description:
- Dns profile for the cloud.
- It is a reference to an object of type ipamdnsproviderprofile.
docker_configuration:
description:
- Dockerconfiguration settings for cloud.
east_west_dns_provider_ref:
description:
- Dns profile for east-west services.
- It is a reference to an object of type ipamdnsproviderprofile.
east_west_ipam_provider_ref:
description:
- Ipam profile for east-west services.
- Warning - please use virtual subnets in this ipam profile that do not conflict with the underlay networks or any overlay networks in the cluster.
- For example in aws and gcp, 169.254.0.0/16 is used for storing instance metadata.
- Hence, it should not be used in this profile.
- It is a reference to an object of type ipamdnsproviderprofile.
enable_vip_static_routes:
description:
- Use static routes for vip side network resolution during virtualservice placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
ipam_provider_ref:
description:
- Ipam profile for the cloud.
- It is a reference to an object of type ipamdnsproviderprofile.
license_type:
description:
- If no license type is specified then default license enforcement for the cloud type is chosen.
        - The default mappings are: container cloud is max SEs, OpenStack and VMware are cores, and Linux server is sockets.
- Enum options - LIC_BACKEND_SERVERS, LIC_SOCKETS, LIC_CORES, LIC_HOSTS.
linuxserver_configuration:
description:
- Linuxserverconfiguration settings for cloud.
mesos_configuration:
description:
- Mesosconfiguration settings for cloud.
mtu:
description:
- Mtu setting for the cloud.
- Default value when not specified in API or module is interpreted by Avi Controller as 1500.
name:
description:
- Name of the object.
required: true
nsx_configuration:
description:
- Configuration parameters for nsx manager.
- Field introduced in 17.1.1.
obj_name_prefix:
description:
- Default prefix for all automatically created objects in this cloud.
- This prefix can be overridden by the se-group template.
openstack_configuration:
description:
- Openstackconfiguration settings for cloud.
oshiftk8s_configuration:
description:
- Oshiftk8sconfiguration settings for cloud.
prefer_static_routes:
description:
- Prefer static routes over interface routes during virtualservice placement.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
proxy_configuration:
description:
- Proxyconfiguration settings for cloud.
rancher_configuration:
description:
- Rancherconfiguration settings for cloud.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
vca_configuration:
description:
- Vcloudairconfiguration settings for cloud.
vcenter_configuration:
description:
- Vcenterconfiguration settings for cloud.
vtype:
description:
- Cloud type.
- Enum options - CLOUD_NONE, CLOUD_VCENTER, CLOUD_OPENSTACK, CLOUD_AWS, CLOUD_VCA, CLOUD_APIC, CLOUD_MESOS, CLOUD_LINUXSERVER, CLOUD_DOCKER_UCP,
- CLOUD_RANCHER, CLOUD_OSHIFT_K8S.
- Default value when not specified in API or module is interpreted by Avi Controller as CLOUD_NONE.
required: true
extends_documentation_fragment:
- avi
'''
EXAMPLES = '''
- name: Create a VMware cloud with write access mode
avi_cloud:
username: ''
controller: ''
password: ''
apic_mode: false
dhcp_enabled: true
enable_vip_static_routes: false
license_type: LIC_CORES
mtu: 1500
name: VCenter Cloud
prefer_static_routes: false
tenant_ref: admin
vcenter_configuration:
datacenter_ref: /api/vimgrdcruntime/datacenter-2-10.10.20.100
management_network: /api/vimgrnwruntime/dvportgroup-103-10.10.20.100
password: password
privilege: WRITE_ACCESS
username: user
vcenter_url: 10.10.20.100
vtype: CLOUD_VCENTER
'''
RETURN = '''
obj:
description: Cloud (api/cloud) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
apic_configuration=dict(type='dict',),
apic_mode=dict(type='bool',),
aws_configuration=dict(type='dict',),
cloudstack_configuration=dict(type='dict',),
dhcp_enabled=dict(type='bool',),
dns_provider_ref=dict(type='str',),
docker_configuration=dict(type='dict',),
east_west_dns_provider_ref=dict(type='str',),
east_west_ipam_provider_ref=dict(type='str',),
enable_vip_static_routes=dict(type='bool',),
ipam_provider_ref=dict(type='str',),
license_type=dict(type='str',),
linuxserver_configuration=dict(type='dict',),
mesos_configuration=dict(type='dict',),
mtu=dict(type='int',),
name=dict(type='str', required=True),
nsx_configuration=dict(type='dict',),
obj_name_prefix=dict(type='str',),
openstack_configuration=dict(type='dict',),
oshiftk8s_configuration=dict(type='dict',),
prefer_static_routes=dict(type='bool',),
proxy_configuration=dict(type='dict',),
rancher_configuration=dict(type='dict',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
vca_configuration=dict(type='dict',),
vcenter_configuration=dict(type='dict',),
vtype=dict(type='str', required=True),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cloud',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
aliyun/aliyun-oss-python-sdk | tests/test_bucket_worm.py | 1 | 2199 | from .common import *
class TestBucketWorm(OssTestCase):
    def test_bucket_worm_normal(self):
init_result = self.bucket.init_bucket_worm(1)
worm_id = init_result.worm_id
self.assertIsNotNone(init_result.request_id)
get_result = self.bucket.get_bucket_worm()
self.assertIsNotNone(get_result.request_id)
self.assertEqual(worm_id, get_result.worm_id)
self.assertEqual('InProgress', get_result.state)
self.assertEqual(1, get_result.retention_period_days)
self.assertIsNotNone(get_result.creation_date)
        complete_result = self.bucket.complete_bucket_worm(worm_id)
        self.assertIsNotNone(complete_result.request_id)
get_result = self.bucket.get_bucket_worm()
self.assertEqual(worm_id, get_result.worm_id)
self.assertEqual('Locked', get_result.state)
self.assertEqual(1, get_result.retention_period_days)
self.assertIsNotNone(get_result.creation_date)
extend_result = self.bucket.extend_bucket_worm(worm_id, 2)
self.assertIsNotNone(extend_result.request_id)
get_result = self.bucket.get_bucket_worm()
self.assertEqual(worm_id, get_result.worm_id)
self.assertEqual('Locked', get_result.state)
self.assertEqual(2, get_result.retention_period_days)
self.assertIsNotNone(get_result.creation_date)
def test_abort_bucket_worm(self):
self.bucket.init_bucket_worm(1)
abort_result = self.bucket.abort_bucket_worm()
self.assertIsNotNone(abort_result.request_id)
init_result = self.bucket.init_bucket_worm(1)
worm_id = init_result.worm_id
self.bucket.complete_bucket_worm(worm_id)
self.assertRaises(oss2.exceptions.WORMConfigurationLocked, self.bucket.abort_bucket_worm)
def test_bucket_worm_illegal(self):
self.assertRaises(oss2.exceptions.NoSuchWORMConfiguration, self.bucket.get_bucket_worm)
init_result = self.bucket.init_bucket_worm(1)
worm_id = init_result.worm_id
self.assertRaises(oss2.exceptions.InvalidWORMConfiguration, self.bucket.extend_bucket_worm, worm_id, 2)
if __name__ == '__main__':
unittest.main()
| mit |
9seconds/streams | streams/executors/executors.py | 1 | 1711 | # -*- coding: utf-8 -*-
"""
This module has implementation of executors wrapped by
:py:class:`streams.executors.mixins.PoolOfPoolsMixin` and applicable to work
with :py:class:`streams.poolofpools.PoolOfPools`.
Basically all of them are thin extensions of classes from
:py:mod:`concurrent.futures`.
"""
###############################################################################
from concurrent.futures import Executor, Future, \
ThreadPoolExecutor as BaseThreadPoolExecutor, \
ProcessPoolExecutor as BaseProcessPoolExecutor
from .mixins import PoolOfPoolsMixin
###############################################################################
class SequentalExecutor(PoolOfPoolsMixin, Executor):
"""
Debug executor. No concurrency, it just yields elements one by one.
"""
# noinspection PyUnusedLocal
def __init__(self, *args, **kwargs):
super(SequentalExecutor, self).__init__()
self._max_workers = 1
def submit(self, fn, *args, **kwargs):
future = Future()
try:
result = fn(*args, **kwargs)
except Exception as exc:
future.set_exception(exc)
else:
future.set_result(result)
return future
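# Hedged sketch (added): submit() runs fn inline, so the returned Future is
# already resolved by the time it is handed back:
#
#   executor = SequentalExecutor()
#   future = executor.submit(pow, 2, 10)
#   assert future.result() == 1024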
class ThreadPoolExecutor(PoolOfPoolsMixin, BaseThreadPoolExecutor):
"""
Implementation of :py:class:`concurrent.futures.ThreadPoolExecutor`
applicable to work with :py:class:`streams.poolofpools.PoolOfPools`.
"""
pass
class ProcessPoolExecutor(PoolOfPoolsMixin, BaseProcessPoolExecutor):
"""
Implementation of :py:class:`concurrent.futures.ProcessPoolExecutor`
applicable to work with :py:class:`streams.poolofpools.PoolOfPools`.
"""
pass
| mit |
docmeth02/CouchPotatoServer | libs/rtorrent/lib/xmlrpc/basic_auth.py | 83 | 3291 | #
# Copyright (c) 2013 Dean Gardiner, <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from base64 import b64encode
import httplib
import xmlrpclib
class BasicAuthTransport(xmlrpclib.Transport):
def __init__(self, secure=False, username=None, password=None):
xmlrpclib.Transport.__init__(self)
self.secure = secure
self.username = username
self.password = password
def send_auth(self, h):
if not self.username or not self.password:
return
auth = b64encode("%s:%s" % (self.username, self.password))
h.putheader('Authorization', "Basic %s" % auth)
def make_connection(self, host):
if self._connection and host == self._connection[0]:
return self._connection[1]
chost, self._extra_headers, x509 = self.get_host_info(host)
if self.secure:
try:
self._connection = host, httplib.HTTPSConnection(chost, None, **(x509 or {}))
except AttributeError:
raise NotImplementedError(
"your version of httplib doesn't support HTTPS"
)
else:
self._connection = host, httplib.HTTPConnection(chost)
return self._connection[1]
def single_request(self, host, handler, request_body, verbose=0):
# issue XML-RPC request
h = self.make_connection(host)
if verbose:
h.set_debuglevel(1)
try:
self.send_request(h, handler, request_body)
self.send_host(h, host)
self.send_user_agent(h)
self.send_auth(h)
self.send_content(h, request_body)
response = h.getresponse(buffering=True)
if response.status == 200:
self.verbose = verbose
return self.parse_response(response)
except xmlrpclib.Fault:
raise
except Exception:
self.close()
raise
        # Discard any response data and raise an exception.
if response.getheader("content-length", 0):
response.read()
raise xmlrpclib.ProtocolError(
host + handler,
response.status, response.reason,
response.msg,
)
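# Hedged usage sketch (added; the endpoint and credentials are placeholders):
#
#   server = xmlrpclib.ServerProxy(
#       'https://rtorrent.example/RPC2',
#       transport=BasicAuthTransport(secure=True,
#                                    username='user', password='secret'))
#   server.system.client_version()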
| gpl-3.0 |
mhugent/Quantum-GIS | python/plugins/processing/algs/qgis/Polygonize.py | 2 | 4932 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Polygonize.py
---------------------
Date : March 2013
Copyright : (C) 2013 by Piotr Pociask
Email : ppociask at o2 dot pl
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Piotr Pociask'
__date__ = 'March 2013'
__copyright__ = '(C) 2013, Piotr Pociask'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from PyQt4.QtCore import QVariant
from qgis.core import *
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import \
GeoAlgorithmExecutionException
from processing.parameters.ParameterVector import ParameterVector
from processing.parameters.ParameterBoolean import ParameterBoolean
from processing.outputs.OutputVector import OutputVector
from processing.tools import dataobjects, vector
class Polygonize(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
FIELDS = 'FIELDS'
GEOMETRY = 'GEOMETRY'
def processAlgorithm(self, progress):
try:
from shapely.ops import polygonize
from shapely.geometry import Point, MultiLineString
except ImportError:
raise GeoAlgorithmExecutionException(
'Polygonize algorithm requires shapely module!')
vlayer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
output = self.getOutputFromName(self.OUTPUT)
vprovider = vlayer.dataProvider()
if self.getParameterValue(self.FIELDS):
fields = vprovider.fields()
else:
fields = QgsFields()
if self.getParameterValue(self.GEOMETRY):
fieldsCount = fields.count()
fields.append(QgsField('area', QVariant.Double, 'double', 16, 2))
fields.append(QgsField('perimeter', QVariant.Double,
'double', 16, 2))
allLinesList = []
features = vector.features(vlayer)
current = 0
progress.setInfo('Processing lines...')
total = 40.0 / float(len(features))
for inFeat in features:
inGeom = inFeat.geometry()
if inGeom.isMultipart():
allLinesList.extend(inGeom.asMultiPolyline())
else:
allLinesList.append(inGeom.asPolyline())
current += 1
progress.setPercentage(int(current * total))
progress.setPercentage(40)
allLines = MultiLineString(allLinesList)
progress.setInfo('Noding lines...')
try:
from shapely.ops import unary_union
allLines = unary_union(allLines)
except ImportError:
allLines = allLines.union(Point(0, 0))
progress.setPercentage(45)
progress.setInfo('Polygonizing...')
polygons = list(polygonize([allLines]))
if not polygons:
raise GeoAlgorithmExecutionException('No polygons were created!')
progress.setPercentage(50)
progress.setInfo('Saving polygons...')
writer = output.getVectorWriter(fields, QGis.WKBPolygon, vlayer.crs())
outFeat = QgsFeature()
current = 0
total = 50.0 / float(len(polygons))
for polygon in polygons:
outFeat.setGeometry(QgsGeometry.fromWkt(polygon.wkt))
if self.getParameterValue(self.GEOMETRY):
outFeat.setAttributes([None] * fieldsCount + [polygon.area,
polygon.length])
writer.addFeature(outFeat)
current += 1
progress.setPercentage(50 + int(current * total))
progress.setInfo('Finished')
del writer
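    # Hedged standalone illustration (added) of the shapely core used above:
    #
    #   from shapely.geometry import MultiLineString
    #   from shapely.ops import unary_union, polygonize
    #   lines = MultiLineString([[(0, 0), (1, 0)], [(1, 0), (1, 1)],
    #                            [(1, 1), (0, 0)]])
    #   list(polygonize([unary_union(lines)]))  # -> one triangular polygon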
def defineCharacteristics(self):
self.name = 'Polygonize'
self.group = 'Vector geometry tools'
self.addParameter(ParameterVector(self.INPUT, 'Input layer',
[ParameterVector.VECTOR_TYPE_LINE]))
self.addParameter(ParameterBoolean(self.FIELDS,
'Keep table structure of line layer', False))
self.addParameter(ParameterBoolean(self.GEOMETRY,
'Create geometry columns', True))
self.addOutput(OutputVector(self.OUTPUT, 'Output layer'))
| gpl-2.0 |
ramanajee/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/profiler_unittest.py | 124 | 5111 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.common.system.platforminfo_mock import MockPlatformInfo
from webkitpy.common.system.systemhost_mock import MockSystemHost
from .profiler import ProfilerFactory, GooglePProf
class ProfilerFactoryTest(unittest.TestCase):
def _assert_default_profiler_name(self, os_name, expected_profiler_name):
profiler_name = ProfilerFactory.default_profiler_name(MockPlatformInfo(os_name))
self.assertEqual(profiler_name, expected_profiler_name)
def test_default_profilers(self):
self._assert_default_profiler_name('mac', 'iprofiler')
self._assert_default_profiler_name('linux', 'perf')
self._assert_default_profiler_name('win32', None)
self._assert_default_profiler_name('freebsd', None)
def test_default_profiler_output(self):
host = MockSystemHost()
self.assertFalse(host.filesystem.exists("/tmp/output"))
# Default mocks are Mac, so iprofile should be default.
profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
self.assertTrue(host.filesystem.exists("/tmp/output"))
self.assertEqual(profiler._output_path, "/tmp/output/test.dtps")
# Linux defaults to perf.
host.platform.os_name = 'linux'
profiler = ProfilerFactory.create_profiler(host, '/bin/executable', '/tmp/output')
self.assertEqual(profiler._output_path, "/tmp/output/test.data")
class GooglePProfTest(unittest.TestCase):
def test_pprof_output_regexp(self):
pprof_output = """
sometimes
there
is
junk before the total line
Total: 3770 samples
76 2.0% 2.0% 104 2.8% lookup (inline)
60 1.6% 3.6% 60 1.6% FL_SetPrevious (inline)
56 1.5% 5.1% 56 1.5% MaskPtr (inline)
51 1.4% 6.4% 222 5.9% WebCore::HTMLTokenizer::nextToken
42 1.1% 7.6% 47 1.2% WTF::Vector::shrinkCapacity
35 0.9% 8.5% 35 0.9% WTF::RefPtr::get (inline)
33 0.9% 9.4% 43 1.1% append (inline)
29 0.8% 10.1% 67 1.8% WTF::StringImpl::deref (inline)
29 0.8% 10.9% 100 2.7% add (inline)
28 0.7% 11.6% 28 0.7% WebCore::QualifiedName::localName (inline)
25 0.7% 12.3% 27 0.7% WebCore::Private::addChildNodesToDeletionQueue
24 0.6% 12.9% 24 0.6% __memcpy_ssse3_back
23 0.6% 13.6% 23 0.6% intHash (inline)
23 0.6% 14.2% 76 2.0% tcmalloc::FL_Next
23 0.6% 14.8% 95 2.5% tcmalloc::FL_Push
22 0.6% 15.4% 22 0.6% WebCore::MarkupTokenizerBase::InputStreamPreprocessor::peek (inline)
"""
expected_first_ten_lines = """ 76 2.0% 2.0% 104 2.8% lookup (inline)
60 1.6% 3.6% 60 1.6% FL_SetPrevious (inline)
56 1.5% 5.1% 56 1.5% MaskPtr (inline)
51 1.4% 6.4% 222 5.9% WebCore::HTMLTokenizer::nextToken
42 1.1% 7.6% 47 1.2% WTF::Vector::shrinkCapacity
35 0.9% 8.5% 35 0.9% WTF::RefPtr::get (inline)
33 0.9% 9.4% 43 1.1% append (inline)
29 0.8% 10.1% 67 1.8% WTF::StringImpl::deref (inline)
29 0.8% 10.9% 100 2.7% add (inline)
28 0.7% 11.6% 28 0.7% WebCore::QualifiedName::localName (inline)
"""
host = MockSystemHost()
profiler = GooglePProf(host, '/bin/executable', '/tmp/output')
self.assertEqual(profiler._first_ten_lines_of_profile(pprof_output), expected_first_ten_lines)
| bsd-3-clause |
wiltonlazary/arangodb | 3rdParty/V8/V8-5.0.71.39/tools/swarming_client/swarming.py | 4 | 49821 | #!/usr/bin/env python
# Copyright 2013 The Swarming Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0 that
# can be found in the LICENSE file.
"""Client tool to trigger tasks or retrieve results from a Swarming server."""
__version__ = '0.8.4'
import collections
import datetime
import json
import logging
import optparse
import os
import subprocess
import sys
import tempfile
import threading
import time
import urllib
from third_party import colorama
from third_party.depot_tools import fix_encoding
from third_party.depot_tools import subcommand
from utils import file_path
from utils import fs
from utils import logging_utils
from third_party.chromium import natsort
from utils import net
from utils import on_error
from utils import threading_utils
from utils import tools
import auth
import isolated_format
import isolateserver
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
class Failure(Exception):
"""Generic failure."""
pass
### Isolated file handling.
def isolated_to_hash(arg, algo):
"""Archives a .isolated file if needed.
Returns the file hash to trigger and a bool specifying if it was a file (True)
or a hash (False).
"""
if arg.endswith('.isolated'):
arg = unicode(os.path.abspath(arg))
file_hash = isolated_format.hash_file(arg, algo)
if not file_hash:
on_error.report('Archival failure %s' % arg)
return None, True
return file_hash, True
elif isolated_format.is_valid_hash(arg, algo):
return arg, False
else:
on_error.report('Invalid hash %s' % arg)
return None, False
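# Hedged examples (added; not in the upstream file) of the two accepted
# argument shapes, using the same hash algo lookup the caller performs:
#
#   algo = isolated_format.get_hash_algo('default-gzip')
#   isolated_to_hash('foo.isolated', algo)  # hashes the file -> (digest, True)
#   isolated_to_hash('a' * 40, algo)        # valid SHA-1 hex -> (hash, False)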
def isolated_handle_options(options, args):
"""Handles '--isolated <isolated>', '<isolated>' and '-- <args...>' arguments.
Returns:
tuple(command, inputs_ref).
"""
isolated_cmd_args = []
is_file = False
if not options.isolated:
if '--' in args:
index = args.index('--')
isolated_cmd_args = args[index+1:]
args = args[:index]
else:
# optparse eats '--' sometimes.
isolated_cmd_args = args[1:]
args = args[:1]
if len(args) != 1:
raise ValueError(
'Use --isolated, --raw-cmd or \'--\' to pass arguments to the called '
'process.')
# Old code. To be removed eventually.
options.isolated, is_file = isolated_to_hash(
args[0], isolated_format.get_hash_algo(options.namespace))
if not options.isolated:
raise ValueError('Invalid argument %s' % args[0])
elif args:
if '--' in args:
index = args.index('--')
isolated_cmd_args = args[index+1:]
if index != 0:
raise ValueError('Unexpected arguments.')
else:
# optparse eats '--' sometimes.
isolated_cmd_args = args
  # If a file name was passed, use its base name instead of the isolated hash.
# Otherwise, use user name as an approximation of a task name.
if not options.task_name:
if is_file:
key = os.path.splitext(os.path.basename(args[0]))[0]
else:
key = options.user
options.task_name = u'%s/%s/%s' % (
key,
'_'.join(
'%s=%s' % (k, v)
for k, v in sorted(options.dimensions.iteritems())),
options.isolated)
inputs_ref = FilesRef(
isolated=options.isolated,
isolatedserver=options.isolate_server,
namespace=options.namespace)
return isolated_cmd_args, inputs_ref
### Triggering.
# See ../appengine/swarming/swarming_rpcs.py.
FilesRef = collections.namedtuple(
'FilesRef',
[
'isolated',
'isolatedserver',
'namespace',
])
# See ../appengine/swarming/swarming_rpcs.py.
TaskProperties = collections.namedtuple(
'TaskProperties',
[
'command',
'dimensions',
'env',
'execution_timeout_secs',
'extra_args',
'grace_period_secs',
'idempotent',
'inputs_ref',
'io_timeout_secs',
])
# See ../appengine/swarming/swarming_rpcs.py.
NewTaskRequest = collections.namedtuple(
'NewTaskRequest',
[
'expiration_secs',
'name',
'parent_task_id',
'priority',
'properties',
'tags',
'user',
])
def namedtuple_to_dict(value):
"""Recursively converts a namedtuple to a dict."""
out = dict(value._asdict())
for k, v in out.iteritems():
if hasattr(v, '_asdict'):
out[k] = namedtuple_to_dict(v)
return out
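# Illustrative check (added): namedtuples nest (e.g. TaskProperties holds a
# FilesRef), and this helper collapses them recursively into plain dicts:
#
#   ref = FilesRef(isolated='deadbeef', isolatedserver='https://iso.example',
#                  namespace='default-gzip')
#   namedtuple_to_dict(ref)
#   # -> {'isolated': 'deadbeef', 'isolatedserver': 'https://iso.example',
#   #     'namespace': 'default-gzip'}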
def task_request_to_raw_request(task_request):
"""Returns the json dict expected by the Swarming server for new request.
This is for the v1 client Swarming API.
"""
out = namedtuple_to_dict(task_request)
# Maps are not supported until protobuf v3.
out['properties']['dimensions'] = [
{'key': k, 'value': v}
for k, v in out['properties']['dimensions'].iteritems()
]
out['properties']['dimensions'].sort(key=lambda x: x['key'])
out['properties']['env'] = [
{'key': k, 'value': v}
for k, v in out['properties']['env'].iteritems()
]
out['properties']['env'].sort(key=lambda x: x['key'])
return out
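# Worked example (added) of the map flattening above: since maps are not
# supported until protobuf v3, {'os': 'Linux', 'pool': 'default'} becomes
#   [{'key': 'os', 'value': 'Linux'}, {'key': 'pool', 'value': 'default'}]
# sorted by key.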
def swarming_trigger(swarming, raw_request):
"""Triggers a request on the Swarming server and returns the json data.
It's the low-level function.
Returns:
{
'request': {
'created_ts': u'2010-01-02 03:04:05',
'name': ..
},
'task_id': '12300',
}
"""
logging.info('Triggering: %s', raw_request['name'])
result = net.url_read_json(
swarming + '/_ah/api/swarming/v1/tasks/new', data=raw_request)
if not result:
on_error.report('Failed to trigger task %s' % raw_request['name'])
return None
if result.get('error'):
# The reply is an error.
msg = 'Failed to trigger task %s' % raw_request['name']
if result['error'].get('errors'):
for err in result['error']['errors']:
if err.get('message'):
msg += '\nMessage: %s' % err['message']
if err.get('debugInfo'):
msg += '\nDebug info:\n%s' % err['debugInfo']
elif result['error'].get('message'):
msg += '\nMessage: %s' % result['error']['message']
on_error.report(msg)
return None
return result
def setup_googletest(env, shards, index):
"""Sets googletest specific environment variables."""
if shards > 1:
assert not any(i['key'] == 'GTEST_SHARD_INDEX' for i in env), env
assert not any(i['key'] == 'GTEST_TOTAL_SHARDS' for i in env), env
env = env[:]
env.append({'key': 'GTEST_SHARD_INDEX', 'value': str(index)})
env.append({'key': 'GTEST_TOTAL_SHARDS', 'value': str(shards)})
return env
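# Example (added): with shards=3 and index=1 the returned env list gains
#   {'key': 'GTEST_SHARD_INDEX', 'value': '1'}
#   {'key': 'GTEST_TOTAL_SHARDS', 'value': '3'}
# which is how gtest binaries partition their test cases across shards.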
def trigger_task_shards(swarming, task_request, shards):
"""Triggers one or many subtasks of a sharded task.
Returns:
Dict with task details, returned to caller as part of --dump-json output.
None in case of failure.
"""
def convert(index):
req = task_request_to_raw_request(task_request)
if shards > 1:
req['properties']['env'] = setup_googletest(
req['properties']['env'], shards, index)
req['name'] += ':%s:%s' % (index, shards)
return req
requests = [convert(index) for index in xrange(shards)]
tasks = {}
priority_warning = False
for index, request in enumerate(requests):
task = swarming_trigger(swarming, request)
if not task:
break
logging.info('Request result: %s', task)
if (not priority_warning and
task['request']['priority'] != task_request.priority):
priority_warning = True
print >> sys.stderr, (
'Priority was reset to %s' % task['request']['priority'])
tasks[request['name']] = {
'shard_index': index,
'task_id': task['task_id'],
'view_url': '%s/user/task/%s' % (swarming, task['task_id']),
}
# Some shards weren't triggered. Abort everything.
if len(tasks) != len(requests):
if tasks:
print >> sys.stderr, 'Only %d shard(s) out of %d were triggered' % (
len(tasks), len(requests))
for task_dict in tasks.itervalues():
abort_task(swarming, task_dict['task_id'])
return None
return tasks
### Collection.
# How often to print status updates to stdout in 'collect'.
STATUS_UPDATE_INTERVAL = 15 * 60.
class State(object):
"""States in which a task can be.
WARNING: Copy-pasted from appengine/swarming/server/task_result.py. These
values are part of the API so if they change, the API changed.
It's in fact an enum. Values should be in decreasing order of importance.
"""
RUNNING = 0x10
PENDING = 0x20
EXPIRED = 0x30
TIMED_OUT = 0x40
BOT_DIED = 0x50
CANCELED = 0x60
COMPLETED = 0x70
STATES = (
'RUNNING', 'PENDING', 'EXPIRED', 'TIMED_OUT', 'BOT_DIED', 'CANCELED',
'COMPLETED')
STATES_RUNNING = ('RUNNING', 'PENDING')
STATES_NOT_RUNNING = (
'EXPIRED', 'TIMED_OUT', 'BOT_DIED', 'CANCELED', 'COMPLETED')
STATES_DONE = ('TIMED_OUT', 'COMPLETED')
STATES_ABANDONED = ('EXPIRED', 'BOT_DIED', 'CANCELED')
_NAMES = {
RUNNING: 'Running',
PENDING: 'Pending',
EXPIRED: 'Expired',
TIMED_OUT: 'Execution timed out',
BOT_DIED: 'Bot died',
CANCELED: 'User canceled',
COMPLETED: 'Completed',
}
_ENUMS = {
'RUNNING': RUNNING,
'PENDING': PENDING,
'EXPIRED': EXPIRED,
'TIMED_OUT': TIMED_OUT,
'BOT_DIED': BOT_DIED,
'CANCELED': CANCELED,
'COMPLETED': COMPLETED,
}
@classmethod
def to_string(cls, state):
"""Returns a user-readable string representing a State."""
if state not in cls._NAMES:
raise ValueError('Invalid state %s' % state)
return cls._NAMES[state]
@classmethod
def from_enum(cls, state):
"""Returns int value based on the string."""
if state not in cls._ENUMS:
raise ValueError('Invalid state %s' % state)
return cls._ENUMS[state]
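# Hedged usage sketch (added; not in the upstream file):
#
#   State.to_string(State.COMPLETED)  # -> 'Completed'
#   State.from_enum('PENDING')        # -> State.PENDING == 0x20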
class TaskOutputCollector(object):
"""Assembles task execution summary (for --task-summary-json output).
Optionally fetches task outputs from isolate server to local disk (used when
--task-output-dir is passed).
This object is shared among multiple threads running 'retrieve_results'
function, in particular they call 'process_shard_result' method in parallel.
"""
def __init__(self, task_output_dir, shard_count):
"""Initializes TaskOutputCollector, ensures |task_output_dir| exists.
Args:
task_output_dir: (optional) local directory to put fetched files to.
shard_count: expected number of task shards.
"""
self.task_output_dir = (
unicode(os.path.abspath(task_output_dir))
if task_output_dir else task_output_dir)
self.shard_count = shard_count
self._lock = threading.Lock()
self._per_shard_results = {}
self._storage = None
if self.task_output_dir and not fs.isdir(self.task_output_dir):
fs.makedirs(self.task_output_dir)
def process_shard_result(self, shard_index, result):
"""Stores results of a single task shard, fetches output files if necessary.
Modifies |result| in place.
shard_index is 0-based.
Called concurrently from multiple threads.
"""
# Sanity check index is in expected range.
assert isinstance(shard_index, int)
if shard_index < 0 or shard_index >= self.shard_count:
logging.warning(
'Shard index %d is outside of expected range: [0; %d]',
shard_index, self.shard_count - 1)
return
if result.get('outputs_ref'):
ref = result['outputs_ref']
result['outputs_ref']['view_url'] = '%s/browse?%s' % (
ref['isolatedserver'],
urllib.urlencode(
[('namespace', ref['namespace']), ('hash', ref['isolated'])]))
# Store result dict of that shard, ignore results we've already seen.
with self._lock:
if shard_index in self._per_shard_results:
logging.warning('Ignoring duplicate shard index %d', shard_index)
return
self._per_shard_results[shard_index] = result
# Fetch output files if necessary.
if self.task_output_dir and result.get('outputs_ref'):
storage = self._get_storage(
result['outputs_ref']['isolatedserver'],
result['outputs_ref']['namespace'])
if storage:
# Output files are supposed to be small and they are not reused across
# tasks. So use MemoryCache for them instead of on-disk cache. Make
# files writable, so that calling script can delete them.
isolateserver.fetch_isolated(
result['outputs_ref']['isolated'],
storage,
isolateserver.MemoryCache(file_mode_mask=0700),
os.path.join(self.task_output_dir, str(shard_index)),
False)
def finalize(self):
"""Assembles and returns task summary JSON, shutdowns underlying Storage."""
with self._lock:
# Write an array of shard results with None for missing shards.
summary = {
'shards': [
self._per_shard_results.get(i) for i in xrange(self.shard_count)
],
}
# Write summary.json to task_output_dir as well.
if self.task_output_dir:
tools.write_json(
os.path.join(self.task_output_dir, u'summary.json'),
summary,
False)
if self._storage:
self._storage.close()
self._storage = None
return summary
def _get_storage(self, isolate_server, namespace):
"""Returns isolateserver.Storage to use to fetch files."""
assert self.task_output_dir
with self._lock:
if not self._storage:
self._storage = isolateserver.get_storage(isolate_server, namespace)
else:
# Shards must all use exact same isolate server and namespace.
if self._storage.location != isolate_server:
logging.error(
'Task shards are using multiple isolate servers: %s and %s',
self._storage.location, isolate_server)
return None
if self._storage.namespace != namespace:
logging.error(
'Task shards are using multiple namespaces: %s and %s',
self._storage.namespace, namespace)
return None
return self._storage
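# Hedged usage sketch (added): one collector per triggered task group; shard
# results are fed in as they arrive and the summary is read at the end:
#
#   collector = TaskOutputCollector(task_output_dir=None, shard_count=2)
#   collector.process_shard_result(0, {'state': 'COMPLETED'})
#   collector.process_shard_result(1, {'state': 'COMPLETED'})
#   collector.finalize()  # -> {'shards': [{'state': ...}, {'state': ...}]}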
def now():
"""Exists so it can be mocked easily."""
return time.time()
def parse_time(value):
"""Converts serialized time from the API to datetime.datetime."""
# When microseconds are 0, the '.123456' suffix is elided. This means the
# serialized format is not consistent, which confuses the hell out of python.
for fmt in ('%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S'):
try:
return datetime.datetime.strptime(value, fmt)
except ValueError:
pass
raise ValueError('Failed to parse %s' % value)
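# Both serialized shapes parse identically (added illustration):
#
#   parse_time('2010-01-02T03:04:05.123456')
#   parse_time('2010-01-02T03:04:05')  # microseconds elided when zero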
def retrieve_results(
base_url, shard_index, task_id, timeout, should_stop, output_collector):
"""Retrieves results for a single task ID.
Returns:
<result dict> on success.
None on failure.
"""
assert isinstance(timeout, float), timeout
result_url = '%s/_ah/api/swarming/v1/task/%s/result' % (base_url, task_id)
output_url = '%s/_ah/api/swarming/v1/task/%s/stdout' % (base_url, task_id)
started = now()
deadline = started + timeout if timeout else None
attempt = 0
while not should_stop.is_set():
attempt += 1
# Waiting for too long -> give up.
current_time = now()
if deadline and current_time >= deadline:
logging.error('retrieve_results(%s) timed out on attempt %d',
base_url, attempt)
return None
# Do not spin too fast. Spin faster at the beginning though.
# Start with 1 sec delay and for each 30 sec of waiting add another second
# of delay, until hitting 15 sec ceiling.
if attempt > 1:
max_delay = min(15, 1 + (current_time - started) / 30.0)
delay = min(max_delay, deadline - current_time) if deadline else max_delay
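      # Worked numbers (added): ~1s delay just after start, ~3s after a minute
      # of waiting, and a flat 15s cap once roughly seven minutes have passed.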
if delay > 0:
logging.debug('Waiting %.1f sec before retrying', delay)
should_stop.wait(delay)
if should_stop.is_set():
return None
# Disable internal retries in net.url_read_json, since we are doing retries
# ourselves.
# TODO(maruel): We'd need to know if it's a 404 and not retry at all.
# TODO(maruel): Sadly, we currently have to poll here. Use hanging HTTP
# request on GAE v2.
result = net.url_read_json(result_url, retry_50x=False)
if not result:
continue
if result.get('error'):
# An error occurred.
if result['error'].get('errors'):
for err in result['error']['errors']:
logging.warning(
'Error while reading task: %s; %s',
err.get('message'), err.get('debugInfo'))
elif result['error'].get('message'):
logging.warning(
'Error while reading task: %s', result['error']['message'])
continue
if result['state'] in State.STATES_NOT_RUNNING:
# TODO(maruel): Not always fetch stdout?
out = net.url_read_json(output_url)
result['output'] = out.get('output') if out else out
# Record the result, try to fetch attached output files (if any).
if output_collector:
# TODO(vadimsh): Respect |should_stop| and |deadline| when fetching.
output_collector.process_shard_result(shard_index, result)
if result.get('internal_failure'):
logging.error('Internal error!')
elif result['state'] == 'BOT_DIED':
logging.error('Bot died!')
return result
def convert_to_old_format(result):
"""Converts the task result data from Endpoints API format to old API format
for compatibility.
This goes into the file generated as --task-summary-json.
"""
# Sets default.
result.setdefault('abandoned_ts', None)
result.setdefault('bot_id', None)
result.setdefault('bot_version', None)
result.setdefault('children_task_ids', [])
result.setdefault('completed_ts', None)
result.setdefault('cost_saved_usd', None)
result.setdefault('costs_usd', None)
result.setdefault('deduped_from', None)
result.setdefault('name', None)
result.setdefault('outputs_ref', None)
result.setdefault('properties_hash', None)
result.setdefault('server_versions', None)
result.setdefault('started_ts', None)
result.setdefault('tags', None)
result.setdefault('user', None)
  # Conversion back to the old API.
duration = result.pop('duration', None)
result['durations'] = [duration] if duration else []
exit_code = result.pop('exit_code', None)
result['exit_codes'] = [int(exit_code)] if exit_code else []
result['id'] = result.pop('task_id')
result['isolated_out'] = result.get('outputs_ref', None)
output = result.pop('output', None)
result['outputs'] = [output] if output else []
# properties_hash
# server_version
  # Endpoints returns 'state' as a string. For compatibility with old code,
  # convert it to an int.
result['state'] = State.from_enum(result['state'])
result['try_number'] = (
int(result['try_number']) if result.get('try_number') else None)
if 'bot_dimensions' in result:
result['bot_dimensions'] = {
i['key']: i['value'] for i in result['bot_dimensions']
}
else:
result['bot_dimensions'] = None
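# Hedged sketch (assumed values, not from the original source): an Endpoints
# result such as
#   {'task_id': '5f1a2b', 'duration': 12.3, 'exit_code': '0',
#    'state': 'COMPLETED'}
# is mutated in place by convert_to_old_format() into roughly
#   {'id': '5f1a2b', 'durations': [12.3], 'exit_codes': [0],
#    'state': <int>, 'outputs': [], ...}
# with the remaining documented keys defaulted to None or [] as above.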
def yield_results(
swarm_base_url, task_ids, timeout, max_threads, print_status_updates,
output_collector):
"""Yields swarming task results from the swarming server as (index, result).
Duplicate shards are ignored. Shards are yielded in order of completion.
  Timed out shards are NOT yielded at all. The caller can compare the number
  of yielded shards with len(task_ids) to verify that all shards completed.
  max_threads is optional and is used to limit the number of parallel fetches
  done. Since the number of task_ids is generally <= 10, it's normally not
  worth limiting the number of threads. Mostly used for testing purposes.
output_collector is an optional instance of TaskOutputCollector that will be
used to fetch files produced by a task from isolate server to the local disk.
Yields:
(index, result). In particular, 'result' is defined as the
GetRunnerResults() function in services/swarming/server/test_runner.py.
"""
number_threads = (
min(max_threads, len(task_ids)) if max_threads else len(task_ids))
should_stop = threading.Event()
results_channel = threading_utils.TaskChannel()
with threading_utils.ThreadPool(number_threads, number_threads, 0) as pool:
try:
# Adds a task to the thread pool to call 'retrieve_results' and return
# the results together with shard_index that produced them (as a tuple).
def enqueue_retrieve_results(shard_index, task_id):
task_fn = lambda *args: (shard_index, retrieve_results(*args))
pool.add_task(
0, results_channel.wrap_task(task_fn), swarm_base_url, shard_index,
task_id, timeout, should_stop, output_collector)
# Enqueue 'retrieve_results' calls for each shard key to run in parallel.
for shard_index, task_id in enumerate(task_ids):
enqueue_retrieve_results(shard_index, task_id)
# Wait for all of them to finish.
shards_remaining = range(len(task_ids))
active_task_count = len(task_ids)
while active_task_count:
shard_index, result = None, None
try:
shard_index, result = results_channel.pull(
timeout=STATUS_UPDATE_INTERVAL)
except threading_utils.TaskChannel.Timeout:
if print_status_updates:
print(
'Waiting for results from the following shards: %s' %
', '.join(map(str, shards_remaining)))
sys.stdout.flush()
continue
except Exception:
logging.exception('Unexpected exception in retrieve_results')
# A call to 'retrieve_results' finished (successfully or not).
active_task_count -= 1
if not result:
logging.error('Failed to retrieve the results for a swarming key')
continue
# Yield back results to the caller.
assert shard_index in shards_remaining
shards_remaining.remove(shard_index)
yield shard_index, result
finally:
# Done or aborted with Ctrl+C, kill the remaining threads.
should_stop.set()
def decorate_shard_output(swarming, shard_index, metadata):
"""Returns wrapped output for swarming task shard."""
if metadata.get('started_ts') and not metadata.get('deduped_from'):
pending = '%.1fs' % (
parse_time(metadata['started_ts']) - parse_time(metadata['created_ts'])
).total_seconds()
else:
pending = 'N/A'
if metadata.get('duration') is not None:
duration = '%.1fs' % metadata['duration']
else:
duration = 'N/A'
if metadata.get('exit_code') is not None:
    # Integers are encoded as strings so as not to lose precision.
exit_code = '%s' % metadata['exit_code']
else:
exit_code = 'N/A'
bot_id = metadata.get('bot_id') or 'N/A'
url = '%s/user/task/%s' % (swarming, metadata['task_id'])
tag_header = 'Shard %d %s' % (shard_index, url)
tag_footer = (
'End of shard %d Pending: %s Duration: %s Bot: %s Exit: %s' % (
shard_index, pending, duration, bot_id, exit_code))
tag_len = max(len(tag_header), len(tag_footer))
dash_pad = '+-%s-+\n' % ('-' * tag_len)
tag_header = '| %s |\n' % tag_header.ljust(tag_len)
tag_footer = '| %s |\n' % tag_footer.ljust(tag_len)
header = dash_pad + tag_header + dash_pad
footer = dash_pad + tag_footer + dash_pad[:-1]
output = (metadata.get('output') or '').rstrip() + '\n'
return header + output + footer
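# Approximate shape of the decorated block (illustrative values only; the
# real width is padded to max(len(tag_header), len(tag_footer))):
#   +--------------------------------------------------------------+
#   | Shard 0 https://server.example.com/user/task/5f1a2b          |
#   +--------------------------------------------------------------+
#   <task stdout>
#   +--------------------------------------------------------------+
#   | End of shard 0 Pending: 1.2s Duration: 3.4s Bot: b1 Exit: 0  |
#   +--------------------------------------------------------------+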
def collect(
swarming, task_ids, timeout, decorate, print_status_updates,
task_summary_json, task_output_dir):
"""Retrieves results of a Swarming task.
Returns:
process exit code that should be returned to the user.
"""
# Collect summary JSON and output files (if task_output_dir is not None).
output_collector = TaskOutputCollector(task_output_dir, len(task_ids))
seen_shards = set()
exit_code = None
total_duration = 0
try:
for index, metadata in yield_results(
swarming, task_ids, timeout, None, print_status_updates,
output_collector):
seen_shards.add(index)
# Default to failure if there was no process that even started.
shard_exit_code = metadata.get('exit_code')
if shard_exit_code:
# It's encoded as a string, so bool('0') is True.
shard_exit_code = int(shard_exit_code)
if shard_exit_code or exit_code is None:
exit_code = shard_exit_code
total_duration += metadata.get('duration', 0)
if decorate:
print(decorate_shard_output(swarming, index, metadata))
if len(seen_shards) < len(task_ids):
print('')
else:
print('%s: %s %s' % (
metadata.get('bot_id', 'N/A'),
metadata['task_id'],
shard_exit_code))
if metadata['output']:
output = metadata['output'].rstrip()
if output:
print(''.join(' %s\n' % l for l in output.splitlines()))
finally:
summary = output_collector.finalize()
if task_summary_json:
# TODO(maruel): Make this optional.
for i in summary['shards']:
if i:
convert_to_old_format(i)
tools.write_json(task_summary_json, summary, False)
if decorate and total_duration:
print('Total duration: %.1fs' % total_duration)
if len(seen_shards) != len(task_ids):
missing_shards = [x for x in range(len(task_ids)) if x not in seen_shards]
print >> sys.stderr, ('Results from some shards are missing: %s' %
', '.join(map(str, missing_shards)))
return 1
return exit_code if exit_code is not None else 1
### API management.
class APIError(Exception):
pass
def endpoints_api_discovery_apis(host):
"""Uses Cloud Endpoints' API Discovery Service to returns metadata about all
the APIs exposed by a host.
https://developers.google.com/discovery/v1/reference/apis/list
"""
data = net.url_read_json(host + '/_ah/api/discovery/v1/apis')
if data is None:
raise APIError('Failed to discover APIs on %s' % host)
out = {}
for api in data['items']:
if api['id'] == 'discovery:v1':
continue
# URL is of the following form:
# url = host + (
# '/_ah/api/discovery/v1/apis/%s/%s/rest' % (api['id'], api['version'])
api_data = net.url_read_json(api['discoveryRestUrl'])
if api_data is None:
raise APIError('Failed to discover %s on %s' % (api['id'], host))
out[api['id']] = api_data
return out
### Commands.
def abort_task(_swarming, _manifest):
"""Given a task manifest that was triggered, aborts its execution."""
  # TODO(vadimsh): Not supported by the server yet.
def add_filter_options(parser):
parser.filter_group = optparse.OptionGroup(parser, 'Filtering slaves')
parser.filter_group.add_option(
'-d', '--dimension', default=[], action='append', nargs=2,
dest='dimensions', metavar='FOO bar',
help='dimension to filter on')
parser.add_option_group(parser.filter_group)
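# Illustrative (assumption): passing '-d os Linux -d pool Chrome' yields
# options.dimensions == [('os', 'Linux'), ('pool', 'Chrome')], which
# process_trigger_options() below converts into a dict.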
def add_sharding_options(parser):
parser.sharding_group = optparse.OptionGroup(parser, 'Sharding options')
parser.sharding_group.add_option(
'--shards', type='int', default=1,
help='Number of shards to trigger and collect.')
parser.add_option_group(parser.sharding_group)
def add_trigger_options(parser):
"""Adds all options to trigger a task on Swarming."""
isolateserver.add_isolate_server_options(parser)
add_filter_options(parser)
parser.task_group = optparse.OptionGroup(parser, 'Task properties')
parser.task_group.add_option(
'-s', '--isolated',
help='Hash of the .isolated to grab from the isolate server')
parser.task_group.add_option(
'-e', '--env', default=[], action='append', nargs=2, metavar='FOO bar',
help='Environment variables to set')
parser.task_group.add_option(
'--priority', type='int', default=100,
help='The lower value, the more important the task is')
parser.task_group.add_option(
'-T', '--task-name',
help='Display name of the task. Defaults to '
'<base_name>/<dimensions>/<isolated hash>/<timestamp> if an '
'isolated file is provided, if a hash is provided, it defaults to '
'<user>/<dimensions>/<isolated hash>/<timestamp>')
parser.task_group.add_option(
'--tags', action='append', default=[],
help='Tags to assign to the task.')
parser.task_group.add_option(
'--user', default='',
help='User associated with the task. Defaults to authenticated user on '
'the server.')
parser.task_group.add_option(
'--idempotent', action='store_true', default=False,
help='When set, the server will actively try to find a previous task '
'with the same parameter and return this result instead if possible')
parser.task_group.add_option(
'--expiration', type='int', default=6*60*60,
help='Seconds to allow the task to be pending for a bot to run before '
'this task request expires.')
parser.task_group.add_option(
'--deadline', type='int', dest='expiration',
help=optparse.SUPPRESS_HELP)
parser.task_group.add_option(
'--hard-timeout', type='int', default=60*60,
help='Seconds to allow the task to complete.')
parser.task_group.add_option(
'--io-timeout', type='int', default=20*60,
help='Seconds to allow the task to be silent.')
parser.task_group.add_option(
'--raw-cmd', action='store_true', default=False,
help='When set, the command after -- is used as-is without run_isolated. '
'In this case, no .isolated file is expected.')
parser.add_option_group(parser.task_group)
def process_trigger_options(parser, options, args):
"""Processes trigger options and uploads files to isolate server if necessary.
"""
options.dimensions = dict(options.dimensions)
options.env = dict(options.env)
if not options.dimensions:
    parser.error('Please specify at least one --dimension')
if options.raw_cmd:
if not args:
parser.error(
'Arguments with --raw-cmd should be passed after -- as command '
'delimiter.')
if options.isolate_server:
parser.error('Can\'t use both --raw-cmd and --isolate-server.')
command = args
if not options.task_name:
options.task_name = u'%s/%s' % (
options.user,
'_'.join(
'%s=%s' % (k, v)
for k, v in sorted(options.dimensions.iteritems())))
inputs_ref = None
else:
isolateserver.process_isolate_server_options(parser, options, False)
try:
command, inputs_ref = isolated_handle_options(options, args)
except ValueError as e:
parser.error(str(e))
# If inputs_ref is used, command is actually extra_args. Otherwise it's an
# actual command to run.
properties = TaskProperties(
command=None if inputs_ref else command,
dimensions=options.dimensions,
env=options.env,
execution_timeout_secs=options.hard_timeout,
extra_args=command if inputs_ref else None,
grace_period_secs=30,
idempotent=options.idempotent,
inputs_ref=inputs_ref,
io_timeout_secs=options.io_timeout)
if not all(len(t.split(':', 1)) == 2 for t in options.tags):
parser.error('--tags must be in the format key:value')
return NewTaskRequest(
expiration_secs=options.expiration,
name=options.task_name,
parent_task_id=os.environ.get('SWARMING_TASK_ID', ''),
priority=options.priority,
properties=properties,
tags=options.tags,
user=options.user)
def add_collect_options(parser):
parser.server_group.add_option(
'-t', '--timeout',
type='float',
default=80*60.,
help='Timeout to wait for result, set to 0 for no timeout; default: '
'%default s')
parser.group_logging.add_option(
'--decorate', action='store_true', help='Decorate output')
parser.group_logging.add_option(
'--print-status-updates', action='store_true',
help='Print periodic status updates')
parser.task_output_group = optparse.OptionGroup(parser, 'Task output')
parser.task_output_group.add_option(
'--task-summary-json',
metavar='FILE',
help='Dump a summary of task results to this file as json. It contains '
           'only shard statuses as known to the server directly. Any output files '
'emitted by the task can be collected by using --task-output-dir')
parser.task_output_group.add_option(
'--task-output-dir',
metavar='DIR',
help='Directory to put task results into. When the task finishes, this '
'directory contains per-shard directory with output files produced '
'by shards: <task-output-dir>/<zero-based-shard-index>/.')
parser.add_option_group(parser.task_output_group)
@subcommand.usage('bots...')
def CMDbot_delete(parser, args):
"""Forcibly deletes bots from the Swarming server."""
parser.add_option(
'-f', '--force', action='store_true',
help='Do not prompt for confirmation')
options, args = parser.parse_args(args)
if not args:
    parser.error('Please specify bots to delete')
bots = sorted(args)
if not options.force:
print('Delete the following bots?')
for bot in bots:
print(' %s' % bot)
if raw_input('Continue? [y/N] ') not in ('y', 'Y'):
print('Goodbye.')
return 1
result = 0
for bot in bots:
url = '%s/_ah/api/swarming/v1/bot/%s/delete' % (options.swarming, bot)
if net.url_read_json(url, data={}, method='POST') is None:
print('Deleting %s failed. Probably already gone' % bot)
result = 1
return result
def CMDbots(parser, args):
"""Returns information about the bots connected to the Swarming server."""
add_filter_options(parser)
parser.filter_group.add_option(
'--dead-only', action='store_true',
help='Only print dead bots, useful to reap them and reimage broken bots')
parser.filter_group.add_option(
'-k', '--keep-dead', action='store_true',
help='Do not filter out dead bots')
parser.filter_group.add_option(
'-b', '--bare', action='store_true',
help='Do not print out dimensions')
options, args = parser.parse_args(args)
if options.keep_dead and options.dead_only:
parser.error('Use only one of --keep-dead and --dead-only')
bots = []
cursor = None
limit = 250
# Iterate via cursors.
base_url = (
options.swarming + '/_ah/api/swarming/v1/bots/list?limit=%d' % limit)
while True:
url = base_url
if cursor:
url += '&cursor=%s' % urllib.quote(cursor)
data = net.url_read_json(url)
if data is None:
print >> sys.stderr, 'Failed to access %s' % options.swarming
return 1
bots.extend(data['items'])
cursor = data.get('cursor')
if not cursor:
break
for bot in natsort.natsorted(bots, key=lambda x: x['bot_id']):
if options.dead_only:
if not bot.get('is_dead'):
continue
elif not options.keep_dead and bot.get('is_dead'):
continue
# If the user requested to filter on dimensions, ensure the bot has all the
# dimensions requested.
dimensions = {i['key']: i['value'] for i in bot['dimensions']}
for key, value in options.dimensions:
if key not in dimensions:
break
      # A bot can have multiple values for a key, for example,
# {'os': ['Windows', 'Windows-6.1']}, so that --dimension os=Windows will
# be accepted.
if isinstance(dimensions[key], list):
if value not in dimensions[key]:
break
else:
if value != dimensions[key]:
break
else:
print bot['bot_id']
if not options.bare:
print ' %s' % json.dumps(dimensions, sort_keys=True)
if bot.get('task_id'):
print ' task: %s' % bot['task_id']
return 0
@subcommand.usage('--json file | task_id...')
def CMDcollect(parser, args):
"""Retrieves results of one or multiple Swarming task by its ID.
The result can be in multiple part if the execution was sharded. It can
potentially have retries.
"""
add_collect_options(parser)
parser.add_option(
'-j', '--json',
help='Load the task ids from .json as saved by trigger --dump-json')
options, args = parser.parse_args(args)
if not args and not options.json:
parser.error('Must specify at least one task id or --json.')
if args and options.json:
parser.error('Only use one of task id or --json.')
if options.json:
options.json = unicode(os.path.abspath(options.json))
try:
with fs.open(options.json, 'rb') as f:
tasks = sorted(
json.load(f)['tasks'].itervalues(), key=lambda x: x['shard_index'])
args = [t['task_id'] for t in tasks]
except (KeyError, IOError, TypeError, ValueError):
parser.error('Failed to parse %s' % options.json)
else:
valid = frozenset('0123456789abcdef')
if any(not valid.issuperset(task_id) for task_id in args):
parser.error('Task ids are 0-9a-f.')
try:
return collect(
options.swarming,
args,
options.timeout,
options.decorate,
options.print_status_updates,
options.task_summary_json,
options.task_output_dir)
except Failure:
on_error.report(None)
return 1
@subcommand.usage('[filename]')
def CMDput_bootstrap(parser, args):
"""Uploads a new version of bootstrap.py."""
options, args = parser.parse_args(args)
if len(args) != 1:
parser.error('Must specify file to upload')
url = options.swarming + '/_ah/api/swarming/v1/server/put_bootstrap'
path = unicode(os.path.abspath(args[0]))
with fs.open(path, 'rb') as f:
content = f.read().decode('utf-8')
data = net.url_read_json(url, data={'content': content})
print data
return 0
@subcommand.usage('[filename]')
def CMDput_bot_config(parser, args):
"""Uploads a new version of bot_config.py."""
options, args = parser.parse_args(args)
if len(args) != 1:
parser.error('Must specify file to upload')
url = options.swarming + '/_ah/api/swarming/v1/server/put_bot_config'
path = unicode(os.path.abspath(args[0]))
with fs.open(path, 'rb') as f:
content = f.read().decode('utf-8')
data = net.url_read_json(url, data={'content': content})
print data
return 0
@subcommand.usage('[method name]')
def CMDquery(parser, args):
"""Returns raw JSON information via an URL endpoint. Use 'query-list' to
gather the list of API methods from the server.
Examples:
Listing all bots:
swarming.py query -S server-url.com bots/list
Listing last 10 tasks on a specific bot named 'swarm1':
swarming.py query -S server-url.com --limit 10 bot/swarm1/tasks
Listing last 10 tasks with tags os:Ubuntu-12.04 and pool:Chrome. Note that
quoting is important!:
swarming.py query -S server-url.com --limit 10 \\
'tasks/list?tags=os:Ubuntu-12.04&tags=pool:Chrome'
"""
CHUNK_SIZE = 250
parser.add_option(
'-L', '--limit', type='int', default=200,
help='Limit to enforce on limitless items (like number of tasks); '
'default=%default')
parser.add_option(
'--json', help='Path to JSON output file (otherwise prints to stdout)')
parser.add_option(
'--progress', action='store_true',
help='Prints a dot at each request to show progress')
options, args = parser.parse_args(args)
if len(args) != 1:
parser.error(
'Must specify only method name and optionally query args properly '
'escaped.')
base_url = options.swarming + '/_ah/api/swarming/v1/' + args[0]
url = base_url
if options.limit:
    # Pick the right query-string separator; adjust if this doesn't work out.
merge_char = '&' if '?' in url else '?'
url += '%slimit=%d' % (merge_char, min(CHUNK_SIZE, options.limit))
data = net.url_read_json(url)
if data is None:
# TODO(maruel): Do basic diagnostic.
print >> sys.stderr, 'Failed to access %s' % url
return 1
  # Some endpoints support cursors. Automatically fetch the following pages
  # when a 'cursor' item is present in the response.
while (
data.get('cursor') and
(not options.limit or len(data['items']) < options.limit)):
merge_char = '&' if '?' in base_url else '?'
url = base_url + '%scursor=%s' % (merge_char, urllib.quote(data['cursor']))
if options.limit:
url += '&limit=%d' % min(CHUNK_SIZE, options.limit - len(data['items']))
if options.progress:
sys.stdout.write('.')
sys.stdout.flush()
new = net.url_read_json(url)
if new is None:
if options.progress:
print('')
print >> sys.stderr, 'Failed to access %s' % options.swarming
return 1
data['items'].extend(new.get('items', []))
data['cursor'] = new.get('cursor')
if options.progress:
print('')
if options.limit and len(data.get('items', [])) > options.limit:
data['items'] = data['items'][:options.limit]
data.pop('cursor', None)
if options.json:
options.json = unicode(os.path.abspath(options.json))
tools.write_json(options.json, data, True)
else:
try:
tools.write_json(sys.stdout, data, False)
sys.stdout.write('\n')
except IOError:
pass
return 0
def CMDquery_list(parser, args):
"""Returns list of all the Swarming APIs that can be used with command
'query'.
"""
parser.add_option(
'--json', help='Path to JSON output file (otherwise prints to stdout)')
options, args = parser.parse_args(args)
if args:
parser.error('No argument allowed.')
try:
apis = endpoints_api_discovery_apis(options.swarming)
except APIError as e:
parser.error(str(e))
if options.json:
options.json = unicode(os.path.abspath(options.json))
with fs.open(options.json, 'wb') as f:
json.dump(apis, f)
else:
help_url = (
'https://apis-explorer.appspot.com/apis-explorer/?base=%s/_ah/api#p/' %
options.swarming)
for api_id, api in sorted(apis.iteritems()):
print api_id
print ' ' + api['description']
for resource_name, resource in sorted(api['resources'].iteritems()):
print ''
for method_name, method in sorted(resource['methods'].iteritems()):
# Only list the GET ones.
if method['httpMethod'] != 'GET':
continue
print '- %s.%s: %s' % (
resource_name, method_name, method['path'])
print ' ' + method['description']
print ' %s%s%s' % (help_url, api['servicePath'], method['id'])
return 0
@subcommand.usage('(hash|isolated) [-- extra_args]')
def CMDrun(parser, args):
"""Triggers a task and wait for the results.
Basically, does everything to run a command remotely.
"""
add_trigger_options(parser)
add_collect_options(parser)
add_sharding_options(parser)
options, args = parser.parse_args(args)
task_request = process_trigger_options(parser, options, args)
try:
tasks = trigger_task_shards(
options.swarming, task_request, options.shards)
except Failure as e:
on_error.report(
'Failed to trigger %s(%s): %s' %
(options.task_name, args[0], e.args[0]))
return 1
if not tasks:
on_error.report('Failed to trigger the task.')
return 1
print('Triggered task: %s' % options.task_name)
task_ids = [
t['task_id']
for t in sorted(tasks.itervalues(), key=lambda x: x['shard_index'])
]
try:
return collect(
options.swarming,
task_ids,
options.timeout,
options.decorate,
options.print_status_updates,
options.task_summary_json,
options.task_output_dir)
except Failure:
on_error.report(None)
return 1
@subcommand.usage('task_id -- <extra_args>')
def CMDreproduce(parser, args):
"""Runs a task locally that was triggered on the server.
  This runs locally the same commands that were run on the bot. The
  downloaded data will be in a subdirectory named 'work' of the current
  working directory.
You can pass further additional arguments to the target command by passing
them after --.
"""
options, args = parser.parse_args(args)
extra_args = []
if not args:
parser.error('Must specify exactly one task id.')
if len(args) > 1:
if args[1] == '--':
if len(args) > 2:
extra_args = args[2:]
else:
extra_args = args[1:]
url = options.swarming + '/_ah/api/swarming/v1/task/%s/request' % args[0]
request = net.url_read_json(url)
if not request:
print >> sys.stderr, 'Failed to retrieve request data for the task'
return 1
workdir = unicode(os.path.abspath('work'))
if not fs.isdir(workdir):
fs.mkdir(workdir)
properties = request['properties']
env = None
if properties.get('env'):
env = os.environ.copy()
logging.info('env: %r', properties['env'])
for i in properties['env']:
key = i['key'].encode('utf-8')
if not i['value']:
env.pop(key, None)
else:
env[key] = i['value'].encode('utf-8')
if properties.get('inputs_ref'):
# Create the tree.
with isolateserver.get_storage(
properties['inputs_ref']['isolatedserver'],
properties['inputs_ref']['namespace']) as storage:
bundle = isolateserver.fetch_isolated(
properties['inputs_ref']['isolated'],
storage,
isolateserver.MemoryCache(file_mode_mask=0700),
workdir,
False)
command = bundle.command
if bundle.relative_cwd:
workdir = os.path.join(workdir, bundle.relative_cwd)
else:
command = properties['command']
try:
return subprocess.call(command + extra_args, env=env, cwd=workdir)
except OSError as e:
print >> sys.stderr, 'Failed to run: %s' % ' '.join(command)
print >> sys.stderr, str(e)
return 1
@subcommand.usage('bot_id')
def CMDterminate(parser, args):
"""Tells a bot to gracefully shut itself down as soon as it can.
  This is done by completing whatever task is currently running and then
  exiting the bot process.
"""
parser.add_option(
'--wait', action='store_true', help='Wait for the bot to terminate')
options, args = parser.parse_args(args)
if len(args) != 1:
parser.error('Please provide the bot id')
url = options.swarming + '/_ah/api/swarming/v1/bot/%s/terminate' % args[0]
request = net.url_read_json(url, data={})
if not request:
print >> sys.stderr, 'Failed to ask for termination'
return 1
if options.wait:
return collect(
options.swarming, [request['task_id']], 0., False, False, None, None)
return 0
@subcommand.usage("(hash|isolated) [-- extra_args|raw command]")
def CMDtrigger(parser, args):
"""Triggers a Swarming task.
Accepts either the hash (sha1) of a .isolated file already uploaded or the
  path to a .isolated file to archive.
  If a .isolated file is specified instead of a hash, it is first archived.
Passes all extra arguments provided after '--' as additional command line
arguments for an isolated command specified in *.isolate file.
"""
add_trigger_options(parser)
add_sharding_options(parser)
parser.add_option(
'--dump-json',
metavar='FILE',
help='Dump details about the triggered task(s) to this file as json')
options, args = parser.parse_args(args)
task_request = process_trigger_options(parser, options, args)
try:
tasks = trigger_task_shards(
options.swarming, task_request, options.shards)
if tasks:
print('Triggered task: %s' % options.task_name)
tasks_sorted = sorted(
tasks.itervalues(), key=lambda x: x['shard_index'])
if options.dump_json:
data = {
'base_task_name': options.task_name,
'tasks': tasks,
}
tools.write_json(unicode(options.dump_json), data, True)
print('To collect results, use:')
print(' swarming.py collect -S %s --json %s' %
(options.swarming, options.dump_json))
else:
print('To collect results, use:')
print(' swarming.py collect -S %s %s' %
(options.swarming, ' '.join(t['task_id'] for t in tasks_sorted)))
print('Or visit:')
for t in tasks_sorted:
print(' ' + t['view_url'])
return int(not tasks)
except Failure:
on_error.report(None)
return 1
class OptionParserSwarming(logging_utils.OptionParserWithLogging):
def __init__(self, **kwargs):
logging_utils.OptionParserWithLogging.__init__(
self, prog='swarming.py', **kwargs)
self.server_group = optparse.OptionGroup(self, 'Server')
self.server_group.add_option(
'-S', '--swarming',
metavar='URL', default=os.environ.get('SWARMING_SERVER', ''),
help='Swarming server to use')
self.add_option_group(self.server_group)
auth.add_auth_options(self)
def parse_args(self, *args, **kwargs):
options, args = logging_utils.OptionParserWithLogging.parse_args(
self, *args, **kwargs)
auth.process_auth_options(self, options)
user = self._process_swarming(options)
if hasattr(options, 'user') and not options.user:
options.user = user
return options, args
def _process_swarming(self, options):
"""Processes the --swarming option and aborts if not specified.
Returns the identity as determined by the server.
"""
if not options.swarming:
self.error('--swarming is required.')
try:
options.swarming = net.fix_url(options.swarming)
except ValueError as e:
self.error('--swarming %s' % e)
on_error.report_on_exception_exit(options.swarming)
try:
user = auth.ensure_logged_in(options.swarming)
except ValueError as e:
self.error(str(e))
return user
def main(args):
dispatcher = subcommand.CommandDispatcher(__name__)
return dispatcher.execute(OptionParserSwarming(version=__version__), args)
if __name__ == '__main__':
fix_encoding.fix_encoding()
tools.disable_buffering()
colorama.init()
sys.exit(main(sys.argv[1:]))
| apache-2.0 |
MattCrystal/glowing-happiness | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
		ret += " (%s)" % self.event
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
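	# Illustrative (not in the original source): with slices covering
	# [0, 10], [10, 20] and [20, 30], find_time_slice(15) bisects down to
	# index 1; a timestamp outside every slice yields -1.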
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
Ali-aqrabawi/ezclinic | lib/django/middleware/common.py | 39 | 7843 | import logging
import re
from django import http
from django.conf import settings
from django.core.exceptions import PermissionDenied
from django.core.mail import mail_managers
from django.urls import is_valid_path
from django.utils.cache import get_conditional_response, set_response_etag
from django.utils.deprecation import MiddlewareMixin
from django.utils.encoding import force_text
from django.utils.http import unquote_etag
from django.utils.six.moves.urllib.parse import urlparse
logger = logging.getLogger('django.request')
class CommonMiddleware(MiddlewareMixin):
"""
"Common" middleware for taking care of some basic operations:
- Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS
- URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
this middleware appends missing slashes and/or prepends missing
"www."s.
- If APPEND_SLASH is set and the initial URL doesn't end with a
slash, and it is not found in urlpatterns, a new URL is formed by
appending a slash at the end. If this new URL is found in
urlpatterns, then an HTTP-redirect is returned to this new URL;
otherwise the initial URL is processed as usual.
This behavior can be customized by subclassing CommonMiddleware and
overriding the response_redirect_class attribute.
- ETags: If the USE_ETAGS setting is set, ETags will be calculated from
the entire page content and Not Modified responses will be returned
appropriately.
"""
response_redirect_class = http.HttpResponsePermanentRedirect
def process_request(self, request):
"""
Check for denied User-Agents and rewrite the URL based on
settings.APPEND_SLASH and settings.PREPEND_WWW
"""
# Check for denied User-Agents
if 'HTTP_USER_AGENT' in request.META:
for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
raise PermissionDenied('Forbidden user agent')
# Check for a redirect based on settings.PREPEND_WWW
host = request.get_host()
must_prepend = settings.PREPEND_WWW and host and not host.startswith('www.')
redirect_url = ('%s://www.%s' % (request.scheme, host)) if must_prepend else ''
# Check if a slash should be appended
if self.should_redirect_with_slash(request):
path = self.get_full_path_with_slash(request)
else:
path = request.get_full_path()
# Return a redirect if necessary
if redirect_url or path != request.get_full_path():
redirect_url += path
return self.response_redirect_class(redirect_url)
def should_redirect_with_slash(self, request):
"""
Return True if settings.APPEND_SLASH is True and appending a slash to
the request path turns an invalid path into a valid one.
"""
if settings.APPEND_SLASH and not request.get_full_path().endswith('/'):
urlconf = getattr(request, 'urlconf', None)
return (
not is_valid_path(request.path_info, urlconf) and
is_valid_path('%s/' % request.path_info, urlconf)
)
return False
def get_full_path_with_slash(self, request):
"""
Return the full path of the request with a trailing slash appended.
Raise a RuntimeError if settings.DEBUG is True and request.method is
POST, PUT, or PATCH.
"""
new_path = request.get_full_path(force_append_slash=True)
if settings.DEBUG and request.method in ('POST', 'PUT', 'PATCH'):
raise RuntimeError(
"You called this URL via %(method)s, but the URL doesn't end "
"in a slash and you have APPEND_SLASH set. Django can't "
"redirect to the slash URL while maintaining %(method)s data. "
"Change your form to point to %(url)s (note the trailing "
"slash), or set APPEND_SLASH=False in your Django settings." % {
'method': request.method,
'url': request.get_host() + new_path,
}
)
return new_path
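    # Illustrative (assumption): a GET for '/articles' that is not a valid
    # path while '/articles/' is valid makes should_redirect_with_slash()
    # return True, so a permanent redirect to '/articles/' is issued.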
def process_response(self, request, response):
"""
Calculate the ETag, if needed.
When the status code of the response is 404, it may redirect to a path
with an appended slash if should_redirect_with_slash() returns True.
"""
# If the given URL is "Not Found", then check if we should redirect to
# a path with a slash appended.
if response.status_code == 404:
if self.should_redirect_with_slash(request):
return self.response_redirect_class(self.get_full_path_with_slash(request))
if settings.USE_ETAGS:
if not response.has_header('ETag'):
set_response_etag(response)
if response.has_header('ETag'):
return get_conditional_response(
request,
etag=unquote_etag(response['ETag']),
response=response,
)
return response
class BrokenLinkEmailsMiddleware(MiddlewareMixin):
def process_response(self, request, response):
"""
Send broken link emails for relevant 404 NOT FOUND responses.
"""
if response.status_code == 404 and not settings.DEBUG:
domain = request.get_host()
path = request.get_full_path()
referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace')
if not self.is_ignorable_request(request, path, domain, referer):
ua = force_text(request.META.get('HTTP_USER_AGENT', '<none>'), errors='replace')
ip = request.META.get('REMOTE_ADDR', '<none>')
mail_managers(
"Broken %slink on %s" % (
('INTERNAL ' if self.is_internal_request(domain, referer) else ''),
domain
),
"Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
"IP address: %s\n" % (referer, path, ua, ip),
fail_silently=True)
return response
def is_internal_request(self, domain, referer):
"""
Returns True if the referring URL is the same domain as the current request.
"""
# Different subdomains are treated as different domains.
return bool(re.match("^https?://%s/" % re.escape(domain), referer))
def is_ignorable_request(self, request, uri, domain, referer):
"""
Return True if the given request *shouldn't* notify the site managers
according to project settings or in situations outlined by the inline
comments.
"""
# The referer is empty.
if not referer:
return True
# APPEND_SLASH is enabled and the referer is equal to the current URL
# without a trailing slash indicating an internal redirect.
if settings.APPEND_SLASH and uri.endswith('/') and referer == uri[:-1]:
return True
# A '?' in referer is identified as a search engine source.
if not self.is_internal_request(domain, referer) and '?' in referer:
return True
# The referer is equal to the current URL, ignoring the scheme (assumed
# to be a poorly implemented bot).
parsed_referer = urlparse(referer)
if parsed_referer.netloc in ['', domain] and parsed_referer.path == uri:
return True
return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
| mit |
Eric-Zhong/odoo | addons/l10n_bo/__openerp__.py | 259 | 1698 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Bolivia Localization Chart Account",
"version": "1.0",
"description": """
Bolivian accounting chart and tax localization.
    Bolivian chart of accounts and taxes according to current regulations.
""",
"author": "Cubic ERP",
"website": "http://cubicERP.com",
"category": "Localization/Account Charts",
"depends": [
"account_chart",
],
"data":[
"account_tax_code.xml",
"l10n_bo_chart.xml",
"account_tax.xml",
"l10n_bo_wizard.xml",
],
"demo_xml": [
],
"data": [
],
"active": False,
"installable": True,
"certificate" : "",
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
btallman/incubator-airflow | tests/ti_deps/deps/dag_ti_slots_available_dep.py | 20 | 1458 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from airflow.ti_deps.deps.dag_ti_slots_available_dep import DagTISlotsAvailableDep
from fake_models import FakeDag, FakeTask, FakeTI
class DagTISlotsAvailableDepTest(unittest.TestCase):
def test_concurrency_reached(self):
"""
Test concurrency reached should fail dep
"""
dag = FakeDag(concurrency=1, concurrency_reached=True)
task = FakeTask(dag=dag)
ti = FakeTI(task=task, dag_id="fake_dag")
self.assertFalse(DagTISlotsAvailableDep().is_met(ti=ti, dep_context=None))
def test_all_conditions_met(self):
"""
Test all conditions met should pass dep
"""
dag = FakeDag(concurrency=1, concurrency_reached=False)
task = FakeTask(dag=dag)
ti = FakeTI(task=task, dag_id="fake_dag")
self.assertTrue(DagTISlotsAvailableDep().is_met(ti=ti, dep_context=None))
| apache-2.0 |
bank-netforce/netforce | netforce_hr/netforce_hr/models/hr_department.py | 4 | 1500 | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from netforce.model import Model, fields, get_model
class Department(Model):
_name = "hr.department"
_string = "Department"
_key = ["name"]
_fields = {
"name": fields.Char("Name", required=True, search=True),
"code": fields.Char("Code"),
"comments": fields.One2Many("message", "related_id", "Comments"),
}
_order = "name"
Department.register()
| mit |
RaghavPro/Runescape-Hiscores | hiscores/templatetags/hiscores_tags.py | 1 | 1077 | from django import template
register = template.Library()
@register.filter
def get_rank(page, loop_counter):
"""
Calculates the player rank from current page and loop index.
    :param page: Current paginator Page object
:param loop_counter: Loop index
:return: rank
"""
rank = page.start_index() + loop_counter
return "{:,}".format(rank)
@register.filter
def display_skill(d, skill):
"""
Display skill in template
:param d: Dictionary
    :param skill: Skill name (key)
    :return: The skill value formatted with thousands separators.
"""
return "{:,}".format(d[skill])
@register.filter
def display_exp(d, skill):
"""
Display exp of the specified skill in template
:param d: Dictionary
:param skill: skill
:return: formatted exp value
"""
skill_exp = skill + "_exp"
return "{:,}".format(int(d[skill_exp]))
@register.filter
def get_exp(l, index):
"""
Gets the 'exp' key from a list of dictionary
:param l: The list
:param index: List index
:return: 'exp' key
"""
return l[index]['exp'] | gpl-2.0 |
lukaspetr/FEniCSopt | fenicsopt/exports/gnuplot.py | 1 | 1501 | from __future__ import division, print_function
from dolfin import *
import numpy
import pprint
# Gnuplot related functions
################################################################################
# discontinuous piecewise linear output
def gnuplot_dg1(file, mesh, fun):
file = open(file, 'w+')
i = 0
for myCell in cells(mesh):
i += 1
vs = []
for v in vertices(myCell):
vs.append(v.midpoint())
print('%e %e %e' % (vs[0].x(),vs[0].y(),fun(vs[0].x(),vs[0].y())), file=file)
print('%e %e %e' % (vs[1].x(),vs[1].y(),fun(vs[1].x(),vs[1].y())), file=file)
print('%e %e %e' % (vs[2].x(),vs[2].y(),fun(vs[2].x(),vs[2].y())), file=file)
print('%e %e %e' % (vs[0].x(),vs[0].y(),fun(vs[0].x(),vs[0].y())), file=file)
if (i == 1):
print('%e %e %e' % (vs[0].x(),vs[0].y(),fun(vs[0].x(),vs[0].y())), file=file)
print('', file=file)
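# Usage sketch (assumption): the blank-line separated "x y z" blocks written
# above can be rendered in gnuplot with, e.g.:
#   splot 'dg1_output.dat' with lines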
################################################################################
# for printing with e.g. pm3d map
def gnuplot_square_equidistant(file, N, fun):
file = open(file, 'w+')
for i in range(0, N+1):
for j in range(0, N+1):
x = i/N
y = j/N
p = Point(x,y)
f = fun(p)
print('%e %e %e' % (x,y,f), file=file)
print('', file=file)
################################################################################
# graph output
def gnuplot_graph(file, data):
file = open(file, 'w+')
for point in data:
pprint.pprint(point)
print('%e %e' % (point['position'], point['phi']), file=file)
print('', file=file)
| mit |
macobo/python-grader | tasks/MTAT.03.100/2013/Midterm_1/KT2_R8_mood_tester.py | 1 | 1312 | """
Task description (translated from Estonian):
3. Mode of numbers (5p)
Write a function mood that takes a list of integers as its argument and
returns the number that occurs most often in the list (i.e. the mode). If
there are several such numbers, the smallest of them must be returned.
Example: mood([-10, 17, 13, 17, -10, 21]) must return -10.
"""
from grader import *
from KT2_util import make_checker
from random import *
def mood(lst):
parim = 0
parim_count = 0
for el in lst:
count = 0
for a in lst:
if a == el:
count += 1
if count > parim_count or (count == parim_count and el < parim):
parim = el
parim_count = count
return parim
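# Worked example (not part of the original task file):
# mood([1, 2, 2, 3, 2, 1, -2, 3]) == 2, since 2 occurs three times and no
# other value occurs more often.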
def rand_count(length, _seed=None, N = None):
seed(_seed)
if N is None: N = max(int(length / 5), 2)
return [randint(-N, N) for _ in range(length)]
checker = make_checker(mood)
checker([1])
checker([1, 1, 1, 3])
checker([1, 2, 2, 3, 2, 1, -2, 3])
checker([-10, 17, 13, 17, -10, 21],
description="Erijuht, võrdsete esinemiste arvu korral tagasta vähim - {function}({args}) == {expected}")
checker([17, -10, 13, -10, 17, 21],
description="Erijuht, võrdsete esinemiste arvu korral tagasta vähim - {function}({args}) == {expected}")
for i in range(5):
checker(rand_count(20, i)) | mit |
makermade/arm_android-19_arm-linux-androideabi-4.8 | lib/python2.7/lib-tk/Tkinter.py | 43 | 157840 | """Wrapper functions for Tcl/Tk.
Tkinter provides classes which allow the display, positioning and
control of widgets. Toplevel widgets are Tk and Toplevel. Other
widgets are Frame, Label, Entry, Text, Canvas, Button, Radiobutton,
Checkbutton, Scale, Listbox, Scrollbar, OptionMenu, Spinbox
LabelFrame and PanedWindow.
Properties of the widgets are specified with keyword arguments.
Keyword arguments have the same name as the corresponding resource
under Tk.
Widgets are positioned with one of the geometry managers Place, Pack
or Grid. These managers can be called with methods place, pack, grid
available in every Widget.
Actions are bound to events by resources (e.g. keyword argument
command) or with the method bind.
Example (Hello, World):
import Tkinter
from Tkconstants import *
tk = Tkinter.Tk()
frame = Tkinter.Frame(tk, relief=RIDGE, borderwidth=2)
frame.pack(fill=BOTH,expand=1)
label = Tkinter.Label(frame, text="Hello, World")
label.pack(fill=X, expand=1)
button = Tkinter.Button(frame,text="Exit",command=tk.destroy)
button.pack(side=BOTTOM)
tk.mainloop()
"""
__version__ = "$Revision: 81008 $"
import sys
if sys.platform == "win32":
# Attempt to configure Tcl/Tk without requiring PATH
import FixTk
import _tkinter # If this fails your Python may not be configured for Tk
tkinter = _tkinter # b/w compat for export
TclError = _tkinter.TclError
from types import *
from Tkconstants import *
import re
wantobjects = 1
TkVersion = float(_tkinter.TK_VERSION)
TclVersion = float(_tkinter.TCL_VERSION)
READABLE = _tkinter.READABLE
WRITABLE = _tkinter.WRITABLE
EXCEPTION = _tkinter.EXCEPTION
# These are not always defined, e.g. not on Win32 with Tk 8.0 :-(
try: _tkinter.createfilehandler
except AttributeError: _tkinter.createfilehandler = None
try: _tkinter.deletefilehandler
except AttributeError: _tkinter.deletefilehandler = None
_magic_re = re.compile(r'([\\{}])')
_space_re = re.compile(r'([\s])')
def _join(value):
"""Internal function."""
return ' '.join(map(_stringify, value))
def _stringify(value):
"""Internal function."""
if isinstance(value, (list, tuple)):
if len(value) == 1:
value = _stringify(value[0])
if value[0] == '{':
value = '{%s}' % value
else:
value = '{%s}' % _join(value)
else:
if isinstance(value, basestring):
value = unicode(value)
else:
value = str(value)
if not value:
value = '{}'
elif _magic_re.search(value):
# add '\' before special characters and spaces
value = _magic_re.sub(r'\\\1', value)
value = _space_re.sub(r'\\\1', value)
elif value[0] == '"' or _space_re.search(value):
value = '{%s}' % value
return value
def _flatten(tuple):
"""Internal function."""
res = ()
for item in tuple:
if type(item) in (TupleType, ListType):
res = res + _flatten(item)
elif item is not None:
res = res + (item,)
return res
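# Illustrative (not in the original source):
# _flatten((1, (2, (3,)), None)) == (1, 2, 3)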
try: _flatten = _tkinter._flatten
except AttributeError: pass
def _cnfmerge(cnfs):
"""Internal function."""
if type(cnfs) is DictionaryType:
return cnfs
elif type(cnfs) in (NoneType, StringType):
return cnfs
else:
cnf = {}
for c in _flatten(cnfs):
try:
cnf.update(c)
except (AttributeError, TypeError), msg:
print "_cnfmerge: fallback due to:", msg
for k, v in c.items():
cnf[k] = v
return cnf
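# Illustrative (not in the original source):
# _cnfmerge(({'fg': 'red'}, {'bg': 'blue'})) == {'fg': 'red', 'bg': 'blue'}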
try: _cnfmerge = _tkinter._cnfmerge
except AttributeError: pass
class Event:
"""Container for the properties of an event.
Instances of this type are generated if one of the following events occurs:
KeyPress, KeyRelease - for keyboard events
ButtonPress, ButtonRelease, Motion, Enter, Leave, MouseWheel - for mouse events
Visibility, Unmap, Map, Expose, FocusIn, FocusOut, Circulate,
Colormap, Gravity, Reparent, Property, Destroy, Activate,
Deactivate - for window events.
If a callback function for one of these events is registered
using bind, bind_all, bind_class, or tag_bind, the callback is
called with an Event as first argument. It will have the
following attributes (in braces are the event types for which
the attribute is valid):
serial - serial number of event
num - mouse button pressed (ButtonPress, ButtonRelease)
focus - whether the window has the focus (Enter, Leave)
height - height of the exposed window (Configure, Expose)
width - width of the exposed window (Configure, Expose)
keycode - keycode of the pressed key (KeyPress, KeyRelease)
state - state of the event as a number (ButtonPress, ButtonRelease,
Enter, KeyPress, KeyRelease,
Leave, Motion)
state - state as a string (Visibility)
time - when the event occurred
x - x-position of the mouse
y - y-position of the mouse
x_root - x-position of the mouse on the screen
(ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion)
y_root - y-position of the mouse on the screen
(ButtonPress, ButtonRelease, KeyPress, KeyRelease, Motion)
char - pressed character (KeyPress, KeyRelease)
send_event - see X/Windows documentation
keysym - keysym of the event as a string (KeyPress, KeyRelease)
keysym_num - keysym of the event as a number (KeyPress, KeyRelease)
type - type of the event as a number
widget - widget in which the event occurred
delta - delta of wheel movement (MouseWheel)
"""
pass
_support_default_root = 1
_default_root = None
def NoDefaultRoot():
"""Inhibit setting of default root window.
    Call this function to inhibit the first instance of
    Tk from being used for windows without an explicit parent window.
"""
global _support_default_root
_support_default_root = 0
global _default_root
_default_root = None
del _default_root
def _tkerror(err):
"""Internal function."""
pass
def _exit(code=0):
"""Internal function. Calling it will raise the exception SystemExit."""
try:
code = int(code)
except ValueError:
pass
raise SystemExit, code
_varnum = 0
class Variable:
"""Class to define value holders for e.g. buttons.
Subclasses StringVar, IntVar, DoubleVar, BooleanVar are specializations
that constrain the type of the value returned from get()."""
_default = ""
def __init__(self, master=None, value=None, name=None):
"""Construct a variable
MASTER can be given as master widget.
VALUE is an optional value (defaults to "")
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
global _varnum
if not master:
master = _default_root
self._master = master
self._tk = master.tk
if name:
self._name = name
else:
self._name = 'PY_VAR' + repr(_varnum)
_varnum += 1
if value is not None:
self.set(value)
elif not self._tk.call("info", "exists", self._name):
self.set(self._default)
def __del__(self):
"""Unset the variable in Tcl."""
self._tk.globalunsetvar(self._name)
def __str__(self):
"""Return the name of the variable in Tcl."""
return self._name
def set(self, value):
"""Set the variable to VALUE."""
return self._tk.globalsetvar(self._name, value)
def get(self):
"""Return value of variable."""
return self._tk.globalgetvar(self._name)
def trace_variable(self, mode, callback):
"""Define a trace callback for the variable.
MODE is one of "r", "w", "u" for read, write, undefine.
CALLBACK must be a function which is called when
the variable is read, written or undefined.
Return the name of the callback.
"""
cbname = self._master._register(callback)
self._tk.call("trace", "variable", self._name, mode, cbname)
return cbname
trace = trace_variable
def trace_vdelete(self, mode, cbname):
"""Delete the trace callback for a variable.
MODE is one of "r", "w", "u" for read, write, undefine.
CBNAME is the name of the callback returned from trace_variable or trace.
"""
self._tk.call("trace", "vdelete", self._name, mode, cbname)
self._master.deletecommand(cbname)
def trace_vinfo(self):
"""Return all trace callback information."""
return map(self._tk.split, self._tk.splitlist(
self._tk.call("trace", "vinfo", self._name)))
def __eq__(self, other):
"""Comparison for equality (==).
Note: if the Variable's master matters to behavior
also compare self._master == other._master
"""
return self.__class__.__name__ == other.__class__.__name__ \
and self._name == other._name
class StringVar(Variable):
"""Value holder for strings variables."""
_default = ""
def __init__(self, master=None, value=None, name=None):
"""Construct a string variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to "")
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def get(self):
"""Return value of variable as string."""
value = self._tk.globalgetvar(self._name)
if isinstance(value, basestring):
return value
return str(value)
class IntVar(Variable):
"""Value holder for integer variables."""
_default = 0
def __init__(self, master=None, value=None, name=None):
"""Construct an integer variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to 0)
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def set(self, value):
"""Set the variable to value, converting booleans to integers."""
if isinstance(value, bool):
value = int(value)
return Variable.set(self, value)
def get(self):
"""Return the value of the variable as an integer."""
return getint(self._tk.globalgetvar(self._name))
class DoubleVar(Variable):
"""Value holder for float variables."""
_default = 0.0
def __init__(self, master=None, value=None, name=None):
"""Construct a float variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to 0.0)
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def get(self):
"""Return the value of the variable as a float."""
return getdouble(self._tk.globalgetvar(self._name))
class BooleanVar(Variable):
"""Value holder for boolean variables."""
_default = False
def __init__(self, master=None, value=None, name=None):
"""Construct a boolean variable.
MASTER can be given as master widget.
VALUE is an optional value (defaults to False)
NAME is an optional Tcl name (defaults to PY_VARnum).
If NAME matches an existing variable and VALUE is omitted
then the existing value is retained.
"""
Variable.__init__(self, master, value, name)
def get(self):
"""Return the value of the variable as a bool."""
return self._tk.getboolean(self._tk.globalgetvar(self._name))
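# Illustrative sketch (not part of the original module): using the Variable
# subclasses above with a write trace.  Assumes an existing Tk instance
# named root.
#
#   name = StringVar(root, value='world')
#   def on_write(varname, index, mode):   # Tcl passes the trace arguments
#       print 'name is now', name.get()
#   cbname = name.trace_variable('w', on_write)
#   name.set('Tkinter')                   # fires on_write
#   name.trace_vdelete('w', cbname)       # remove trace and Tcl command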
def mainloop(n=0):
"""Run the main loop of Tcl."""
_default_root.tk.mainloop(n)
getint = int
getdouble = float
def getboolean(s):
"""Convert true and false to integer values 1 and 0."""
return _default_root.tk.getboolean(s)
# Methods defined on both toplevel and interior widgets
class Misc:
"""Internal class.
Base class which defines methods common for interior widgets."""
# XXX font command?
_tclCommands = None
def destroy(self):
"""Internal function.
Delete all Tcl commands created for
this widget in the Tcl interpreter."""
if self._tclCommands is not None:
for name in self._tclCommands:
#print '- Tkinter: deleted command', name
self.tk.deletecommand(name)
self._tclCommands = None
def deletecommand(self, name):
"""Internal function.
Delete the Tcl command provided in NAME."""
#print '- Tkinter: deleted command', name
self.tk.deletecommand(name)
try:
self._tclCommands.remove(name)
except ValueError:
pass
def tk_strictMotif(self, boolean=None):
"""Set Tcl internal variable, whether the look and feel
should adhere to Motif.
A parameter of 1 means adhere to Motif (e.g. no color
change if mouse passes over slider).
Returns the set value."""
return self.tk.getboolean(self.tk.call(
'set', 'tk_strictMotif', boolean))
def tk_bisque(self):
"""Change the color scheme to light brown as used in Tk 3.6 and before."""
self.tk.call('tk_bisque')
def tk_setPalette(self, *args, **kw):
"""Set a new color scheme for all widget elements.
        A single color as argument causes all colors of Tk
        widget elements to be derived from it.
        Alternatively, several keyword parameters and their associated
        colors can be given. The following keywords are valid:
activeBackground, foreground, selectColor,
activeForeground, highlightBackground, selectBackground,
background, highlightColor, selectForeground,
disabledForeground, insertBackground, troughColor."""
self.tk.call(('tk_setPalette',)
+ _flatten(args) + _flatten(kw.items()))
def tk_menuBar(self, *args):
"""Do not use. Needed in Tk 3.6 and earlier."""
pass # obsolete since Tk 4.0
def wait_variable(self, name='PY_VAR'):
"""Wait until the variable is modified.
A parameter of type IntVar, StringVar, DoubleVar or
BooleanVar must be given."""
self.tk.call('tkwait', 'variable', name)
waitvar = wait_variable # XXX b/w compat
def wait_window(self, window=None):
"""Wait until a WIDGET is destroyed.
If no parameter is given self is used."""
if window is None:
window = self
self.tk.call('tkwait', 'window', window._w)
def wait_visibility(self, window=None):
"""Wait until the visibility of a WIDGET changes
(e.g. it appears).
If no parameter is given self is used."""
if window is None:
window = self
self.tk.call('tkwait', 'visibility', window._w)
def setvar(self, name='PY_VAR', value='1'):
"""Set Tcl variable NAME to VALUE."""
self.tk.setvar(name, value)
def getvar(self, name='PY_VAR'):
"""Return value of Tcl variable NAME."""
return self.tk.getvar(name)
getint = int
getdouble = float
def getboolean(self, s):
"""Return a boolean value for Tcl boolean values true and false given as parameter."""
return self.tk.getboolean(s)
def focus_set(self):
"""Direct input focus to this widget.
If the application currently does not have the focus
this widget will get the focus if the application gets
the focus through the window manager."""
self.tk.call('focus', self._w)
focus = focus_set # XXX b/w compat?
def focus_force(self):
"""Direct input focus to this widget even if the
application does not have the focus. Use with
caution!"""
self.tk.call('focus', '-force', self._w)
def focus_get(self):
"""Return the widget which has currently the focus in the
application.
Use focus_displayof to allow working with several
displays. Return None if application does not have
the focus."""
name = self.tk.call('focus')
if name == 'none' or not name: return None
return self._nametowidget(name)
def focus_displayof(self):
"""Return the widget which has currently the focus on the
display where this widget is located.
Return None if the application does not have the focus."""
name = self.tk.call('focus', '-displayof', self._w)
if name == 'none' or not name: return None
return self._nametowidget(name)
def focus_lastfor(self):
"""Return the widget which would have the focus if top level
for this widget gets the focus from the window manager."""
name = self.tk.call('focus', '-lastfor', self._w)
if name == 'none' or not name: return None
return self._nametowidget(name)
def tk_focusFollowsMouse(self):
"""The widget under mouse will get automatically focus. Can not
be disabled easily."""
self.tk.call('tk_focusFollowsMouse')
def tk_focusNext(self):
"""Return the next widget in the focus order which follows
widget which has currently the focus.
The focus order first goes to the next child, then to
the children of the child recursively and then to the
next sibling which is higher in the stacking order. A
widget is omitted if it has the takefocus resource set
to 0."""
name = self.tk.call('tk_focusNext', self._w)
if not name: return None
return self._nametowidget(name)
def tk_focusPrev(self):
"""Return previous widget in the focus order. See tk_focusNext for details."""
name = self.tk.call('tk_focusPrev', self._w)
if not name: return None
return self._nametowidget(name)
def after(self, ms, func=None, *args):
"""Call function once after given time.
MS specifies the time in milliseconds. FUNC gives the
function which shall be called. Additional parameters
are given as parameters to the function call. Return
identifier to cancel scheduling with after_cancel."""
if not func:
# I'd rather use time.sleep(ms*0.001)
self.tk.call('after', ms)
else:
def callit():
try:
func(*args)
finally:
try:
self.deletecommand(name)
except TclError:
pass
name = self._register(callit)
return self.tk.call('after', ms, name)
def after_idle(self, func, *args):
"""Call FUNC once if the Tcl main loop has no event to
process.
Return an identifier to cancel the scheduling with
after_cancel."""
return self.after('idle', func, *args)
def after_cancel(self, id):
"""Cancel scheduling of function identified with ID.
Identifier returned by after or after_idle must be
given as first parameter."""
try:
data = self.tk.call('after', 'info', id)
# In Tk 8.3, splitlist returns: (script, type)
# In Tk 8.4, splitlist may return (script, type) or (script,)
script = self.tk.splitlist(data)[0]
self.deletecommand(script)
except TclError:
pass
self.tk.call('after', 'cancel', id)
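    # Illustrative sketch (not part of the original module): scheduling with
    # after/after_cancel.  Assumes a widget named root.
    #
    #   def tick():
    #       print 'tick'
    #       root.after(1000, tick)        # reschedule itself every second
    #   job = root.after(1000, tick)      # first call in one second
    #   root.after_cancel(job)            # cancel while still pending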
def bell(self, displayof=0):
"""Ring a display's bell."""
self.tk.call(('bell',) + self._displayof(displayof))
# Clipboard handling:
def clipboard_get(self, **kw):
"""Retrieve data from the clipboard on window's display.
The window keyword defaults to the root window of the Tkinter
application.
The type keyword specifies the form in which the data is
to be returned and should be an atom name such as STRING
or FILE_NAME. Type defaults to STRING, except on X11, where the default
is to try UTF8_STRING and fall back to STRING.
This command is equivalent to:
selection_get(CLIPBOARD)
"""
if 'type' not in kw and self._windowingsystem == 'x11':
try:
kw['type'] = 'UTF8_STRING'
return self.tk.call(('clipboard', 'get') + self._options(kw))
except TclError:
del kw['type']
return self.tk.call(('clipboard', 'get') + self._options(kw))
def clipboard_clear(self, **kw):
"""Clear the data in the Tk clipboard.
A widget specified for the optional displayof keyword
argument specifies the target display."""
if 'displayof' not in kw: kw['displayof'] = self._w
self.tk.call(('clipboard', 'clear') + self._options(kw))
def clipboard_append(self, string, **kw):
"""Append STRING to the Tk clipboard.
A widget specified at the optional displayof keyword
argument specifies the target display. The clipboard
can be retrieved with selection_get."""
if 'displayof' not in kw: kw['displayof'] = self._w
self.tk.call(('clipboard', 'append') + self._options(kw)
+ ('--', string))
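    # Illustrative sketch (not part of the original module): round-tripping
    # text through the clipboard with the methods above.
    #
    #   widget.clipboard_clear()
    #   widget.clipboard_append('copied text')
    #   text = widget.clipboard_get()     # => 'copied text'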
# XXX grab current w/o window argument
def grab_current(self):
"""Return widget which has currently the grab in this application
or None."""
name = self.tk.call('grab', 'current', self._w)
if not name: return None
return self._nametowidget(name)
def grab_release(self):
"""Release grab for this widget if currently set."""
self.tk.call('grab', 'release', self._w)
def grab_set(self):
"""Set grab for this widget.
A grab directs all events to this and descendant
widgets in the application."""
self.tk.call('grab', 'set', self._w)
def grab_set_global(self):
"""Set global grab for this widget.
A global grab directs all events to this and
descendant widgets on the display. Use with caution -
other applications do not get events anymore."""
self.tk.call('grab', 'set', '-global', self._w)
def grab_status(self):
"""Return None, "local" or "global" if this widget has
no, a local or a global grab."""
status = self.tk.call('grab', 'status', self._w)
if status == 'none': status = None
return status
def option_add(self, pattern, value, priority = None):
"""Set a VALUE (second parameter) for an option
PATTERN (first parameter).
An optional third parameter gives the numeric priority
(defaults to 80)."""
self.tk.call('option', 'add', pattern, value, priority)
def option_clear(self):
"""Clear the option database.
It will be reloaded if option_add is called."""
self.tk.call('option', 'clear')
def option_get(self, name, className):
"""Return the value for an option NAME for this widget
with CLASSNAME.
Values with higher priority override lower values."""
return self.tk.call('option', 'get', self._w, name, className)
def option_readfile(self, fileName, priority = None):
"""Read file FILENAME into the option database.
An optional second parameter gives the numeric
priority."""
self.tk.call('option', 'readfile', fileName, priority)
def selection_clear(self, **kw):
"""Clear the current X selection."""
if 'displayof' not in kw: kw['displayof'] = self._w
self.tk.call(('selection', 'clear') + self._options(kw))
def selection_get(self, **kw):
"""Return the contents of the current X selection.
A keyword parameter selection specifies the name of
the selection and defaults to PRIMARY. A keyword
parameter displayof specifies a widget on the display
to use. A keyword parameter type specifies the form of data to be
fetched, defaulting to STRING except on X11, where UTF8_STRING is tried
before STRING."""
if 'displayof' not in kw: kw['displayof'] = self._w
if 'type' not in kw and self._windowingsystem == 'x11':
try:
kw['type'] = 'UTF8_STRING'
return self.tk.call(('selection', 'get') + self._options(kw))
except TclError:
del kw['type']
return self.tk.call(('selection', 'get') + self._options(kw))
def selection_handle(self, command, **kw):
"""Specify a function COMMAND to call if the X
selection owned by this widget is queried by another
application.
This function must return the contents of the
selection. The function will be called with the
arguments OFFSET and LENGTH which allows the chunking
of very long selections. The following keyword
parameters can be provided:
selection - name of the selection (default PRIMARY),
type - type of the selection (e.g. STRING, FILE_NAME)."""
name = self._register(command)
self.tk.call(('selection', 'handle') + self._options(kw)
+ (self._w, name))
def selection_own(self, **kw):
"""Become owner of X selection.
A keyword parameter selection specifies the name of
the selection (default PRIMARY)."""
self.tk.call(('selection', 'own') +
self._options(kw) + (self._w,))
def selection_own_get(self, **kw):
"""Return owner of X selection.
The following keyword parameter can
be provided:
selection - name of the selection (default PRIMARY),
type - type of the selection (e.g. STRING, FILE_NAME)."""
if 'displayof' not in kw: kw['displayof'] = self._w
name = self.tk.call(('selection', 'own') + self._options(kw))
if not name: return None
return self._nametowidget(name)
def send(self, interp, cmd, *args):
"""Send Tcl command CMD to different interpreter INTERP to be executed."""
return self.tk.call(('send', interp, cmd) + args)
def lower(self, belowThis=None):
"""Lower this widget in the stacking order."""
self.tk.call('lower', self._w, belowThis)
def tkraise(self, aboveThis=None):
"""Raise this widget in the stacking order."""
self.tk.call('raise', self._w, aboveThis)
lift = tkraise
def colormodel(self, value=None):
"""Useless. Not implemented in Tk."""
return self.tk.call('tk', 'colormodel', self._w, value)
def winfo_atom(self, name, displayof=0):
"""Return integer which represents atom NAME."""
args = ('winfo', 'atom') + self._displayof(displayof) + (name,)
return getint(self.tk.call(args))
def winfo_atomname(self, id, displayof=0):
"""Return name of atom with identifier ID."""
args = ('winfo', 'atomname') \
+ self._displayof(displayof) + (id,)
return self.tk.call(args)
def winfo_cells(self):
"""Return number of cells in the colormap for this widget."""
return getint(
self.tk.call('winfo', 'cells', self._w))
def winfo_children(self):
"""Return a list of all widgets which are children of this widget."""
result = []
for child in self.tk.splitlist(
self.tk.call('winfo', 'children', self._w)):
try:
# Tcl sometimes returns extra windows, e.g. for
# menus; those need to be skipped
result.append(self._nametowidget(child))
except KeyError:
pass
return result
def winfo_class(self):
"""Return window class name of this widget."""
return self.tk.call('winfo', 'class', self._w)
def winfo_colormapfull(self):
"""Return true if at the last color request the colormap was full."""
return self.tk.getboolean(
self.tk.call('winfo', 'colormapfull', self._w))
def winfo_containing(self, rootX, rootY, displayof=0):
"""Return the widget which is at the root coordinates ROOTX, ROOTY."""
args = ('winfo', 'containing') \
+ self._displayof(displayof) + (rootX, rootY)
name = self.tk.call(args)
if not name: return None
return self._nametowidget(name)
def winfo_depth(self):
"""Return the number of bits per pixel."""
return getint(self.tk.call('winfo', 'depth', self._w))
def winfo_exists(self):
"""Return true if this widget exists."""
return getint(
self.tk.call('winfo', 'exists', self._w))
def winfo_fpixels(self, number):
"""Return the number of pixels for the given distance NUMBER
(e.g. "3c") as float."""
return getdouble(self.tk.call(
'winfo', 'fpixels', self._w, number))
def winfo_geometry(self):
"""Return geometry string for this widget in the form "widthxheight+X+Y"."""
return self.tk.call('winfo', 'geometry', self._w)
def winfo_height(self):
"""Return height of this widget."""
return getint(
self.tk.call('winfo', 'height', self._w))
def winfo_id(self):
"""Return identifier ID for this widget."""
return self.tk.getint(
self.tk.call('winfo', 'id', self._w))
def winfo_interps(self, displayof=0):
"""Return the name of all Tcl interpreters for this display."""
args = ('winfo', 'interps') + self._displayof(displayof)
return self.tk.splitlist(self.tk.call(args))
def winfo_ismapped(self):
"""Return true if this widget is mapped."""
return getint(
self.tk.call('winfo', 'ismapped', self._w))
def winfo_manager(self):
"""Return the window mananger name for this widget."""
return self.tk.call('winfo', 'manager', self._w)
def winfo_name(self):
"""Return the name of this widget."""
return self.tk.call('winfo', 'name', self._w)
def winfo_parent(self):
"""Return the name of the parent of this widget."""
return self.tk.call('winfo', 'parent', self._w)
def winfo_pathname(self, id, displayof=0):
"""Return the pathname of the widget given by ID."""
args = ('winfo', 'pathname') \
+ self._displayof(displayof) + (id,)
return self.tk.call(args)
def winfo_pixels(self, number):
"""Rounded integer value of winfo_fpixels."""
return getint(
self.tk.call('winfo', 'pixels', self._w, number))
def winfo_pointerx(self):
"""Return the x coordinate of the pointer on the root window."""
return getint(
self.tk.call('winfo', 'pointerx', self._w))
def winfo_pointerxy(self):
"""Return a tuple of x and y coordinates of the pointer on the root window."""
return self._getints(
self.tk.call('winfo', 'pointerxy', self._w))
def winfo_pointery(self):
"""Return the y coordinate of the pointer on the root window."""
return getint(
self.tk.call('winfo', 'pointery', self._w))
def winfo_reqheight(self):
"""Return requested height of this widget."""
return getint(
self.tk.call('winfo', 'reqheight', self._w))
def winfo_reqwidth(self):
"""Return requested width of this widget."""
return getint(
self.tk.call('winfo', 'reqwidth', self._w))
def winfo_rgb(self, color):
"""Return tuple of decimal values for red, green, blue for
COLOR in this widget."""
return self._getints(
self.tk.call('winfo', 'rgb', self._w, color))
def winfo_rootx(self):
"""Return x coordinate of upper left corner of this widget on the
root window."""
return getint(
self.tk.call('winfo', 'rootx', self._w))
def winfo_rooty(self):
"""Return y coordinate of upper left corner of this widget on the
root window."""
return getint(
self.tk.call('winfo', 'rooty', self._w))
def winfo_screen(self):
"""Return the screen name of this widget."""
return self.tk.call('winfo', 'screen', self._w)
def winfo_screencells(self):
"""Return the number of the cells in the colormap of the screen
of this widget."""
return getint(
self.tk.call('winfo', 'screencells', self._w))
def winfo_screendepth(self):
"""Return the number of bits per pixel of the root window of the
screen of this widget."""
return getint(
self.tk.call('winfo', 'screendepth', self._w))
def winfo_screenheight(self):
"""Return the number of pixels of the height of the screen of this widget
in pixel."""
return getint(
self.tk.call('winfo', 'screenheight', self._w))
def winfo_screenmmheight(self):
"""Return the number of pixels of the height of the screen of
this widget in mm."""
return getint(
self.tk.call('winfo', 'screenmmheight', self._w))
def winfo_screenmmwidth(self):
"""Return the number of pixels of the width of the screen of
this widget in mm."""
return getint(
self.tk.call('winfo', 'screenmmwidth', self._w))
def winfo_screenvisual(self):
"""Return one of the strings directcolor, grayscale, pseudocolor,
staticcolor, staticgray, or truecolor for the default
colormodel of this screen."""
return self.tk.call('winfo', 'screenvisual', self._w)
def winfo_screenwidth(self):
"""Return the number of pixels of the width of the screen of
this widget in pixel."""
return getint(
self.tk.call('winfo', 'screenwidth', self._w))
def winfo_server(self):
"""Return information of the X-Server of the screen of this widget in
the form "XmajorRminor vendor vendorVersion"."""
return self.tk.call('winfo', 'server', self._w)
def winfo_toplevel(self):
"""Return the toplevel widget of this widget."""
return self._nametowidget(self.tk.call(
'winfo', 'toplevel', self._w))
def winfo_viewable(self):
"""Return true if the widget and all its higher ancestors are mapped."""
return getint(
self.tk.call('winfo', 'viewable', self._w))
def winfo_visual(self):
"""Return one of the strings directcolor, grayscale, pseudocolor,
staticcolor, staticgray, or truecolor for the
colormodel of this widget."""
return self.tk.call('winfo', 'visual', self._w)
def winfo_visualid(self):
"""Return the X identifier for the visual for this widget."""
return self.tk.call('winfo', 'visualid', self._w)
def winfo_visualsavailable(self, includeids=0):
"""Return a list of all visuals available for the screen
of this widget.
        Each item in the list consists of a visual name (see winfo_visual), a
        depth and, if INCLUDEIDS=1 is given, also the X identifier."""
data = self.tk.split(
self.tk.call('winfo', 'visualsavailable', self._w,
includeids and 'includeids' or None))
if type(data) is StringType:
data = [self.tk.split(data)]
return map(self.__winfo_parseitem, data)
def __winfo_parseitem(self, t):
"""Internal function."""
return t[:1] + tuple(map(self.__winfo_getint, t[1:]))
def __winfo_getint(self, x):
"""Internal function."""
return int(x, 0)
def winfo_vrootheight(self):
"""Return the height of the virtual root window associated with this
widget in pixels. If there is no virtual root window return the
height of the screen."""
return getint(
self.tk.call('winfo', 'vrootheight', self._w))
def winfo_vrootwidth(self):
"""Return the width of the virtual root window associated with this
    widget in pixels. If there is no virtual root window return the
width of the screen."""
return getint(
self.tk.call('winfo', 'vrootwidth', self._w))
def winfo_vrootx(self):
"""Return the x offset of the virtual root relative to the root
window of the screen of this widget."""
return getint(
self.tk.call('winfo', 'vrootx', self._w))
def winfo_vrooty(self):
"""Return the y offset of the virtual root relative to the root
window of the screen of this widget."""
return getint(
self.tk.call('winfo', 'vrooty', self._w))
def winfo_width(self):
"""Return the width of this widget."""
return getint(
self.tk.call('winfo', 'width', self._w))
def winfo_x(self):
"""Return the x coordinate of the upper left corner of this widget
in the parent."""
return getint(
self.tk.call('winfo', 'x', self._w))
def winfo_y(self):
"""Return the y coordinate of the upper left corner of this widget
in the parent."""
return getint(
self.tk.call('winfo', 'y', self._w))
def update(self):
"""Enter event loop until all pending events have been processed by Tcl."""
self.tk.call('update')
def update_idletasks(self):
"""Enter event loop until all idle callbacks have been called. This
will update the display of windows but not process events caused by
the user."""
self.tk.call('update', 'idletasks')
def bindtags(self, tagList=None):
"""Set or get the list of bindtags for this widget.
With no argument return the list of all bindtags associated with
this widget. With a list of strings as argument the bindtags are
set to this list. The bindtags determine in which order events are
processed (see bind)."""
if tagList is None:
return self.tk.splitlist(
self.tk.call('bindtags', self._w))
else:
self.tk.call('bindtags', self._w, tagList)
def _bind(self, what, sequence, func, add, needcleanup=1):
"""Internal function."""
if type(func) is StringType:
self.tk.call(what + (sequence, func))
elif func:
funcid = self._register(func, self._substitute,
needcleanup)
cmd = ('%sif {"[%s %s]" == "break"} break\n'
%
(add and '+' or '',
funcid, self._subst_format_str))
self.tk.call(what + (sequence, cmd))
return funcid
elif sequence:
return self.tk.call(what + (sequence,))
else:
return self.tk.splitlist(self.tk.call(what))
def bind(self, sequence=None, func=None, add=None):
"""Bind to this widget at event SEQUENCE a call to function FUNC.
SEQUENCE is a string of concatenated event
patterns. An event pattern is of the form
<MODIFIER-MODIFIER-TYPE-DETAIL> where MODIFIER is one
of Control, Mod2, M2, Shift, Mod3, M3, Lock, Mod4, M4,
        Button1, B1, Mod5, M5, Button2, B2, Meta, M, Button3,
        B3, Alt, Button4, B4, Double, Button5, B5, Triple,
        Mod1, M1. TYPE is one of Activate, Enter, Map,
        ButtonPress, Button, Expose, Motion, ButtonRelease,
        FocusIn, MouseWheel, Circulate, FocusOut, Property,
        Colormap, Gravity, Reparent, Configure, KeyPress, Key,
        Unmap, Deactivate, KeyRelease, Visibility, Destroy,
Leave and DETAIL is the button number for ButtonPress,
ButtonRelease and DETAIL is the Keysym for KeyPress and
KeyRelease. Examples are
<Control-Button-1> for pressing Control and mouse button 1 or
<Alt-A> for pressing A and the Alt key (KeyPress can be omitted).
An event pattern can also be a virtual event of the form
<<AString>> where AString can be arbitrary. This
event can be generated by event_generate.
If events are concatenated they must appear shortly
after each other.
FUNC will be called if the event sequence occurs with an
instance of Event as argument. If the return value of FUNC is
"break" no further bound function is invoked.
An additional boolean parameter ADD specifies whether FUNC will
be called additionally to the other bound function or whether
it will replace the previous function.
        Bind will return an identifier that allows deletion of the bound
        function with unbind without a memory leak.
If FUNC or SEQUENCE is omitted the bound function or list
of bound events are returned."""
return self._bind(('bind', self._w), sequence, func, add)
def unbind(self, sequence, funcid=None):
"""Unbind for this widget for event SEQUENCE the
function identified with FUNCID."""
self.tk.call('bind', self._w, sequence, '')
if funcid:
self.deletecommand(funcid)
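    # Illustrative sketch (not part of the original module): binding and
    # unbinding a mouse handler on a widget.
    #
    #   def on_click(event):
    #       print 'clicked at', event.x, event.y
    #   funcid = widget.bind('<Button-1>', on_click)
    #   ...
    #   widget.unbind('<Button-1>', funcid)   # also deletes the Tcl command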
def bind_all(self, sequence=None, func=None, add=None):
"""Bind to all widgets at an event SEQUENCE a call to function FUNC.
An additional boolean parameter ADD specifies whether FUNC will
be called additionally to the other bound function or whether
it will replace the previous function. See bind for the return value."""
return self._bind(('bind', 'all'), sequence, func, add, 0)
def unbind_all(self, sequence):
"""Unbind for all widgets for event SEQUENCE all functions."""
self.tk.call('bind', 'all' , sequence, '')
def bind_class(self, className, sequence=None, func=None, add=None):
"""Bind to widgets with bindtag CLASSNAME at event
SEQUENCE a call of function FUNC. An additional
boolean parameter ADD specifies whether FUNC will be
called additionally to the other bound function or
whether it will replace the previous function. See bind for
the return value."""
return self._bind(('bind', className), sequence, func, add, 0)
def unbind_class(self, className, sequence):
"""Unbind for a all widgets with bindtag CLASSNAME for event SEQUENCE
all functions."""
self.tk.call('bind', className , sequence, '')
def mainloop(self, n=0):
"""Call the mainloop of Tk."""
self.tk.mainloop(n)
def quit(self):
"""Quit the Tcl interpreter. All widgets will be destroyed."""
self.tk.quit()
def _getints(self, string):
"""Internal function."""
if string:
return tuple(map(getint, self.tk.splitlist(string)))
def _getdoubles(self, string):
"""Internal function."""
if string:
return tuple(map(getdouble, self.tk.splitlist(string)))
def _getboolean(self, string):
"""Internal function."""
if string:
return self.tk.getboolean(string)
def _displayof(self, displayof):
"""Internal function."""
if displayof:
return ('-displayof', displayof)
if displayof is None:
return ('-displayof', self._w)
return ()
@property
def _windowingsystem(self):
"""Internal function."""
try:
return self._root()._windowingsystem_cached
except AttributeError:
ws = self._root()._windowingsystem_cached = \
self.tk.call('tk', 'windowingsystem')
return ws
def _options(self, cnf, kw = None):
"""Internal function."""
if kw:
cnf = _cnfmerge((cnf, kw))
else:
cnf = _cnfmerge(cnf)
res = ()
for k, v in cnf.items():
if v is not None:
if k[-1] == '_': k = k[:-1]
if hasattr(v, '__call__'):
v = self._register(v)
elif isinstance(v, (tuple, list)):
nv = []
for item in v:
if not isinstance(item, (basestring, int)):
break
elif isinstance(item, int):
nv.append('%d' % item)
else:
# format it to proper Tcl code if it contains space
nv.append(_stringify(item))
else:
v = ' '.join(nv)
res = res + ('-'+k, v)
return res
def nametowidget(self, name):
"""Return the Tkinter instance of a widget identified by
its Tcl name NAME."""
name = str(name).split('.')
w = self
if not name[0]:
w = w._root()
name = name[1:]
for n in name:
if not n:
break
w = w.children[n]
return w
_nametowidget = nametowidget
def _register(self, func, subst=None, needcleanup=1):
"""Return a newly created Tcl function. If this
function is called, the Python function FUNC will
be executed. An optional function SUBST can
be given which will be executed before FUNC."""
f = CallWrapper(func, subst, self).__call__
name = repr(id(f))
try:
func = func.im_func
except AttributeError:
pass
try:
name = name + func.__name__
except AttributeError:
pass
self.tk.createcommand(name, f)
if needcleanup:
if self._tclCommands is None:
self._tclCommands = []
self._tclCommands.append(name)
return name
register = _register
def _root(self):
"""Internal function."""
w = self
while w.master: w = w.master
return w
_subst_format = ('%#', '%b', '%f', '%h', '%k',
'%s', '%t', '%w', '%x', '%y',
'%A', '%E', '%K', '%N', '%W', '%T', '%X', '%Y', '%D')
_subst_format_str = " ".join(_subst_format)
def _substitute(self, *args):
"""Internal function."""
if len(args) != len(self._subst_format): return args
getboolean = self.tk.getboolean
getint = int
def getint_event(s):
"""Tk changed behavior in 8.4.2, returning "??" rather more often."""
try:
return int(s)
except ValueError:
return s
nsign, b, f, h, k, s, t, w, x, y, A, E, K, N, W, T, X, Y, D = args
# Missing: (a, c, d, m, o, v, B, R)
e = Event()
        # serial field: valid for all events
# number of button: ButtonPress and ButtonRelease events only
# height field: Configure, ConfigureRequest, Create,
# ResizeRequest, and Expose events only
# keycode field: KeyPress and KeyRelease events only
# time field: "valid for events that contain a time field"
# width field: Configure, ConfigureRequest, Create, ResizeRequest,
# and Expose events only
# x field: "valid for events that contain a x field"
# y field: "valid for events that contain a y field"
# keysym as decimal: KeyPress and KeyRelease events only
# x_root, y_root fields: ButtonPress, ButtonRelease, KeyPress,
        # KeyRelease, and Motion events
e.serial = getint(nsign)
e.num = getint_event(b)
try: e.focus = getboolean(f)
except TclError: pass
e.height = getint_event(h)
e.keycode = getint_event(k)
e.state = getint_event(s)
e.time = getint_event(t)
e.width = getint_event(w)
e.x = getint_event(x)
e.y = getint_event(y)
e.char = A
try: e.send_event = getboolean(E)
except TclError: pass
e.keysym = K
e.keysym_num = getint_event(N)
e.type = T
try:
e.widget = self._nametowidget(W)
except KeyError:
e.widget = W
e.x_root = getint_event(X)
e.y_root = getint_event(Y)
try:
e.delta = getint(D)
except ValueError:
e.delta = 0
return (e,)
def _report_exception(self):
"""Internal function."""
import sys
exc, val, tb = sys.exc_type, sys.exc_value, sys.exc_traceback
root = self._root()
root.report_callback_exception(exc, val, tb)
def _configure(self, cmd, cnf, kw):
"""Internal function."""
if kw:
cnf = _cnfmerge((cnf, kw))
elif cnf:
cnf = _cnfmerge(cnf)
if cnf is None:
cnf = {}
for x in self.tk.split(
self.tk.call(_flatten((self._w, cmd)))):
cnf[x[0][1:]] = (x[0][1:],) + x[1:]
return cnf
if type(cnf) is StringType:
x = self.tk.split(
self.tk.call(_flatten((self._w, cmd, '-'+cnf))))
return (x[0][1:],) + x[1:]
self.tk.call(_flatten((self._w, cmd)) + self._options(cnf))
# These used to be defined in Widget:
def configure(self, cnf=None, **kw):
"""Configure resources of a widget.
The values for resources are specified as keyword
arguments. To get an overview about
the allowed keyword arguments call the method keys.
"""
return self._configure('configure', cnf, kw)
config = configure
def cget(self, key):
"""Return the resource value for a KEY given as string."""
return self.tk.call(self._w, 'cget', '-' + key)
__getitem__ = cget
def __setitem__(self, key, value):
self.configure({key: value})
def __contains__(self, key):
raise TypeError("Tkinter objects don't support 'in' tests.")
def keys(self):
"""Return a list of all resource names of this widget."""
return map(lambda x: x[0][1:],
self.tk.split(self.tk.call(self._w, 'configure')))
def __str__(self):
"""Return the window path name of this widget."""
return self._w
# Pack methods that apply to the master
_noarg_ = ['_noarg_']
def pack_propagate(self, flag=_noarg_):
"""Set or get the status for propagation of geometry information.
A boolean argument specifies whether the geometry information
of the slaves will determine the size of this widget. If no argument
is given the current setting will be returned.
"""
if flag is Misc._noarg_:
return self._getboolean(self.tk.call(
'pack', 'propagate', self._w))
else:
self.tk.call('pack', 'propagate', self._w, flag)
propagate = pack_propagate
def pack_slaves(self):
"""Return a list of all slaves of this widget
in its packing order."""
return map(self._nametowidget,
self.tk.splitlist(
self.tk.call('pack', 'slaves', self._w)))
slaves = pack_slaves
# Place method that applies to the master
def place_slaves(self):
"""Return a list of all slaves of this widget
in its packing order."""
return map(self._nametowidget,
self.tk.splitlist(
self.tk.call(
'place', 'slaves', self._w)))
# Grid methods that apply to the master
def grid_bbox(self, column=None, row=None, col2=None, row2=None):
"""Return a tuple of integer coordinates for the bounding
box of this widget controlled by the geometry manager grid.
If COLUMN, ROW is given the bounding box applies from
the cell with row and column 0 to the specified
cell. If COL2 and ROW2 are given the bounding box
starts at that cell.
The returned integers specify the offset of the upper left
corner in the master widget and the width and height.
"""
args = ('grid', 'bbox', self._w)
if column is not None and row is not None:
args = args + (column, row)
if col2 is not None and row2 is not None:
args = args + (col2, row2)
return self._getints(self.tk.call(*args)) or None
bbox = grid_bbox
def _grid_configure(self, command, index, cnf, kw):
"""Internal function."""
if type(cnf) is StringType and not kw:
if cnf[-1:] == '_':
cnf = cnf[:-1]
if cnf[:1] != '-':
cnf = '-'+cnf
options = (cnf,)
else:
options = self._options(cnf, kw)
if not options:
res = self.tk.call('grid',
command, self._w, index)
words = self.tk.splitlist(res)
dict = {}
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
if not value:
value = None
elif '.' in value:
value = getdouble(value)
else:
value = getint(value)
dict[key] = value
return dict
res = self.tk.call(
('grid', command, self._w, index)
+ options)
if len(options) == 1:
if not res: return None
# In Tk 7.5, -width can be a float
if '.' in res: return getdouble(res)
return getint(res)
def grid_columnconfigure(self, index, cnf={}, **kw):
"""Configure column INDEX of a grid.
Valid resources are minsize (minimum size of the column),
weight (how much does additional space propagate to this column)
        and pad (how much additional space to leave)."""
return self._grid_configure('columnconfigure', index, cnf, kw)
columnconfigure = grid_columnconfigure
def grid_location(self, x, y):
"""Return a tuple of column and row which identify the cell
at which the pixel at position X and Y inside the master
widget is located."""
return self._getints(
self.tk.call(
'grid', 'location', self._w, x, y)) or None
def grid_propagate(self, flag=_noarg_):
"""Set or get the status for propagation of geometry information.
A boolean argument specifies whether the geometry information
of the slaves will determine the size of this widget. If no argument
is given, the current setting will be returned.
"""
if flag is Misc._noarg_:
return self._getboolean(self.tk.call(
'grid', 'propagate', self._w))
else:
self.tk.call('grid', 'propagate', self._w, flag)
def grid_rowconfigure(self, index, cnf={}, **kw):
"""Configure row INDEX of a grid.
Valid resources are minsize (minimum size of the row),
weight (how much does additional space propagate to this row)
        and pad (how much additional space to leave)."""
return self._grid_configure('rowconfigure', index, cnf, kw)
rowconfigure = grid_rowconfigure
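    # Illustrative sketch (not part of the original module): letting cell (0,0)
    # absorb extra space when the master grows.
    #
    #   master.grid_columnconfigure(0, weight=1, minsize=100)
    #   master.grid_rowconfigure(0, weight=1)
    #   master.grid_columnconfigure(0)    # returns the options as a dict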
def grid_size(self):
"""Return a tuple of the number of column and rows in the grid."""
return self._getints(
self.tk.call('grid', 'size', self._w)) or None
size = grid_size
def grid_slaves(self, row=None, column=None):
"""Return a list of all slaves of this widget
in its packing order."""
args = ()
if row is not None:
args = args + ('-row', row)
if column is not None:
args = args + ('-column', column)
return map(self._nametowidget,
self.tk.splitlist(self.tk.call(
('grid', 'slaves', self._w) + args)))
# Support for the "event" command, new in Tk 4.2.
# By Case Roole.
def event_add(self, virtual, *sequences):
"""Bind a virtual event VIRTUAL (of the form <<Name>>)
to an event SEQUENCE such that the virtual event is triggered
whenever SEQUENCE occurs."""
args = ('event', 'add', virtual) + sequences
self.tk.call(args)
def event_delete(self, virtual, *sequences):
"""Unbind a virtual event VIRTUAL from SEQUENCE."""
args = ('event', 'delete', virtual) + sequences
self.tk.call(args)
def event_generate(self, sequence, **kw):
"""Generate an event SEQUENCE. Additional
keyword arguments specify parameter of the event
(e.g. x, y, rootx, rooty)."""
args = ('event', 'generate', self._w, sequence)
for k, v in kw.items():
args = args + ('-%s' % k, str(v))
self.tk.call(args)
def event_info(self, virtual=None):
"""Return a list of all virtual events or the information
about the SEQUENCE bound to the virtual event VIRTUAL."""
return self.tk.splitlist(
self.tk.call('event', 'info', virtual))
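    # Illustrative sketch (not part of the original module): defining a virtual
    # event, binding it, and firing it programmatically.  save() is a
    # hypothetical application function.
    #
    #   widget.event_add('<<Save>>', '<Control-s>')
    #   widget.bind('<<Save>>', lambda event: save())
    #   widget.event_generate('<<Save>>')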
# Image related commands
def image_names(self):
"""Return a list of all existing image names."""
return self.tk.call('image', 'names')
def image_types(self):
"""Return a list of all available image types (e.g. phote bitmap)."""
return self.tk.call('image', 'types')
class CallWrapper:
"""Internal class. Stores function to call when some user
defined Tcl function is called e.g. after an event occurred."""
def __init__(self, func, subst, widget):
"""Store FUNC, SUBST and WIDGET as members."""
self.func = func
self.subst = subst
self.widget = widget
def __call__(self, *args):
"""Apply first function SUBST to arguments, than FUNC."""
try:
if self.subst:
args = self.subst(*args)
return self.func(*args)
except SystemExit, msg:
raise SystemExit, msg
except:
self.widget._report_exception()
class XView:
"""Mix-in class for querying and changing the horizontal position
of a widget's window."""
def xview(self, *args):
"""Query and change the horizontal position of the view."""
res = self.tk.call(self._w, 'xview', *args)
if not args:
return self._getdoubles(res)
def xview_moveto(self, fraction):
"""Adjusts the view in the window so that FRACTION of the
total width of the canvas is off-screen to the left."""
self.tk.call(self._w, 'xview', 'moveto', fraction)
def xview_scroll(self, number, what):
"""Shift the x-view according to NUMBER which is measured in "units"
or "pages" (WHAT)."""
self.tk.call(self._w, 'xview', 'scroll', number, what)
class YView:
"""Mix-in class for querying and changing the vertical position
of a widget's window."""
def yview(self, *args):
"""Query and change the vertical position of the view."""
res = self.tk.call(self._w, 'yview', *args)
if not args:
return self._getdoubles(res)
def yview_moveto(self, fraction):
"""Adjusts the view in the window so that FRACTION of the
total height of the canvas is off-screen to the top."""
self.tk.call(self._w, 'yview', 'moveto', fraction)
def yview_scroll(self, number, what):
"""Shift the y-view according to NUMBER which is measured in
"units" or "pages" (WHAT)."""
self.tk.call(self._w, 'yview', 'scroll', number, what)
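# Illustrative sketch (not part of the original module): the XView/YView
# mix-ins as used by scrollable widgets such as Listbox or Text.
#
#   listbox.yview_moveto(0.5)         # top edge at 50% of the content
#   listbox.yview_scroll(2, 'units')  # scroll down two lines
#   first, last = listbox.yview()     # visible fraction of the content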
class Wm:
"""Provides functions for the communication with the window manager."""
def wm_aspect(self,
minNumer=None, minDenom=None,
maxNumer=None, maxDenom=None):
"""Instruct the window manager to set the aspect ratio (width/height)
of this widget to be between MINNUMER/MINDENOM and MAXNUMER/MAXDENOM. Return a tuple
of the actual values if no argument is given."""
return self._getints(
self.tk.call('wm', 'aspect', self._w,
minNumer, minDenom,
maxNumer, maxDenom))
aspect = wm_aspect
def wm_attributes(self, *args):
"""This subcommand returns or sets platform specific attributes
The first form returns a list of the platform specific flags and
their values. The second form returns the value for the specific
option. The third form sets one or more of the values. The values
are as follows:
On Windows, -disabled gets or sets whether the window is in a
disabled state. -toolwindow gets or sets the style of the window
to toolwindow (as defined in the MSDN). -topmost gets or sets
whether this is a topmost window (displays above all other
windows).
On Macintosh, XXXXX
On Unix, there are currently no special attribute values.
"""
args = ('wm', 'attributes', self._w) + args
return self.tk.call(args)
attributes=wm_attributes
def wm_client(self, name=None):
"""Store NAME in WM_CLIENT_MACHINE property of this widget. Return
current value."""
return self.tk.call('wm', 'client', self._w, name)
client = wm_client
def wm_colormapwindows(self, *wlist):
"""Store list of window names (WLIST) into WM_COLORMAPWINDOWS property
of this widget. This list contains windows whose colormaps differ from their
parents. Return current list of widgets if WLIST is empty."""
if len(wlist) > 1:
wlist = (wlist,) # Tk needs a list of windows here
args = ('wm', 'colormapwindows', self._w) + wlist
return map(self._nametowidget, self.tk.call(args))
colormapwindows = wm_colormapwindows
def wm_command(self, value=None):
"""Store VALUE in WM_COMMAND property. It is the command
which shall be used to invoke the application. Return current
command if VALUE is None."""
return self.tk.call('wm', 'command', self._w, value)
command = wm_command
def wm_deiconify(self):
"""Deiconify this widget. If it was never mapped it will not be mapped.
On Windows it will raise this widget and give it the focus."""
return self.tk.call('wm', 'deiconify', self._w)
deiconify = wm_deiconify
def wm_focusmodel(self, model=None):
"""Set focus model to MODEL. "active" means that this widget will claim
the focus itself, "passive" means that the window manager shall give
the focus. Return current focus model if MODEL is None."""
return self.tk.call('wm', 'focusmodel', self._w, model)
focusmodel = wm_focusmodel
def wm_frame(self):
"""Return identifier for decorative frame of this widget if present."""
return self.tk.call('wm', 'frame', self._w)
frame = wm_frame
def wm_geometry(self, newGeometry=None):
"""Set geometry to NEWGEOMETRY of the form =widthxheight+x+y. Return
current value if None is given."""
return self.tk.call('wm', 'geometry', self._w, newGeometry)
geometry = wm_geometry
def wm_grid(self,
baseWidth=None, baseHeight=None,
widthInc=None, heightInc=None):
"""Instruct the window manager that this widget shall only be
resized on grid boundaries. WIDTHINC and HEIGHTINC are the width and
height of a grid unit in pixels. BASEWIDTH and BASEHEIGHT are the
number of grid units requested in Tk_GeometryRequest."""
return self._getints(self.tk.call(
'wm', 'grid', self._w,
baseWidth, baseHeight, widthInc, heightInc))
grid = wm_grid
def wm_group(self, pathName=None):
"""Set the group leader widgets for related widgets to PATHNAME. Return
the group leader of this widget if None is given."""
return self.tk.call('wm', 'group', self._w, pathName)
group = wm_group
def wm_iconbitmap(self, bitmap=None, default=None):
"""Set bitmap for the iconified widget to BITMAP. Return
the bitmap if None is given.
Under Windows, the DEFAULT parameter can be used to set the icon
        for the widget and any descendants that don't have an icon set
explicitly. DEFAULT can be the relative path to a .ico file
(example: root.iconbitmap(default='myicon.ico') ). See Tk
documentation for more information."""
if default:
return self.tk.call('wm', 'iconbitmap', self._w, '-default', default)
else:
return self.tk.call('wm', 'iconbitmap', self._w, bitmap)
iconbitmap = wm_iconbitmap
def wm_iconify(self):
"""Display widget as icon."""
return self.tk.call('wm', 'iconify', self._w)
iconify = wm_iconify
def wm_iconmask(self, bitmap=None):
"""Set mask for the icon bitmap of this widget. Return the
mask if None is given."""
return self.tk.call('wm', 'iconmask', self._w, bitmap)
iconmask = wm_iconmask
def wm_iconname(self, newName=None):
"""Set the name of the icon for this widget. Return the name if
None is given."""
return self.tk.call('wm', 'iconname', self._w, newName)
iconname = wm_iconname
def wm_iconposition(self, x=None, y=None):
"""Set the position of the icon of this widget to X and Y. Return
        a tuple of the current values of X and Y if None is given."""
return self._getints(self.tk.call(
'wm', 'iconposition', self._w, x, y))
iconposition = wm_iconposition
def wm_iconwindow(self, pathName=None):
"""Set widget PATHNAME to be displayed instead of icon. Return the current
value if None is given."""
return self.tk.call('wm', 'iconwindow', self._w, pathName)
iconwindow = wm_iconwindow
def wm_maxsize(self, width=None, height=None):
"""Set max WIDTH and HEIGHT for this widget. If the window is gridded
the values are given in grid units. Return the current values if None
is given."""
return self._getints(self.tk.call(
'wm', 'maxsize', self._w, width, height))
maxsize = wm_maxsize
def wm_minsize(self, width=None, height=None):
"""Set min WIDTH and HEIGHT for this widget. If the window is gridded
the values are given in grid units. Return the current values if None
is given."""
return self._getints(self.tk.call(
'wm', 'minsize', self._w, width, height))
minsize = wm_minsize
def wm_overrideredirect(self, boolean=None):
"""Instruct the window manager to ignore this widget
if BOOLEAN is given with 1. Return the current value if None
is given."""
return self._getboolean(self.tk.call(
'wm', 'overrideredirect', self._w, boolean))
overrideredirect = wm_overrideredirect
def wm_positionfrom(self, who=None):
"""Instruct the window manager that the position of this widget shall
be defined by the user if WHO is "user", and by its own policy if WHO is
"program"."""
return self.tk.call('wm', 'positionfrom', self._w, who)
positionfrom = wm_positionfrom
def wm_protocol(self, name=None, func=None):
"""Bind function FUNC to command NAME for this widget.
Return the function bound to NAME if None is given. NAME could be
e.g. "WM_SAVE_YOURSELF" or "WM_DELETE_WINDOW"."""
if hasattr(func, '__call__'):
command = self._register(func)
else:
command = func
return self.tk.call(
'wm', 'protocol', self._w, name, command)
protocol = wm_protocol
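    # Illustrative sketch (not part of the original module): intercepting the
    # window-manager close button.  Assumes a Tk/Toplevel instance named root;
    # on_close is a hypothetical handler.
    #
    #   def on_close():
    #       root.destroy()
    #   root.protocol('WM_DELETE_WINDOW', on_close)
    #   root.geometry('400x300+100+100')  # widthxheight+x+y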
def wm_resizable(self, width=None, height=None):
"""Instruct the window manager whether this width can be resized
in WIDTH or HEIGHT. Both values are boolean values."""
return self.tk.call('wm', 'resizable', self._w, width, height)
resizable = wm_resizable
def wm_sizefrom(self, who=None):
"""Instruct the window manager that the size of this widget shall
be defined by the user if WHO is "user", and by its own policy if WHO is
"program"."""
return self.tk.call('wm', 'sizefrom', self._w, who)
sizefrom = wm_sizefrom
def wm_state(self, newstate=None):
"""Query or set the state of this widget as one of normal, icon,
iconic (see wm_iconwindow), withdrawn, or zoomed (Windows only)."""
return self.tk.call('wm', 'state', self._w, newstate)
state = wm_state
def wm_title(self, string=None):
"""Set the title of this widget."""
return self.tk.call('wm', 'title', self._w, string)
title = wm_title
def wm_transient(self, master=None):
"""Instruct the window manager that this widget is transient
with regard to widget MASTER."""
return self.tk.call('wm', 'transient', self._w, master)
transient = wm_transient
def wm_withdraw(self):
"""Withdraw this widget from the screen such that it is unmapped
and forgotten by the window manager. Re-draw it with wm_deiconify."""
return self.tk.call('wm', 'withdraw', self._w)
withdraw = wm_withdraw
class Tk(Misc, Wm):
"""Toplevel widget of Tk which represents mostly the main window
of an application. It has an associated Tcl interpreter."""
_w = '.'
def __init__(self, screenName=None, baseName=None, className='Tk',
useTk=1, sync=0, use=None):
"""Return a new Toplevel widget on screen SCREENNAME. A new Tcl interpreter will
be created. BASENAME will be used for the identification of the profile file (see
readprofile).
It is constructed from sys.argv[0] without extensions if None is given. CLASSNAME
is the name of the widget class."""
self.master = None
self.children = {}
self._tkloaded = 0
# to avoid recursions in the getattr code in case of failure, we
# ensure that self.tk is always _something_.
self.tk = None
if baseName is None:
import sys, os
baseName = os.path.basename(sys.argv[0])
baseName, ext = os.path.splitext(baseName)
if ext not in ('.py', '.pyc', '.pyo'):
baseName = baseName + ext
interactive = 0
self.tk = _tkinter.create(screenName, baseName, className, interactive, wantobjects, useTk, sync, use)
if useTk:
self._loadtk()
if not sys.flags.ignore_environment:
# Issue #16248: Honor the -E flag to avoid code injection.
self.readprofile(baseName, className)
def loadtk(self):
if not self._tkloaded:
self.tk.loadtk()
self._loadtk()
def _loadtk(self):
self._tkloaded = 1
global _default_root
# Version sanity checks
tk_version = self.tk.getvar('tk_version')
if tk_version != _tkinter.TK_VERSION:
raise RuntimeError, \
"tk.h version (%s) doesn't match libtk.a version (%s)" \
% (_tkinter.TK_VERSION, tk_version)
# Under unknown circumstances, tcl_version gets coerced to float
tcl_version = str(self.tk.getvar('tcl_version'))
if tcl_version != _tkinter.TCL_VERSION:
raise RuntimeError, \
"tcl.h version (%s) doesn't match libtcl.a version (%s)" \
% (_tkinter.TCL_VERSION, tcl_version)
if TkVersion < 4.0:
raise RuntimeError, \
"Tk 4.0 or higher is required; found Tk %s" \
% str(TkVersion)
# Create and register the tkerror and exit commands
        # We need to inline parts of _register here; _register
        # would register differently-named commands.
if self._tclCommands is None:
self._tclCommands = []
self.tk.createcommand('tkerror', _tkerror)
self.tk.createcommand('exit', _exit)
self._tclCommands.append('tkerror')
self._tclCommands.append('exit')
if _support_default_root and not _default_root:
_default_root = self
self.protocol("WM_DELETE_WINDOW", self.destroy)
def destroy(self):
"""Destroy this and all descendants widgets. This will
end the application of this Tcl interpreter."""
for c in self.children.values(): c.destroy()
self.tk.call('destroy', self._w)
Misc.destroy(self)
global _default_root
if _support_default_root and _default_root is self:
_default_root = None
def readprofile(self, baseName, className):
"""Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into
the Tcl Interpreter and calls execfile on BASENAME.py and CLASSNAME.py if
such a file exists in the home directory."""
import os
if 'HOME' in os.environ: home = os.environ['HOME']
else: home = os.curdir
class_tcl = os.path.join(home, '.%s.tcl' % className)
class_py = os.path.join(home, '.%s.py' % className)
base_tcl = os.path.join(home, '.%s.tcl' % baseName)
base_py = os.path.join(home, '.%s.py' % baseName)
dir = {'self': self}
exec 'from Tkinter import *' in dir
if os.path.isfile(class_tcl):
self.tk.call('source', class_tcl)
if os.path.isfile(class_py):
execfile(class_py, dir)
if os.path.isfile(base_tcl):
self.tk.call('source', base_tcl)
if os.path.isfile(base_py):
execfile(base_py, dir)
def report_callback_exception(self, exc, val, tb):
"""Internal function. It reports exception on sys.stderr."""
import traceback, sys
sys.stderr.write("Exception in Tkinter callback\n")
sys.last_type = exc
sys.last_value = val
sys.last_traceback = tb
traceback.print_exception(exc, val, tb)
def __getattr__(self, attr):
"Delegate attribute access to the interpreter object"
return getattr(self.tk, attr)
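# A minimal sketch of the typical Tk lifecycle (illustrative only):
#
#     root = Tk()                          # create interpreter + main window
#     Label(root, text='hello').pack()     # add a child widget
#     root.mainloop()                      # enter the Tk event loop
#     # closing the window triggers WM_DELETE_WINDOW -> root.destroy()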
# Ideally, the classes Pack, Place and Grid disappear, the
# pack/place/grid methods are defined on the Widget class, and
# everybody uses w.pack_whatever(...) instead of Pack.whatever(w,
# ...), with pack(), place() and grid() being short for
# pack_configure(), place_configure() and grid_configure(), and
# forget() being short for pack_forget(). As a practical matter, I'm
# afraid that there is too much code out there that may be using the
# Pack, Place or Grid class, so I leave them intact -- but only as
# backwards compatibility features. Also note that those methods that
# take a master as argument (e.g. pack_propagate) have been moved to
# the Misc class (which now incorporates all methods common between
# toplevel and interior widgets). Again, for compatibility, these are
# copied into the Pack, Place or Grid class.
def Tcl(screenName=None, baseName=None, className='Tk', useTk=0):
"""Return a Tk instance that by default does not initialize the Tk
subsystem, i.e. a plain Tcl interpreter."""
return Tk(screenName, baseName, className, useTk)
class Pack:
"""Geometry manager Pack.
Base class to use the methods pack_* in every widget."""
def pack_configure(self, cnf={}, **kw):
"""Pack a widget in the parent widget. Use as options:
after=widget - pack it after you have packed widget
anchor=NSEW (or subset) - position widget according to
given direction
before=widget - pack it before you will pack widget
expand=bool - expand widget if parent size grows
fill=NONE or X or Y or BOTH - fill widget if widget grows
in=master - use master to contain this widget
in_=master - see 'in' option description
ipadx=amount - add internal padding in x direction
ipady=amount - add internal padding in y direction
padx=amount - add padding in x direction
pady=amount - add padding in y direction
side=TOP or BOTTOM or LEFT or RIGHT - where to add this widget.
"""
self.tk.call(
('pack', 'configure', self._w)
+ self._options(cnf, kw))
pack = configure = config = pack_configure
def pack_forget(self):
"""Unmap this widget and do not use it for the packing order."""
self.tk.call('pack', 'forget', self._w)
forget = pack_forget
def pack_info(self):
"""Return information about the packing options
for this widget."""
words = self.tk.splitlist(
self.tk.call('pack', 'info', self._w))
dict = {}
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
if value[:1] == '.':
value = self._nametowidget(value)
dict[key] = value
return dict
info = pack_info
propagate = pack_propagate = Misc.pack_propagate
slaves = pack_slaves = Misc.pack_slaves
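# Illustrative pack() usage (a sketch, using the options documented in
# pack_configure above):
#
#     f = Frame(root)
#     f.pack(side=TOP, fill=BOTH, expand=1, padx=4, pady=4)
#     print f.pack_info()    # e.g. {'side': 'top', 'fill': 'both', ...}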
class Place:
"""Geometry manager Place.
Base class to use the methods place_* in every widget."""
def place_configure(self, cnf={}, **kw):
"""Place a widget in the parent widget. Use as options:
in=master - master relative to which the widget is placed
in_=master - see 'in' option description
x=amount - locate anchor of this widget at position x of master
y=amount - locate anchor of this widget at position y of master
relx=amount - locate anchor of this widget between 0.0 and 1.0
relative to width of master (1.0 is right edge)
rely=amount - locate anchor of this widget between 0.0 and 1.0
relative to height of master (1.0 is bottom edge)
anchor=NSEW (or subset) - position anchor according to given direction
width=amount - width of this widget in pixels
height=amount - height of this widget in pixels
relwidth=amount - width of this widget between 0.0 and 1.0
relative to width of master (1.0 is the same width
as the master)
relheight=amount - height of this widget between 0.0 and 1.0
relative to height of master (1.0 is the same
height as the master)
bordermode="inside" or "outside" - whether to take border width of
master widget into account
"""
self.tk.call(
('place', 'configure', self._w)
+ self._options(cnf, kw))
place = configure = config = place_configure
def place_forget(self):
"""Unmap this widget."""
self.tk.call('place', 'forget', self._w)
forget = place_forget
def place_info(self):
"""Return information about the placing options
for this widget."""
words = self.tk.splitlist(
self.tk.call('place', 'info', self._w))
dict = {}
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
if value[:1] == '.':
value = self._nametowidget(value)
dict[key] = value
return dict
info = place_info
slaves = place_slaves = Misc.place_slaves
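# Illustrative place() usage (a sketch): anchor a widget at a relative
# position inside its master, independently of pack/grid:
#
#     b = Button(root, text='centered')
#     b.place(relx=0.5, rely=0.5, anchor=CENTER)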
class Grid:
"""Geometry manager Grid.
Base class to use the methods grid_* in every widget."""
# Thanks to Masazumi Yoshikawa ([email protected])
def grid_configure(self, cnf={}, **kw):
"""Position a widget in the parent widget in a grid. Use as options:
column=number - use cell identified with given column (starting with 0)
columnspan=number - this widget will span several columns
in=master - use master to contain this widget
in_=master - see 'in' option description
ipadx=amount - add internal padding in x direction
ipady=amount - add internal padding in y direction
padx=amount - add padding in x direction
pady=amount - add padding in y direction
row=number - use cell identified with given row (starting with 0)
rowspan=number - this widget will span several rows
sticky=NSEW - if the cell is larger than the widget, on which
sides the widget should stick to the cell boundary
"""
self.tk.call(
('grid', 'configure', self._w)
+ self._options(cnf, kw))
grid = configure = config = grid_configure
bbox = grid_bbox = Misc.grid_bbox
columnconfigure = grid_columnconfigure = Misc.grid_columnconfigure
def grid_forget(self):
"""Unmap this widget."""
self.tk.call('grid', 'forget', self._w)
forget = grid_forget
def grid_remove(self):
"""Unmap this widget but remember the grid options."""
self.tk.call('grid', 'remove', self._w)
def grid_info(self):
"""Return information about the options
for positioning this widget in a grid."""
words = self.tk.splitlist(
self.tk.call('grid', 'info', self._w))
dict = {}
for i in range(0, len(words), 2):
key = words[i][1:]
value = words[i+1]
if value[:1] == '.':
value = self._nametowidget(value)
dict[key] = value
return dict
info = grid_info
location = grid_location = Misc.grid_location
propagate = grid_propagate = Misc.grid_propagate
rowconfigure = grid_rowconfigure = Misc.grid_rowconfigure
size = grid_size = Misc.grid_size
slaves = grid_slaves = Misc.grid_slaves
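# Illustrative grid() usage (a sketch): a two-column form row, with
# column 1 absorbing any extra horizontal space:
#
#     Label(root, text='Name:').grid(row=0, column=0, sticky=E)
#     Entry(root).grid(row=0, column=1, sticky=EW)
#     root.grid_columnconfigure(1, weight=1)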
class BaseWidget(Misc):
"""Internal class."""
def _setup(self, master, cnf):
"""Internal function. Sets up information about children."""
if _support_default_root:
global _default_root
if not master:
if not _default_root:
_default_root = Tk()
master = _default_root
self.master = master
self.tk = master.tk
name = None
if 'name' in cnf:
name = cnf['name']
del cnf['name']
if not name:
name = repr(id(self))
self._name = name
if master._w=='.':
self._w = '.' + name
else:
self._w = master._w + '.' + name
self.children = {}
if self._name in self.master.children:
self.master.children[self._name].destroy()
self.master.children[self._name] = self
def __init__(self, master, widgetName, cnf={}, kw={}, extra=()):
"""Construct a widget with the parent widget MASTER, a name WIDGETNAME
and appropriate options."""
if kw:
cnf = _cnfmerge((cnf, kw))
self.widgetName = widgetName
BaseWidget._setup(self, master, cnf)
if self._tclCommands is None:
self._tclCommands = []
classes = []
for k in cnf.keys():
if type(k) is ClassType:
classes.append((k, cnf[k]))
del cnf[k]
self.tk.call(
(widgetName, self._w) + extra + self._options(cnf))
for k, v in classes:
k.configure(self, v)
def destroy(self):
"""Destroy this and all descendants widgets."""
for c in self.children.values(): c.destroy()
self.tk.call('destroy', self._w)
if self._name in self.master.children:
del self.master.children[self._name]
Misc.destroy(self)
def _do(self, name, args=()):
# XXX Obsolete -- better use self.tk.call directly!
return self.tk.call((self._w, name) + args)
class Widget(BaseWidget, Pack, Place, Grid):
"""Internal class.
Base class for a widget which can be positioned with the geometry managers
Pack, Place or Grid."""
pass
class Toplevel(BaseWidget, Wm):
"""Toplevel widget, e.g. for dialogs."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a toplevel widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, class,
colormap, container, cursor, height, highlightbackground,
highlightcolor, highlightthickness, menu, relief, screen, takefocus,
use, visual, width."""
if kw:
cnf = _cnfmerge((cnf, kw))
extra = ()
for wmkey in ['screen', 'class_', 'class', 'visual',
'colormap']:
if wmkey in cnf:
val = cnf[wmkey]
# TBD: a hack needed because some keys
# are not valid as keyword arguments
if wmkey[-1] == '_': opt = '-'+wmkey[:-1]
else: opt = '-'+wmkey
extra = extra + (opt, val)
del cnf[wmkey]
BaseWidget.__init__(self, master, 'toplevel', cnf, {}, extra)
root = self._root()
self.iconname(root.iconname())
self.title(root.title())
self.protocol("WM_DELETE_WINDOW", self.destroy)
class Button(Widget):
"""Button widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a button widget with the parent MASTER.
STANDARD OPTIONS
activebackground, activeforeground, anchor,
background, bitmap, borderwidth, cursor,
disabledforeground, font, foreground
highlightbackground, highlightcolor,
highlightthickness, image, justify,
padx, pady, relief, repeatdelay,
repeatinterval, takefocus, text,
textvariable, underline, wraplength
WIDGET-SPECIFIC OPTIONS
command, compound, default, height,
overrelief, state, width
"""
Widget.__init__(self, master, 'button', cnf, kw)
def tkButtonEnter(self, *dummy):
self.tk.call('tkButtonEnter', self._w)
def tkButtonLeave(self, *dummy):
self.tk.call('tkButtonLeave', self._w)
def tkButtonDown(self, *dummy):
self.tk.call('tkButtonDown', self._w)
def tkButtonUp(self, *dummy):
self.tk.call('tkButtonUp', self._w)
def tkButtonInvoke(self, *dummy):
self.tk.call('tkButtonInvoke', self._w)
def flash(self):
"""Flash the button.
This is accomplished by redisplaying
the button several times, alternating between active and
normal colors. At the end of the flash the button is left
in the same normal/active state as when the command was
invoked. This command is ignored if the button's state is
disabled.
"""
self.tk.call(self._w, 'flash')
def invoke(self):
"""Invoke the command associated with the button.
The return value is the return value from the command,
or an empty string if there is no command associated with
the button. This command is ignored if the button's state
is disabled.
"""
return self.tk.call(self._w, 'invoke')
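# Illustrative Button usage (a sketch; the callback name is arbitrary):
#
#     def on_click():
#         print 'clicked'
#     b = Button(root, text='Click me', command=on_click)
#     b.pack()
#     b.invoke()    # calls on_click and returns its result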
# Indices:
# XXX I don't like these -- take them away
def AtEnd():
return 'end'
def AtInsert(*args):
s = 'insert'
for a in args:
if a: s = s + (' ' + a)
return s
def AtSelFirst():
return 'sel.first'
def AtSelLast():
return 'sel.last'
def At(x, y=None):
if y is None:
return '@%r' % (x,)
else:
return '@%r,%r' % (x, y)
class Canvas(Widget, XView, YView):
"""Canvas widget to display graphical elements like lines or text."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a canvas widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, closeenough,
confine, cursor, height, highlightbackground, highlightcolor,
highlightthickness, insertbackground, insertborderwidth,
insertofftime, insertontime, insertwidth, offset, relief,
scrollregion, selectbackground, selectborderwidth, selectforeground,
state, takefocus, width, xscrollcommand, xscrollincrement,
yscrollcommand, yscrollincrement."""
Widget.__init__(self, master, 'canvas', cnf, kw)
def addtag(self, *args):
"""Internal function."""
self.tk.call((self._w, 'addtag') + args)
def addtag_above(self, newtag, tagOrId):
"""Add tag NEWTAG to all items above TAGORID."""
self.addtag(newtag, 'above', tagOrId)
def addtag_all(self, newtag):
"""Add tag NEWTAG to all items."""
self.addtag(newtag, 'all')
def addtag_below(self, newtag, tagOrId):
"""Add tag NEWTAG to all items below TAGORID."""
self.addtag(newtag, 'below', tagOrId)
def addtag_closest(self, newtag, x, y, halo=None, start=None):
"""Add tag NEWTAG to item which is closest to pixel at X, Y.
If several match take the top-most.
All items closer than HALO are considered overlapping (all are
closest). If START is specified the next below this tag is taken."""
self.addtag(newtag, 'closest', x, y, halo, start)
def addtag_enclosed(self, newtag, x1, y1, x2, y2):
"""Add tag NEWTAG to all items in the rectangle defined
by X1,Y1,X2,Y2."""
self.addtag(newtag, 'enclosed', x1, y1, x2, y2)
def addtag_overlapping(self, newtag, x1, y1, x2, y2):
"""Add tag NEWTAG to all items which overlap the rectangle
defined by X1,Y1,X2,Y2."""
self.addtag(newtag, 'overlapping', x1, y1, x2, y2)
def addtag_withtag(self, newtag, tagOrId):
"""Add tag NEWTAG to all items with TAGORID."""
self.addtag(newtag, 'withtag', tagOrId)
def bbox(self, *args):
"""Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle
which encloses all items with tags specified as arguments."""
return self._getints(
self.tk.call((self._w, 'bbox') + args)) or None
def tag_unbind(self, tagOrId, sequence, funcid=None):
"""Unbind for all items with TAGORID for event SEQUENCE the
function identified with FUNCID."""
self.tk.call(self._w, 'bind', tagOrId, sequence, '')
if funcid:
self.deletecommand(funcid)
def tag_bind(self, tagOrId, sequence=None, func=None, add=None):
"""Bind to all items with TAGORID at event SEQUENCE a call to function FUNC.
An additional boolean parameter ADD specifies whether FUNC will be
called additionally to the other bound function or whether it will
replace the previous function. See bind for the return value."""
return self._bind((self._w, 'bind', tagOrId),
sequence, func, add)
def canvasx(self, screenx, gridspacing=None):
"""Return the canvas x coordinate of pixel position SCREENX rounded
to nearest multiple of GRIDSPACING units."""
return getdouble(self.tk.call(
self._w, 'canvasx', screenx, gridspacing))
def canvasy(self, screeny, gridspacing=None):
"""Return the canvas y coordinate of pixel position SCREENY rounded
to nearest multiple of GRIDSPACING units."""
return getdouble(self.tk.call(
self._w, 'canvasy', screeny, gridspacing))
def coords(self, *args):
"""Return a list of coordinates for the item given in ARGS."""
# XXX Should use _flatten on args
return map(getdouble,
self.tk.splitlist(
self.tk.call((self._w, 'coords') + args)))
def _create(self, itemType, args, kw): # Args: (val, val, ..., cnf={})
"""Internal function."""
args = _flatten(args)
cnf = args[-1]
if type(cnf) in (DictionaryType, TupleType):
args = args[:-1]
else:
cnf = {}
return getint(self.tk.call(
self._w, 'create', itemType,
*(args + self._options(cnf, kw))))
def create_arc(self, *args, **kw):
"""Create arc shaped region with coordinates x1,y1,x2,y2."""
return self._create('arc', args, kw)
def create_bitmap(self, *args, **kw):
"""Create bitmap with coordinates x1,y1."""
return self._create('bitmap', args, kw)
def create_image(self, *args, **kw):
"""Create image item with coordinates x1,y1."""
return self._create('image', args, kw)
def create_line(self, *args, **kw):
"""Create line with coordinates x1,y1,...,xn,yn."""
return self._create('line', args, kw)
def create_oval(self, *args, **kw):
"""Create oval with coordinates x1,y1,x2,y2."""
return self._create('oval', args, kw)
def create_polygon(self, *args, **kw):
"""Create polygon with coordinates x1,y1,...,xn,yn."""
return self._create('polygon', args, kw)
def create_rectangle(self, *args, **kw):
"""Create rectangle with coordinates x1,y1,x2,y2."""
return self._create('rectangle', args, kw)
def create_text(self, *args, **kw):
"""Create text with coordinates x1,y1."""
return self._create('text', args, kw)
def create_window(self, *args, **kw):
"""Create window with coordinates x1,y1,x2,y2."""
return self._create('window', args, kw)
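# Illustrative Canvas item creation (a sketch): each create_* method
# returns an integer item id usable with coords(), itemconfigure() etc.:
#
#     c = Canvas(root, width=200, height=100)
#     c.pack()
#     line = c.create_line(0, 0, 200, 100, fill='red')
#     rect = c.create_rectangle(10, 10, 60, 40, tags='box')
#     c.itemconfigure('box', outline='blue')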
def dchars(self, *args):
"""Delete characters of text items identified by tag or id in ARGS (possibly
several times) from FIRST to LAST character (including)."""
self.tk.call((self._w, 'dchars') + args)
def delete(self, *args):
"""Delete items identified by all tag or ids contained in ARGS."""
self.tk.call((self._w, 'delete') + args)
def dtag(self, *args):
"""Delete tag or id given as last arguments in ARGS from items
identified by first argument in ARGS."""
self.tk.call((self._w, 'dtag') + args)
def find(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'find') + args)) or ()
def find_above(self, tagOrId):
"""Return items above TAGORID."""
return self.find('above', tagOrId)
def find_all(self):
"""Return all items."""
return self.find('all')
def find_below(self, tagOrId):
"""Return all items below TAGORID."""
return self.find('below', tagOrId)
def find_closest(self, x, y, halo=None, start=None):
"""Return item which is closest to pixel at X, Y.
If several match take the top-most.
All items closer than HALO are considered overlapping (all are
closest). If START is specified the next below this tag is taken."""
return self.find('closest', x, y, halo, start)
def find_enclosed(self, x1, y1, x2, y2):
"""Return all items in rectangle defined
by X1,Y1,X2,Y2."""
return self.find('enclosed', x1, y1, x2, y2)
def find_overlapping(self, x1, y1, x2, y2):
"""Return all items which overlap the rectangle
defined by X1,Y1,X2,Y2."""
return self.find('overlapping', x1, y1, x2, y2)
def find_withtag(self, tagOrId):
"""Return all items with TAGORID."""
return self.find('withtag', tagOrId)
def focus(self, *args):
"""Set focus to the first item specified in ARGS."""
return self.tk.call((self._w, 'focus') + args)
def gettags(self, *args):
"""Return tags associated with the first item specified in ARGS."""
return self.tk.splitlist(
self.tk.call((self._w, 'gettags') + args))
def icursor(self, *args):
"""Set cursor at position POS in the item identified by TAGORID.
In ARGS TAGORID must be first."""
self.tk.call((self._w, 'icursor') + args)
def index(self, *args):
"""Return position of cursor as integer in item specified in ARGS."""
return getint(self.tk.call((self._w, 'index') + args))
def insert(self, *args):
"""Insert TEXT in item TAGORID at position POS. ARGS must
be TAGORID POS TEXT."""
self.tk.call((self._w, 'insert') + args)
def itemcget(self, tagOrId, option):
"""Return the resource value for an OPTION for item TAGORID."""
return self.tk.call(
(self._w, 'itemcget') + (tagOrId, '-'+option))
def itemconfigure(self, tagOrId, cnf=None, **kw):
"""Configure resources of an item TAGORID.
The values for resources are specified as keyword
arguments. To get an overview about
the allowed keyword arguments call the method without arguments.
"""
return self._configure(('itemconfigure', tagOrId), cnf, kw)
itemconfig = itemconfigure
# lower, tkraise/lift hide Misc.lower, Misc.tkraise/lift,
# so the preferred name for them is tag_lower, tag_raise
# (similar to tag_bind, and similar to the Text widget);
# unfortunately can't delete the old ones yet (maybe in 1.6)
def tag_lower(self, *args):
"""Lower an item TAGORID given in ARGS
(optional below another item)."""
self.tk.call((self._w, 'lower') + args)
lower = tag_lower
def move(self, *args):
"""Move an item TAGORID given in ARGS."""
self.tk.call((self._w, 'move') + args)
def postscript(self, cnf={}, **kw):
"""Print the contents of the canvas to a postscript
file. Valid options: colormap, colormode, file, fontmap,
height, pageanchor, pageheight, pagewidth, pagex, pagey,
rotate, width, x, y."""
return self.tk.call((self._w, 'postscript') +
self._options(cnf, kw))
def tag_raise(self, *args):
"""Raise an item TAGORID given in ARGS
(optional above another item)."""
self.tk.call((self._w, 'raise') + args)
lift = tkraise = tag_raise
def scale(self, *args):
"""Scale item TAGORID with XORIGIN, YORIGIN, XSCALE, YSCALE."""
self.tk.call((self._w, 'scale') + args)
def scan_mark(self, x, y):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x, y)
def scan_dragto(self, x, y, gain=10):
"""Adjust the view of the canvas to GAIN times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x, y, gain)
def select_adjust(self, tagOrId, index):
"""Adjust the end of the selection near the cursor of an item TAGORID to index."""
self.tk.call(self._w, 'select', 'adjust', tagOrId, index)
def select_clear(self):
"""Clear the selection if it is in this widget."""
self.tk.call(self._w, 'select', 'clear')
def select_from(self, tagOrId, index):
"""Set the fixed end of a selection in item TAGORID to INDEX."""
self.tk.call(self._w, 'select', 'from', tagOrId, index)
def select_item(self):
"""Return the item which has the selection."""
return self.tk.call(self._w, 'select', 'item') or None
def select_to(self, tagOrId, index):
"""Set the variable end of a selection in item TAGORID to INDEX."""
self.tk.call(self._w, 'select', 'to', tagOrId, index)
def type(self, tagOrId):
"""Return the type of the item TAGORID."""
return self.tk.call(self._w, 'type', tagOrId) or None
class Checkbutton(Widget):
"""Checkbutton widget which is either in on- or off-state."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a checkbutton widget with the parent MASTER.
Valid resource names: activebackground, activeforeground, anchor,
background, bd, bg, bitmap, borderwidth, command, cursor,
disabledforeground, fg, font, foreground, height,
highlightbackground, highlightcolor, highlightthickness, image,
indicatoron, justify, offvalue, onvalue, padx, pady, relief,
selectcolor, selectimage, state, takefocus, text, textvariable,
underline, variable, width, wraplength."""
Widget.__init__(self, master, 'checkbutton', cnf, kw)
def deselect(self):
"""Put the button in off-state."""
self.tk.call(self._w, 'deselect')
def flash(self):
"""Flash the button."""
self.tk.call(self._w, 'flash')
def invoke(self):
"""Toggle the button and invoke a command if given as resource."""
return self.tk.call(self._w, 'invoke')
def select(self):
"""Put the button in on-state."""
self.tk.call(self._w, 'select')
def toggle(self):
"""Toggle the button."""
self.tk.call(self._w, 'toggle')
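# Illustrative Checkbutton usage (a sketch): the on/off state is
# usually mirrored in a Tkinter variable via the 'variable' resource:
#
#     v = IntVar(root)
#     cb = Checkbutton(root, text='Enable', variable=v)
#     cb.pack()
#     cb.select()        # sets v to the onvalue (1 by default)
#     print v.get()      # -> 1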
class Entry(Widget, XView):
"""Entry widget which allows to display simple text."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct an entry widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, cursor,
exportselection, fg, font, foreground, highlightbackground,
highlightcolor, highlightthickness, insertbackground,
insertborderwidth, insertofftime, insertontime, insertwidth,
invalidcommand, invcmd, justify, relief, selectbackground,
selectborderwidth, selectforeground, show, state, takefocus,
textvariable, validate, validatecommand, vcmd, width,
xscrollcommand."""
Widget.__init__(self, master, 'entry', cnf, kw)
def delete(self, first, last=None):
"""Delete text from FIRST to LAST (not included)."""
self.tk.call(self._w, 'delete', first, last)
def get(self):
"""Return the text."""
return self.tk.call(self._w, 'get')
def icursor(self, index):
"""Insert cursor at INDEX."""
self.tk.call(self._w, 'icursor', index)
def index(self, index):
"""Return position of cursor."""
return getint(self.tk.call(
self._w, 'index', index))
def insert(self, index, string):
"""Insert STRING at INDEX."""
self.tk.call(self._w, 'insert', index, string)
def scan_mark(self, x):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x)
def scan_dragto(self, x):
"""Adjust the view of the canvas to 10 times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x)
def selection_adjust(self, index):
"""Adjust the end of the selection near the cursor to INDEX."""
self.tk.call(self._w, 'selection', 'adjust', index)
select_adjust = selection_adjust
def selection_clear(self):
"""Clear the selection if it is in this widget."""
self.tk.call(self._w, 'selection', 'clear')
select_clear = selection_clear
def selection_from(self, index):
"""Set the fixed end of a selection to INDEX."""
self.tk.call(self._w, 'selection', 'from', index)
select_from = selection_from
def selection_present(self):
"""Return True if there are characters selected in the entry, False
otherwise."""
return self.tk.getboolean(
self.tk.call(self._w, 'selection', 'present'))
select_present = selection_present
def selection_range(self, start, end):
"""Set the selection from START to END (not included)."""
self.tk.call(self._w, 'selection', 'range', start, end)
select_range = selection_range
def selection_to(self, index):
"""Set the variable end of a selection to INDEX."""
self.tk.call(self._w, 'selection', 'to', index)
select_to = selection_to
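# Illustrative Entry usage (a sketch):
#
#     e = Entry(root)
#     e.pack()
#     e.insert(0, 'hello')         # insert text at index 0
#     e.icursor(END)               # move the insertion cursor to the end
#     e.selection_range(0, END)    # select everything
#     print e.get()                # -> 'hello'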
class Frame(Widget):
"""Frame widget which may contain other widgets and can have a 3D border."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a frame widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, class,
colormap, container, cursor, height, highlightbackground,
highlightcolor, highlightthickness, relief, takefocus, visual, width."""
cnf = _cnfmerge((cnf, kw))
extra = ()
if 'class_' in cnf:
extra = ('-class', cnf['class_'])
del cnf['class_']
elif 'class' in cnf:
extra = ('-class', cnf['class'])
del cnf['class']
Widget.__init__(self, master, 'frame', cnf, {}, extra)
class Label(Widget):
"""Label widget which can display text and bitmaps."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a label widget with the parent MASTER.
STANDARD OPTIONS
activebackground, activeforeground, anchor,
background, bitmap, borderwidth, cursor,
disabledforeground, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, image, justify,
padx, pady, relief, takefocus, text,
textvariable, underline, wraplength
WIDGET-SPECIFIC OPTIONS
height, state, width
"""
Widget.__init__(self, master, 'label', cnf, kw)
class Listbox(Widget, XView, YView):
"""Listbox widget which can display a list of strings."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a listbox widget with the parent MASTER.
Valid resource names: background, bd, bg, borderwidth, cursor,
exportselection, fg, font, foreground, height, highlightbackground,
highlightcolor, highlightthickness, relief, selectbackground,
selectborderwidth, selectforeground, selectmode, setgrid, takefocus,
width, xscrollcommand, yscrollcommand, listvariable."""
Widget.__init__(self, master, 'listbox', cnf, kw)
def activate(self, index):
"""Activate item identified by INDEX."""
self.tk.call(self._w, 'activate', index)
def bbox(self, *args):
"""Return a tuple of X1,Y1,X2,Y2 coordinates for a rectangle
which encloses the item identified by index in ARGS."""
return self._getints(
self.tk.call((self._w, 'bbox') + args)) or None
def curselection(self):
"""Return list of indices of currently selected item."""
# XXX Ought to apply self._getints()...
return self.tk.splitlist(self.tk.call(
self._w, 'curselection'))
def delete(self, first, last=None):
"""Delete items from FIRST to LAST (not included)."""
self.tk.call(self._w, 'delete', first, last)
def get(self, first, last=None):
"""Get list of items from FIRST to LAST (not included)."""
if last:
return self.tk.splitlist(self.tk.call(
self._w, 'get', first, last))
else:
return self.tk.call(self._w, 'get', first)
def index(self, index):
"""Return index of item identified with INDEX."""
i = self.tk.call(self._w, 'index', index)
if i == 'none': return None
return getint(i)
def insert(self, index, *elements):
"""Insert ELEMENTS at INDEX."""
self.tk.call((self._w, 'insert', index) + elements)
def nearest(self, y):
"""Get index of item which is nearest to y coordinate Y."""
return getint(self.tk.call(
self._w, 'nearest', y))
def scan_mark(self, x, y):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x, y)
def scan_dragto(self, x, y):
"""Adjust the view of the listbox to 10 times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x, y)
def see(self, index):
"""Scroll such that INDEX is visible."""
self.tk.call(self._w, 'see', index)
def selection_anchor(self, index):
"""Set the fixed end oft the selection to INDEX."""
self.tk.call(self._w, 'selection', 'anchor', index)
select_anchor = selection_anchor
def selection_clear(self, first, last=None):
"""Clear the selection from FIRST to LAST (not included)."""
self.tk.call(self._w,
'selection', 'clear', first, last)
select_clear = selection_clear
def selection_includes(self, index):
"""Return 1 if INDEX is part of the selection."""
return self.tk.getboolean(self.tk.call(
self._w, 'selection', 'includes', index))
select_includes = selection_includes
def selection_set(self, first, last=None):
"""Set the selection from FIRST to LAST (not included) without
changing the currently selected elements."""
self.tk.call(self._w, 'selection', 'set', first, last)
select_set = selection_set
def size(self):
"""Return the number of elements in the listbox."""
return getint(self.tk.call(self._w, 'size'))
def itemcget(self, index, option):
"""Return the resource value for an ITEM and an OPTION."""
return self.tk.call(
(self._w, 'itemcget') + (index, '-'+option))
def itemconfigure(self, index, cnf=None, **kw):
"""Configure resources of an ITEM.
The values for resources are specified as keyword arguments.
To get an overview about the allowed keyword arguments
call the method without arguments.
Valid resource names: background, bg, foreground, fg,
selectbackground, selectforeground."""
return self._configure(('itemconfigure', index), cnf, kw)
itemconfig = itemconfigure
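# Illustrative Listbox usage (a sketch):
#
#     lb = Listbox(root, selectmode=BROWSE)
#     lb.pack()
#     for item in ('red', 'green', 'blue'):
#         lb.insert(END, item)
#     lb.selection_set(1)          # select 'green'
#     print lb.curselection()      # -> ('1',) on most Tk versions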
class Menu(Widget):
"""Menu widget which allows to display menu bars, pull-down menus and pop-up menus."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct menu widget with the parent MASTER.
Valid resource names: activebackground, activeborderwidth,
activeforeground, background, bd, bg, borderwidth, cursor,
disabledforeground, fg, font, foreground, postcommand, relief,
selectcolor, takefocus, tearoff, tearoffcommand, title, type."""
Widget.__init__(self, master, 'menu', cnf, kw)
def tk_bindForTraversal(self):
pass # obsolete since Tk 4.0
def tk_mbPost(self):
self.tk.call('tk_mbPost', self._w)
def tk_mbUnpost(self):
self.tk.call('tk_mbUnpost')
def tk_traverseToMenu(self, char):
self.tk.call('tk_traverseToMenu', self._w, char)
def tk_traverseWithinMenu(self, char):
self.tk.call('tk_traverseWithinMenu', self._w, char)
def tk_getMenuButtons(self):
return self.tk.call('tk_getMenuButtons', self._w)
def tk_nextMenu(self, count):
self.tk.call('tk_nextMenu', count)
def tk_nextMenuEntry(self, count):
self.tk.call('tk_nextMenuEntry', count)
def tk_invokeMenu(self):
self.tk.call('tk_invokeMenu', self._w)
def tk_firstMenu(self):
self.tk.call('tk_firstMenu', self._w)
def tk_mbButtonDown(self):
self.tk.call('tk_mbButtonDown', self._w)
def tk_popup(self, x, y, entry=""):
"""Post the menu at position X,Y with entry ENTRY."""
self.tk.call('tk_popup', self._w, x, y, entry)
def activate(self, index):
"""Activate entry at INDEX."""
self.tk.call(self._w, 'activate', index)
def add(self, itemType, cnf={}, **kw):
"""Internal function."""
self.tk.call((self._w, 'add', itemType) +
self._options(cnf, kw))
def add_cascade(self, cnf={}, **kw):
"""Add hierarchical menu item."""
self.add('cascade', cnf or kw)
def add_checkbutton(self, cnf={}, **kw):
"""Add checkbutton menu item."""
self.add('checkbutton', cnf or kw)
def add_command(self, cnf={}, **kw):
"""Add command menu item."""
self.add('command', cnf or kw)
def add_radiobutton(self, cnf={}, **kw):
"""Addd radio menu item."""
self.add('radiobutton', cnf or kw)
def add_separator(self, cnf={}, **kw):
"""Add separator."""
self.add('separator', cnf or kw)
def insert(self, index, itemType, cnf={}, **kw):
"""Internal function."""
self.tk.call((self._w, 'insert', index, itemType) +
self._options(cnf, kw))
def insert_cascade(self, index, cnf={}, **kw):
"""Add hierarchical menu item at INDEX."""
self.insert(index, 'cascade', cnf or kw)
def insert_checkbutton(self, index, cnf={}, **kw):
"""Add checkbutton menu item at INDEX."""
self.insert(index, 'checkbutton', cnf or kw)
def insert_command(self, index, cnf={}, **kw):
"""Add command menu item at INDEX."""
self.insert(index, 'command', cnf or kw)
def insert_radiobutton(self, index, cnf={}, **kw):
"""Addd radio menu item at INDEX."""
self.insert(index, 'radiobutton', cnf or kw)
def insert_separator(self, index, cnf={}, **kw):
"""Add separator at INDEX."""
self.insert(index, 'separator', cnf or kw)
def delete(self, index1, index2=None):
"""Delete menu items between INDEX1 and INDEX2 (included)."""
if index2 is None:
index2 = index1
num_index1, num_index2 = self.index(index1), self.index(index2)
if (num_index1 is None) or (num_index2 is None):
num_index1, num_index2 = 0, -1
for i in range(num_index1, num_index2 + 1):
if 'command' in self.entryconfig(i):
c = str(self.entrycget(i, 'command'))
if c:
self.deletecommand(c)
self.tk.call(self._w, 'delete', index1, index2)
def entrycget(self, index, option):
"""Return the resource value of an menu item for OPTION at INDEX."""
return self.tk.call(self._w, 'entrycget', index, '-' + option)
def entryconfigure(self, index, cnf=None, **kw):
"""Configure a menu item at INDEX."""
return self._configure(('entryconfigure', index), cnf, kw)
entryconfig = entryconfigure
def index(self, index):
"""Return the index of a menu item identified by INDEX."""
i = self.tk.call(self._w, 'index', index)
if i == 'none': return None
return getint(i)
def invoke(self, index):
"""Invoke a menu item identified by INDEX and execute
the associated command."""
return self.tk.call(self._w, 'invoke', index)
def post(self, x, y):
"""Display a menu at position X,Y."""
self.tk.call(self._w, 'post', x, y)
def type(self, index):
"""Return the type of the menu item at INDEX."""
return self.tk.call(self._w, 'type', index)
def unpost(self):
"""Unmap a menu."""
self.tk.call(self._w, 'unpost')
def yposition(self, index):
"""Return the y-position of the topmost pixel of the menu item at INDEX."""
return getint(self.tk.call(
self._w, 'yposition', index))
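# Illustrative Menu usage (a sketch; 'open_file' is an assumed callback
# defined elsewhere): build a menubar with a File menu:
#
#     menubar = Menu(root)
#     filemenu = Menu(menubar, tearoff=0)
#     filemenu.add_command(label='Open...', command=open_file)
#     filemenu.add_separator()
#     filemenu.add_command(label='Quit', command=root.destroy)
#     menubar.add_cascade(label='File', menu=filemenu)
#     root.config(menu=menubar)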
class Menubutton(Widget):
"""Menubutton widget, obsolete since Tk8.0."""
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'menubutton', cnf, kw)
class Message(Widget):
"""Message widget to display multiline text. Obsolete since Label does it too."""
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'message', cnf, kw)
class Radiobutton(Widget):
"""Radiobutton widget which shows only one of several buttons in on-state."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a radiobutton widget with the parent MASTER.
Valid resource names: activebackground, activeforeground, anchor,
background, bd, bg, bitmap, borderwidth, command, cursor,
disabledforeground, fg, font, foreground, height,
highlightbackground, highlightcolor, highlightthickness, image,
indicatoron, justify, padx, pady, relief, selectcolor, selectimage,
state, takefocus, text, textvariable, underline, value, variable,
width, wraplength."""
Widget.__init__(self, master, 'radiobutton', cnf, kw)
def deselect(self):
"""Put the button in off-state."""
self.tk.call(self._w, 'deselect')
def flash(self):
"""Flash the button."""
self.tk.call(self._w, 'flash')
def invoke(self):
"""Toggle the button and invoke a command if given as resource."""
return self.tk.call(self._w, 'invoke')
def select(self):
"""Put the button in on-state."""
self.tk.call(self._w, 'select')
class Scale(Widget):
"""Scale widget which can display a numerical scale."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a scale widget with the parent MASTER.
Valid resource names: activebackground, background, bigincrement, bd,
bg, borderwidth, command, cursor, digits, fg, font, foreground, from,
highlightbackground, highlightcolor, highlightthickness, label,
length, orient, relief, repeatdelay, repeatinterval, resolution,
showvalue, sliderlength, sliderrelief, state, takefocus,
tickinterval, to, troughcolor, variable, width."""
Widget.__init__(self, master, 'scale', cnf, kw)
def get(self):
"""Get the current value as integer or float."""
value = self.tk.call(self._w, 'get')
try:
return getint(value)
except ValueError:
return getdouble(value)
def set(self, value):
"""Set the value to VALUE."""
self.tk.call(self._w, 'set', value)
def coords(self, value=None):
"""Return a tuple (X,Y) of the point along the centerline of the
trough that corresponds to VALUE or the current value if None is
given."""
return self._getints(self.tk.call(self._w, 'coords', value))
def identify(self, x, y):
"""Return where the point X,Y lies. Valid return values are "slider",
"though1" and "though2"."""
return self.tk.call(self._w, 'identify', x, y)
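# Illustrative Scale usage (a sketch):
#
#     s = Scale(root, from_=0, to=100, orient=HORIZONTAL)
#     s.pack()
#     s.set(25)
#     print s.get()    # -> 25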
class Scrollbar(Widget):
"""Scrollbar widget which displays a slider at a certain position."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a scrollbar widget with the parent MASTER.
Valid resource names: activebackground, activerelief,
background, bd, bg, borderwidth, command, cursor,
elementborderwidth, highlightbackground,
highlightcolor, highlightthickness, jump, orient,
relief, repeatdelay, repeatinterval, takefocus,
troughcolor, width."""
Widget.__init__(self, master, 'scrollbar', cnf, kw)
def activate(self, index):
"""Display the element at INDEX with activebackground and activerelief.
INDEX can be "arrow1","slider" or "arrow2"."""
self.tk.call(self._w, 'activate', index)
def delta(self, deltax, deltay):
"""Return the fractional change of the scrollbar setting if it
would be moved by DELTAX or DELTAY pixels."""
return getdouble(
self.tk.call(self._w, 'delta', deltax, deltay))
def fraction(self, x, y):
"""Return the fractional value which corresponds to a slider
position of X,Y."""
return getdouble(self.tk.call(self._w, 'fraction', x, y))
def identify(self, x, y):
"""Return the element under position X,Y as one of
"arrow1","slider","arrow2" or ""."""
return self.tk.call(self._w, 'identify', x, y)
def get(self):
"""Return the current fractional values (upper and lower end)
of the slider position."""
return self._getdoubles(self.tk.call(self._w, 'get'))
def set(self, *args):
"""Set the fractional values of the slider position (upper and
lower ends as value between 0 and 1)."""
self.tk.call((self._w, 'set') + args)
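# Illustrative Scrollbar wiring (a sketch): the classic two-way hookup
# between a scrollbar and a scrollable widget such as a Listbox; the
# widget reports its view through 'yscrollcommand' and the scrollbar
# drives the widget through 'command':
#
#     sb = Scrollbar(root, orient=VERTICAL)
#     lb = Listbox(root, yscrollcommand=sb.set)
#     sb.config(command=lb.yview)
#     sb.pack(side=RIGHT, fill=Y)
#     lb.pack(side=LEFT, fill=BOTH, expand=1)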
class Text(Widget, XView, YView):
"""Text widget which can display text in various forms."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a text widget with the parent MASTER.
STANDARD OPTIONS
background, borderwidth, cursor,
exportselection, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, insertbackground,
insertborderwidth, insertofftime,
insertontime, insertwidth, padx, pady,
relief, selectbackground,
selectborderwidth, selectforeground,
setgrid, takefocus,
xscrollcommand, yscrollcommand,
WIDGET-SPECIFIC OPTIONS
autoseparators, height, maxundo,
spacing1, spacing2, spacing3,
state, tabs, undo, width, wrap,
"""
Widget.__init__(self, master, 'text', cnf, kw)
def bbox(self, *args):
"""Return a tuple of (x,y,width,height) which gives the bounding
box of the visible part of the character at the index in ARGS."""
return self._getints(
self.tk.call((self._w, 'bbox') + args)) or None
def tk_textSelectTo(self, index):
self.tk.call('tk_textSelectTo', self._w, index)
def tk_textBackspace(self):
self.tk.call('tk_textBackspace', self._w)
def tk_textIndexCloser(self, a, b, c):
self.tk.call('tk_textIndexCloser', self._w, a, b, c)
def tk_textResetAnchor(self, index):
self.tk.call('tk_textResetAnchor', self._w, index)
def compare(self, index1, op, index2):
"""Return whether between index INDEX1 and index INDEX2 the
relation OP is satisfied. OP is one of <, <=, ==, >=, >, or !=."""
return self.tk.getboolean(self.tk.call(
self._w, 'compare', index1, op, index2))
def debug(self, boolean=None):
"""Turn on the internal consistency checks of the B-Tree inside the text
widget according to BOOLEAN."""
return self.tk.getboolean(self.tk.call(
self._w, 'debug', boolean))
def delete(self, index1, index2=None):
"""Delete the characters between INDEX1 and INDEX2 (not included)."""
self.tk.call(self._w, 'delete', index1, index2)
def dlineinfo(self, index):
"""Return tuple (x,y,width,height,baseline) giving the bounding box
and baseline position of the visible part of the line containing
the character at INDEX."""
return self._getints(self.tk.call(self._w, 'dlineinfo', index))
def dump(self, index1, index2=None, command=None, **kw):
"""Return the contents of the widget between index1 and index2.
The type of contents returned is filtered based on the keyword
parameters; if 'all', 'image', 'mark', 'tag', 'text', or 'window' are
given and true, then the corresponding items are returned. The result
is a list of triples of the form (key, value, index). If none of the
keywords are true then 'all' is used by default.
If the 'command' argument is given, it is called once for each element
of the list of triples, with the values of each triple serving as the
arguments to the function. In this case the list is not returned."""
args = []
func_name = None
result = None
if not command:
# Never call the dump command without the -command flag, since the
# output could involve Tcl quoting and would be a pain to parse
# right. Instead just set the command to build a list of triples
# as if we had done the parsing.
result = []
def append_triple(key, value, index, result=result):
result.append((key, value, index))
command = append_triple
try:
if not isinstance(command, str):
func_name = command = self._register(command)
args += ["-command", command]
for key in kw:
if kw[key]: args.append("-" + key)
args.append(index1)
if index2:
args.append(index2)
self.tk.call(self._w, "dump", *args)
return result
finally:
if func_name:
self.deletecommand(func_name)
## new in tk8.4
def edit(self, *args):
"""Internal method
This method controls the undo mechanism and
the modified flag. The exact behavior of the
command depends on the option argument that
follows the edit argument. The following forms
of the command are currently supported:
edit_modified, edit_redo, edit_reset, edit_separator
and edit_undo
"""
return self.tk.call(self._w, 'edit', *args)
def edit_modified(self, arg=None):
"""Get or set the modified flag.
If ARG is not specified, returns the modified
flag of the widget. The insert, delete, edit undo and
edit redo commands or the user can set or clear the
modified flag. If ARG is specified, sets the
modified flag of the widget to ARG.
"""
return self.edit("modified", arg)
def edit_redo(self):
"""Redo the last undone edit
When the undo option is true, reapplies the last
undone edits provided no other edits were done since
then. Generates an error when the redo stack is empty.
Does nothing when the undo option is false.
"""
return self.edit("redo")
def edit_reset(self):
"""Clears the undo and redo stacks
"""
return self.edit("reset")
def edit_separator(self):
"""Inserts a separator (boundary) on the undo stack.
Does nothing when the undo option is false
"""
return self.edit("separator")
def edit_undo(self):
"""Undo the last edit action if the undo option is true.
An edit action is defined
as all the insert and delete commands that are recorded
on the undo stack in between two separators. Generates
an error when the undo stack is empty. Does nothing
when the undo option is false.
"""
return self.edit("undo")
def get(self, index1, index2=None):
"""Return the text from INDEX1 to INDEX2 (not included)."""
return self.tk.call(self._w, 'get', index1, index2)
# (Image commands are new in 8.0)
def image_cget(self, index, option):
"""Return the value of OPTION of an embedded image at INDEX."""
if option[:1] != "-":
option = "-" + option
if option[-1:] == "_":
option = option[:-1]
return self.tk.call(self._w, "image", "cget", index, option)
def image_configure(self, index, cnf=None, **kw):
"""Configure an embedded image at INDEX."""
return self._configure(('image', 'configure', index), cnf, kw)
def image_create(self, index, cnf={}, **kw):
"""Create an embedded image at INDEX."""
return self.tk.call(
self._w, "image", "create", index,
*self._options(cnf, kw))
def image_names(self):
"""Return all names of embedded images in this widget."""
return self.tk.call(self._w, "image", "names")
def index(self, index):
"""Return the index in the form line.char for INDEX."""
return str(self.tk.call(self._w, 'index', index))
def insert(self, index, chars, *args):
"""Insert CHARS before the characters at INDEX. An additional
tag can be given in ARGS. Additional CHARS and tags can follow in ARGS."""
self.tk.call((self._w, 'insert', index, chars) + args)
def mark_gravity(self, markName, direction=None):
"""Change the gravity of a mark MARKNAME to DIRECTION (LEFT or RIGHT).
Return the current value if None is given for DIRECTION."""
return self.tk.call(
(self._w, 'mark', 'gravity', markName, direction))
def mark_names(self):
"""Return all mark names."""
return self.tk.splitlist(self.tk.call(
self._w, 'mark', 'names'))
def mark_set(self, markName, index):
"""Set mark MARKNAME before the character at INDEX."""
self.tk.call(self._w, 'mark', 'set', markName, index)
def mark_unset(self, *markNames):
"""Delete all marks in MARKNAMES."""
self.tk.call((self._w, 'mark', 'unset') + markNames)
def mark_next(self, index):
"""Return the name of the next mark after INDEX."""
return self.tk.call(self._w, 'mark', 'next', index) or None
def mark_previous(self, index):
"""Return the name of the previous mark before INDEX."""
return self.tk.call(self._w, 'mark', 'previous', index) or None
def scan_mark(self, x, y):
"""Remember the current X, Y coordinates."""
self.tk.call(self._w, 'scan', 'mark', x, y)
def scan_dragto(self, x, y):
"""Adjust the view of the text to 10 times the
difference between X and Y and the coordinates given in
scan_mark."""
self.tk.call(self._w, 'scan', 'dragto', x, y)
def search(self, pattern, index, stopindex=None,
forwards=None, backwards=None, exact=None,
regexp=None, nocase=None, count=None, elide=None):
"""Search PATTERN beginning from INDEX until STOPINDEX.
Return the index of the first character of a match or an
empty string."""
args = [self._w, 'search']
if forwards: args.append('-forwards')
if backwards: args.append('-backwards')
if exact: args.append('-exact')
if regexp: args.append('-regexp')
if nocase: args.append('-nocase')
if elide: args.append('-elide')
if count: args.append('-count'); args.append(count)
if pattern and pattern[0] == '-': args.append('--')
args.append(pattern)
args.append(index)
if stopindex: args.append(stopindex)
return str(self.tk.call(tuple(args)))
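# Illustrative Text.search usage (a sketch; 'text' is assumed to be an
# existing Text widget): find the first regexp match and select it,
# using an IntVar to receive the match length via the count option:
#
#     n = IntVar(root)
#     pos = text.search(r'fo+', '1.0', stopindex=END, regexp=True, count=n)
#     if pos:
#         text.tag_add(SEL, pos, '%s+%dc' % (pos, n.get()))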
def see(self, index):
"""Scroll such that the character at INDEX is visible."""
self.tk.call(self._w, 'see', index)
def tag_add(self, tagName, index1, *args):
"""Add tag TAGNAME to all characters between INDEX1 and index2 in ARGS.
Additional pairs of indices may follow in ARGS."""
self.tk.call(
(self._w, 'tag', 'add', tagName, index1) + args)
def tag_unbind(self, tagName, sequence, funcid=None):
"""Unbind for all characters with TAGNAME for event SEQUENCE the
function identified with FUNCID."""
self.tk.call(self._w, 'tag', 'bind', tagName, sequence, '')
if funcid:
self.deletecommand(funcid)
def tag_bind(self, tagName, sequence, func, add=None):
"""Bind to all characters with TAGNAME at event SEQUENCE a call to function FUNC.
An additional boolean parameter ADD specifies whether FUNC will be
called additionally to the other bound function or whether it will
replace the previous function. See bind for the return value."""
return self._bind((self._w, 'tag', 'bind', tagName),
sequence, func, add)
def tag_cget(self, tagName, option):
"""Return the value of OPTION for tag TAGNAME."""
if option[:1] != '-':
option = '-' + option
if option[-1:] == '_':
option = option[:-1]
return self.tk.call(self._w, 'tag', 'cget', tagName, option)
def tag_configure(self, tagName, cnf=None, **kw):
"""Configure a tag TAGNAME."""
return self._configure(('tag', 'configure', tagName), cnf, kw)
tag_config = tag_configure
def tag_delete(self, *tagNames):
"""Delete all tags in TAGNAMES."""
self.tk.call((self._w, 'tag', 'delete') + tagNames)
def tag_lower(self, tagName, belowThis=None):
"""Change the priority of tag TAGNAME such that it is lower
than the priority of BELOWTHIS."""
self.tk.call(self._w, 'tag', 'lower', tagName, belowThis)
def tag_names(self, index=None):
"""Return a list of all tag names."""
return self.tk.splitlist(
self.tk.call(self._w, 'tag', 'names', index))
def tag_nextrange(self, tagName, index1, index2=None):
"""Return a list of start and end index for the first sequence of
characters between INDEX1 and INDEX2 which all have tag TAGNAME.
The text is searched forward from INDEX1."""
return self.tk.splitlist(self.tk.call(
self._w, 'tag', 'nextrange', tagName, index1, index2))
def tag_prevrange(self, tagName, index1, index2=None):
"""Return a list of start and end index for the first sequence of
characters between INDEX1 and INDEX2 which all have tag TAGNAME.
The text is searched backwards from INDEX1."""
return self.tk.splitlist(self.tk.call(
self._w, 'tag', 'prevrange', tagName, index1, index2))
def tag_raise(self, tagName, aboveThis=None):
"""Change the priority of tag TAGNAME such that it is higher
than the priority of ABOVETHIS."""
self.tk.call(
self._w, 'tag', 'raise', tagName, aboveThis)
def tag_ranges(self, tagName):
"""Return a list of ranges of text which have tag TAGNAME."""
return self.tk.splitlist(self.tk.call(
self._w, 'tag', 'ranges', tagName))
def tag_remove(self, tagName, index1, index2=None):
"""Remove tag TAGNAME from all characters between INDEX1 and INDEX2."""
self.tk.call(
self._w, 'tag', 'remove', tagName, index1, index2)
def window_cget(self, index, option):
"""Return the value of OPTION of an embedded window at INDEX."""
if option[:1] != '-':
option = '-' + option
if option[-1:] == '_':
option = option[:-1]
return self.tk.call(self._w, 'window', 'cget', index, option)
def window_configure(self, index, cnf=None, **kw):
"""Configure an embedded window at INDEX."""
return self._configure(('window', 'configure', index), cnf, kw)
window_config = window_configure
def window_create(self, index, cnf={}, **kw):
"""Create a window at INDEX."""
self.tk.call(
(self._w, 'window', 'create', index)
+ self._options(cnf, kw))
def window_names(self):
"""Return all names of embedded windows in this widget."""
return self.tk.splitlist(
self.tk.call(self._w, 'window', 'names'))
def yview_pickplace(self, *what):
"""Obsolete function, use see."""
self.tk.call((self._w, 'yview', '-pickplace') + what)
class _setit:
"""Internal class. It wraps the command in the widget OptionMenu."""
def __init__(self, var, value, callback=None):
self.__value = value
self.__var = var
self.__callback = callback
def __call__(self, *args):
self.__var.set(self.__value)
if self.__callback:
self.__callback(self.__value, *args)
class OptionMenu(Menubutton):
"""OptionMenu which allows the user to select a value from a menu."""
def __init__(self, master, variable, value, *values, **kwargs):
"""Construct an optionmenu widget with the parent MASTER, with
the resource textvariable set to VARIABLE, the initially selected
value VALUE, the other menu values VALUES and an additional
keyword argument command."""
kw = {"borderwidth": 2, "textvariable": variable,
"indicatoron": 1, "relief": RAISED, "anchor": "c",
"highlightthickness": 2}
Widget.__init__(self, master, "menubutton", kw)
self.widgetName = 'tk_optionMenu'
menu = self.__menu = Menu(self, name="menu", tearoff=0)
self.menuname = menu._w
# 'command' is the only supported keyword
callback = kwargs.get('command')
if 'command' in kwargs:
del kwargs['command']
if kwargs:
raise TclError, 'unknown option -'+kwargs.keys()[0]
menu.add_command(label=value,
command=_setit(variable, value, callback))
for v in values:
menu.add_command(label=v,
command=_setit(variable, v, callback))
self["menu"] = menu
def __getitem__(self, name):
if name == 'menu':
return self.__menu
return Widget.__getitem__(self, name)
def destroy(self):
"""Destroy this widget and the associated menu."""
Menubutton.destroy(self)
self.__menu = None
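# Illustrative OptionMenu usage (a sketch):
#
#     var = StringVar(root)
#     var.set('red')
#     om = OptionMenu(root, var, 'red', 'green', 'blue')
#     om.pack()
#     print var.get()    # -> the currently selected value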
class Image:
"""Base class for images."""
_last_id = 0
def __init__(self, imgtype, name=None, cnf={}, master=None, **kw):
self.name = None
if not master:
master = _default_root
if not master:
raise RuntimeError, 'Too early to create image'
self.tk = master.tk
if not name:
Image._last_id += 1
name = "pyimage%r" % (Image._last_id,) # tk itself would use image<x>
# The following is needed for systems where id(x)
# can return a negative number, such as Linux/m68k:
if name[0] == '-': name = '_' + name[1:]
if kw and cnf: cnf = _cnfmerge((cnf, kw))
elif kw: cnf = kw
options = ()
for k, v in cnf.items():
if hasattr(v, '__call__'):
v = self._register(v)
options = options + ('-'+k, v)
self.tk.call(('image', 'create', imgtype, name,) + options)
self.name = name
def __str__(self): return self.name
def __del__(self):
if self.name:
try:
self.tk.call('image', 'delete', self.name)
except TclError:
# May happen if the root was destroyed
pass
def __setitem__(self, key, value):
self.tk.call(self.name, 'configure', '-'+key, value)
def __getitem__(self, key):
return self.tk.call(self.name, 'configure', '-'+key)
def configure(self, **kw):
"""Configure the image."""
res = ()
for k, v in _cnfmerge(kw).items():
if v is not None:
if k[-1] == '_': k = k[:-1]
if hasattr(v, '__call__'):
v = self._register(v)
res = res + ('-'+k, v)
self.tk.call((self.name, 'config') + res)
config = configure
def height(self):
"""Return the height of the image."""
return getint(
self.tk.call('image', 'height', self.name))
def type(self):
"""Return the type of the imgage, e.g. "photo" or "bitmap"."""
return self.tk.call('image', 'type', self.name)
def width(self):
"""Return the width of the image."""
return getint(
self.tk.call('image', 'width', self.name))
class PhotoImage(Image):
"""Widget which can display colored images in GIF, PPM/PGM format."""
def __init__(self, name=None, cnf={}, master=None, **kw):
"""Create an image with NAME.
Valid resource names: data, format, file, gamma, height, palette,
width."""
Image.__init__(self, 'photo', name, cnf, master, **kw)
def blank(self):
"""Display a transparent image."""
self.tk.call(self.name, 'blank')
def cget(self, option):
"""Return the value of OPTION."""
return self.tk.call(self.name, 'cget', '-' + option)
# XXX config
def __getitem__(self, key):
return self.tk.call(self.name, 'cget', '-' + key)
# XXX copy -from, -to, ...?
def copy(self):
"""Return a new PhotoImage with the same image as this widget."""
destImage = PhotoImage()
self.tk.call(destImage, 'copy', self.name)
return destImage
def zoom(self,x,y=''):
"""Return a new PhotoImage with the same image as this widget
but zoom it with X and Y."""
destImage = PhotoImage()
if y=='': y=x
self.tk.call(destImage, 'copy', self.name, '-zoom',x,y)
return destImage
def subsample(self,x,y=''):
"""Return a new PhotoImage based on the same image as this widget
but use only every Xth or Yth pixel."""
destImage = PhotoImage()
if y=='': y=x
self.tk.call(destImage, 'copy', self.name, '-subsample',x,y)
return destImage
def get(self, x, y):
"""Return the color (red, green, blue) of the pixel at X,Y."""
return self.tk.call(self.name, 'get', x, y)
def put(self, data, to=None):
"""Put row formatted colors to image starting from
position TO, e.g. image.put("{red green} {blue yellow}", to=(4,6))"""
args = (self.name, 'put', data)
if to:
if to[0] == '-to':
to = to[1:]
args = args + ('-to',) + tuple(to)
self.tk.call(args)
# XXX read
def write(self, filename, format=None, from_coords=None):
"""Write image to file FILENAME in FORMAT starting from
position FROM_COORDS."""
args = (self.name, 'write', filename)
if format:
args = args + ('-format', format)
if from_coords:
args = args + ('-from',) + tuple(from_coords)
self.tk.call(args)
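# Illustrative PhotoImage usage (a sketch; 'demo.gif' is a placeholder
# file name). Keep a Python reference to the image, or it may be
# garbage collected and disappear from the display:
#
#     img = PhotoImage(file='demo.gif')
#     Label(root, image=img).pack()
#     img.put('{red red} {blue blue}', to=(0, 0))   # paint a 2x2 block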
class BitmapImage(Image):
"""Widget which can display a bitmap."""
def __init__(self, name=None, cnf={}, master=None, **kw):
"""Create a bitmap with NAME.
Valid resource names: background, data, file, foreground, maskdata, maskfile."""
Image.__init__(self, 'bitmap', name, cnf, master, **kw)
def image_names(): return _default_root.tk.call('image', 'names')
def image_types(): return _default_root.tk.call('image', 'types')
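# Illustrative sketch (not part of the original module): creating a
# BitmapImage and driving the item/configure protocol defined on Image
# above. "icon.xbm" is a hypothetical path; a Tk root is assumed.
def _bitmap_image_demo(master=None):
    img = BitmapImage(master=master, file="icon.xbm", foreground="black")
    img['background'] = 'white'   # __setitem__ -> "image configure"
    return img.width(), img.height()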
class Spinbox(Widget, XView):
"""spinbox widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a spinbox widget with the parent MASTER.
STANDARD OPTIONS
activebackground, background, borderwidth,
cursor, exportselection, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, insertbackground,
insertborderwidth, insertofftime,
insertontime, insertwidth, justify, relief,
repeatdelay, repeatinterval,
            selectbackground, selectborderwidth,
            selectforeground, takefocus, textvariable,
            xscrollcommand.
WIDGET-SPECIFIC OPTIONS
buttonbackground, buttoncursor,
buttondownrelief, buttonuprelief,
command, disabledbackground,
disabledforeground, format, from,
invalidcommand, increment,
readonlybackground, state, to,
            validate, validatecommand, values,
width, wrap,
"""
Widget.__init__(self, master, 'spinbox', cnf, kw)
def bbox(self, index):
"""Return a tuple of X1,Y1,X2,Y2 coordinates for a
rectangle which encloses the character given by index.
The first two elements of the list give the x and y
coordinates of the upper-left corner of the screen
area covered by the character (in pixels relative
to the widget) and the last two elements give the
width and height of the character, in pixels. The
bounding box may refer to a region outside the
visible area of the window.
"""
return self.tk.call(self._w, 'bbox', index)
def delete(self, first, last=None):
"""Delete one or more elements of the spinbox.
First is the index of the first character to delete,
and last is the index of the character just after
the last one to delete. If last isn't specified it
defaults to first+1, i.e. a single character is
deleted. This command returns an empty string.
"""
return self.tk.call(self._w, 'delete', first, last)
def get(self):
"""Returns the spinbox's string"""
return self.tk.call(self._w, 'get')
def icursor(self, index):
"""Alter the position of the insertion cursor.
The insertion cursor will be displayed just before
the character given by index. Returns an empty string
"""
return self.tk.call(self._w, 'icursor', index)
def identify(self, x, y):
"""Returns the name of the widget at position x, y
Return value is one of: none, buttondown, buttonup, entry
"""
return self.tk.call(self._w, 'identify', x, y)
def index(self, index):
"""Returns the numerical index corresponding to index
"""
return self.tk.call(self._w, 'index', index)
def insert(self, index, s):
"""Insert string s at index
Returns an empty string.
"""
return self.tk.call(self._w, 'insert', index, s)
def invoke(self, element):
"""Causes the specified element to be invoked
The element could be buttondown or buttonup
triggering the action associated with it.
"""
return self.tk.call(self._w, 'invoke', element)
def scan(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'scan') + args)) or ()
def scan_mark(self, x):
"""Records x and the current view in the spinbox window;
used in conjunction with later scan dragto commands.
Typically this command is associated with a mouse button
press in the widget. It returns an empty string.
"""
return self.scan("mark", x)
def scan_dragto(self, x):
"""Compute the difference between the given x argument
and the x argument to the last scan mark command
It then adjusts the view left or right by 10 times the
difference in x-coordinates. This command is typically
associated with mouse motion events in the widget, to
produce the effect of dragging the spinbox at high speed
through the window. The return value is an empty string.
"""
return self.scan("dragto", x)
def selection(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'selection') + args)) or ()
def selection_adjust(self, index):
"""Locate the end of the selection nearest to the character
given by index,
Then adjust that end of the selection to be at index
(i.e including but not going beyond index). The other
end of the selection is made the anchor point for future
select to commands. If the selection isn't currently in
the spinbox, then a new selection is created to include
the characters between index and the most recent selection
anchor point, inclusive. Returns an empty string.
"""
return self.selection("adjust", index)
def selection_clear(self):
"""Clear the selection
If the selection isn't in this widget then the
command has no effect. Returns an empty string.
"""
return self.selection("clear")
def selection_element(self, element=None):
"""Sets or gets the currently selected element.
If a spinbutton element is specified, it will be
displayed depressed
"""
return self.selection("element", element)
###########################################################################
class LabelFrame(Widget):
"""labelframe widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a labelframe widget with the parent MASTER.
STANDARD OPTIONS
borderwidth, cursor, font, foreground,
highlightbackground, highlightcolor,
highlightthickness, padx, pady, relief,
takefocus, text
WIDGET-SPECIFIC OPTIONS
background, class, colormap, container,
height, labelanchor, labelwidget,
visual, width
"""
Widget.__init__(self, master, 'labelframe', cnf, kw)
########################################################################
class PanedWindow(Widget):
"""panedwindow widget."""
def __init__(self, master=None, cnf={}, **kw):
"""Construct a panedwindow widget with the parent MASTER.
STANDARD OPTIONS
background, borderwidth, cursor, height,
orient, relief, width
WIDGET-SPECIFIC OPTIONS
handlepad, handlesize, opaqueresize,
sashcursor, sashpad, sashrelief,
sashwidth, showhandle,
"""
Widget.__init__(self, master, 'panedwindow', cnf, kw)
def add(self, child, **kw):
"""Add a child widget to the panedwindow in a new pane.
The child argument is the name of the child widget
followed by pairs of arguments that specify how to
manage the windows. The possible options and values
are the ones accepted by the paneconfigure method.
"""
self.tk.call((self._w, 'add', child) + self._options(kw))
def remove(self, child):
"""Remove the pane containing child from the panedwindow
All geometry management options for child will be forgotten.
"""
self.tk.call(self._w, 'forget', child)
forget=remove
def identify(self, x, y):
"""Identify the panedwindow component at point x, y
If the point is over a sash or a sash handle, the result
is a two element list containing the index of the sash or
handle, and a word indicating whether it is over a sash
or a handle, such as {0 sash} or {2 handle}. If the point
is over any other part of the panedwindow, the result is
an empty list.
"""
return self.tk.call(self._w, 'identify', x, y)
def proxy(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'proxy') + args)) or ()
def proxy_coord(self):
"""Return the x and y pair of the most recent proxy location
"""
return self.proxy("coord")
def proxy_forget(self):
"""Remove the proxy from the display.
"""
return self.proxy("forget")
def proxy_place(self, x, y):
"""Place the proxy at the given x and y coordinates.
"""
return self.proxy("place", x, y)
def sash(self, *args):
"""Internal function."""
return self._getints(
self.tk.call((self._w, 'sash') + args)) or ()
def sash_coord(self, index):
"""Return the current x and y pair for the sash given by index.
Index must be an integer between 0 and 1 less than the
number of panes in the panedwindow. The coordinates given are
those of the top left corner of the region containing the sash.
        A related "dragto" subcommand, reachable as
        self.sash("dragto", index, x, y), moves the sash by the
        difference between the given coordinates and those recorded
        by the last sash mark command.
"""
return self.sash("coord", index)
def sash_mark(self, index):
"""Records x and y for the sash given by index;
Used in conjunction with later dragto commands to move the sash.
"""
return self.sash("mark", index)
def sash_place(self, index, x, y):
"""Place the sash given by index at the given coordinates
"""
return self.sash("place", index, x, y)
def panecget(self, child, option):
"""Query a management option for window.
Option may be any value allowed by the paneconfigure subcommand
"""
return self.tk.call(
(self._w, 'panecget') + (child, '-'+option))
def paneconfigure(self, tagOrId, cnf=None, **kw):
"""Query or modify the management options for window.
If no option is specified, returns a list describing all
of the available options for pathName. If option is
specified with no value, then the command returns a list
describing the one named option (this list will be identical
to the corresponding sublist of the value returned if no
option is specified). If one or more option-value pairs are
specified, then the command modifies the given widget
option(s) to have the given value(s); in this case the
command returns an empty string. The following options
are supported:
after window
Insert the window after the window specified. window
should be the name of a window already managed by pathName.
before window
Insert the window before the window specified. window
should be the name of a window already managed by pathName.
height size
Specify a height for the window. The height will be the
outer dimension of the window including its border, if
any. If size is an empty string, or if -height is not
specified, then the height requested internally by the
window will be used initially; the height may later be
adjusted by the movement of sashes in the panedwindow.
Size may be any value accepted by Tk_GetPixels.
minsize n
Specifies that the size of the window cannot be made
less than n. This constraint only affects the size of
the widget in the paned dimension -- the x dimension
for horizontal panedwindows, the y dimension for
vertical panedwindows. May be any value accepted by
Tk_GetPixels.
padx n
Specifies a non-negative value indicating how much
extra space to leave on each side of the window in
the X-direction. The value may have any of the forms
accepted by Tk_GetPixels.
pady n
Specifies a non-negative value indicating how much
extra space to leave on each side of the window in
the Y-direction. The value may have any of the forms
accepted by Tk_GetPixels.
sticky style
If a window's pane is larger than the requested
dimensions of the window, this option may be used
to position (or stretch) the window within its pane.
Style is a string that contains zero or more of the
characters n, s, e or w. The string can optionally
contains spaces or commas, but they are ignored. Each
letter refers to a side (north, south, east, or west)
that the window will "stick" to. If both n and s
(or e and w) are specified, the window will be
stretched to fill the entire height (or width) of
its cavity.
width size
Specify a width for the window. The width will be
the outer dimension of the window including its
border, if any. If size is an empty string, or
if -width is not specified, then the width requested
internally by the window will be used initially; the
width may later be adjusted by the movement of sashes
in the panedwindow. Size may be any value accepted by
Tk_GetPixels.
"""
if cnf is None and not kw:
cnf = {}
for x in self.tk.split(
self.tk.call(self._w,
'paneconfigure', tagOrId)):
cnf[x[0][1:]] = (x[0][1:],) + x[1:]
return cnf
if type(cnf) == StringType and not kw:
x = self.tk.split(self.tk.call(
self._w, 'paneconfigure', tagOrId, '-'+cnf))
return (x[0][1:],) + x[1:]
self.tk.call((self._w, 'paneconfigure', tagOrId) +
self._options(cnf, kw))
paneconfig = paneconfigure
def panes(self):
"""Returns an ordered list of the child panes."""
return self.tk.call(self._w, 'panes')
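# Illustrative sketch (not part of the original module): a two-pane
# layout driven through the pane-management API above. Widget texts
# are examples only; a Tk root is assumed.
def _panedwindow_demo(master=None):
    pw = PanedWindow(master, orient='horizontal', showhandle=1)
    left = Label(pw, text='left')
    right = Label(pw, text='right')
    pw.add(left, minsize=80, sticky='nsew')
    pw.add(right, padx=4)
    pw.paneconfigure(left, width=120)  # accepts the same options as add()
    pw.pack(fill='both', expand=1)
    return pw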
######################################################################
# Extensions:
class Studbutton(Button):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'studbutton', cnf, kw)
self.bind('<Any-Enter>', self.tkButtonEnter)
self.bind('<Any-Leave>', self.tkButtonLeave)
self.bind('<1>', self.tkButtonDown)
self.bind('<ButtonRelease-1>', self.tkButtonUp)
class Tributton(Button):
def __init__(self, master=None, cnf={}, **kw):
Widget.__init__(self, master, 'tributton', cnf, kw)
self.bind('<Any-Enter>', self.tkButtonEnter)
self.bind('<Any-Leave>', self.tkButtonLeave)
self.bind('<1>', self.tkButtonDown)
self.bind('<ButtonRelease-1>', self.tkButtonUp)
self['fg'] = self['bg']
self['activebackground'] = self['bg']
######################################################################
# Test:
def _test():
root = Tk()
text = "This is Tcl/Tk version %s" % TclVersion
if TclVersion >= 8.1:
try:
text = text + unicode("\nThis should be a cedilla: \347",
"iso-8859-1")
except NameError:
pass # no unicode support
label = Label(root, text=text)
label.pack()
test = Button(root, text="Click me!",
command=lambda root=root: root.test.configure(
text="[%s]" % root.test['text']))
test.pack()
root.test = test
quit = Button(root, text="QUIT", command=root.destroy)
quit.pack()
# The following three commands are needed so the window pops
# up on top on Windows...
root.iconify()
root.update()
root.deiconify()
root.mainloop()
if __name__ == '__main__':
_test()
| gpl-2.0 |
donkirkby/django | tests/select_related/models.py | 276 | 3480 | """
Tests for select_related()
``select_related()`` follows all relationships and pre-caches any foreign key
values so that complex trees can be fetched in a single query. However, this
isn't always a good idea, so the ``depth`` argument control how many "levels"
the select-related behavior will traverse.
"""
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Who remembers high school biology?
@python_2_unicode_compatible
class Domain(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Kingdom(models.Model):
name = models.CharField(max_length=50)
domain = models.ForeignKey(Domain, models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Phylum(models.Model):
name = models.CharField(max_length=50)
kingdom = models.ForeignKey(Kingdom, models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Klass(models.Model):
name = models.CharField(max_length=50)
phylum = models.ForeignKey(Phylum, models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Order(models.Model):
name = models.CharField(max_length=50)
klass = models.ForeignKey(Klass, models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Family(models.Model):
name = models.CharField(max_length=50)
order = models.ForeignKey(Order, models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Genus(models.Model):
name = models.CharField(max_length=50)
family = models.ForeignKey(Family, models.CASCADE)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Species(models.Model):
name = models.CharField(max_length=50)
genus = models.ForeignKey(Genus, models.CASCADE)
def __str__(self):
return self.name
# and we'll invent a new thing so we have a model with two foreign keys
@python_2_unicode_compatible
class HybridSpecies(models.Model):
name = models.CharField(max_length=50)
parent_1 = models.ForeignKey(Species, models.CASCADE, related_name='child_1')
parent_2 = models.ForeignKey(Species, models.CASCADE, related_name='child_2')
def __str__(self):
return self.name
@python_2_unicode_compatible
class Topping(models.Model):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Pizza(models.Model):
name = models.CharField(max_length=100)
toppings = models.ManyToManyField(Topping)
def __str__(self):
return self.name
@python_2_unicode_compatible
class TaggedItem(models.Model):
tag = models.CharField(max_length=30)
content_type = models.ForeignKey(ContentType, models.CASCADE, related_name='select_related_tagged_items')
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
def __str__(self):
return self.tag
@python_2_unicode_compatible
class Bookmark(models.Model):
url = models.URLField()
tags = GenericRelation(TaggedItem)
def __str__(self):
return self.url
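# Illustrative sketch (not part of these test models): the taxonomy
# chain above is what lets select_related() pull a whole ancestry in
# one query. The lookup value 'sapiens' is an example only.
def _example_single_query_traversal():
    species = Species.objects.select_related(
        'genus__family__order__klass__phylum__kingdom__domain'
    ).get(name='sapiens')
    # No further queries: every hop below is already cached.
    return species.genus.family.order.klass.phylum.kingdom.domain.name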
| bsd-3-clause |
PaulKinlan/cli-caniuse | site/app/scripts/bower_components/jsrepl-build/extern/python/reloop-closured/lib/python2.7/distutils/cygwinccompiler.py | 132 | 17270 | """distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate a import library for its dll
# - create a def-file for python??.dll
# - create a import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
#   --export-all-symbols because it didn't work reliably in some
#   tested configurations. And because other Windows compilers also
#   need their symbols specified, this is no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
#   - using gcc -mdll instead of dllwrap doesn't work without -static because
#     it tries to link against dlls instead of their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries,
#     this is the Windows standard, and the dlls normally do not contain the
#     necessary symbols.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id$"
import os,sys,copy
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils import log
def get_msvcr():
"""Include the appropriate MSVC runtime library if Python was built
with MSVC 7.0 or later.
"""
msc_pos = sys.version.find('MSC v.')
if msc_pos != -1:
msc_ver = sys.version[msc_pos+6:msc_pos+10]
if msc_ver == '1300':
# MSVC 7.0
return ['msvcr70']
elif msc_ver == '1310':
# MSVC 7.1
return ['msvcr71']
elif msc_ver == '1400':
# VS2005 / MSVC 8.0
return ['msvcr80']
elif msc_ver == '1500':
# VS2008 / MSVC 9.0
return ['msvcr90']
else:
raise ValueError("Unknown MS Compiler version %s " % msc_ver)
class CygwinCCompiler (UnixCCompiler):
compiler_type = 'cygwin'
obj_extension = ".o"
static_lib_extension = ".a"
shared_lib_extension = ".dll"
static_lib_format = "lib%s%s"
shared_lib_format = "%s%s"
exe_extension = ".exe"
def __init__ (self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__ (self, verbose, dry_run, force)
(status, details) = check_config_h()
self.debug_print("Python's GCC status: %s (details: %s)" %
(status, details))
if status is not CONFIG_H_OK:
self.warn(
"Python's pyconfig.h doesn't seem to support your compiler. "
"Reason: %s. "
"Compiling may fail because of undefined preprocessor macros."
% details)
self.gcc_version, self.ld_version, self.dllwrap_version = \
get_versions()
self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
(self.gcc_version,
self.ld_version,
self.dllwrap_version) )
# ld_version >= "2.10.90" and < "2.13" should also be able to use
# gcc -mdll instead of dllwrap
# Older dllwraps had own version numbers, newer ones use the
# same as the rest of binutils ( also ld )
# dllwrap 2.10.90 is buggy
if self.ld_version >= "2.10.90":
self.linker_dll = "gcc"
else:
self.linker_dll = "dllwrap"
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# Hard-code GCC because that's what this is all about.
# XXX optimization, warnings etc. should be customizable.
self.set_executables(compiler='gcc -mcygwin -O -Wall',
compiler_so='gcc -mcygwin -mdll -O -Wall',
compiler_cxx='g++ -mcygwin -O -Wall',
linker_exe='gcc -mcygwin',
linker_so=('%s -mcygwin %s' %
(self.linker_dll, shared_option)))
# cygwin and mingw32 need different sets of libraries
if self.gcc_version == "2.91.57":
# cygwin shouldn't need msvcrt, but without the dlls will crash
# (gcc version 2.91.57) -- perhaps something about initialization
self.dll_libraries=["msvcrt"]
self.warn(
"Consider upgrading to a newer version of gcc")
else:
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or later.
self.dll_libraries = get_msvcr()
# __init__ ()
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
if ext == '.rc' or ext == '.res':
# gcc needs '.res' and '.rc' compiled to object files !!!
try:
self.spawn(["windres", "-i", src, "-o", obj])
except DistutilsExecError, msg:
raise CompileError, msg
else: # for other files use the C-compiler
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError, msg:
raise CompileError, msg
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# use separate copies, so we can modify the lists
extra_preargs = copy.copy(extra_preargs or [])
libraries = copy.copy(libraries or [])
objects = copy.copy(objects or [])
# Additional libraries
libraries.extend(self.dll_libraries)
# handle export symbols by creating a def-file
# with executables this only works with gcc/ld as linker
if ((export_symbols is not None) and
(target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
# (The linker doesn't do anything if output is up-to-date.
        # So it would probably be better to check if we really need this,
# but for this we had to insert some unchanged parts of
# UnixCCompiler, and this is not what we want.)
# we want to put some files in the same directory as the
# object files are, build_temp doesn't help much
# where are the object files
temp_dir = os.path.dirname(objects[0])
# name of dll to give the helper files the same base name
(dll_name, dll_extension) = os.path.splitext(
os.path.basename(output_filename))
# generate the filenames for these files
def_file = os.path.join(temp_dir, dll_name + ".def")
lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
# Generate .def file
contents = [
"LIBRARY %s" % os.path.basename(output_filename),
"EXPORTS"]
for sym in export_symbols:
contents.append(sym)
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# next add options for def-file and to creating import libraries
# dllwrap uses different options than gcc/ld
if self.linker_dll == "dllwrap":
extra_preargs.extend(["--output-lib", lib_file])
# for dllwrap we have to use a special option
extra_preargs.extend(["--def", def_file])
# we use gcc/ld here and can be sure ld is >= 2.9.10
else:
# doesn't work: bfd_close build\...\libfoo.a: Invalid operation
#extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
# for gcc/ld the def-file is specified as any object files
objects.append(def_file)
#end: if ((export_symbols is not None) and
# (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
        # whoever wants symbols and a many-times-larger output file
# should explicitly switch the debug mode on
# otherwise we let dllwrap/ld strip the output file
# (On my machine: 10KB < stripped_file < ??100KB
# unstripped_file = stripped_file + XXX KB
# ( XXX=254 for a typical python extension))
if not debug:
extra_preargs.append("-s")
UnixCCompiler.link(self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None, # export_symbols, we do this in our def-file
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang)
# link ()
# -- Miscellaneous methods -----------------------------------------
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res' or ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
# class CygwinCCompiler
# the same as cygwin plus some additional parameters
class Mingw32CCompiler (CygwinCCompiler):
compiler_type = 'mingw32'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CygwinCCompiler.__init__ (self, verbose, dry_run, force)
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# A real mingw32 doesn't need to specify a different entry point,
# but cygwin 2.91.57 in no-cygwin-mode needs it.
if self.gcc_version <= "2.91.57":
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
self.set_executables(compiler='gcc -mno-cygwin -O -Wall',
compiler_so='gcc -mno-cygwin -mdll -O -Wall',
compiler_cxx='g++ -mno-cygwin -O -Wall',
linker_exe='gcc -mno-cygwin',
linker_so='%s -mno-cygwin %s %s'
% (self.linker_dll, shared_option,
entry_point))
# Maybe we should also append -mthreads, but then the finished
# dlls need another dll (mingwm10.dll see Mingw32 docs)
# (-mthreads: Support thread-safe exception handling on `Mingw32')
# no additional libraries needed
self.dll_libraries=[]
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or later.
self.dll_libraries = get_msvcr()
# __init__ ()
# class Mingw32CCompiler
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if they are using an unmodified
# version.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
"""Check if the current Python installation (specifically, pyconfig.h)
appears amenable to building extensions with GCC. Returns a tuple
(status, details), where 'status' is one of the following constants:
CONFIG_H_OK
all is well, go ahead and compile
CONFIG_H_NOTOK
doesn't look good
CONFIG_H_UNCERTAIN
not sure -- unable to read pyconfig.h
'details' is a human-readable string explaining the situation.
Note there are two ways to conclude "OK": either 'sys.version' contains
the string "GCC" (implying that this Python was built with GCC), or the
installed "pyconfig.h" contains the string "__GNUC__".
"""
# XXX since this function also checks sys.version, it's not strictly a
# "pyconfig.h" check -- should probably be renamed...
from distutils import sysconfig
import string
# if sys.version contains GCC then python was compiled with
# GCC, and the pyconfig.h file should be OK
if string.find(sys.version,"GCC") >= 0:
return (CONFIG_H_OK, "sys.version mentions 'GCC'")
fn = sysconfig.get_config_h_filename()
try:
        # It would probably be better to read single lines to search.
# But we do this only once, and it is fast enough
f = open(fn)
try:
s = f.read()
finally:
f.close()
except IOError, exc:
# if we can't read this file, we cannot say it is wrong
# the compiler will complain later about this file as missing
return (CONFIG_H_UNCERTAIN,
"couldn't read '%s': %s" % (fn, exc.strerror))
else:
# "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
if string.find(s,"__GNUC__") >= 0:
return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
else:
return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def get_versions():
""" Try to find out the versions of gcc, ld and dllwrap.
    If a version cannot be determined, None is returned for it.
"""
from distutils.version import LooseVersion
from distutils.spawn import find_executable
import re
gcc_exe = find_executable('gcc')
if gcc_exe:
out = os.popen(gcc_exe + ' -dumpversion','r')
out_string = out.read()
out.close()
result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
if result:
gcc_version = LooseVersion(result.group(1))
else:
gcc_version = None
else:
gcc_version = None
ld_exe = find_executable('ld')
if ld_exe:
out = os.popen(ld_exe + ' -v','r')
out_string = out.read()
out.close()
result = re.search('(\d+\.\d+(\.\d+)*)',out_string)
if result:
ld_version = LooseVersion(result.group(1))
else:
ld_version = None
else:
ld_version = None
dllwrap_exe = find_executable('dllwrap')
if dllwrap_exe:
out = os.popen(dllwrap_exe + ' --version','r')
out_string = out.read()
out.close()
result = re.search(' (\d+\.\d+(\.\d+)*)',out_string)
if result:
dllwrap_version = LooseVersion(result.group(1))
else:
dllwrap_version = None
else:
dllwrap_version = None
return (gcc_version, ld_version, dllwrap_version)
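# Illustrative sketch (not part of the original module): combining the
# probes above when diagnosing a build environment; the report format
# is an example only.
def _describe_build_environment():
    status, details = check_config_h()
    gcc, ld, dllwrap = get_versions()
    return "pyconfig.h: %s (%s); gcc=%s ld=%s dllwrap=%s" % (
        status, details, gcc, ld, dllwrap)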
| apache-2.0 |
lseyesl/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/model/queues.py | 120 | 3658 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
from config.queues import all_queue_names
from model.activeworkitems import ActiveWorkItems
from model.workitems import WorkItems
class Queue(object):
def __init__(self, name):
assert(name in all_queue_names)
self._name = name
@classmethod
def queue_with_name(cls, queue_name):
if queue_name not in all_queue_names:
return None
return Queue(queue_name)
@classmethod
def all(cls):
return [Queue(name) for name in all_queue_names]
@classmethod
def all_ews(cls):
return [queue for queue in cls.all() if queue.is_ews()]
def name(self):
return self._name
def work_items(self):
return WorkItems.lookup_by_queue(self._name)
# FIXME: active_work_items is a bad name for this lock-table.
def active_work_items(self):
return ActiveWorkItems.lookup_by_queue(self._name)
def _caplitalize_after_dash(self, string):
return "-".join([word[0].upper() + word[1:] for word in string.split("-")])
# For use in status bubbles or table headers
def short_name(self):
short_name = self._name.replace("-ews", "")
short_name = short_name.replace("-queue", "")
return self._caplitalize_after_dash(short_name.capitalize())
def display_name(self):
display_name = self._name.replace("-", " ")
display_name = display_name.title()
display_name = display_name.replace("Wk2", "WK2")
display_name = display_name.replace("Ews", "EWS")
return display_name
_dash_regexp = re.compile("-")
def name_with_underscores(self):
return self._dash_regexp.sub("_", self._name)
def is_ews(self):
# Note: The style-queue is just like an EWS in that it has an EWS
# bubble, and it works off of the r? patches. If at some later
# point code wants to not treat the style-queue as an EWS
# (e.g. expecting is_ews() queues to have build results?)
# then we should fix all callers and change this check.
return self._name.endswith("-ews") or self._name == "style-queue"
| bsd-3-clause |
dezynetechnologies/odoo | addons/website_blog/wizard/document_page_show_diff.py | 372 | 2184 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class showdiff(osv.osv_memory):
""" Disp[ay Difference for History """
_name = 'blog.post.history.show_diff'
def get_diff(self, cr, uid, context=None):
if context is None:
context = {}
history = self.pool.get('blog.post.history')
ids = context.get('active_ids', [])
diff = ""
if len(ids) == 2:
if ids[0] > ids[1]:
diff = history.getDiff(cr, uid, ids[1], ids[0])
else:
diff = history.getDiff(cr, uid, ids[0], ids[1])
elif len(ids) == 1:
old = history.browse(cr, uid, ids[0])
nids = history.search(cr, uid, [('post_id', '=', old.post_id.id)])
nids.sort()
diff = history.getDiff(cr, uid, ids[0], nids[-1])
else:
raise osv.except_osv(_('Warning!'), _('You need to select minimum one or maximum two history revisions!'))
return diff
_columns = {
'diff': fields.text('Diff', readonly=True),
}
_defaults = {
'diff': get_diff
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gtlzxt/synergy | ext/toolchain/generators.py | 33 | 2582 | # synergy -- mouse and keyboard sharing utility
# Copyright (C) 2012 Synergy Si Ltd.
# Copyright (C) 2009 Nick Bolton
#
# This package is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# found in the file LICENSE that should have accompanied this file.
#
# This package is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class Generator(object):
def __init__(self, cmakeName, buildDir='build', sourceDir='..', binDir='bin'):
self.cmakeName = cmakeName
self.buildDir = buildDir
self.sourceDir = sourceDir
self.binDir = binDir
def getBuildDir(self, target):
return self.buildDir
def getBinDir(self, target=''):
return self.binDir
def getSourceDir(self):
return self.sourceDir
class VisualStudioGenerator(Generator):
def __init__(self, version):
super(VisualStudioGenerator, self).__init__('Visual Studio ' + version)
def getBinDir(self, target=''):
return super(VisualStudioGenerator, self).getBinDir(target) + '/' + target
class MakefilesGenerator(Generator):
def __init__(self):
super(MakefilesGenerator, self).__init__('Unix Makefiles')
def getBuildDir(self, target):
return super(MakefilesGenerator, self).getBuildDir(target) + '/' + target
def getBinDir(self, target=''):
workingDir = super(MakefilesGenerator, self).getBinDir(target)
# only put debug files in separate bin dir
if target == 'debug':
workingDir += '/debug'
return workingDir
def getSourceDir(self):
return super(MakefilesGenerator, self).getSourceDir() + '/..'
class XcodeGenerator(Generator):
def __init__(self):
super(XcodeGenerator, self).__init__('Xcode')
def getBinDir(self, target=''):
if target == "":
return super(XcodeGenerator, self).getBinDir(target)
xcodeTarget = target[0].upper() + target[1:]
return super(XcodeGenerator, self).getBinDir(target) + '/' + xcodeTarget
class EclipseGenerator(Generator):
def __init__(self):
super(EclipseGenerator, self).__init__('Eclipse CDT4 - Unix Makefiles', '', '')
def getBuildDir(self, target):
# eclipse only works with in-source build.
return ''
def getBinDir(self, target=''):
# eclipse only works with in-source build.
return 'bin'
def getSourceDir(self):
return ''
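# Illustrative sketch (not part of the original module): how a build
# script might derive working directories from a generator.
def _example_paths():
    gen = MakefilesGenerator()
    return (gen.getBuildDir('debug'),   # 'build/debug'
            gen.getBinDir('debug'),     # 'bin/debug'
            gen.getSourceDir())         # '../..'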
| gpl-2.0 |
pombredanne/logbook | tests/test_logging_api.py | 6 | 2823 | import pickle
import sys
import logbook
from logbook.helpers import iteritems, xrange, u
import pytest
def test_basic_logging(active_handler, logger):
logger.warn('This is a warning. Nice hah?')
assert active_handler.has_warning('This is a warning. Nice hah?')
assert active_handler.formatted_records == [
'[WARNING] testlogger: This is a warning. Nice hah?']
def test_exception_catching(active_handler, logger):
assert not active_handler.has_error()
try:
1 / 0
except Exception:
logger.exception()
try:
1 / 0
except Exception:
logger.exception('Awesome')
assert active_handler.has_error('Uncaught exception occurred')
assert active_handler.has_error('Awesome')
assert active_handler.records[0].exc_info is not None
assert '1 / 0' in active_handler.records[0].formatted_exception
def test_exception_catching_with_unicode():
""" See https://github.com/getlogbook/logbook/issues/104
"""
try:
raise Exception(u('\u202a test \u202c'))
except:
r = logbook.LogRecord('channel', 'DEBUG', 'test',
exc_info=sys.exc_info())
r.exception_message
@pytest.mark.parametrize('as_tuple', [True, False])
def test_exc_info(as_tuple, logger, active_handler):
try:
1 / 0
except Exception:
exc_info = sys.exc_info()
logger.info("Exception caught",
exc_info=exc_info if as_tuple else True)
assert active_handler.records[0].exc_info is not None
assert active_handler.records[0].exc_info == exc_info
def test_to_dict(logger, active_handler):
try:
1 / 0
except Exception:
logger.exception()
record = active_handler.records[0]
exported = record.to_dict()
record.close()
imported = logbook.LogRecord.from_dict(exported)
for key, value in iteritems(record.__dict__):
if key[0] == '_':
continue
assert value == getattr(imported, key)
def test_pickle(active_handler, logger):
try:
1 / 0
except Exception:
logger.exception()
record = active_handler.records[0]
record.pull_information()
record.close()
for p in xrange(pickle.HIGHEST_PROTOCOL):
exported = pickle.dumps(record, p)
imported = pickle.loads(exported)
for key, value in iteritems(record.__dict__):
if key[0] == '_':
continue
imported_value = getattr(imported, key)
if isinstance(value, ZeroDivisionError):
# in Python 3.2, ZeroDivisionError(x) != ZeroDivisionError(x)
assert type(value) is type(imported_value)
assert value.args == imported_value.args
else:
assert value == imported_value
| bsd-3-clause |
google-code-export/django-hotclub | libs/external_libs/dateutil/dateutil/parser.py | 13 | 32492 | # -*- coding:iso-8859-1 -*-
"""
Copyright (c) 2003-2005 Gustavo Niemeyer <[email protected]>
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer <[email protected]>"
__license__ = "PSF License"
import os.path
import string
import sys
import time
import datetime
import relativedelta
import tz
__all__ = ["parse", "parserinfo"]
# Some pointers:
#
# http://www.cl.cam.ac.uk/~mgk25/iso-time.html
# http://www.iso.ch/iso/en/prods-services/popstds/datesandtime.html
# http://www.w3.org/TR/NOTE-datetime
# http://ringmaster.arc.nasa.gov/tools/time_formats.html
# http://search.cpan.org/author/MUIR/Time-modules-2003.0211/lib/Time/ParseDate.pm
# http://stein.cshl.org/jade/distrib/docs/java.text.SimpleDateFormat.html
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class _timelex:
def __init__(self, instream):
if isinstance(instream, basestring):
instream = StringIO(instream)
self.instream = instream
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ_'
'ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.numchars = '0123456789'
self.whitespace = ' \t\r\n'
self.charstack = []
self.tokenstack = []
self.eof = False
def get_token(self):
if self.tokenstack:
return self.tokenstack.pop(0)
seenletters = False
token = None
state = None
wordchars = self.wordchars
numchars = self.numchars
whitespace = self.whitespace
while not self.eof:
if self.charstack:
nextchar = self.charstack.pop(0)
else:
nextchar = self.instream.read(1)
while nextchar == '\x00':
nextchar = self.instream.read(1)
if not nextchar:
self.eof = True
break
elif not state:
token = nextchar
if nextchar in wordchars:
state = 'a'
elif nextchar in numchars:
state = '0'
elif nextchar in whitespace:
token = ' '
break # emit token
else:
break # emit token
elif state == 'a':
seenletters = True
if nextchar in wordchars:
token += nextchar
elif nextchar == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0':
if nextchar in numchars:
token += nextchar
elif nextchar == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == 'a.':
seenletters = True
if nextchar == '.' or nextchar in wordchars:
token += nextchar
elif nextchar in numchars and token[-1] == '.':
token += nextchar
state = '0.'
else:
self.charstack.append(nextchar)
break # emit token
elif state == '0.':
if nextchar == '.' or nextchar in numchars:
token += nextchar
elif nextchar in wordchars and token[-1] == '.':
token += nextchar
state = 'a.'
else:
self.charstack.append(nextchar)
break # emit token
if (state in ('a.', '0.') and
(seenletters or token.count('.') > 1 or token[-1] == '.')):
l = token.split('.')
token = l[0]
for tok in l[1:]:
self.tokenstack.append('.')
if tok:
self.tokenstack.append(tok)
return token
def __iter__(self):
return self
def next(self):
token = self.get_token()
if token is None:
raise StopIteration
return token
def split(cls, s):
return list(cls(s))
split = classmethod(split)
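# Illustrative sketch (not part of the original module): how the lexer
# above tokenizes a timestamp (input is an example only).
#
#     _timelex.split("2003-09-25T10:49:41")
#     # -> ['2003', '-', '09', '-', '25', 'T', '10', ':', '49', ':', '41']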
class _resultbase(object):
def __init__(self):
for attr in self.__slots__:
setattr(self, attr, None)
def _repr(self, classname):
l = []
for attr in self.__slots__:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, `value`))
return "%s(%s)" % (classname, ", ".join(l))
def __repr__(self):
return self._repr(self.__class__.__name__)
class parserinfo:
# m from a.m/p.m, t from ISO T separator
JUMP = [" ", ".", ",", ";", "-", "/", "'",
"at", "on", "and", "ad", "m", "t", "of",
"st", "nd", "rd", "th"]
WEEKDAYS = [("Mon", "Monday"),
("Tue", "Tuesday"),
("Wed", "Wednesday"),
("Thu", "Thursday"),
("Fri", "Friday"),
("Sat", "Saturday"),
("Sun", "Sunday")]
MONTHS = [("Jan", "January"),
("Feb", "February"),
("Mar", "March"),
("Apr", "April"),
("May", "May"),
("Jun", "June"),
("Jul", "July"),
("Aug", "August"),
("Sep", "September"),
("Oct", "October"),
("Nov", "November"),
("Dec", "December")]
HMS = [("h", "hour", "hours"),
("m", "minute", "minutes"),
("s", "second", "seconds")]
AMPM = [("am", "a"),
("pm", "p")]
UTCZONE = ["UTC", "GMT", "Z"]
PERTAIN = ["of"]
TZOFFSET = {}
def __init__(self, dayfirst=False, yearfirst=False):
self._jump = self._convert(self.JUMP)
self._weekdays = self._convert(self.WEEKDAYS)
self._months = self._convert(self.MONTHS)
self._hms = self._convert(self.HMS)
self._ampm = self._convert(self.AMPM)
self._utczone = self._convert(self.UTCZONE)
self._pertain = self._convert(self.PERTAIN)
self.dayfirst = dayfirst
self.yearfirst = yearfirst
self._year = time.localtime().tm_year
self._century = self._year/100*100
def _convert(self, lst):
dct = {}
for i in range(len(lst)):
v = lst[i]
if isinstance(v, tuple):
for v in v:
dct[v.lower()] = i
else:
dct[v.lower()] = i
return dct
def jump(self, name):
return name.lower() in self._jump
def weekday(self, name):
if len(name) >= 3:
try:
return self._weekdays[name.lower()]
except KeyError:
pass
return None
def month(self, name):
if len(name) >= 3:
try:
return self._months[name.lower()]+1
except KeyError:
pass
return None
def hms(self, name):
try:
return self._hms[name.lower()]
except KeyError:
return None
def ampm(self, name):
try:
return self._ampm[name.lower()]
except KeyError:
return None
def pertain(self, name):
return name.lower() in self._pertain
def utczone(self, name):
return name.lower() in self._utczone
def tzoffset(self, name):
if name in self._utczone:
return 0
return self.TZOFFSET.get(name)
def convertyear(self, year):
if year < 100:
year += self._century
if abs(year-self._year) >= 50:
if year < self._year:
year += 100
else:
year -= 100
return year
def validate(self, res):
# move to info
if res.year is not None:
res.year = self.convertyear(res.year)
if res.tzoffset == 0 and not res.tzname or res.tzname == 'Z':
res.tzname = "UTC"
res.tzoffset = 0
elif res.tzoffset != 0 and res.tzname and self.utczone(res.tzname):
res.tzoffset = 0
return True
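# Illustrative sketch (not part of the original module): parserinfo is
# designed to be subclassed for customization, e.g. teaching the parser
# a fixed offset (in seconds) for an extra timezone abbreviation. The
# zone below is an example only.
class _ExampleParserInfo(parserinfo):
    TZOFFSET = {"BRST": -2 * 3600}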
class parser:
def __init__(self, info=parserinfo):
if issubclass(info, parserinfo):
self.info = parserinfo()
elif isinstance(info, parserinfo):
self.info = info
else:
raise TypeError, "Unsupported parserinfo type"
def parse(self, timestr, default=None,
ignoretz=False, tzinfos=None,
**kwargs):
if not default:
default = datetime.datetime.now().replace(hour=0, minute=0,
second=0, microsecond=0)
res = self._parse(timestr, **kwargs)
if res is None:
raise ValueError, "unknown string format"
repl = {}
for attr in ["year", "month", "day", "hour",
"minute", "second", "microsecond"]:
value = getattr(res, attr)
if value is not None:
repl[attr] = value
ret = default.replace(**repl)
if res.weekday is not None and not res.day:
ret = ret+relativedelta.relativedelta(weekday=res.weekday)
if not ignoretz:
if callable(tzinfos) or tzinfos and res.tzname in tzinfos:
if callable(tzinfos):
tzdata = tzinfos(res.tzname, res.tzoffset)
else:
tzdata = tzinfos.get(res.tzname)
if isinstance(tzdata, datetime.tzinfo):
tzinfo = tzdata
elif isinstance(tzdata, basestring):
tzinfo = tz.tzstr(tzdata)
elif isinstance(tzdata, int):
tzinfo = tz.tzoffset(res.tzname, tzdata)
else:
raise ValueError, "offset must be tzinfo subclass, " \
"tz string, or int offset"
ret = ret.replace(tzinfo=tzinfo)
elif res.tzname and res.tzname in time.tzname:
ret = ret.replace(tzinfo=tz.tzlocal())
elif res.tzoffset == 0:
ret = ret.replace(tzinfo=tz.tzutc())
elif res.tzoffset:
ret = ret.replace(tzinfo=tz.tzoffset(res.tzname, res.tzoffset))
return ret
class _result(_resultbase):
__slots__ = ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond",
"tzname", "tzoffset"]
def _parse(self, timestr, dayfirst=None, yearfirst=None, fuzzy=False):
info = self.info
if dayfirst is None:
dayfirst = info.dayfirst
if yearfirst is None:
yearfirst = info.yearfirst
res = self._result()
l = _timelex.split(timestr)
try:
# year/month/day list
ymd = []
# Index of the month string in ymd
mstridx = -1
len_l = len(l)
i = 0
while i < len_l:
# Check if it's a number
try:
value = float(l[i])
except ValueError:
value = None
if value is not None:
# Token is a number
len_li = len(l[i])
i += 1
if (len(ymd) == 3 and len_li in (2, 4)
and (i >= len_l or (l[i] != ':' and
info.hms(l[i]) is None))):
# 19990101T23[59]
s = l[i-1]
res.hour = int(s[:2])
if len_li == 4:
res.minute = int(s[2:])
elif len_li == 6 or (len_li > 6 and l[i-1].find('.') == 6):
# YYMMDD or HHMMSS[.ss]
s = l[i-1]
if not ymd and l[i-1].find('.') == -1:
ymd.append(info.convertyear(int(s[:2])))
ymd.append(int(s[2:4]))
ymd.append(int(s[4:]))
else:
# 19990101T235959[.59]
res.hour = int(s[:2])
res.minute = int(s[2:4])
value = float(s[4:])
res.second = int(value)
if value%1:
res.microsecond = int(1000000*(value%1))
elif len_li == 8:
# YYYYMMDD
s = l[i-1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:]))
elif len_li in (12, 14):
# YYYYMMDDhhmm[ss]
s = l[i-1]
ymd.append(int(s[:4]))
ymd.append(int(s[4:6]))
ymd.append(int(s[6:8]))
res.hour = int(s[8:10])
res.minute = int(s[10:12])
if len_li == 14:
res.second = int(s[12:])
elif ((i < len_l and info.hms(l[i]) is not None) or
(i+1 < len_l and l[i] == ' ' and
info.hms(l[i+1]) is not None)):
# HH[ ]h or MM[ ]m or SS[.ss][ ]s
if l[i] == ' ':
i += 1
idx = info.hms(l[i])
while True:
if idx == 0:
res.hour = int(value)
if value%1:
res.minute = int(60*(value%1))
elif idx == 1:
res.minute = int(value)
if value%1:
res.second = int(60*(value%1))
elif idx == 2:
res.second = int(value)
if value%1:
res.microsecond = int(1000000*(value%1))
i += 1
if i >= len_l or idx == 2:
break
# 12h00
try:
value = float(l[i])
except ValueError:
break
else:
i += 1
idx += 1
if i < len_l:
newidx = info.hms(l[i])
if newidx is not None:
idx = newidx
elif i+1 < len_l and l[i] == ':':
# HH:MM[:SS[.ss]]
res.hour = int(value)
i += 1
value = float(l[i])
res.minute = int(value)
if value%1:
res.second = int(60*(value%1))
i += 1
if i < len_l and l[i] == ':':
value = float(l[i+1])
res.second = int(value)
if value%1:
res.microsecond = int(1000000*(value%1))
i += 2
elif i < len_l and l[i] in ('-', '/', '.'):
sep = l[i]
ymd.append(int(value))
i += 1
if i < len_l and not info.jump(l[i]):
try:
# 01-01[-01]
ymd.append(int(l[i]))
except ValueError:
# 01-Jan[-01]
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd)-1
else:
return None
i += 1
if i < len_l and l[i] == sep:
# We have three members
i += 1
value = info.month(l[i])
if value is not None:
ymd.append(value)
                                assert mstridx == -1
                                mstridx = len(ymd)-1
else:
ymd.append(int(l[i]))
i += 1
elif i >= len_l or info.jump(l[i]):
if i+1 < len_l and info.ampm(l[i+1]) is not None:
# 12 am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i+1]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i+1]) == 0:
res.hour = 0
i += 1
else:
# Year, month or day
ymd.append(int(value))
i += 1
elif info.ampm(l[i]) is not None:
# 12am
res.hour = int(value)
if res.hour < 12 and info.ampm(l[i]) == 1:
res.hour += 12
elif res.hour == 12 and info.ampm(l[i]) == 0:
res.hour = 0
i += 1
elif not fuzzy:
return None
else:
i += 1
continue
# Check weekday
value = info.weekday(l[i])
if value is not None:
res.weekday = value
i += 1
continue
# Check month name
value = info.month(l[i])
if value is not None:
ymd.append(value)
assert mstridx == -1
mstridx = len(ymd)-1
i += 1
if i < len_l:
if l[i] in ('-', '/'):
# Jan-01[-99]
sep = l[i]
i += 1
ymd.append(int(l[i]))
i += 1
if i < len_l and l[i] == sep:
# Jan-01-99
i += 1
ymd.append(int(l[i]))
i += 1
elif (i+3 < len_l and l[i] == l[i+2] == ' '
and info.pertain(l[i+1])):
# Jan of 01
# In this case, 01 is clearly year
try:
value = int(l[i+3])
except ValueError:
# Wrong guess
pass
else:
# Convert it here to become unambiguous
ymd.append(info.convertyear(value))
i += 4
continue
# Check am/pm
value = info.ampm(l[i])
if value is not None:
if value == 1 and res.hour < 12:
res.hour += 12
elif value == 0 and res.hour == 12:
res.hour = 0
i += 1
continue
# Check for a timezone name
if (res.hour is not None and len(l[i]) <= 5 and
res.tzname is None and res.tzoffset is None and
not [x for x in l[i] if x not in string.ascii_uppercase]):
res.tzname = l[i]
res.tzoffset = info.tzoffset(res.tzname)
i += 1
# Check for something like GMT+3, or BRST+3. Notice
# that it doesn't mean "I am 3 hours after GMT", but
# "my time +3 is GMT". If found, we reverse the
# logic so that timezone parsing code will get it
# right.
if i < len_l and l[i] in ('+', '-'):
l[i] = ('+', '-')[l[i] == '+']
res.tzoffset = None
if info.utczone(res.tzname):
# With something like GMT+3, the timezone
# is *not* GMT.
res.tzname = None
continue
# Check for a numbered timezone
if res.hour is not None and l[i] in ('+', '-'):
signal = (-1,1)[l[i] == '+']
i += 1
len_li = len(l[i])
if len_li == 4:
# -0300
res.tzoffset = int(l[i][:2])*3600+int(l[i][2:])*60
elif i+1 < len_l and l[i+1] == ':':
# -03:00
res.tzoffset = int(l[i])*3600+int(l[i+2])*60
i += 2
elif len_li <= 2:
# -[0]3
res.tzoffset = int(l[i][:2])*3600
else:
return None
i += 1
res.tzoffset *= signal
# Look for a timezone name between parenthesis
if (i+3 < len_l and
info.jump(l[i]) and l[i+1] == '(' and l[i+3] == ')' and
3 <= len(l[i+2]) <= 5 and
not [x for x in l[i+2]
if x not in string.ascii_uppercase]):
# -0300 (BRST)
res.tzname = l[i+2]
i += 4
continue
# Check jumps
if not (info.jump(l[i]) or fuzzy):
return None
i += 1
# Process year/month/day
len_ymd = len(ymd)
if len_ymd > 3:
# More than three members!?
return None
elif len_ymd == 1 or (mstridx != -1 and len_ymd == 2):
# One member, or two members with a month string
if mstridx != -1:
res.month = ymd[mstridx]
del ymd[mstridx]
if len_ymd > 1 or mstridx == -1:
if ymd[0] > 31:
res.year = ymd[0]
else:
res.day = ymd[0]
elif len_ymd == 2:
# Two members with numbers
if ymd[0] > 31:
# 99-01
res.year, res.month = ymd
elif ymd[1] > 31:
# 01-99
res.month, res.year = ymd
elif dayfirst and ymd[1] <= 12:
# 13-01
res.day, res.month = ymd
else:
# 01-13
res.month, res.day = ymd
if len_ymd == 3:
# Three members
if mstridx == 0:
res.month, res.day, res.year = ymd
elif mstridx == 1:
if ymd[0] > 31 or (yearfirst and ymd[2] <= 31):
# 99-Jan-01
res.year, res.month, res.day = ymd
else:
# 01-Jan-01
                        # Give precedence to day-first, since
                        # two-digit years are usually hand-written.
res.day, res.month, res.year = ymd
elif mstridx == 2:
# WTF!?
if ymd[1] > 31:
# 01-99-Jan
res.day, res.year, res.month = ymd
else:
# 99-01-Jan
res.year, res.day, res.month = ymd
else:
if ymd[0] > 31 or \
(yearfirst and ymd[1] <= 12 and ymd[2] <= 31):
# 99-01-01
res.year, res.month, res.day = ymd
elif ymd[0] > 12 or (dayfirst and ymd[1] <= 12):
# 13-01-01
res.day, res.month, res.year = ymd
else:
# 01-13-01
res.month, res.day, res.year = ymd
except (IndexError, ValueError, AssertionError):
return None
if not info.validate(res):
return None
return res
DEFAULTPARSER = parser()
def parse(timestr, parserinfo=None, **kwargs):
if parserinfo:
return parser(parserinfo).parse(timestr, **kwargs)
else:
return DEFAULTPARSER.parse(timestr, **kwargs)
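# Illustrative sketch (not part of the original module): typical calls
# to the module-level helper above (inputs are examples only).
#
#     parse("2003-09-25T10:49:41")
#     # -> datetime.datetime(2003, 9, 25, 10, 49, 41)
#     parse("10-09-2003", dayfirst=True)
#     # -> datetime.datetime(2003, 9, 10, 0, 0)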
class _tzparser:
class _result(_resultbase):
__slots__ = ["stdabbr", "stdoffset", "dstabbr", "dstoffset",
"start", "end"]
class _attr(_resultbase):
__slots__ = ["month", "week", "weekday",
"yday", "jyday", "day", "time"]
def __repr__(self):
return self._repr("")
def __init__(self):
_resultbase.__init__(self)
self.start = self._attr()
self.end = self._attr()
def parse(self, tzstr):
res = self._result()
l = _timelex.split(tzstr)
try:
len_l = len(l)
i = 0
while i < len_l:
# BRST+3[BRDT[+2]]
j = i
while j < len_l and not [x for x in l[j]
if x in "0123456789:,-+"]:
j += 1
if j != i:
if not res.stdabbr:
offattr = "stdoffset"
res.stdabbr = "".join(l[i:j])
else:
offattr = "dstoffset"
res.dstabbr = "".join(l[i:j])
i = j
if (i < len_l and
(l[i] in ('+', '-') or l[i][0] in "0123456789")):
if l[i] in ('+', '-'):
signal = (1,-1)[l[i] == '+']
i += 1
else:
signal = -1
len_li = len(l[i])
if len_li == 4:
# -0300
setattr(res, offattr,
(int(l[i][:2])*3600+int(l[i][2:])*60)*signal)
elif i+1 < len_l and l[i+1] == ':':
# -03:00
setattr(res, offattr,
(int(l[i])*3600+int(l[i+2])*60)*signal)
i += 2
elif len_li <= 2:
# -[0]3
setattr(res, offattr,
int(l[i][:2])*3600*signal)
else:
return None
i += 1
if res.dstabbr:
break
else:
break
if i < len_l:
for j in range(i, len_l):
if l[j] == ';': l[j] = ','
assert l[i] == ','
i += 1
if i >= len_l:
pass
elif (8 <= l.count(',') <= 9 and
not [y for x in l[i:] if x != ','
for y in x if y not in "0123456789"]):
# GMT0BST,3,0,30,3600,10,0,26,7200[,3600]
for x in (res.start, res.end):
x.month = int(l[i])
i += 2
if l[i] == '-':
value = int(l[i+1])*-1
i += 1
else:
value = int(l[i])
i += 2
if value:
x.week = value
x.weekday = (int(l[i])-1)%7
else:
x.day = int(l[i])
i += 2
x.time = int(l[i])
i += 2
if i < len_l:
if l[i] in ('-','+'):
signal = (-1,1)[l[i] == "+"]
i += 1
else:
signal = 1
res.dstoffset = (res.stdoffset+int(l[i]))*signal
elif (l.count(',') == 2 and l[i:].count('/') <= 2 and
not [y for x in l[i:] if x not in (',','/','J','M',
'.','-',':')
for y in x if y not in "0123456789"]):
for x in (res.start, res.end):
if l[i] == 'J':
# non-leap year day (1 based)
i += 1
x.jyday = int(l[i])
elif l[i] == 'M':
# month[-.]week[-.]weekday
i += 1
x.month = int(l[i])
i += 1
assert l[i] in ('-', '.')
i += 1
x.week = int(l[i])
if x.week == 5:
x.week = -1
i += 1
assert l[i] in ('-', '.')
i += 1
x.weekday = (int(l[i])-1)%7
else:
# year day (zero based)
x.yday = int(l[i])+1
i += 1
if i < len_l and l[i] == '/':
i += 1
# start time
len_li = len(l[i])
if len_li == 4:
# -0300
x.time = (int(l[i][:2])*3600+int(l[i][2:])*60)
elif i+1 < len_l and l[i+1] == ':':
# -03:00
x.time = int(l[i])*3600+int(l[i+2])*60
i += 2
if i+1 < len_l and l[i+1] == ':':
i += 2
x.time += int(l[i])
elif len_li <= 2:
# -[0]3
x.time = (int(l[i][:2])*3600)
else:
return None
i += 1
assert i == len_l or l[i] == ','
i += 1
assert i >= len_l
except (IndexError, ValueError, AssertionError):
return None
return res
DEFAULTTZPARSER = _tzparser()
def _parsetz(tzstr):
return DEFAULTTZPARSER.parse(tzstr)
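# Illustrative sketch (not in the original module): _parsetz() decomposes a
# POSIX-style TZ string. Tracing "EST5EDT,M3.2.0/2,M11.1.0/2" through the
# parser above yields, among others:
#
#   res = _parsetz("EST5EDT,M3.2.0/2,M11.1.0/2")
#   res.stdabbr      # 'EST'
#   res.stdoffset    # -18000  (UTC-5, sign already converted)
#   res.dstabbr      # 'EDT'
#   res.start.month  # 3  (with week 2 -> second Sunday of March, 02:00)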
# vim:ts=4:sw=4:et
| mit |
F483/ngcccbase | ngcccbase/tests/test_wallet_model.py | 5 | 4256 | #!/usr/bin/env python
import unittest
from coloredcoinlib import (ColorSet, ColorDataBuilderManager,
AidedColorDataBuilder, ThinColorData)
from ngcccbase.deterministic import DWalletAddressManager
from ngcccbase.pwallet import PersistentWallet
from ngcccbase.txcons import BasicTxSpec, InvalidTargetError
from ngcccbase.txdb import TxDb
from ngcccbase.utxodb import UTXOQuery
from ngcccbase.wallet_model import CoinQueryFactory
from ngcccbase.wallet_controller import WalletController
class TestWalletModel(unittest.TestCase):
def setUp(self):
self.path = ":memory:"
self.config = {
'hdw_master_key':
'91813223e97697c42f05e54b3a85bae601f04526c5c053ff0811747db77cfdf5f1accb50b3765377c379379cd5aa512c38bf24a57e4173ef592305d16314a0f4',
'testnet': True,
'ccc': {'colordb_path' : self.path},
}
self.pwallet = PersistentWallet(self.path, self.config)
self.pwallet.init_model()
self.model = self.pwallet.get_model()
self.colormap = self.model.get_color_map()
self.bcolorset = ColorSet(self.colormap, [''])
self.basset = self.model.get_asset_definition_manager(
).get_asset_by_moniker('bitcoin')
self.cqf = self.model.get_coin_query_factory()
def test_get_tx_db(self):
self.assertTrue(isinstance(self.model.get_tx_db(), TxDb))
def test_is_testnet(self):
self.assertTrue(self.model.is_testnet())
def test_get_coin_query_factory(self):
self.assertTrue(isinstance(self.cqf, CoinQueryFactory))
self.cqf.make_query({'color_set': self.bcolorset})
self.cqf.make_query({'color_id_set': self.bcolorset.color_id_set})
self.cqf.make_query({'asset': self.basset})
self.assertRaises(Exception, self.cqf.make_query, {})
def test_transform(self):
tx_spec = BasicTxSpec(self.model)
self.assertRaises(InvalidTargetError,
self.model.transform_tx_spec, tx_spec, 'signed')
def test_make_query(self):
q = self.model.make_coin_query({'color_set': self.bcolorset})
self.assertTrue(isinstance(q, UTXOQuery))
def test_get_address_manager(self):
m = self.model.get_address_manager()
self.assertTrue(issubclass(m.__class__, DWalletAddressManager))
def test_get_history(self):
self.config['asset_definitions'] = [
{"color_set": [""], "monikers": ["bitcoin"], "unit": 100000000},
{"color_set": ["obc:03524a4d6492e8d43cb6f3906a99be5a1bcd93916241f759812828b301f25a6c:0:153267"], "monikers": ['test'], "unit": 1},]
self.config['hdwam'] = {
"genesis_color_sets": [
["obc:03524a4d6492e8d43cb6f3906a99be5a1bcd93916241f759812828b301f25a6c:0:153267"],
],
"color_set_states": [
{"color_set": [""], "max_index": 1},
{"color_set": ["obc:03524a4d6492e8d43cb6f3906a99be5a1bcd93916241f759812828b301f25a6c:0:153267"], "max_index": 7},
]
}
self.config['bip0032'] = True
self.pwallet = PersistentWallet(self.path, self.config)
self.pwallet.init_model()
self.model = self.pwallet.get_model()
# modify model colored coin context, so test runs faster
ccc = self.model.ccc
cdbuilder = ColorDataBuilderManager(
ccc.colormap, ccc.blockchain_state, ccc.cdstore,
ccc.metastore, AidedColorDataBuilder)
ccc.colordata = ThinColorData(
cdbuilder, ccc.blockchain_state, ccc.cdstore, ccc.colormap)
wc = WalletController(self.model)
adm = self.model.get_asset_definition_manager()
asset = adm.get_asset_by_moniker('test')
self.model.utxo_man.update_all()
cq = self.model.make_coin_query({"asset": asset})
utxo_list = cq.get_result()
# send to the second address so the mempool has something
addrs = wc.get_all_addresses(asset)
wc.send_coins(asset, [addrs[1].get_color_address()], [1000])
history = self.model.get_history_for_asset(asset)
self.assertTrue(len(history) > 30)
if __name__ == '__main__':
unittest.main()
| mit |
marketdial/helloworld | accounts/forms.py | 2 | 5965 | # -*- coding: utf-8 -*-
from flask import render_template, current_app
from wtforms import *
from flask.ext.wtf import Form
from flask_mail import Message
from application import mail
from common.utils import get_signer
from accounts.models import User
class LoginForm(Form):
user = None
username = TextField(
label=u'Login',
validators=[
validators.required()
]
)
password = PasswordField(
label=u'Password',
validators=[
validators.required()
]
)
next = HiddenField()
def validate_username(form, field):
username = field.data
try:
form.user = User.objects.get(username=username)
except:
raise ValidationError(u'Login not found.')
def validate_password(form, field):
password = field.data
if form.user:
if not form.user.check_password(password):
raise ValidationError(u'Incorrect password.')
class SignupForm(Form):
email = TextField(
label=u'E-mail',
validators=[
validators.required(),
validators.length(max=100),
validators.Email()
]
)
def validate_email(form, field):
email = field.data
if User.objects.filter(email=email):
raise ValidationError(u'E-mail in use.')
def save(self):
email = self.email.data
site_name = current_app.config['PROJECT_SITE_NAME']
site_url = current_app.config['PROJECT_SITE_URL']
sender = current_app.config['MAIL_DEFAULT_SENDER']
# create signed data
s = get_signer()
data = {
'email': email,
'signup': True
}
signed_data = s.dumps(data)
# set context to template render
context = dict(
site_name=site_name,
site_url=site_url,
email=email,
signed_data=signed_data
)
# load template
html = render_template(
'accounts/emails/signup.html', **context
)
# create and send message
msg = Message(
u'Confirm your account - {0}.'.format(site_name),
sender=sender,
recipients=[email]
)
msg.html = html
mail.send(msg)
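# Illustrative counterpart (a sketch, not part of this module): the signup
# confirmation view is expected to verify `signed_data` with the same signer
# before showing SignupConfirmForm, along the lines of:
#
#   s = get_signer()
#   data = s.loads(signed_data)   # raises BadSignature if tampered with
#   if data.get('signup'):
#       ...  # render SignupConfirmForm and call its save(data['email'])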
class SignupConfirmForm(Form):
name = TextField(
label=u'Name',
validators=[
validators.required(),
validators.length(max=100)
]
)
username = TextField(
label=u'Login',
validators=[
validators.required(),
validators.length(min=3, max=30),
validators.Regexp(
regex=r'^[\w]+$',
message=u'Just letters and numbers.'
)
]
)
password = PasswordField(
label=u'Password',
validators=[
validators.required(),
validators.length(min=6, max=16)
]
)
password_confirm = PasswordField(
label=u'Password Confirm',
validators=[
validators.required()
]
)
next = HiddenField()
def validate_username(form, field):
username = field.data
if User.objects.filter(username=username):
raise ValidationError(u'Login in use.')
def validate_password_confirm(form, field):
password = form.password.data
password_confirm = field.data
if password != password_confirm:
raise ValidationError(u'Incorrect password.')
def save(self, email):
name = self.name.data
username = self.username.data
password = self.password.data
email = email
user = User(name=name, username=username, password=password, email=email)
user.save()
return user
class RecoverPasswordForm(Form):
email = TextField(
label=u'E-mail',
validators=[
validators.required(),
validators.length(max=100),
validators.Email()
]
)
def validate_email(form, field):
email = field.data
if not User.objects.filter(email=email):
raise ValidationError(u'E-mail not found.')
def save(self):
email = self.email.data
site_name = current_app.config['PROJECT_SITE_NAME']
site_url = current_app.config['PROJECT_SITE_URL']
sender = current_app.config['MAIL_DEFAULT_SENDER']
# create signed data
s = get_signer()
data = {
'email': email,
'recover-password': True
}
signed_data = s.dumps(data)
# set context to template render
context = dict(
site_name=site_name,
site_url=site_url,
email=email,
signed_data=signed_data
)
# load template
html = render_template(
'accounts/emails/recover_password.html', **context
)
# create and send message
msg = Message(
u'Recover your password - {0}.'.format(site_name),
sender=sender,
recipients=[email]
)
msg.html = html
mail.send(msg)
class RecoverPasswordConfirmForm(Form):
password = PasswordField(
label=u'Password',
validators=[
validators.required(),
validators.length(min=6, max=16)
]
)
password_confirm = PasswordField(
label=u'Password Confirm',
validators=[
validators.required()
]
)
def validate_password_confirm(form, field):
password = form.password.data
password_confirm = field.data
if password != password_confirm:
raise ValidationError(u'Incorrect password.')
def save(self):
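        # NOTE (added observation): `self.user` is never assigned in this
        # form; the calling view is expected to attach the User loaded from
        # the signed recovery token before invoking save().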
password = self.password.data
self.user.set_password(password)
self.user.save()
return self.user
| mit |
daniaki/Enrich2 | enrich2/gui/seqlib_apply_dialog.py | 1 | 3571 | # Copyright 2016 Alan F Rubin
#
# This file is part of Enrich2.
#
# Enrich2 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Enrich2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Enrich2. If not, see <http://www.gnu.org/licenses/>.
import tkinter as tk
import tkinter.ttk
import tkinter.simpledialog
import tkinter.messagebox
import tkinter.filedialog
class SeqLibApplyDialog(tkinter.simpledialog.Dialog):
"""
Confirmation dialog box for applying FASTQ filtering
options to selected SeqLibs from the Treeview.
"""
def __init__(self, parent_window, tree, source_id,
title="Confirm Filtering Changes"):
self.tree = tree
self.source_id = source_id
sel = self.tree.treeview.selection()
tree_elem = self.tree.get_element(self.source_id)
self.target_ids = [
x for x in sel
if x != source_id
            and isinstance(self.tree.get_element(x), type(tree_elem))
]
tkinter.simpledialog.Dialog.__init__(self, parent_window, title)
def body(self, master):
"""
Generates the required text listing all SeqLibs that
will have their FASTQ options updated.
Displays the "OK" and "Cancel" buttons.
"""
if len(self.target_ids) == 0:
message_string = "No elegible SeqLibs selected."
elif len(self.target_ids) == 1:
message_string = \
'Apply FASTQ filtering options from "{}" to "{}"?' \
''.format(
self.tree.get_element(self.source_id).name,
self.tree.get_element(self.target_ids[0]).name
)
else:
bullet = " " + u"\u25C6"
message_string = \
'Apply FASTQ filtering options from "{}"" to the following?' \
'\n'.format(self.tree.get_element(self.source_id).name)
for x in self.target_ids:
message_string += u"{bullet} {name}\n".format(
bullet=bullet, name=self.tree.get_element(x).name)
message = tkinter.ttk.Label(
master, text=message_string, justify="left")
message.grid(row=0, sticky="w")
def buttonbox(self):
"""
Display only one button if there's no selection. Otherwise,
use the default method to display two buttons.
"""
if len(self.target_ids) == 0:
box = tk.Frame(self)
w = tk.Button(
box, text="OK", width=10,
command=self.cancel, default="active")
w.pack(side="left", padx=5, pady=5)
self.bind("<Return>", self.cancel)
box.pack()
else:
tkinter.simpledialog.Dialog.buttonbox(self)
def apply(self):
"""
Called when the user chooses "OK". Performs the FASTQ
filtering update.
"""
filter_cfg = self.tree.get_element(self.source_id).serialize_filters()
for x in self.target_ids:
self.tree.get_element(x).filters = filter_cfg
self.tree.refresh_treeview()
| gpl-3.0 |
shinexwang/Classy | Testing/TestMain/profParser.py | 1 | 4044 | """
Copyright 2013 Shine Wang
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import urllib
from webParser import CustomHTMLParser
class RateMyProfParser:
"""We will retrieve the attributes of the instructor"""
# call str.format(last name, school id, pageNum)
# OKAY, this search relies on the assumption that
# the last name is unique enough to narrow profs down to ~10
# which is the limit for a page
requestURL = "http://www.ratemyprofessors.com/SelectTeacher.jsp" \
"?searchName={}&search_submit1=Search&sid={}&pageNo={}"
cacheFile = "teacherCache.txt"
cache = {}
gotCache = False
def __init__(self, name, schoolID="1490"): # professor's name
# 1490 = Waterloo
# TODO: look up some other schools' IDs?
self.name = name.strip()
self.schoolID = schoolID
self.webData = []
def getCache(self):
"""get values of teacher cache"""
if self.gotCache:
# we only want to read the file once
return
self.gotCache = True
try:
# file data stored in standard "key\nvalue\n" format
with open(self.cacheFile, "r") as f:
name = f.readline().strip()
while name:
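                    # eval() assumes the local cache file is trusted; each
                    # value is the (numRatings, quality, easiness) tuple
                    # written by getInfo() below. ast.literal_eval would be
                    # a safer drop-in for this format.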
self.cache[name] = eval(f.readline().strip())
name = f.readline().strip()
except:
return
def getInfo(self):
"""will return (avgRating, numRatings) if exists.
Else, return None"""
# get cache names (if they exist)
self.getCache()
if self.name in self.cache:
return self.cache[self.name]
if self.name == "":
# lecture/tutorial has no name
return
# start at page 1
pageNum = 1
        while pageNum <= 3:  # search at most 3 pages; very common surnames may not be fully covered
# two possible errors (page out of range, or website down)
err = self.getWebData(pageNum)
if err:
return
ret = self.parseWebData()
if ret:
# form of: (# ratings, overall quality, easiness)
with open(self.cacheFile, "a") as f:
f.write(self.name + "\n")
f.write(str(ret) + "\n")
return ret
else:
self.webData = [] # clear the data
pageNum += 1
def getWebData(self, pageNum):
"""fetching data from the webpage"""
try:
URL = self.requestURL.format(self.name.split()[1],
self.schoolID, str(pageNum))
page = urllib.urlopen(URL)
parser = CustomHTMLParser(self.webData)
parser.feed(page.read().replace(" ", " "))
for data in self.webData:
if "Invalid page number" in data or \
"didn't return any results for professors" in data:
# essentially, page out of range
return "InvalidPageError"
except:
return "WebPageError"
def parseWebData(self):
"""parsing the webData list to get attrs"""
"""if we have the desirable attributes"""
firstName, lastName = self.name.split()
for i, data in enumerate(self.webData):
if firstName in data and lastName in data:
# we found it!
return (int(self.webData[i+4]), float(self.webData[i+6]),
float(self.webData[i+8]))
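# Usage sketch (illustrative; the name below is made up): getInfo() returns
# a (numRatings, overallQuality, easiness) tuple, or None when the lookup
# fails or the lecture slot carries no instructor name:
#
#   info = RateMyProfParser("Ada Lovelace").getInfo()
#   if info:
#       num_ratings, quality, easiness = info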
| apache-2.0 |
luhanhan/horizon | openstack_dashboard/test/test_data/heat_data.py | 50 | 11072 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heatclient.v1 import resource_types
from heatclient.v1 import services
from heatclient.v1 import stacks
from openstack_dashboard.test.test_data import utils
# A slightly hacked up copy of a sample cloudformation template for testing.
TEMPLATE = """
{
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "AWS CloudFormation Sample Template.",
"Parameters": {
"KeyName": {
"Description": "Name of an EC2 Key Pair to enable SSH access to the instances",
"Type": "String"
},
"InstanceType": {
"Description": "WebServer EC2 instance type",
"Type": "String",
"Default": "m1.small",
"AllowedValues": [
"m1.tiny",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge"
],
"ConstraintDescription": "must be a valid EC2 instance type."
},
"DBName": {
"Default": "wordpress",
"Description": "The WordPress database name",
"Type": "String",
"MinLength": "1",
"MaxLength": "64",
"AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*",
"ConstraintDescription": "must begin with a letter and..."
},
"DBUsername": {
"Default": "admin",
"NoEcho": "true",
"Description": "The WordPress database admin account username",
"Type": "String",
"MinLength": "1",
"MaxLength": "16",
"AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*",
"ConstraintDescription": "must begin with a letter and..."
},
"DBPassword": {
"Default": "admin",
"NoEcho": "true",
"Description": "The WordPress database admin account password",
"Type": "String",
"MinLength": "1",
"MaxLength": "41",
"AllowedPattern": "[a-zA-Z0-9]*",
"ConstraintDescription": "must contain only alphanumeric characters."
},
"DBRootPassword": {
"Default": "admin",
"NoEcho": "true",
"Description": "Root password for MySQL",
"Type": "String",
"MinLength": "1",
"MaxLength": "41",
"AllowedPattern": "[a-zA-Z0-9]*",
"ConstraintDescription": "must contain only alphanumeric characters."
},
"LinuxDistribution": {
"Default": "F17",
"Description": "Distribution of choice",
"Type": "String",
"AllowedValues": [
"F18",
"F17",
"U10",
"RHEL-6.1",
"RHEL-6.2",
"RHEL-6.3"
]
},
"Network": {
"Type": "String",
"CustomConstraint": "neutron.network"
}
},
"Mappings": {
"AWSInstanceType2Arch": {
"m1.tiny": {
"Arch": "32"
},
"m1.small": {
"Arch": "64"
},
"m1.medium": {
"Arch": "64"
},
"m1.large": {
"Arch": "64"
},
"m1.xlarge": {
"Arch": "64"
}
},
"DistroArch2AMI": {
"F18": {
"32": "F18-i386-cfntools",
"64": "F18-x86_64-cfntools"
},
"F17": {
"32": "F17-i386-cfntools",
"64": "F17-x86_64-cfntools"
},
"U10": {
"32": "U10-i386-cfntools",
"64": "U10-x86_64-cfntools"
},
"RHEL-6.1": {
"32": "rhel61-i386-cfntools",
"64": "rhel61-x86_64-cfntools"
},
"RHEL-6.2": {
"32": "rhel62-i386-cfntools",
"64": "rhel62-x86_64-cfntools"
},
"RHEL-6.3": {
"32": "rhel63-i386-cfntools",
"64": "rhel63-x86_64-cfntools"
}
}
},
"Resources": {
"WikiDatabase": {
"Type": "AWS::EC2::Instance",
"Metadata": {
"AWS::CloudFormation::Init": {
"config": {
"packages": {
"yum": {
"mysql": [],
"mysql-server": [],
"httpd": [],
"wordpress": []
}
},
"services": {
"systemd": {
"mysqld": {
"enabled": "true",
"ensureRunning": "true"
},
"httpd": {
"enabled": "true",
"ensureRunning": "true"
}
}
}
}
}
},
"Properties": {
"ImageId": {
"Fn::FindInMap": [
"DistroArch2AMI",
{
"Ref": "LinuxDistribution"
},
{
"Fn::FindInMap": [
"AWSInstanceType2Arch",
{
"Ref": "InstanceType"
},
"Arch"
]
}
]
},
"InstanceType": {
"Ref": "InstanceType"
},
"KeyName": {
"Ref": "KeyName"
},
"UserData": {
"Fn::Base64": {
"Fn::Join": [
"",
[
"#!/bin/bash -v\n",
"/opt/aws/bin/cfn-init\n"
]
]
}
}
}
}
},
"Outputs": {
"WebsiteURL": {
"Value": {
"Fn::Join": [
"",
[
"http://",
{
"Fn::GetAtt": [
"WikiDatabase",
"PublicIp"
]
},
"/wordpress"
]
]
},
"Description": "URL for Wordpress wiki"
}
}
}
"""
VALIDATE = """
{
"Description": "AWS CloudFormation Sample Template.",
"Parameters": {
"DBUsername": {
"Type": "String",
"Description": "The WordPress database admin account username",
"Default": "admin",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*",
"NoEcho": "true",
"MaxLength": "16",
"ConstraintDescription": "must begin with a letter and..."
},
"LinuxDistribution": {
"Default": "F17",
"Type": "String",
"Description": "Distribution of choice",
"AllowedValues": [
"F18",
"F17",
"U10",
"RHEL-6.1",
"RHEL-6.2",
"RHEL-6.3"
]
},
"DBRootPassword": {
"Type": "String",
"Description": "Root password for MySQL",
"Default": "admin",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z0-9]*",
"NoEcho": "true",
"MaxLength": "41",
"ConstraintDescription": "must contain only alphanumeric characters."
},
"KeyName": {
"Type": "String",
"Description": "Name of an EC2 Key Pair to enable SSH access to the instances"
},
"DBName": {
"Type": "String",
"Description": "The WordPress database name",
"Default": "wordpress",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z][a-zA-Z0-9]*",
"MaxLength": "64",
"ConstraintDescription": "must begin with a letter and..."
},
"DBPassword": {
"Type": "String",
"Description": "The WordPress database admin account password",
"Default": "admin",
"MinLength": "1",
"AllowedPattern": "[a-zA-Z0-9]*",
"NoEcho": "true",
"MaxLength": "41",
"ConstraintDescription": "must contain only alphanumeric characters."
},
"InstanceType": {
"Default": "m1.small",
"Type": "String",
"ConstraintDescription": "must be a valid EC2 instance type.",
"Description": "WebServer EC2 instance type",
"AllowedValues": [
"m1.tiny",
"m1.small",
"m1.medium",
"m1.large",
"m1.xlarge"
]
},
"Network": {
"Type": "String",
"CustomConstraint": "neutron.network"
}
}
}
"""
ENVIRONMENT = """
parameters:
InstanceType: m1.xsmall
db_password: verybadpass
KeyName: heat_key
"""
class Environment(object):
def __init__(self, data):
self.data = data
class Template(object):
def __init__(self, data, validate):
self.data = data
self.validate = validate
def data(TEST):
TEST.stacks = utils.TestDataContainer()
TEST.stack_templates = utils.TestDataContainer()
TEST.stack_environments = utils.TestDataContainer()
TEST.resource_types = utils.TestDataContainer()
TEST.heat_services = utils.TestDataContainer()
# Services
service_1 = services.Service(services.ServiceManager(None), {
"status": "up",
"binary": "heat-engine",
"report_interval": 60,
"engine_id": "2f7b5a9b-c50b-4b01-8248-f89f5fb338d1",
"created_at": "2015-02-06T03:23:32.000000",
"hostname": "mrkanag",
"updated_at": "2015-02-20T09:49:52.000000",
"topic": "engine",
"host": "engine-1",
"deleted_at": None,
"id": "1efd7015-5016-4caa-b5c8-12438af7b100"
})
service_2 = services.Service(services.ServiceManager(None), {
"status": "up",
"binary": "heat-engine",
"report_interval": 60,
"engine_id": "2f7b5a9b-c50b-4b01-8248-f89f5fb338d2",
"created_at": "2015-02-06T03:23:32.000000",
"hostname": "mrkanag",
"updated_at": "2015-02-20T09:49:52.000000",
"topic": "engine",
"host": "engine-2",
"deleted_at": None,
"id": "1efd7015-5016-4caa-b5c8-12438af7b100"
})
TEST.heat_services.add(service_1)
TEST.heat_services.add(service_2)
# Data return by heatclient.
TEST.api_resource_types = utils.TestDataContainer()
for i in range(10):
stack_data = {
"description": "No description",
"links": [{
"href": "http://192.168.1.70:8004/v1/"
"051c727ee67040d6a7b7812708485a97/"
"stacks/stack-1211-38/"
"05b4f39f-ea96-4d91-910c-e758c078a089",
"rel": "self"
}],
"parameters": {
'DBUsername': '******',
'InstanceType': 'm1.small',
'AWS::StackId': (
'arn:openstack:heat::2ce287:stacks/teststack/88553ec'),
'DBRootPassword': '******',
'AWS::StackName': "teststack{0}".format(i),
'DBPassword': '******',
'AWS::Region': 'ap-southeast-1',
'DBName': u'wordpress'
},
"stack_status_reason": "Stack successfully created",
"stack_name": "stack-test{0}".format(i),
"creation_time": "2013-04-22T00:11:39Z",
"updated_time": "2013-04-22T00:11:39Z",
"stack_status": "CREATE_COMPLETE",
"id": "05b4f39f-ea96-4d91-910c-e758c078a089{0}".format(i)
}
stack = stacks.Stack(stacks.StackManager(None), stack_data)
TEST.stacks.add(stack)
TEST.stack_templates.add(Template(TEMPLATE, VALIDATE))
TEST.stack_environments.add(Environment(ENVIRONMENT))
# Resource types list
r_type_1 = {
"resource_type": "AWS::CloudFormation::Stack",
"attributes": {},
"properties": {
"Parameters": {
"description":
"The set of parameters passed to this nested stack.",
"immutable": False,
"required": False,
"type": "map",
"update_allowed": True},
"TemplateURL": {
"description": "The URL of a template that specifies"
" the stack to be created as a resource.",
"immutable": False,
"required": True,
"type": "string",
"update_allowed": True},
"TimeoutInMinutes": {
"description": "The length of time, in minutes,"
" to wait for the nested stack creation.",
"immutable": False,
"required": False,
"type": "number",
"update_allowed": True}
}
}
r_type_2 = {
"resource_type": "OS::Heat::CloudConfig",
"attributes": {
"config": {
"description": "The config value of the software config."}
},
"properties": {
"cloud_config": {
"description": "Map representing the cloud-config data"
" structure which will be formatted as YAML.",
"immutable": False,
"required": False,
"type": "map",
"update_allowed": False}
}
}
r_types_list = [r_type_1, r_type_2]
for rt in r_types_list:
r_type = resource_types.ResourceType(
resource_types.ResourceTypeManager(None), rt['resource_type'])
TEST.resource_types.add(r_type)
TEST.api_resource_types.add(rt)
| apache-2.0 |
ketjow4/NOV | Lib/fpformat.py | 322 | 4699 | """General floating point formatting functions.
Functions:
fix(x, digits_behind)
sci(x, digits_behind)
Each takes a number or a string and a number of digits as arguments.
Parameters:
x: number to be formatted; or a string resembling a number
digits_behind: number of digits behind the decimal point
"""
from warnings import warnpy3k
warnpy3k("the fpformat module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import re
__all__ = ["fix","sci","NotANumber"]
# Compiled regular expression to "decode" a number
decoder = re.compile(r'^([-+]?)0*(\d*)((?:\.\d*)?)(([eE][-+]?\d+)?)$')
# \0 the whole thing
# \1 leading sign or empty
# \2 digits left of decimal point
# \3 fraction (empty or begins with point)
# \4 exponent part (empty or begins with 'e' or 'E')
try:
class NotANumber(ValueError):
pass
except TypeError:
NotANumber = 'fpformat.NotANumber'
def extract(s):
"""Return (sign, intpart, fraction, expo) or raise an exception:
sign is '+' or '-'
intpart is 0 or more digits beginning with a nonzero
fraction is 0 or more digits
expo is an integer"""
res = decoder.match(s)
if res is None: raise NotANumber, s
sign, intpart, fraction, exppart = res.group(1,2,3,4)
if sign == '+': sign = ''
if fraction: fraction = fraction[1:]
if exppart: expo = int(exppart[1:])
else: expo = 0
return sign, intpart, fraction, expo
def unexpo(intpart, fraction, expo):
"""Remove the exponent by changing intpart and fraction."""
if expo > 0: # Move the point left
f = len(fraction)
intpart, fraction = intpart + fraction[:expo], fraction[expo:]
if expo > f:
intpart = intpart + '0'*(expo-f)
elif expo < 0: # Move the point right
i = len(intpart)
intpart, fraction = intpart[:expo], intpart[expo:] + fraction
if expo < -i:
fraction = '0'*(-expo-i) + fraction
return intpart, fraction
def roundfrac(intpart, fraction, digs):
"""Round or extend the fraction to size digs."""
f = len(fraction)
if f <= digs:
return intpart, fraction + '0'*(digs-f)
i = len(intpart)
if i+digs < 0:
return '0'*-digs, ''
total = intpart + fraction
nextdigit = total[i+digs]
if nextdigit >= '5': # Hard case: increment last digit, may have carry!
n = i + digs - 1
while n >= 0:
if total[n] != '9': break
n = n-1
else:
total = '0' + total
i = i+1
n = 0
total = total[:n] + chr(ord(total[n]) + 1) + '0'*(len(total)-n-1)
intpart, fraction = total[:i], total[i:]
if digs >= 0:
return intpart, fraction[:digs]
else:
return intpart[:digs] + '0'*-digs, ''
def fix(x, digs):
"""Format x as [-]ddd.ddd with 'digs' digits after the point
and at least one digit before.
If digs <= 0, the point is suppressed."""
if type(x) != type(''): x = repr(x)
try:
sign, intpart, fraction, expo = extract(x)
except NotANumber:
return x
intpart, fraction = unexpo(intpart, fraction, expo)
intpart, fraction = roundfrac(intpart, fraction, digs)
while intpart and intpart[0] == '0': intpart = intpart[1:]
if intpart == '': intpart = '0'
if digs > 0: return sign + intpart + '.' + fraction
else: return sign + intpart
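# Worked examples for fix() (a sketch, not in the original module):
#
#   fix(1.25, 1)    # -> '1.3'  (rounding carries into the last kept digit)
#   fix('1e3', 0)   # -> '1000' (string input accepted, point suppressed)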
def sci(x, digs):
"""Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
and exactly one digit before.
If digs is <= 0, one digit is kept and the point is suppressed."""
if type(x) != type(''): x = repr(x)
sign, intpart, fraction, expo = extract(x)
if not intpart:
while fraction and fraction[0] == '0':
fraction = fraction[1:]
expo = expo - 1
if fraction:
intpart, fraction = fraction[0], fraction[1:]
expo = expo - 1
else:
intpart = '0'
else:
expo = expo + len(intpart) - 1
intpart, fraction = intpart[0], intpart[1:] + fraction
digs = max(0, digs)
intpart, fraction = roundfrac(intpart, fraction, digs)
if len(intpart) > 1:
intpart, fraction, expo = \
intpart[0], intpart[1:] + fraction[:-1], \
expo + len(intpart) - 1
s = sign + intpart
if digs > 0: s = s + '.' + fraction
e = repr(abs(expo))
e = '0'*(3-len(e)) + e
if expo < 0: e = '-' + e
else: e = '+' + e
return s + 'e' + e
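# Worked example for sci() (a sketch, not in the original module):
#
#   sci(1234.5, 2)  # -> '1.23e+003' (exactly one digit before the point)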
def test():
"""Interactive test run."""
try:
while 1:
x, digs = input('Enter (x, digs): ')
print x, fix(x, digs), sci(x, digs)
except (EOFError, KeyboardInterrupt):
pass
| gpl-3.0 |
detiber/lib_openshift | test/test_wrapper.py | 2 | 3097 | # coding: utf-8
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import pyfakefs.fake_filesystem_unittest as fake_fs_unittest
import json
from nose.tools import assert_raises, assert_is_instance
from lib_openshift import Wrapper,WrapperException
class TestWrapper(fake_fs_unittest.TestCase):
""" Wrapper unit test stubs """
def setUp(self):
self.setUpPyfakefs()
def tearDown(self):
pass
def test_wrapper_no_args_kubeconfig(self):
kubeconfig = """
apiVersion: v1
clusters:
- cluster:
server: http://localhost:8080
name: local-server
contexts:
- context:
cluster: local-server
namespace: the-right-prefix
user: myself
name: default-context
current-context: default-context
kind: Config
preferences: {}
users:
- name: myself
user:
password: secret
username: admin"""
self.fs.CreateFile(os.path.expanduser('~/.kube/config'), contents=kubeconfig)
self.assertTrue(os.path.exists(os.path.expanduser('~/.kube/config')))
#wrapper = Wrapper()
#TODO: finish this test
def test_wrapper_no_args_no_kubeconfig(self):
self.assertFalse(os.path.exists(os.path.expanduser('~/.kube/config')))
assert_raises(WrapperException, Wrapper)
try:
Wrapper()
except WrapperException as e:
assert_is_instance(json.loads(str(e)), dict)
def test_wrapper_invalid_auth_args(self):
self.assertFalse(os.path.exists(os.path.expanduser('~/.kube/config')))
assert_raises(WrapperException, Wrapper, username="blue")
assert_raises(WrapperException, Wrapper, password="green")
assert_raises(WrapperException, Wrapper, client_cert="here")
assert_raises(WrapperException, Wrapper, client_key="there")
assert_raises(WrapperException, Wrapper, username="blue", token="orange")
assert_raises(WrapperException, Wrapper, password="green", token="orange")
assert_raises(WrapperException, Wrapper, username="green", client_cert="here")
assert_raises(WrapperException, Wrapper, username="green", client_key="here")
assert_raises(WrapperException, Wrapper, password="green", client_cert="here")
assert_raises(WrapperException, Wrapper, password="green", client_key="here")
assert_raises(WrapperException, Wrapper, token="green", client_cert="here")
assert_raises(WrapperException, Wrapper, token="green", client_key="here")
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
cybertk/depot_tools | third_party/pylint/checkers/classes.py | 19 | 23939 | # Copyright (c) 2003-2011 LOGILAB S.A. (Paris, FRANCE).
# http://www.logilab.fr/ -- mailto:[email protected]
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""classes checker for Python code
"""
from __future__ import generators
from logilab import astng
from logilab.astng import YES, Instance, are_exclusive
from pylint.interfaces import IASTNGChecker
from pylint.checkers import BaseChecker
from pylint.checkers.utils import PYMETHODS, overrides_a_method, check_messages
def class_is_abstract(node):
"""return true if the given class node should be considered as an abstract
class
"""
for method in node.methods():
if method.parent.frame() is node:
if method.is_abstract(pass_is_abstract=False):
return True
return False
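# Illustrative input (an added example, not from the original file): a class
# is treated as abstract when one of its own methods is abstract in the
# ASTNG sense, e.g. a body that raises NotImplementedError:
#
#   class Base(object):
#       def run(self):
#           raise NotImplementedError
#
# class_is_abstract() returns True for the ASTNG node built from Base.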
MSGS = {
'F0202': ('Unable to check methods signature (%s / %s)',
              'Used when PyLint has been unable to check method signature \
              compatibility for an unexpected reason. Please report this \
              if the message doesn\'t make sense to you.'),
    'E0202': ('An attribute assigned in %s line %s hides this method',
'Used when a class defines a method which is hidden by an '
'instance attribute from an ancestor class or set by some '
'client code.'),
'E0203': ('Access to member %r before its definition line %s',
'Used when an instance member is accessed before it\'s actually\
assigned.'),
'W0201': ('Attribute %r defined outside __init__',
'Used when an instance attribute is defined outside the __init__\
method.'),
'W0212': ('Access to a protected member %s of a client class', # E0214
'Used when a protected member (i.e. class member with a name \
              beginning with an underscore) is accessed outside the class or a \
descendant of the class where it\'s defined.'),
'E0211': ('Method has no argument',
'Used when a method which should have the bound instance as \
first argument has no argument defined.'),
'E0213': ('Method should have "self" as first argument',
              'Used when a method has a first argument named something other\
              than "self". This is considered an error since this convention\
              is so common that you shouldn\'t break it!'),
'C0202': ('Class method should have %s as first argument', # E0212
'Used when a class method has an attribute different than "cls"\
as first argument, to easily differentiate them from regular \
instance methods.'),
'C0203': ('Metaclass method should have "mcs" as first argument', # E0214
              'Used when a metaclass method has a first argument named \
              something other than "mcs".'),
'W0211': ('Static method with %r as first argument',
'Used when a static method has "self" or "cls" as first argument.'
),
'R0201': ('Method could be a function',
'Used when a method doesn\'t use its bound instance, and so could\
be written as a function.'
),
'E0221': ('Interface resolved to %s is not a class',
'Used when a class claims to implement an interface which is not \
a class.'),
'E0222': ('Missing method %r from %s interface',
'Used when a method declared in an interface is missing from a \
class implementing this interface'),
    'W0221': ('Number of arguments differs from %s method',
'Used when a method has a different number of arguments than in \
the implemented interface or in an overridden method.'),
'W0222': ('Signature differs from %s method',
'Used when a method signature is different than in the \
implemented interface or in an overridden method.'),
'W0223': ('Method %r is abstract in class %r but is not overridden',
'Used when an abstract method (i.e. raise NotImplementedError) is \
              not overridden in a concrete class.'
),
'F0220': ('failed to resolve interfaces implemented by %s (%s)', # W0224
              'Used when PyLint has failed to find the interfaces implemented \
              by a class.'),
'W0231': ('__init__ method from base class %r is not called',
              'Used when an ancestor class has an __init__ method \
              which is not called by a derived class.'),
'W0232': ('Class has no __init__ method',
'Used when a class has no __init__ method, neither its parent \
classes.'),
'W0233': ('__init__ method from a non direct base class %r is called',
'Used when an __init__ method is called on a class which is not \
in the direct ancestors for the analysed class.'),
}
class ClassChecker(BaseChecker):
"""checks for :
* methods without self as first argument
* overridden methods signature
* access only to existent members via self
* attributes not defined in the __init__ method
* supported interfaces implementation
* unreachable code
"""
__implements__ = (IASTNGChecker,)
# configuration section name
name = 'classes'
# messages
msgs = MSGS
priority = -2
# configuration options
options = (('ignore-iface-methods',
{'default' : (#zope interface
'isImplementedBy', 'deferred', 'extends', 'names',
'namesAndDescriptions', 'queryDescriptionFor', 'getBases',
'getDescriptionFor', 'getDoc', 'getName', 'getTaggedValue',
'getTaggedValueTags', 'isEqualOrExtendedBy', 'setTaggedValue',
'isImplementedByInstancesOf',
# twisted
'adaptWith',
# logilab.common interface
'is_implemented_by'),
'type' : 'csv',
'metavar' : '<method names>',
'help' : 'List of interface methods to ignore, \
separated by a comma. This is used for instance to not check methods defines \
in Zope\'s Interface base class.'}
),
('defining-attr-methods',
{'default' : ('__init__', '__new__', 'setUp'),
'type' : 'csv',
'metavar' : '<method names>',
'help' : 'List of method names used to declare (i.e. assign) \
instance attributes.'}
),
('valid-classmethod-first-arg',
{'default' : ('cls',),
'type' : 'csv',
'metavar' : '<argument names>',
'help' : 'List of valid names for the first argument in \
a class method.'}
),
)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self._accessed = []
self._first_attrs = []
self._meth_could_be_func = None
def visit_class(self, node):
"""init visit variable _accessed and check interfaces
"""
self._accessed.append({})
self._check_bases_classes(node)
self._check_interfaces(node)
# if not an interface, exception, metaclass
if node.type == 'class':
try:
node.local_attr('__init__')
except astng.NotFoundError:
self.add_message('W0232', args=node, node=node)
@check_messages('E0203', 'W0201')
def leave_class(self, cnode):
"""close a class node:
check that instance attributes are defined in __init__ and check
access to existent members
"""
# check access to existent members on non metaclass classes
accessed = self._accessed.pop()
if cnode.type != 'metaclass':
self._check_accessed_members(cnode, accessed)
# checks attributes are defined in an allowed method such as __init__
if 'W0201' not in self.active_msgs:
return
defining_methods = self.config.defining_attr_methods
for attr, nodes in cnode.instance_attrs.items():
nodes = [n for n in nodes if not
isinstance(n.statement(), (astng.Delete, astng.AugAssign))]
if not nodes:
continue # error detected by typechecking
attr_defined = False
            # check whether the attribute is assigned in one of the defining methods
for node in nodes:
if node.frame().name in defining_methods:
attr_defined = True
if not attr_defined:
# check attribute is defined in a parent's __init__
for parent in cnode.instance_attr_ancestors(attr):
attr_defined = False
                    # check whether the attribute is assigned in one of the parent's defining methods
for node in parent.instance_attrs[attr]:
if node.frame().name in defining_methods:
attr_defined = True
if attr_defined:
# we're done :)
break
else:
# check attribute is defined as a class attribute
try:
cnode.local_attr(attr)
except astng.NotFoundError:
self.add_message('W0201', args=attr, node=node)
def visit_function(self, node):
"""check method arguments, overriding"""
# ignore actual functions
if not node.is_method():
return
klass = node.parent.frame()
self._meth_could_be_func = True
# check first argument is self if this is actually a method
self._check_first_arg_for_type(node, klass.type == 'metaclass')
if node.name == '__init__':
self._check_init(node)
return
# check signature if the method overloads inherited method
for overridden in klass.local_attr_ancestors(node.name):
# get astng for the searched method
try:
meth_node = overridden[node.name]
except KeyError:
# we have found the method but it's not in the local
# dictionary.
# This may happen with astng build from living objects
continue
if not isinstance(meth_node, astng.Function):
continue
self._check_signature(node, meth_node, 'overridden')
break
# check if the method overload an attribute
try:
overridden = klass.instance_attr(node.name)[0] # XXX
args = (overridden.root().name, overridden.fromlineno)
self.add_message('E0202', args=args, node=node)
except astng.NotFoundError:
pass
def leave_function(self, node):
"""on method node, check if this method couldn't be a function
ignore class, static and abstract methods, initializer,
methods overridden from a parent class and any
kind of method defined in an interface for this warning
"""
if node.is_method():
if node.args.args is not None:
self._first_attrs.pop()
if 'R0201' not in self.active_msgs:
return
class_node = node.parent.frame()
if (self._meth_could_be_func and node.type == 'method'
and not node.name in PYMETHODS
and not (node.is_abstract() or
overrides_a_method(class_node, node.name))
and class_node.type != 'interface'):
self.add_message('R0201', node=node)
def visit_getattr(self, node):
"""check if the getattr is an access to a class member
if so, register it. Also check for access to protected
class member from outside its class (but ignore __special__
methods)
"""
attrname = node.attrname
if self._first_attrs and isinstance(node.expr, astng.Name) and \
node.expr.name == self._first_attrs[-1]:
self._accessed[-1].setdefault(attrname, []).append(node)
return
if 'W0212' not in self.active_msgs:
return
if attrname[0] == '_' and not attrname == '_' and not (
attrname.startswith('__') and attrname.endswith('__')):
# XXX move this in a reusable function
klass = node.frame()
while klass is not None and not isinstance(klass, astng.Class):
if klass.parent is None:
klass = None
else:
klass = klass.parent.frame()
# XXX infer to be more safe and less dirty ??
# in classes, check we are not getting a parent method
# through the class object or through super
callee = node.expr.as_string()
if klass is None or not (callee == klass.name or
callee in klass.basenames
or (isinstance(node.expr, astng.CallFunc)
and isinstance(node.expr.func, astng.Name)
and node.expr.func.name == 'super')):
self.add_message('W0212', node=node, args=attrname)
def visit_name(self, node):
"""check if the name handle an access to a class member
if so, register it
"""
if self._first_attrs and (node.name == self._first_attrs[-1] or
not self._first_attrs[-1]):
self._meth_could_be_func = False
def _check_accessed_members(self, node, accessed):
"""check that accessed members are defined"""
# XXX refactor, probably much simpler now that E0201 is in type checker
for attr, nodes in accessed.items():
# deactivate "except doesn't do anything", that's expected
# pylint: disable=W0704
# is it a class attribute ?
try:
node.local_attr(attr)
# yes, stop here
continue
except astng.NotFoundError:
pass
# is it an instance attribute of a parent class ?
try:
node.instance_attr_ancestors(attr).next()
# yes, stop here
continue
except StopIteration:
pass
# is it an instance attribute ?
try:
defstmts = node.instance_attr(attr)
except astng.NotFoundError:
pass
else:
if len(defstmts) == 1:
defstmt = defstmts[0]
# check that if the node is accessed in the same method as
# it's defined, it's accessed after the initial assignment
frame = defstmt.frame()
lno = defstmt.fromlineno
for _node in nodes:
if _node.frame() is frame and _node.fromlineno < lno \
and not are_exclusive(_node.statement(), defstmt, ('AttributeError', 'Exception', 'BaseException')):
self.add_message('E0203', node=_node,
args=(attr, lno))
def _check_first_arg_for_type(self, node, metaclass=0):
"""check the name of first argument, expect:
* 'self' for a regular method
* 'cls' for a class method
* 'mcs' for a metaclass
* not one of the above for a static method
"""
# don't care about functions with unknown argument (builtins)
if node.args.args is None:
return
first_arg = node.args.args and node.argnames()[0]
self._first_attrs.append(first_arg)
first = self._first_attrs[-1]
# static method
if node.type == 'staticmethod':
if first_arg in ('self', 'cls', 'mcs'):
self.add_message('W0211', args=first, node=node)
self._first_attrs[-1] = None
# class / regular method with no args
elif not node.args.args:
self.add_message('E0211', node=node)
# metaclass method
elif metaclass:
if first != 'mcs':
self.add_message('C0203', node=node)
# class method
elif node.type == 'classmethod':
if first not in self.config.valid_classmethod_first_arg:
if len(self.config.valid_classmethod_first_arg) == 1:
valid = repr(self.config.valid_classmethod_first_arg[0])
else:
valid = ', '.join(
repr(v)
for v in self.config.valid_classmethod_first_arg[:-1])
valid = '%s or %r' % (
valid, self.config.valid_classmethod_first_arg[-1])
self.add_message('C0202', args=valid, node=node)
# regular method without self as argument
elif first != 'self':
self.add_message('E0213', node=node)
def _check_bases_classes(self, node):
"""check that the given class node implements abstract methods from
base classes
"""
# check if this class abstract
if class_is_abstract(node):
return
for method in node.methods():
owner = method.parent.frame()
if owner is node:
continue
# owner is not this class, it must be a parent class
# check that the ancestor's method is not abstract
if method.is_abstract(pass_is_abstract=False):
self.add_message('W0223', node=node,
args=(method.name, owner.name))
def _check_interfaces(self, node):
"""check that the given class node really implements declared
interfaces
"""
e0221_hack = [False]
def iface_handler(obj):
"""filter interface objects, it should be classes"""
if not isinstance(obj, astng.Class):
e0221_hack[0] = True
self.add_message('E0221', node=node,
args=(obj.as_string(),))
return False
return True
ignore_iface_methods = self.config.ignore_iface_methods
try:
for iface in node.interfaces(handler_func=iface_handler):
for imethod in iface.methods():
name = imethod.name
if name.startswith('_') or name in ignore_iface_methods:
# don't check method beginning with an underscore,
# usually belonging to the interface implementation
continue
# get class method astng
try:
method = node_method(node, name)
except astng.NotFoundError:
self.add_message('E0222', args=(name, iface.name),
node=node)
continue
# ignore inherited methods
if method.parent.frame() is not node:
continue
# check signature
self._check_signature(method, imethod,
'%s interface' % iface.name)
except astng.InferenceError:
if e0221_hack[0]:
return
implements = Instance(node).getattr('__implements__')[0]
assignment = implements.parent
assert isinstance(assignment, astng.Assign)
# assignment.expr can be a Name or a Tuple or whatever.
# Use as_string() for the message
# FIXME: in case of multiple interfaces, find which one could not
# be resolved
self.add_message('F0220', node=implements,
args=(node.name, assignment.value.as_string()))
def _check_init(self, node):
"""check that the __init__ method call super or ancestors'__init__
method
"""
if not set(('W0231', 'W0233')) & self.active_msgs:
return
klass_node = node.parent.frame()
to_call = _ancestors_to_call(klass_node)
not_called_yet = dict(to_call)
for stmt in node.nodes_of_class(astng.CallFunc):
expr = stmt.func
if not isinstance(expr, astng.Getattr) \
or expr.attrname != '__init__':
continue
# skip the test if using super
if isinstance(expr.expr, astng.CallFunc) and \
isinstance(expr.expr.func, astng.Name) and \
expr.expr.func.name == 'super':
return
try:
klass = expr.expr.infer().next()
if klass is YES:
continue
try:
del not_called_yet[klass]
except KeyError:
if klass not in to_call:
self.add_message('W0233', node=expr, args=klass.name)
except astng.InferenceError:
continue
for klass in not_called_yet.keys():
if klass.name == 'object':
continue
self.add_message('W0231', args=klass.name, node=node)
def _check_signature(self, method1, refmethod, class_type):
"""check that the signature of the two given methods match
class_type is in 'class', 'interface'
"""
if not (isinstance(method1, astng.Function)
and isinstance(refmethod, astng.Function)):
self.add_message('F0202', args=(method1, refmethod), node=method1)
return
# don't care about functions with unknown argument (builtins)
if method1.args.args is None or refmethod.args.args is None:
return
# if we use *args, **kwargs, skip the below checks
if method1.args.vararg or method1.args.kwarg:
return
if len(method1.args.args) != len(refmethod.args.args):
self.add_message('W0221', args=class_type, node=method1)
elif len(method1.args.defaults) < len(refmethod.args.defaults):
self.add_message('W0222', args=class_type, node=method1)
def _ancestors_to_call(klass_node, method='__init__'):
"""return a dictionary where keys are the list of base classes providing
the queried method, and so that should/may be called from the method node
"""
to_call = {}
for base_node in klass_node.ancestors(recurs=False):
try:
base_node.local_attr(method)
to_call[base_node] = 1
except astng.NotFoundError:
continue
return to_call
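# Illustrative result (a sketch, not from the original file): for
# `class C(A, B)` where both direct bases define __init__,
# _ancestors_to_call returns {<Class A>: 1, <Class B>: 1}; indirect
# ancestors are not included since recurs=False.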
def node_method(node, method_name):
"""get astng for <method_name> on the given class node, ensuring it
is a Function node
"""
for n in node.local_attr(method_name):
if isinstance(n, astng.Function):
return n
raise astng.NotFoundError(method_name)
def register(linter):
"""required method to auto register this checker """
linter.register_checker(ClassChecker(linter))
| bsd-3-clause |
Lukc/ospace-lukc | client-pygame/lib/osci/dialog/NewMessageDlg.py | 1 | 7294 | #
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import pygameui as ui
from osci import gdata, res, client
from ige.ospace.Const import *
import ige, string
class NewMessageDlg:
def __init__(self, app):
self.app = app
self.createUI()
def display(self, caller, objID, objType, forum, msgID = None):
self.caller = caller
self.msgSpec = gdata.mailboxSpec[objType, gdata.mailboxStripLang(forum)]
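		# Inferred shape (an observation added here, not from the original
		# source): msgSpec is a (label, replyMode) pair where replyMode is
		# one of None (no reply), "forum", "sender", or another truthy
		# marker handled by the final branch below.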
messages = client.get(objID)._messages
		if self.msgSpec[1] is None:
# cannot reply
return
elif self.msgSpec[1] == "forum":
self.recipientObjID = [objID]
self.recipientForum = forum
			if msgID is not None:
self.topic = messages[msgID]["topic"]
else:
self.topic = ""
elif self.msgSpec[1] == "sender" and msgID != None:
message = messages[msgID]
self.recipientObjID = [message["senderID"]]
self.recipientForum = "INBOX"
self.topic = message["topic"]
if self.topic[:4] != "Re: ":
self.topic = "Re: %s" % self.topic
elif self.msgSpec[1]:
self.recipientObjID = []
self.recipientForum = "INBOX"
self.topic = ""
self.show()
self.win.show()
# register for updates
if self not in gdata.updateDlgs:
gdata.updateDlgs.append(self)
def hide(self):
self.win.setStatus(_("Ready."))
self.win.hide()
# unregister updates
if self in gdata.updateDlgs:
gdata.updateDlgs.remove(self)
def update(self):
pass
# self.show()
def show(self):
if len(self.recipientObjID) > 0:
text = ""
for objId in self.recipientObjID:
recipient = client.get(objId)
text = u"%s, %s" % (text, _("%s / %s") % (recipient.name, _(self.msgSpec[0])))
self.win.vRecipient.text = text[2:]
self.win.vRecipient.action = None
else:
self.win.vRecipient.text = _("[Click to select]")
self.win.vRecipient.action = "onSelectContact"
if self.topic:
self.win.vTopic.text = self.topic
self.win.vTopic.enabled = 0
else:
self.win.vTopic.text = ""
self.win.vTopic.enabled = 1
self.win.vText.text = [""]
def onSelectContact(self, widget, action, data):
player = client.getPlayer()
items = []
for contactID in player.diplomacyRels:
contact = client.get(contactID)
item = ui.Item(contact.name, tRecipientID = contactID)
items.append(item)
self.cwin.vContacts.items = items
self.cwin.vContacts.itemsChanged()
self.cwin.show()
def onContactCancel(self, widget, action, data):
self.cwin.hide()
def onContactSelected(self, widget, action, data):
self.recipientObjID = []
text = ""
for item in self.cwin.vContacts.selection:
self.recipientObjID.append(item.tRecipientID)
recipient = client.get(item.tRecipientID)
text = u"%s, %s" % (text, _("%s / %s") % (recipient.name, _(self.msgSpec[0])))
self.win.vRecipient.text = text[2:]
self.cwin.hide()
def onCancel(self, widget, action, data):
self.hide()
def onSend(self, widget, action, data):
if not self.recipientObjID:
self.win.setStatus(_("Select a recipient, please."))
return
if not self.win.vTopic.text:
self.win.setStatus(_("Specify a topic, please."))
return
if self.win.vText.text == [""]:
self.win.setStatus(_("Type a message, please."))
return
try:
self.win.setStatus(_("Executing SEND MESSAGE command..."))
message = {
"forum": self.recipientForum,
"topic": self.win.vTopic.text,
"text": string.join(self.win.vText.text, "\n"),
}
# send message to all recipients
for objID in self.recipientObjID:
client.cmdProxy.sendMsg(objID, message)
# put message into outbox if forum is INBOX
if self.recipientForum == "INBOX":
recipients = ""
for objID in self.recipientObjID:
recipient = client.get(objID)
recipients = u"%s, %s" % (recipients, recipient.name)
message = {
"forum": "OUTBOX",
"topic": "To %s - %s" % (
recipients[2:],
self.win.vTopic.text,
),
"text": _("To %s / %s:\n\n%s") % (
recipients[2:],
_(self.msgSpec[0]),
"\n".join(self.win.vText.text),
)
}
client.cmdProxy.sendMsg(client.getPlayerID(), message)
self.win.setStatus(_("Command has been executed."))
except ige.GameException, e:
self.win.setStatus(e.args[0])
return
client.getMessages()
self.hide()
self.caller.update()
def createUI(self):
w, h = gdata.scrnSize
width = 764 # 38 * 20 + 4
height = 464 # 23 * 20 + 4
self.win = ui.Window(self.app,
modal = 1,
escKeyClose = 1,
movable = 0,
title = _("New message"),
rect = ui.Rect((w - width) / 2, (h - height) / 2, width, height),
layoutManager = ui.SimpleGridLM(),
tabChange = True
)
self.win.subscribeAction('*', self)
# headers
ui.Label(self.win, layout = (0, 0, 5, 1), text = _("Recipient"), align = ui.ALIGN_W)
ui.ActiveLabel(self.win, layout = (5, 0, 33, 1), id = "vRecipient", align = ui.ALIGN_W)
ui.Label(self.win, layout = (0, 1, 5, 1), text = _("Subject"), align = ui.ALIGN_W)
ui.Entry(self.win, layout = (5, 1, 33, 1), id = "vTopic", align = ui.ALIGN_W, orderNo = 1)
ui.Title(self.win, layout = (0, 2, 38, 1), text = _("Message"),
font = "normal-bold", align = ui.ALIGN_W)
s = ui.Scrollbar(self.win, layout = (37, 3, 1, 18))
t = ui.Text(self.win, layout = (0, 3, 37, 18), id = "vText", orderNo = 2)
t.attachVScrollbar(s)
# info
ui.Title(self.win, layout = (0, 21, 28, 1), id = 'vStatusBar', align = ui.ALIGN_W)
ui.TitleButton(self.win, layout = (28, 21, 5, 1), text = _("Cancel"), action = 'onCancel')
ui.TitleButton(self.win, layout = (33, 21, 5, 1), text = _("Send"), action = 'onSend')
# status bar
#self.win.statusBar = self.win.vStatusBar
#
# contact window
#
width = 304 # 15 * 20 + 4
height = 264 # 13 * 20 + 4
self.cwin = ui.Window(self.app,
modal = 1,
escKeyClose = 1,
titleOnly = 0,
movable = 0,
title = _("Select recipient"),
rect = ui.Rect((w - width) / 2, (h - height) / 2, width, height),
layoutManager = ui.SimpleGridLM(),
)
self.cwin.subscribeAction('*', self)
# rename
ui.Listbox(self.cwin, layout = (0, 0, 15, 11), id = 'vContacts', columnLabels = 0,
columns = ((None, 'text', 0, ui.ALIGN_W),), multiselection = 1, sortedBy=('text', 1))
# status bar + submit/cancel
ui.TitleButton(self.cwin, layout = (10, 11, 5, 1), text = _("Select"), action = 'onContactSelected')
ui.TitleButton(self.cwin, layout = (5, 11, 5, 1), text = _("Cancel"), action = 'onContactCancel')
ui.Title(self.cwin, id = 'vStatusBar', layout = (0, 11, 5, 1), align = ui.ALIGN_W)
#self.cwin.statusBar = self.cwin.vStatusBar | gpl-2.0 |
thinkopensolutions/server-tools | module_prototyper/tests/test_prototype_module_export.py | 2 | 3093 | # -*- coding: utf-8 -*-
# #############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2010 - 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo.tests import common
import zipfile
import StringIO
class TestPrototypeModuleExport(common.TransactionCase):
def setUp(self):
super(TestPrototypeModuleExport, self).setUp()
self.main_model = self.env['module_prototyper.module.export']
self.prototype_model = self.env['module_prototyper']
self.module_category_model = self.env[
'ir.module.category'
]
self.prototype = self.prototype_model.create({
'name': 't_name',
'category_id': self.module_category_model.browse(1).id,
'human_name': 't_human_name',
'summary': 't_summary',
'description': 't_description',
'author': 't_author',
'maintainer': 't_maintainer',
'website': 't_website',
})
self.exporter = self.main_model.create({'name': 't_name'})
def test_action_export_assert_for_wrong_active_model(self):
"""Test if the assertion raises."""
exporter = self.main_model.with_context(
active_model='t_active_model'
).create({})
self.assertRaises(
AssertionError,
exporter.action_export,
[exporter.id],
)
def test_action_export_update_wizard(self):
"""Test if the wizard is updated during the process."""
exporter = self.main_model.with_context(
active_model=self.prototype_model._name,
active_id=self.prototype.id
).create({})
exporter.action_export(exporter.id)
self.assertEqual(exporter.state, 'get')
self.assertEqual(exporter.name, '%s.zip' % (self.prototype.name,))
def test_zip_files_returns_tuple(self):
"""Test the method return of the method that generate the zip file."""
ret = self.main_model.zip_files(self.exporter, [self.prototype])
self.assertIsInstance(ret, tuple)
self.assertIsInstance(
ret.zip_file, zipfile.ZipFile
)
self.assertIsInstance(
ret.stringIO, StringIO.StringIO
)
| agpl-3.0 |
datapythonista/pandas | pandas/tests/io/test_compression.py | 3 | 8199 | import io
import os
from pathlib import Path
import subprocess
import sys
import textwrap
import time
import pytest
import pandas as pd
import pandas._testing as tm
import pandas.io.common as icom
@pytest.mark.parametrize(
"obj",
[
pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
),
pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_compression_size(obj, method, compression_only):
with tm.ensure_clean() as path:
getattr(obj, method)(path, compression=compression_only)
compressed_size = os.path.getsize(path)
getattr(obj, method)(path, compression=None)
uncompressed_size = os.path.getsize(path)
assert uncompressed_size > compressed_size
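# For reference, a minimal sketch of the API exercised above (illustrative
# only, not part of the test suite; assumes a writable local path):
#
#   df = pd.DataFrame({"X": [1.0, 2.0]})
#   df.to_csv("out.csv.gz", compression="gzip")   # explicit codec
#   df.to_csv("out.csv.gz")                       # default "infer" picks gzip from the extension
#   round_tripped = pd.read_csv("out.csv.gz", index_col=0)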
@pytest.mark.parametrize(
"obj",
[
pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
),
pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
],
)
@pytest.mark.parametrize("method", ["to_csv", "to_json"])
def test_compression_size_fh(obj, method, compression_only):
with tm.ensure_clean() as path:
with icom.get_handle(path, "w", compression=compression_only) as handles:
getattr(obj, method)(handles.handle)
assert not handles.handle.closed
compressed_size = os.path.getsize(path)
with tm.ensure_clean() as path:
with icom.get_handle(path, "w", compression=None) as handles:
getattr(obj, method)(handles.handle)
assert not handles.handle.closed
uncompressed_size = os.path.getsize(path)
assert uncompressed_size > compressed_size
@pytest.mark.parametrize(
"write_method, write_kwargs, read_method",
[
("to_csv", {"index": False}, pd.read_csv),
("to_json", {}, pd.read_json),
("to_pickle", {}, pd.read_pickle),
],
)
def test_dataframe_compression_defaults_to_infer(
write_method, write_kwargs, read_method, compression_only
):
# GH22004
input = pd.DataFrame([[1.0, 0, -4], [3.4, 5, 2]], columns=["X", "Y", "Z"])
extension = icom._compression_to_extension[compression_only]
with tm.ensure_clean("compressed" + extension) as path:
getattr(input, write_method)(path, **write_kwargs)
output = read_method(path, compression=compression_only)
tm.assert_frame_equal(output, input)
@pytest.mark.parametrize(
"write_method,write_kwargs,read_method,read_kwargs",
[
("to_csv", {"index": False, "header": True}, pd.read_csv, {"squeeze": True}),
("to_json", {}, pd.read_json, {"typ": "series"}),
("to_pickle", {}, pd.read_pickle, {}),
],
)
def test_series_compression_defaults_to_infer(
write_method, write_kwargs, read_method, read_kwargs, compression_only
):
# GH22004
input = pd.Series([0, 5, -2, 10], name="X")
extension = icom._compression_to_extension[compression_only]
with tm.ensure_clean("compressed" + extension) as path:
getattr(input, write_method)(path, **write_kwargs)
output = read_method(path, compression=compression_only, **read_kwargs)
tm.assert_series_equal(output, input, check_names=False)
def test_compression_warning(compression_only):
# Assert that passing a file object to to_csv while explicitly specifying a
# compression protocol triggers a RuntimeWarning, as per GH21227.
df = pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
)
with tm.ensure_clean() as path:
with icom.get_handle(path, "w", compression=compression_only) as handles:
with tm.assert_produces_warning(RuntimeWarning):
df.to_csv(handles.handle, compression=compression_only)
def test_compression_binary(compression_only):
"""
Binary file handles support compression.
GH22555
"""
df = tm.makeDataFrame()
# with a file
with tm.ensure_clean() as path:
with open(path, mode="wb") as file:
df.to_csv(file, mode="wb", compression=compression_only)
file.seek(0) # file shouldn't be closed
tm.assert_frame_equal(
df, pd.read_csv(path, index_col=0, compression=compression_only)
)
# with BytesIO
file = io.BytesIO()
df.to_csv(file, mode="wb", compression=compression_only)
file.seek(0) # file shouldn't be closed
tm.assert_frame_equal(
df, pd.read_csv(file, index_col=0, compression=compression_only)
)
def test_gzip_reproducibility_file_name():
"""
Gzip should create reproducible archives with mtime.
Note: Archives created with different filenames will still be different!
GH 28103
"""
df = tm.makeDataFrame()
compression_options = {"method": "gzip", "mtime": 1}
# test for filename
with tm.ensure_clean() as path:
path = Path(path)
df.to_csv(path, compression=compression_options)
time.sleep(2)
output = path.read_bytes()
df.to_csv(path, compression=compression_options)
assert output == path.read_bytes()
def test_gzip_reproducibility_file_object():
"""
Gzip should create reproducible archives with mtime.
GH 28103
"""
df = tm.makeDataFrame()
compression_options = {"method": "gzip", "mtime": 1}
# test for file object
buffer = io.BytesIO()
df.to_csv(buffer, compression=compression_options, mode="wb")
output = buffer.getvalue()
time.sleep(2)
buffer = io.BytesIO()
df.to_csv(buffer, compression=compression_options, mode="wb")
assert output == buffer.getvalue()
def test_with_missing_lzma():
"""Tests if import pandas works when lzma is not present."""
# https://github.com/pandas-dev/pandas/issues/27575
code = textwrap.dedent(
"""\
import sys
sys.modules['lzma'] = None
import pandas
"""
)
subprocess.check_output([sys.executable, "-c", code], stderr=subprocess.PIPE)
def test_with_missing_lzma_runtime():
"""Tests if RuntimeError is hit when calling lzma without
having the module available.
"""
code = textwrap.dedent(
"""
import sys
import pytest
sys.modules['lzma'] = None
import pandas as pd
df = pd.DataFrame()
with pytest.raises(RuntimeError, match='lzma module'):
df.to_csv('foo.csv', compression='xz')
"""
)
subprocess.check_output([sys.executable, "-c", code], stderr=subprocess.PIPE)
@pytest.mark.parametrize(
"obj",
[
pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
),
pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_gzip_compression_level(obj, method):
# GH33196
with tm.ensure_clean() as path:
getattr(obj, method)(path, compression="gzip")
compressed_size_default = os.path.getsize(path)
getattr(obj, method)(path, compression={"method": "gzip", "compresslevel": 1})
compressed_size_fast = os.path.getsize(path)
assert compressed_size_default < compressed_size_fast
@pytest.mark.parametrize(
"obj",
[
pd.DataFrame(
100 * [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],
columns=["X", "Y", "Z"],
),
pd.Series(100 * [0.123456, 0.234567, 0.567567], name="X"),
],
)
@pytest.mark.parametrize("method", ["to_pickle", "to_json", "to_csv"])
def test_bzip_compression_level(obj, method):
"""GH33196 bzip needs file size > 100k to show a size difference between
compression levels, so here we just check if the call works when
compression is passed as a dict.
"""
with tm.ensure_clean() as path:
getattr(obj, method)(path, compression={"method": "bz2", "compresslevel": 1})
| bsd-3-clause |
veblush/PyPhabricatorDb | sample/create-task-summary.py | 1 | 1927 | import sys
import codecs
import datetime
from pyphabricatordb import *
from sqlalchemy.orm import sessionmaker
def create_session():
DBSession = sessionmaker()
return DBSession()
def get_user(session, phid):
return session.query(user.User).filter(user.User.phid == phid).first()
def create_task_range_summary(session, file, start_date, end_date):
tasks = []
for t in session.query(maniphest.Task).filter(maniphest.Task.dateModified >= start_date).all():
t.status_last_modified_date = t.dateCreated
t.last_commit_date = None
for trx in t.transactions:
if trx.transactionType == u"status":
t.status_last_modified_date = max(t.status_last_modified_date, trx.dateModified)
if trx.transactionType == u"core:edge":
t.last_commit_date = trx.dateModified if t.last_commit_date is None else max(t.last_commit_date, trx.dateModified)
if t.status_last_modified_date >= start_date and t.status_last_modified_date < end_date:
tasks.append(t)
user_tasks = {}
for t in tasks:
owner = get_user(session, t.ownerPHID)
owner_name = owner.realName if owner else ""
user_tasks.setdefault(owner_name, [])
user_tasks[owner_name].append(t)
for user_name, tasks in user_tasks.iteritems():
print "[", user_name, "]"
for t in tasks:
if t.last_commit_date and t.last_commit_date >= start_date and t.last_commit_date < end_date:
print " ", t.id, t.title
def main():
db_url = sys.argv[1] if len(sys.argv) >= 2 else 'mysql://localhost'
file = codecs.open(sys.argv[2], "wb", "utf-8") if len(sys.argv) >= 3 else sys.stdout
connector.connect_all(db_url)
session = create_session()
create_task_range_summary(session, file, datetime.datetime(2014, 12, 1), datetime.datetime(2014, 12, 8))
if __name__ == "__main__":
main()
| mit |
wecatch/app-turbo | turbo/mongo_model.py | 1 | 10225 | # -*- coding:utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
with_statement,
)
from collections import defaultdict
from datetime import datetime
import functools
import time
from bson.objectid import ObjectId
from turbo.log import model_log
from turbo.util import escape as _es, import_object
def _record(x):
return defaultdict(lambda: None, x)
def convert_to_record(func):
"""Wrap mongodb record to a dict record with default value None
"""
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
if result is not None:
if isinstance(result, dict):
return _record(result)
return (_record(i) for i in result)
return result
return wrapper
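# Illustrative usage of the decorator above (an assumption, not from the
# original module): any method returning a raw pymongo dict gains
# KeyError-free access to missing keys.
#
#   class Demo(object):
#       @convert_to_record
#       def find_one(self):
#           return {'name': 'turbo'}
#
#   Demo().find_one()['missing']   # -> None instead of raising KeyError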
class MixinModel(object):
@staticmethod
def utctimestamp(seconds=None):
if seconds:
return long(time.mktime(time.gmtime(seconds)))
else:
return long(time.mktime(time.gmtime()))
@staticmethod
def timestamp():
return long(time.time())
@staticmethod
def datetime(dt=None):
if dt:
return datetime.strptime(dt, '%Y-%m-%d %H:%M')
else:
return datetime.now()
@staticmethod
def utcdatetime(dt=None):
if dt:
return datetime.strptime(dt, '%Y-%m-%d %H:%M')
else:
return datetime.utcnow()
@classmethod
def to_one_str(cls, value, *args, **kwargs):
"""Convert single record's values to str
"""
if kwargs.get('wrapper'):
return cls._wrapper_to_one_str(value)
return _es.to_dict_str(value)
@classmethod
def to_str(cls, values, callback=None):
"""Convert many records's values to str
"""
if callback and callable(callback):
if isinstance(values, dict):
return callback(_es.to_str(values))
return [callback(_es.to_str(i)) for i in values]
return _es.to_str(values)
@staticmethod
@convert_to_record
def _wrapper_to_one_str(value):
return _es.to_dict_str(value)
@staticmethod
def default_encode(v):
return _es.default_encode(v)
@staticmethod
def json_encode(v):
return _es.json_encode(v)
@staticmethod
def json_decode(v):
return _es.json_decode(v)
@staticmethod
def to_objectid(objid):
return _es.to_objectid(objid)
@staticmethod
def create_objectid():
"""Create new objectid
"""
return ObjectId()
_instance = {}
@classmethod
def instance(cls, name):
"""Instantiate a model class according to import path
args:
name: class import path like `user.User`
return:
model instance
"""
if not cls._instance.get(name):
model_name = name.split('.')
ins_name = '.'.join(
['models', model_name[0], 'model', model_name[1]])
cls._instance[name] = cls.import_model(ins_name)()
return cls._instance[name]
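    # Usage sketch (hypothetical model name):
    #   cls.instance('user.User')
    # resolves to models.user.model.User under ``package_space`` and caches
    # the instantiated model for subsequent calls.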
@classmethod
def import_model(cls, ins_name):
"""Import model class in models package
"""
try:
package_space = getattr(cls, 'package_space')
except AttributeError:
raise ValueError('package_space not exist')
else:
return import_object(ins_name, package_space)
@staticmethod
def default_record():
"""Generate one default record that return empty str when key not exist
"""
return defaultdict(lambda: '')
def collection_method_call(turbo_connect_ins, name):
def outwrapper(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if name in turbo_connect_ins._write_operators:
turbo_connect_ins._model_ins.write_action_call(
name, *args, **kwargs)
if name in turbo_connect_ins._read_operators:
turbo_connect_ins._model_ins.read_action_call(
name, *args, **kwargs)
return func(*args, **kwargs)
return wrapper
return outwrapper
class MongoTurboConnect(object):
_write_operators = frozenset([
'insert',
'save',
'update',
'find_and_modify',
'bulk_write',
'insert_one',
'insert_many',
'replace_one',
'update_one',
'update_many',
'delete_one',
'delete_many',
'find_one_and_delete',
'find_one_and_replace',
'find_one_and_update',
'create_index',
'drop_index',
'create_indexes',
'drop_indexes',
'drop',
'remove',
'ensure_index',
'rename',
])
_read_operators = frozenset([
'find',
'find_one',
'count',
'index_information',
])
def __init__(self, model_ins, db_collect=None):
self._model_ins = model_ins
self._collect = db_collect
def __getattr__(self, name):
collection_method = getattr(self._collect, name)
if callable(collection_method):
return collection_method_call(self, name)(collection_method)
return collection_method
def __getitem__(self, name):
"""Sub-collection
"""
return self._collect[name]
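# Sketch of the proxy behavior above (illustrative; ``model`` and ``coll``
# are assumed objects): every pymongo call routed through MongoTurboConnect
# first fires the owning model's read/write hooks.
#
#   connect = MongoTurboConnect(model, coll)
#   connect.find_one({'_id': some_id})   # triggers model.read_action_call('find_one', ...)
#   connect.insert_one({'name': 'x'})    # triggers model.write_action_call('insert_one', ...)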
class AbstractModel(MixinModel):
"""
name = None mongodb collection name
field = None collection record map
column = None need to query field
index = [
tuple([('uid', 1)])
] query index
"""
_operators = frozenset([
'$set',
'$unset',
'$rename',
'$currentDate',
'$inc',
'$max',
'$min',
'$mul',
'$setOnInsert',
'$addToSet',
'$pop',
'$pushAll',
'$push',
'$pull'])
PRIMARY_KEY_TYPE = ObjectId
def _init(self, db_name, _mongo_db_mapping):
if _mongo_db_mapping is None:
raise Exception("db mapping is invalid")
# databases
db = _mongo_db_mapping['db']
# databases file
db_file = _mongo_db_mapping['db_file']
        # database name
        if db_name not in db or db.get(db_name, None) is None:
            raise Exception('%s is an invalid database' % db_name)
# collection name
if not self.name:
raise Exception('%s is invalid collection name' % self.name)
# collection field
if not self.field or not isinstance(self.field, dict):
raise Exception('%s is invalid collection field' % self.field)
# collect as private variable
collect = getattr(db.get(db_name, object), self.name, None)
if collect is None:
raise Exception('%s is invalid collection' % self.name)
        # replace the pymongo collection with the custom connect proxy
_collect = MongoTurboConnect(self, collect)
# gridfs as private variable
_gridfs = db_file.get(db_name, None)
if _gridfs is None:
            model_log.info('gridfs for %s is invalid' % db_name)
return _collect, _gridfs
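    # Shape of ``_mongo_db_mapping`` assumed by ``_init`` above (illustrative):
    #   {
    #       'db':      {'dbname': pymongo_client['dbname']},
    #       'db_file': {'dbname': gridfs.GridFS(pymongo_client['dbname'])},
    #   }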
def _to_primary_key(self, _id):
if self.PRIMARY_KEY_TYPE is ObjectId:
return self.to_objectid(_id)
return _id
def __setitem__(self, k, v):
setattr(self, k, v)
def __getitem__(self, k):
return getattr(self, k)
def __str__(self):
if isinstance(self.field, dict):
return str(self.field)
return None
def sub_collection(self, name):
raise NotImplementedError()
def find_by_id(self, _id, column=None):
raise NotImplementedError()
def remove_by_id(self, _id):
raise NotImplementedError()
def find_new_one(self, *args, **kwargs):
"""return latest one record sort by _id
"""
raise NotImplementedError()
def get_as_dict(self, condition=None, column=None, skip=0, limit=0, sort=None):
raise NotImplementedError()
def _valide_update_document(self, document):
for opk in document.keys():
if not opk.startswith('$') or opk not in self._operators:
raise ValueError('invalid document update operator')
if not document:
raise ValueError('empty document update not allowed')
def _valid_record(self, record):
if not isinstance(record, dict):
raise Exception('%s record is not dict' % record)
rset = set(record.keys())
fset = set(self.field.keys())
rset.discard('_id')
fset.discard('_id')
if not (fset ^ rset) <= fset:
raise Exception('record keys is not equal to fields keys %s' % (
list((fset ^ rset) - fset)))
for k, v in self.field.items():
if k not in record:
if v[0] is datetime and not v[1]:
record[k] = self.datetime()
continue
if v[0] is time and not v[1]:
record[k] = self.timestamp()
continue
record[k] = v[1]
return record
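    # Example of the default-filling above (hypothetical field spec): with
    #   field = {'name': (str, ''), 'created': (datetime, None)}
    # the record {'name': 'x'} comes back as {'name': 'x', 'created': <now>},
    # because a ``datetime`` field with a falsy default is stamped via self.datetime().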
def inc(self, spec_or_id, key, num=1):
raise NotImplementedError()
def put(self, value, **kwargs):
"""gridfs put method
"""
raise NotImplementedError()
def delete(self, _id):
"""gridfs delete method
"""
raise NotImplementedError()
def get(self, _id):
"""gridfs get method
"""
raise NotImplementedError()
def read(self, _id):
"""gridfs read method
"""
raise NotImplementedError()
def write_action_call(self, name, *args, **kwargs):
"""Execute when write action occurs, note: in this method write action must be called asynchronously
"""
pass
def read_action_call(self, name, *args, **kwargs):
"""Execute when read action occurs, note: in this method read action must be called asynchronously
"""
pass
| apache-2.0 |
jeremiahyan/odoo | odoo/addons/base/models/ir_qweb.py | 1 | 19636 | # -*- coding: utf-8 -*-
from __future__ import print_function
import ast
import copy
import logging
from collections import OrderedDict
from time import time
from lxml import html
from lxml import etree
from odoo import api, models, tools
from odoo.tools.safe_eval import assert_valid_codeobj, _BUILTINS, _SAFE_OPCODES
from odoo.tools.misc import get_lang
from odoo.http import request
from odoo.modules.module import get_resource_path
from odoo.addons.base.models.qweb import QWeb, Contextifier, MarkupSafeBytes
from odoo.addons.base.models.assetsbundle import AssetsBundle
from odoo.addons.base.models.ir_asset import can_aggregate, STYLE_EXTENSIONS, SCRIPT_EXTENSIONS
_logger = logging.getLogger(__name__)
class IrQWeb(models.AbstractModel, QWeb):
""" Base QWeb rendering engine
* to customize ``t-field`` rendering, subclass ``ir.qweb.field`` and
create new models called :samp:`ir.qweb.field.{widget}`
Beware that if you need extensions or alterations which could be
incompatible with other subsystems, you should create a local object
inheriting from ``ir.qweb`` and customize that.
"""
_name = 'ir.qweb'
_description = 'Qweb'
@api.model
def _render(self, id_or_xml_id, values=None, **options):
""" render(id_or_xml_id, values, **options)
Render the template specified by the given name.
:param id_or_xml_id: name or etree (see get_template)
:param dict values: template values to be used for rendering
:param options: used to compile the template (the dict available for the rendering is frozen)
* ``load`` (function) overrides the load method
* ``profile`` (float) profile the rendering (use astor lib) (filter
profile line with time ms >= profile)
"""
context = dict(self.env.context, dev_mode='qweb' in tools.config['dev_mode'])
context.update(options)
result = super(IrQWeb, self)._render(id_or_xml_id, values=values, **context)
if b'data-pagebreak=' not in result:
return result
fragments = html.fragments_fromstring(result.decode('utf-8'))
for fragment in fragments:
for row in fragment.iterfind('.//tr[@data-pagebreak]'):
table = next(row.iterancestors('table'))
newtable = html.Element('table', attrib=dict(table.attrib))
thead = table.find('thead')
                if thead is not None:  # lxml elements are falsy when they have no children
newtable.append(copy.deepcopy(thead))
# TODO: copy caption & tfoot as well?
# TODO: move rows in a tbody if row.getparent() is one?
pos = row.get('data-pagebreak')
assert pos in ('before', 'after')
for sibling in row.getparent().iterchildren('tr'):
if sibling is row:
if pos == 'after':
newtable.append(sibling)
break
newtable.append(sibling)
table.addprevious(newtable)
table.addprevious(html.Element('div', attrib={
'style': 'page-break-after: always'
}))
return MarkupSafeBytes(b''.join(html.tostring(f) for f in fragments))
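    # Rendering sketch (hypothetical template xml_id and values):
    #   html = env['ir.qweb']._render('my_module.report_template', {'docs': records})
    # The pagebreak rewriting above only kicks in when the rendered output
    # contains tr[data-pagebreak] rows.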
def default_values(self):
""" attributes add to the values for each computed template
"""
default = super(IrQWeb, self).default_values()
        default.update(request=request, cache_assets=round(time()/180), true=True, false=False)  # true and false kept for backward compatibility; to be removed after v10
return default
# assume cache will be invalidated by third party on write to ir.ui.view
def _get_template_cache_keys(self):
""" Return the list of context keys to use for caching ``_get_template``. """
return ['lang', 'inherit_branding', 'editable', 'translatable', 'edit_translations', 'website_id']
# apply ormcache_context decorator unless in dev mode...
@tools.conditional(
'xml' not in tools.config['dev_mode'],
tools.ormcache('id_or_xml_id', 'tuple(options.get(k) for k in self._get_template_cache_keys())'),
)
def compile(self, id_or_xml_id, options):
try:
id_or_xml_id = int(id_or_xml_id)
except:
pass
return super(IrQWeb, self).compile(id_or_xml_id, options=options)
def _load(self, name, options):
lang = options.get('lang', get_lang(self.env).code)
env = self.env
if lang != env.context.get('lang'):
env = env(context=dict(env.context, lang=lang))
view_id = self.env['ir.ui.view'].get_view_id(name)
template = env['ir.ui.view'].sudo()._read_template(view_id)
# QWeb's `_read_template` will check if one of the first children of
# what we send to it has a "t-name" attribute having `name` as value
# to consider it has found it. As it'll never be the case when working
# with view ids or children view or children primary views, force it here.
        def is_child_view(view_name):
            view_id = self.env['ir.ui.view'].get_view_id(view_name)
            view = self.env['ir.ui.view'].sudo().browse(view_id)
            # a recordset is never None, so compare emptiness rather than identity
            return bool(view.inherit_id)
if isinstance(name, int) or is_child_view(name):
view = etree.fromstring(template)
for node in view:
if node.get('t-name'):
node.set('t-name', str(name))
return view
else:
return template
# order
def _directives_eval_order(self):
directives = super(IrQWeb, self)._directives_eval_order()
directives.insert(directives.index('call'), 'lang')
directives.insert(directives.index('field'), 'call-assets')
return directives
# compile directives
def _compile_directive_lang(self, el, options):
lang = el.attrib.pop('t-lang', get_lang(self.env).code)
if el.get('t-call-options'):
el.set('t-call-options', el.get('t-call-options')[0:-1] + u', "lang": %s}' % lang)
else:
el.set('t-call-options', u'{"lang": %s}' % lang)
return self._compile_node(el, options)
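    # Template-side sketch of the directive above (assumed template snippet):
    #   <t t-call="my_module.partner_card" t-lang="partner.lang"/>
    # is rewritten so the callee is compiled with {"lang": partner.lang}
    # merged into its t-call-options.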
def _compile_directive_call_assets(self, el, options):
""" This special 't-call' tag can be used in order to aggregate/minify javascript and css assets"""
if len(el):
raise SyntaxError("t-call-assets cannot contain children nodes")
# nodes = self._get_asset_nodes(bundle, options, css=css, js=js, debug=values.get('debug'), async=async, values=values)
#
# for index, (tagName, t_attrs, content) in enumerate(nodes):
# if index:
# append('\n ')
# append('<')
# append(tagName)
#
# self._post_processing_att(tagName, t_attrs, options)
# for name, value in t_attrs.items():
        #         if value or isinstance(value, string_types):
# append(u' ')
# append(name)
# append(u'="')
        #             append(escape(pycompat.to_text(value)))
# append(u'"')
#
# if not content and tagName in self._void_elements:
# append('/>')
# else:
# append('>')
# if content:
# append(content)
# append('</')
# append(tagName)
# append('>')
#
space = el.getprevious() is not None and el.getprevious().tail or el.getparent().text
sep = u'\n' + space.rsplit('\n').pop()
return [
ast.Assign(
targets=[ast.Name(id='nodes', ctx=ast.Store())],
value=ast.Call(
func=ast.Attribute(
value=ast.Name(id='self', ctx=ast.Load()),
attr='_get_asset_nodes',
ctx=ast.Load()
),
args=[
ast.Str(el.get('t-call-assets')),
ast.Name(id='options', ctx=ast.Load()),
],
keywords=[
ast.keyword('css', self._get_attr_bool(el.get('t-css', True))),
ast.keyword('js', self._get_attr_bool(el.get('t-js', True))),
ast.keyword('debug', ast.Call(
func=ast.Attribute(
value=ast.Name(id='values', ctx=ast.Load()),
attr='get',
ctx=ast.Load()
),
args=[ast.Str('debug')],
keywords=[], starargs=None, kwargs=None
)),
ast.keyword('async_load', self._get_attr_bool(el.get('async_load', False))),
ast.keyword('defer_load', self._get_attr_bool(el.get('defer_load', False))),
ast.keyword('lazy_load', self._get_attr_bool(el.get('lazy_load', False))),
ast.keyword('media', ast.Constant(el.get('media'))),
],
starargs=None, kwargs=None
)
),
ast.For(
target=ast.Tuple(elts=[
ast.Name(id='index', ctx=ast.Store()),
ast.Tuple(elts=[
ast.Name(id='tagName', ctx=ast.Store()),
ast.Name(id='t_attrs', ctx=ast.Store()),
ast.Name(id='content', ctx=ast.Store())
], ctx=ast.Store())
], ctx=ast.Store()),
iter=ast.Call(
func=ast.Name(id='enumerate', ctx=ast.Load()),
args=[ast.Name(id='nodes', ctx=ast.Load())],
keywords=[],
starargs=None, kwargs=None
),
body=[
ast.If(
test=ast.Name(id='index', ctx=ast.Load()),
body=[self._append(ast.Str(sep))],
orelse=[]
),
self._append(ast.Str(u'<')),
self._append(ast.Name(id='tagName', ctx=ast.Load())),
] + self._append_attributes() + [
ast.If(
test=ast.BoolOp(
op=ast.And(),
values=[
ast.UnaryOp(ast.Not(), ast.Name(id='content', ctx=ast.Load()), lineno=0, col_offset=0),
ast.Compare(
left=ast.Name(id='tagName', ctx=ast.Load()),
ops=[ast.In()],
comparators=[ast.Attribute(
value=ast.Name(id='self', ctx=ast.Load()),
attr='_void_elements',
ctx=ast.Load()
)]
),
]
),
body=[self._append(ast.Str(u'/>'))],
orelse=[
self._append(ast.Str(u'>')),
ast.If(
test=ast.Name(id='content', ctx=ast.Load()),
body=[self._append(ast.Name(id='content', ctx=ast.Load()))],
orelse=[]
),
self._append(ast.Str(u'</')),
self._append(ast.Name(id='tagName', ctx=ast.Load())),
self._append(ast.Str(u'>')),
]
)
],
orelse=[]
)
]
# method called by computing code
def get_asset_bundle(self, bundle_name, files, env=None, css=True, js=True):
return AssetsBundle(bundle_name, files, env=env, css=css, js=js)
def _get_asset_nodes(self, bundle, options, css=True, js=True, debug=False, async_load=False, defer_load=False, lazy_load=False, media=None):
"""Generates asset nodes.
If debug=assets, the assets will be regenerated when a file which composes them has been modified.
Else, the assets will be generated only once and then stored in cache.
"""
if debug and 'assets' in debug:
return self._generate_asset_nodes(bundle, options, css, js, debug, async_load, defer_load, lazy_load, media)
else:
return self._generate_asset_nodes_cache(bundle, options, css, js, debug, async_load, defer_load, lazy_load, media)
@tools.conditional(
# in non-xml-debug mode we want assets to be cached forever, and the admin can force a cache clear
# by restarting the server after updating the source code (or using the "Clear server cache" in debug tools)
'xml' not in tools.config['dev_mode'],
tools.ormcache_context('bundle', 'options.get("lang", "en_US")', 'css', 'js', 'debug', 'async_load', 'defer_load', 'lazy_load', keys=("website_id",)),
)
def _generate_asset_nodes_cache(self, bundle, options, css=True, js=True, debug=False, async_load=False, defer_load=False, lazy_load=False, media=None):
return self._generate_asset_nodes(bundle, options, css, js, debug, async_load, defer_load, lazy_load, media)
def _generate_asset_nodes(self, bundle, options, css=True, js=True, debug=False, async_load=False, defer_load=False, lazy_load=False, media=None):
nodeAttrs = None
if css and media:
nodeAttrs = {
'media': media,
}
files, remains = self._get_asset_content(bundle, options, nodeAttrs)
asset = self.get_asset_bundle(bundle, files, env=self.env, css=css, js=js)
remains = [node for node in remains if (css and node[0] == 'link') or (js and node[0] == 'script')]
return remains + asset.to_node(css=css, js=js, debug=debug, async_load=async_load, defer_load=defer_load, lazy_load=lazy_load)
def _get_asset_link_urls(self, bundle, options):
asset_nodes = self._get_asset_nodes(bundle, options, js=False)
return [node[1]['href'] for node in asset_nodes if node[0] == 'link']
@tools.ormcache_context('bundle', 'options.get("lang", "en_US")', keys=("website_id",))
def _get_asset_content(self, bundle, options, nodeAttrs=None):
options = dict(options,
inherit_branding=False, inherit_branding_auto=False,
edit_translations=False, translatable=False,
rendering_bundle=True)
options['website_id'] = self.env.context.get('website_id')
asset_paths = self.env['ir.asset']._get_asset_paths(bundle=bundle, css=True, js=True)
files = []
remains = []
for path, *_ in asset_paths:
ext = path.split('.')[-1]
is_js = ext in SCRIPT_EXTENSIONS
is_css = ext in STYLE_EXTENSIONS
if not is_js and not is_css:
continue
mimetype = 'text/javascript' if is_js else 'text/%s' % ext
if can_aggregate(path):
segments = [segment for segment in path.split('/') if segment]
files.append({
'atype': mimetype,
'url': path,
'filename': get_resource_path(*segments) if segments else None,
'content': '',
'media': nodeAttrs and nodeAttrs.get('media'),
})
else:
if is_js:
tag = 'script'
attributes = {
"type": mimetype,
"src": path,
}
else:
tag = 'link'
attributes = {
"type": mimetype,
"rel": "stylesheet",
"href": path,
'media': nodeAttrs and nodeAttrs.get('media'),
}
remains.append((tag, attributes, ''))
return (files, remains)
def _get_field(self, record, field_name, expression, tagName, field_options, options, values):
field = record._fields[field_name]
# adds template compile options for rendering fields
field_options['template_options'] = options
# adds generic field options
field_options['tagName'] = tagName
field_options['expression'] = expression
field_options['type'] = field_options.get('widget', field.type)
inherit_branding = options.get('inherit_branding', options.get('inherit_branding_auto') and record.check_access_rights('write', False))
field_options['inherit_branding'] = inherit_branding
translate = options.get('edit_translations') and options.get('translatable') and field.translate
field_options['translate'] = translate
# field converter
model = 'ir.qweb.field.' + field_options['type']
converter = self.env[model] if model in self.env else self.env['ir.qweb.field']
# get content
content = converter.record_to_html(record, field_name, field_options)
attributes = converter.attributes(record, field_name, field_options, values)
return (attributes, content, inherit_branding or translate)
def _get_widget(self, value, expression, tagName, field_options, options, values):
# adds template compile options for rendering fields
field_options['template_options'] = options
field_options['type'] = field_options['widget']
field_options['tagName'] = tagName
field_options['expression'] = expression
# field converter
model = 'ir.qweb.field.' + field_options['type']
converter = self.env[model] if model in self.env else self.env['ir.qweb.field']
# get content
content = converter.value_to_html(value, field_options)
attributes = OrderedDict()
attributes['data-oe-type'] = field_options['type']
attributes['data-oe-expression'] = field_options['expression']
return (attributes, content, None)
# compile expression add safe_eval
def _compile_expr(self, expr):
""" Compiles a purported Python expression to ast, verifies that it's safe
(according to safe_eval's semantics) and alter its variable references to
access values data instead
"""
# string must be stripped otherwise whitespace before the start for
# formatting purpose are going to break parse/compile
st = ast.parse(expr.strip(), mode='eval')
assert_valid_codeobj(
_SAFE_OPCODES,
compile(st, '<>', 'eval'), # could be expr, but eval *should* be fine
expr
)
# ast.Expression().body -> expr
return Contextifier(_BUILTINS).visit(st).body
def _get_attr_bool(self, attr, default=False):
if attr:
if attr is True:
return ast.Constant(True)
attr = attr.lower()
if attr in ('false', '0'):
return ast.Constant(False)
elif attr in ('true', '1'):
return ast.Constant(True)
return ast.Constant(attr if attr is False else bool(default))
| gpl-3.0 |
Thraxis/pymedusa | lib/sqlalchemy/orm/persistence.py | 34 | 51028 | # orm/persistence.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
import operator
from itertools import groupby, chain
from .. import sql, util, exc as sa_exc
from . import attributes, sync, exc as orm_exc, evaluator
from .base import state_str, _attr_as_key, _entity_descriptor
from ..sql import expression
from ..sql.base import _from_objects
from . import loading
def _bulk_insert(
mapper, mappings, session_transaction, isstates, return_defaults):
base_mapper = mapper.base_mapper
cached_connections = _cached_connection_dict(base_mapper)
if session_transaction.session.connection_callable:
raise NotImplementedError(
"connection_callable / per-instance sharding "
"not supported in bulk_insert()")
if isstates:
if return_defaults:
states = [(state, state.dict) for state in mappings]
mappings = [dict_ for (state, dict_) in states]
else:
mappings = [state.dict for state in mappings]
else:
mappings = list(mappings)
connection = session_transaction.connection(base_mapper)
for table, super_mapper in base_mapper._sorted_tables.items():
if not mapper.isa(super_mapper):
continue
records = (
(None, state_dict, params, mapper,
connection, value_params, has_all_pks, has_all_defaults)
for
state, state_dict, params, mp,
conn, value_params, has_all_pks,
has_all_defaults in _collect_insert_commands(table, (
(None, mapping, mapper, connection)
for mapping in mappings),
bulk=True, return_defaults=return_defaults
)
)
_emit_insert_statements(base_mapper, None,
cached_connections,
super_mapper, table, records,
bookkeeping=return_defaults)
if return_defaults and isstates:
identity_cls = mapper._identity_class
identity_props = [p.key for p in mapper._identity_key_props]
for state, dict_ in states:
state.key = (
identity_cls,
tuple([dict_[key] for key in identity_props])
)
def _bulk_update(mapper, mappings, session_transaction,
isstates, update_changed_only):
base_mapper = mapper.base_mapper
cached_connections = _cached_connection_dict(base_mapper)
def _changed_dict(mapper, state):
return dict(
(k, v)
for k, v in state.dict.items() if k in state.committed_state or k
in mapper._primary_key_propkeys
)
if isstates:
if update_changed_only:
mappings = [_changed_dict(mapper, state) for state in mappings]
else:
mappings = [state.dict for state in mappings]
else:
mappings = list(mappings)
if session_transaction.session.connection_callable:
raise NotImplementedError(
"connection_callable / per-instance sharding "
"not supported in bulk_update()")
connection = session_transaction.connection(base_mapper)
for table, super_mapper in base_mapper._sorted_tables.items():
if not mapper.isa(super_mapper):
continue
records = _collect_update_commands(None, table, (
(None, mapping, mapper, connection,
(mapping[mapper._version_id_prop.key]
if mapper._version_id_prop else None))
for mapping in mappings
), bulk=True)
_emit_update_statements(base_mapper, None,
cached_connections,
super_mapper, table, records,
bookkeeping=False)
def save_obj(
base_mapper, states, uowtransaction, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list
of objects.
This is called within the context of a UOWTransaction during a
flush operation, given a list of states to be flushed. The
base mapper in an inheritance hierarchy handles the inserts/
updates for all descendant mappers.
"""
# if batch=false, call _save_obj separately for each object
if not single and not base_mapper.batch:
for state in _sort_states(states):
save_obj(base_mapper, [state], uowtransaction, single=True)
return
states_to_update = []
states_to_insert = []
cached_connections = _cached_connection_dict(base_mapper)
for (state, dict_, mapper, connection,
has_identity,
row_switch, update_version_id) in _organize_states_for_save(
base_mapper, states, uowtransaction
):
if has_identity or row_switch:
states_to_update.append(
(state, dict_, mapper, connection, update_version_id)
)
else:
states_to_insert.append(
(state, dict_, mapper, connection)
)
for table, mapper in base_mapper._sorted_tables.items():
if table not in mapper._pks_by_table:
continue
insert = _collect_insert_commands(table, states_to_insert)
update = _collect_update_commands(
uowtransaction, table, states_to_update)
_emit_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
_emit_insert_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, insert)
_finalize_insert_update_commands(
base_mapper, uowtransaction,
chain(
(
(state, state_dict, mapper, connection, False)
for state, state_dict, mapper, connection in states_to_insert
),
(
(state, state_dict, mapper, connection, True)
for state, state_dict, mapper, connection,
update_version_id in states_to_update
)
)
)
def post_update(base_mapper, states, uowtransaction, post_update_cols):
"""Issue UPDATE statements on behalf of a relationship() which
specifies post_update.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_update = list(_organize_states_for_post_update(
base_mapper,
states, uowtransaction))
for table, mapper in base_mapper._sorted_tables.items():
if table not in mapper._pks_by_table:
continue
update = (
(state, state_dict, sub_mapper, connection)
for
state, state_dict, sub_mapper, connection in states_to_update
if table in sub_mapper._pks_by_table
)
update = _collect_post_update_commands(base_mapper, uowtransaction,
table, update,
post_update_cols)
_emit_post_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
"""Issue ``DELETE`` statements for a list of objects.
This is called within the context of a UOWTransaction during a
flush operation.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_delete = list(_organize_states_for_delete(
base_mapper,
states,
uowtransaction))
table_to_mapper = base_mapper._sorted_tables
for table in reversed(list(table_to_mapper.keys())):
mapper = table_to_mapper[table]
if table not in mapper._pks_by_table:
continue
delete = _collect_delete_commands(base_mapper, uowtransaction,
table, states_to_delete)
_emit_delete_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, delete)
for state, state_dict, mapper, connection, \
update_version_id in states_to_delete:
mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for INSERT or
UPDATE.
This includes splitting out into distinct lists for
each, calling before_insert/before_update, obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state,
and the identity flag.
"""
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
has_identity = bool(state.key)
instance_key = state.key or mapper._identity_key_from_state(state)
row_switch = update_version_id = None
# call before_XXX extensions
if not has_identity:
mapper.dispatch.before_insert(mapper, connection, state)
else:
mapper.dispatch.before_update(mapper, connection, state)
if mapper._validate_polymorphic_identity:
mapper._validate_polymorphic_identity(mapper, state, dict_)
# detect if we have a "pending" instance (i.e. has
# no instance_key attached to it), and another instance
# with the same identity key already exists as persistent.
# convert to an UPDATE if so.
if not has_identity and \
instance_key in uowtransaction.session.identity_map:
instance = \
uowtransaction.session.identity_map[instance_key]
existing = attributes.instance_state(instance)
if not uowtransaction.is_deleted(existing):
raise orm_exc.FlushError(
"New instance %s with identity key %s conflicts "
"with persistent instance %s" %
(state_str(state), instance_key,
state_str(existing)))
base_mapper._log_debug(
"detected row switch for identity %s. "
"will update %s, remove %s from "
"transaction", instance_key,
state_str(state), state_str(existing))
# remove the "delete" flag from the existing element
uowtransaction.remove_state_actions(existing)
row_switch = existing
if (has_identity or row_switch) and mapper.version_id_col is not None:
update_version_id = mapper._get_committed_state_attr_by_column(
row_switch if row_switch else state,
row_switch.dict if row_switch else dict_,
mapper.version_id_col)
yield (state, dict_, mapper, connection,
has_identity, row_switch, update_version_id)
def _organize_states_for_post_update(base_mapper, states,
uowtransaction):
"""Make an initial pass across a set of states for UPDATE
corresponding to post_update.
This includes obtaining key information for each state
including its dictionary, mapper, the connection to use for
the execution per state.
"""
return _connections_for_states(base_mapper, uowtransaction, states)
def _organize_states_for_delete(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for DELETE.
This includes calling out before_delete and obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state.
"""
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
mapper.dispatch.before_delete(mapper, connection, state)
if mapper.version_id_col is not None:
update_version_id = \
mapper._get_committed_state_attr_by_column(
state, dict_,
mapper.version_id_col)
else:
update_version_id = None
yield (
state, dict_, mapper, connection, update_version_id)
def _collect_insert_commands(
table, states_to_insert,
bulk=False, return_defaults=False):
"""Identify sets of values to use in INSERT statements for a
list of states.
"""
for state, state_dict, mapper, connection in states_to_insert:
if table not in mapper._pks_by_table:
continue
params = {}
value_params = {}
propkey_to_col = mapper._propkey_to_col[table]
for propkey in set(propkey_to_col).intersection(state_dict):
value = state_dict[propkey]
col = propkey_to_col[propkey]
if value is None:
continue
elif not bulk and isinstance(value, sql.ClauseElement):
value_params[col.key] = value
else:
params[col.key] = value
if not bulk:
for colkey in mapper._insert_cols_as_none[table].\
difference(params).difference(value_params):
params[colkey] = None
if not bulk or return_defaults:
has_all_pks = mapper._pk_keys_by_table[table].issubset(params)
if mapper.base_mapper.eager_defaults:
has_all_defaults = mapper._server_default_cols[table].\
issubset(params)
else:
has_all_defaults = True
else:
has_all_defaults = has_all_pks = True
if mapper.version_id_generator is not False \
and mapper.version_id_col is not None and \
mapper.version_id_col in mapper._cols_by_table[table]:
params[mapper.version_id_col.key] = \
mapper.version_id_generator(None)
yield (
state, state_dict, params, mapper,
connection, value_params, has_all_pks,
has_all_defaults)
def _collect_update_commands(
uowtransaction, table, states_to_update,
bulk=False):
"""Identify sets of values to use in UPDATE statements for a
list of states.
This function works intricately with the history system
to determine exactly what values should be updated
as well as how the row should be matched within an UPDATE
statement. Includes some tricky scenarios where the primary
key of an object might have been changed.
"""
for state, state_dict, mapper, connection, \
update_version_id in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
value_params = {}
propkey_to_col = mapper._propkey_to_col[table]
if bulk:
params = dict(
(propkey_to_col[propkey].key, state_dict[propkey])
for propkey in
set(propkey_to_col).intersection(state_dict).difference(
mapper._pk_keys_by_table[table])
)
has_all_defaults = True
else:
params = {}
for propkey in set(propkey_to_col).intersection(
state.committed_state):
value = state_dict[propkey]
col = propkey_to_col[propkey]
if isinstance(value, sql.ClauseElement):
value_params[col] = value
# guard against values that generate non-__nonzero__
# objects for __eq__()
elif state.manager[propkey].impl.is_equal(
value, state.committed_state[propkey]) is not True:
params[col.key] = value
if mapper.base_mapper.eager_defaults:
has_all_defaults = mapper._server_onupdate_default_cols[table].\
issubset(params)
else:
has_all_defaults = True
if update_version_id is not None and \
mapper.version_id_col in mapper._cols_by_table[table]:
if not bulk and not (params or value_params):
# HACK: check for history in other tables, in case the
# history is only in a different table than the one
# where the version_id_col is. This logic was lost
# from 0.9 -> 1.0.0 and restored in 1.0.6.
for prop in mapper._columntoproperty.values():
history = (
state.manager[prop.key].impl.get_history(
state, state_dict,
attributes.PASSIVE_NO_INITIALIZE))
if history.added:
break
else:
# no net change, break
continue
col = mapper.version_id_col
params[col._label] = update_version_id
if (bulk or col.key not in params) and \
mapper.version_id_generator is not False:
val = mapper.version_id_generator(update_version_id)
params[col.key] = val
elif not (params or value_params):
continue
if bulk:
pk_params = dict(
(propkey_to_col[propkey]._label, state_dict.get(propkey))
for propkey in
set(propkey_to_col).
intersection(mapper._pk_keys_by_table[table])
)
else:
pk_params = {}
for col in pks:
propkey = mapper._columntoproperty[col].key
history = state.manager[propkey].impl.get_history(
state, state_dict, attributes.PASSIVE_OFF)
if history.added:
if not history.deleted or \
("pk_cascaded", state, col) in \
uowtransaction.attributes:
pk_params[col._label] = history.added[0]
params.pop(col.key, None)
else:
# else, use the old value to locate the row
pk_params[col._label] = history.deleted[0]
params[col.key] = history.added[0]
else:
pk_params[col._label] = history.unchanged[0]
if pk_params[col._label] is None:
raise orm_exc.FlushError(
"Can't update table %s using NULL for primary "
"key value on column %s" % (table, col))
if params or value_params:
params.update(pk_params)
yield (
state, state_dict, params, mapper,
connection, value_params, has_all_defaults)
def _collect_post_update_commands(base_mapper, uowtransaction, table,
states_to_update, post_update_cols):
"""Identify sets of values to use in UPDATE statements for a
list of states within a post_update operation.
"""
for state, state_dict, mapper, connection in states_to_update:
# assert table in mapper._pks_by_table
pks = mapper._pks_by_table[table]
params = {}
hasdata = False
for col in mapper._cols_by_table[table]:
if col in pks:
params[col._label] = \
mapper._get_state_attr_by_column(
state,
state_dict, col, passive=attributes.PASSIVE_OFF)
elif col in post_update_cols:
prop = mapper._columntoproperty[col]
history = state.manager[prop.key].impl.get_history(
state, state_dict,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
value = history.added[0]
params[col.key] = value
hasdata = True
if hasdata:
yield params, connection
def _collect_delete_commands(base_mapper, uowtransaction, table,
states_to_delete):
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
for state, state_dict, mapper, connection, \
update_version_id in states_to_delete:
if table not in mapper._pks_by_table:
continue
params = {}
for col in mapper._pks_by_table[table]:
params[col.key] = \
value = \
mapper._get_committed_state_attr_by_column(
state, state_dict, col)
if value is None:
raise orm_exc.FlushError(
"Can't delete from table %s "
"using NULL for primary "
"key value on column %s" % (table, col))
if update_version_id is not None and \
mapper.version_id_col in mapper._cols_by_table[table]:
params[mapper.version_id_col.key] = update_version_id
yield params, connection
def _emit_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update,
bookkeeping=True):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_update_commands()."""
needs_version_id = mapper.version_id_col is not None and \
mapper.version_id_col in mapper._cols_by_table[table]
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
if needs_version_id:
clause.clauses.append(
mapper.version_id_col == sql.bindparam(
mapper.version_id_col._label,
type_=mapper.version_id_col.type))
stmt = table.update(clause)
return stmt
cached_stmt = base_mapper._memo(('update', table), update_stmt)
for (connection, paramkeys, hasvalue, has_all_defaults), \
records in groupby(
update,
lambda rec: (
rec[4], # connection
set(rec[2]), # set of parameter keys
bool(rec[5]), # whether or not we have "value" parameters
rec[6] # has_all_defaults
)
):
rows = 0
records = list(records)
statement = cached_stmt
# TODO: would be super-nice to not have to determine this boolean
# inside the loop here, in the 99.9999% of the time there's only
# one connection in use
assert_singlerow = connection.dialect.supports_sane_rowcount
assert_multirow = assert_singlerow and \
connection.dialect.supports_sane_multi_rowcount
allow_multirow = has_all_defaults and not needs_version_id
if bookkeeping and not has_all_defaults and \
mapper.base_mapper.eager_defaults:
statement = statement.return_defaults()
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
if hasvalue:
for state, state_dict, params, mapper, \
connection, value_params, has_all_defaults in records:
c = connection.execute(
statement.values(value_params),
params)
if bookkeeping:
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params)
rows += c.rowcount
check_rowcount = True
else:
if not allow_multirow:
check_rowcount = assert_singlerow
for state, state_dict, params, mapper, \
connection, value_params, has_all_defaults in records:
c = cached_connections[connection].\
execute(statement, params)
# TODO: why with bookkeeping=False?
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params)
rows += c.rowcount
else:
multiparams = [rec[2] for rec in records]
check_rowcount = assert_multirow or (
assert_singlerow and
len(multiparams) == 1
)
c = cached_connections[connection].\
execute(statement, multiparams)
rows += c.rowcount
# TODO: why with bookkeeping=False?
for state, state_dict, params, mapper, \
connection, value_params, has_all_defaults in records:
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params)
if check_rowcount:
if rows != len(records):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
"update %d row(s); %d were matched." %
(table.description, len(records), rows))
elif needs_version_id:
util.warn("Dialect %s does not support updated rowcount "
"- versioning cannot be verified." %
c.dialect.dialect_description)
def _emit_insert_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, insert,
bookkeeping=True):
"""Emit INSERT statements corresponding to value lists collected
by _collect_insert_commands()."""
cached_stmt = base_mapper._memo(('insert', table), table.insert)
for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
records in groupby(
insert,
lambda rec: (
rec[4], # connection
set(rec[2]), # parameter keys
bool(rec[5]), # whether we have "value" parameters
rec[6],
rec[7])):
statement = cached_stmt
if not bookkeeping or \
(
has_all_defaults
or not base_mapper.eager_defaults
or not connection.dialect.implicit_returning
) and has_all_pks and not hasvalue:
records = list(records)
multiparams = [rec[2] for rec in records]
c = cached_connections[connection].\
execute(statement, multiparams)
if bookkeeping:
for (state, state_dict, params, mapper_rec,
conn, value_params, has_all_pks, has_all_defaults), \
last_inserted_params in \
zip(records, c.context.compiled_parameters):
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
last_inserted_params,
value_params)
else:
if not has_all_defaults and base_mapper.eager_defaults:
statement = statement.return_defaults()
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
for state, state_dict, params, mapper_rec, \
connection, value_params, \
has_all_pks, has_all_defaults in records:
if value_params:
result = connection.execute(
statement.values(value_params),
params)
else:
result = cached_connections[connection].\
execute(statement, params)
primary_key = result.context.inserted_primary_key
if primary_key is not None:
# set primary key attributes
for pk, col in zip(primary_key,
mapper._pks_by_table[table]):
prop = mapper_rec._columntoproperty[col]
if state_dict.get(prop.key) is None:
state_dict[prop.key] = pk
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
result,
result.context.compiled_parameters[0],
value_params)
def _emit_post_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_post_update_commands()."""
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
return table.update(clause)
statement = base_mapper._memo(('post_update', table), update_stmt)
# execute each UPDATE in the order according to the original
# list of states to guarantee row access order, but
# also group them into common (connection, cols) sets
# to support executemany().
for key, grouper in groupby(
update, lambda rec: (
rec[1], # connection
set(rec[0]) # parameter keys
)
):
connection = key[0]
multiparams = [params for params, conn in grouper]
cached_connections[connection].\
execute(statement, multiparams)
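# Editorial sketch (added; not original SQLAlchemy code): the groupby above
# batches parameter dictionaries that share a (connection, key-set) signature
# into one executemany()-style call while preserving the original row order.
# Self-contained toy version of that grouping:
def _example_group_post_updates():
    from itertools import groupby as _groupby
    update = [
        ({'id': 1, 'x': 10}, 'connA'),
        ({'id': 2, 'x': 20}, 'connA'),
        ({'id': 3, 'y': 30}, 'connB'),
    ]
    batches = []
    for (connection, keys), grouper in _groupby(
            update, lambda rec: (rec[1], set(rec[0]))):
        # each list below would be the executemany() parameter list
        batches.append((connection, [params for params, conn in grouper]))
    return batches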
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
mapper, table, delete):
"""Emit DELETE statements corresponding to value lists collected
by _collect_delete_commands()."""
need_version_id = mapper.version_id_col is not None and \
mapper.version_id_col in mapper._cols_by_table[table]
def delete_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col.key, type_=col.type))
if need_version_id:
clause.clauses.append(
mapper.version_id_col ==
sql.bindparam(
mapper.version_id_col.key,
type_=mapper.version_id_col.type
)
)
return table.delete(clause)
statement = base_mapper._memo(('delete', table), delete_stmt)
for connection, recs in groupby(
delete,
lambda rec: rec[1] # connection
):
del_objects = [params for params, connection in recs]
connection = cached_connections[connection]
expected = len(del_objects)
rows_matched = -1
only_warn = False
if connection.dialect.supports_sane_multi_rowcount:
c = connection.execute(statement, del_objects)
if not need_version_id:
only_warn = True
rows_matched = c.rowcount
elif need_version_id:
if connection.dialect.supports_sane_rowcount:
rows_matched = 0
# execute deletes individually so that versioned
# rows can be verified
for params in del_objects:
c = connection.execute(statement, params)
rows_matched += c.rowcount
else:
util.warn(
"Dialect %s does not support deleted rowcount "
"- versioning cannot be verified." %
connection.dialect.dialect_description,
stacklevel=12)
connection.execute(statement, del_objects)
else:
connection.execute(statement, del_objects)
if base_mapper.confirm_deleted_rows and \
rows_matched > -1 and expected != rows_matched:
if only_warn:
util.warn(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched. Please set "
"confirm_deleted_rows=False within the mapper "
"configuration to prevent this warning." %
(table.description, expected, rows_matched)
)
else:
raise orm_exc.StaleDataError(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched. Please set "
"confirm_deleted_rows=False within the mapper "
"configuration to prevent this warning." %
(table.description, expected, rows_matched)
)
def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
"""finalize state on states that have been inserted or updated,
including calling after_insert/after_update events.
"""
for state, state_dict, mapper, connection, has_identity in states:
if mapper._readonly_props:
readonly = state.unmodified_intersection(
[p.key for p in mapper._readonly_props
if p.expire_on_flush or p.key not in state.dict]
)
if readonly:
state._expire_attributes(state.dict, readonly)
# if eager_defaults option is enabled, load
# all expired cols. Else if we have a version_id_col, make sure
# it isn't expired.
toload_now = []
if base_mapper.eager_defaults:
toload_now.extend(state._unloaded_non_object)
elif mapper.version_id_col is not None and \
mapper.version_id_generator is False:
if mapper._version_id_prop.key in state.unloaded:
toload_now.extend([mapper._version_id_prop.key])
if toload_now:
state.key = base_mapper._identity_key_from_state(state)
loading.load_on_ident(
uowtransaction.session.query(base_mapper),
state.key, refresh_state=state,
only_load_props=toload_now)
# call after_XXX extensions
if not has_identity:
mapper.dispatch.after_insert(mapper, connection, state)
else:
mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(mapper, uowtransaction, table,
state, dict_, result, params, value_params, bulk=False):
"""Expire attributes in need of newly persisted database state,
after an INSERT or UPDATE statement has proceeded for that
state."""
# TODO: bulk is never non-False, need to clean this up
prefetch_cols = result.context.compiled.prefetch
postfetch_cols = result.context.compiled.postfetch
returning_cols = result.context.compiled.returning
if mapper.version_id_col is not None and \
mapper.version_id_col in mapper._cols_by_table[table]:
prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
refresh_flush = bool(mapper.class_manager.dispatch.refresh_flush)
if refresh_flush:
load_evt_attrs = []
if returning_cols:
row = result.context.returned_defaults
if row is not None:
for col in returning_cols:
if col.primary_key:
continue
dict_[mapper._columntoproperty[col].key] = row[col]
if refresh_flush:
load_evt_attrs.append(mapper._columntoproperty[col].key)
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
dict_[mapper._columntoproperty[c].key] = params[c.key]
if refresh_flush:
load_evt_attrs.append(mapper._columntoproperty[c].key)
if refresh_flush and load_evt_attrs:
mapper.class_manager.dispatch.refresh_flush(
state, uowtransaction, load_evt_attrs)
if postfetch_cols and state:
state._expire_attributes(state.dict,
[mapper._columntoproperty[c].key
for c in postfetch_cols if c in
mapper._columntoproperty]
)
# synchronize newly inserted ids from one table to the next
# TODO: this still goes a little too often. would be nice to
# have definitive list of "columns that changed" here
for m, equated_pairs in mapper._table_to_equated[table]:
if state is None:
sync.bulk_populate_inherit_keys(dict_, m, equated_pairs)
else:
sync.populate(state, m, state, m,
equated_pairs,
uowtransaction,
mapper.passive_updates)
def _connections_for_states(base_mapper, uowtransaction, states):
"""Return an iterator of (state, state.dict, mapper, connection).
The states are sorted according to _sort_states, then paired
with the connection they should be using for the given
unit of work transaction.
"""
# if session has a connection callable,
# organize individual states with the connection
# to use for update
if uowtransaction.session.connection_callable:
connection_callable = \
uowtransaction.session.connection_callable
else:
connection = uowtransaction.transaction.connection(base_mapper)
connection_callable = None
for state in _sort_states(states):
if connection_callable:
connection = connection_callable(base_mapper, state.obj())
mapper = state.manager.mapper
yield state, state.dict, mapper, connection
def _cached_connection_dict(base_mapper):
# dictionary of connection->connection_with_cache_options.
return util.PopulateDict(
lambda conn: conn.execution_options(
compiled_cache=base_mapper._compiled_cache
))
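# Editorial sketch (assumption-labeled): util.PopulateDict acts like a dict
# whose missing keys are filled by calling the creator once per key, which is
# what lets each connection above lazily receive an execution-options clone.
# A minimal stand-in with the same observable behavior:
def _example_populate_dict():
    class _PopulateDictSketch(dict):
        def __init__(self, creator):
            dict.__init__(self)
            self.creator = creator
        def __missing__(self, key):
            self[key] = value = self.creator(key)
            return value
    cache = _PopulateDictSketch(lambda conn: ('with_cache_options', conn))
    first = cache['c1']          # created on first access
    return first is cache['c1']  # True: reused afterwards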
def _sort_states(states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
return sorted(pending, key=operator.attrgetter("insert_order")) + \
sorted(persistent, key=lambda q: q.key[1])
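# Editorial sketch: flush ordering -- pending (transient) states go first,
# ordered by the sequence in which they entered the session; persistent states
# follow, ordered by primary key, keeping row access deterministic. Toy data:
def _example_sort_states():
    class _StateStub(object):
        def __init__(self, key, insert_order):
            self.key = key                    # None marks a pending state
            self.insert_order = insert_order
    states = [_StateStub(('T', (2,)), 0),
              _StateStub(None, 1),
              _StateStub(None, 0)]
    ordered = _sort_states(states)
    # pending insert_order 0, pending insert_order 1, then the persistent state
    return [(s.key, s.insert_order) for s in ordered]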
class BulkUD(object):
"""Handle bulk update and deletes via a :class:`.Query`."""
def __init__(self, query):
self.query = query.enable_eagerloads(False)
self.mapper = self.query._bind_mapper()
self._validate_query_state()
def _validate_query_state(self):
for attr, methname, notset, op in (
('_limit', 'limit()', None, operator.is_),
('_offset', 'offset()', None, operator.is_),
('_order_by', 'order_by()', False, operator.is_),
('_group_by', 'group_by()', False, operator.is_),
('_distinct', 'distinct()', False, operator.is_),
(
'_from_obj',
'join(), outerjoin(), select_from(), or from_self()',
(), operator.eq)
):
if not op(getattr(self.query, attr), notset):
raise sa_exc.InvalidRequestError(
"Can't call Query.update() or Query.delete() "
"when %s has been called" %
(methname, )
)
@property
def session(self):
return self.query.session
@classmethod
def _factory(cls, lookup, synchronize_session, *arg):
try:
klass = lookup[synchronize_session]
except KeyError:
raise sa_exc.ArgumentError(
"Valid strategies for session synchronization "
"are %s" % (", ".join(sorted(repr(x)
for x in lookup))))
else:
return klass(*arg)
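    # Editorial note (hedged sketch): _factory is a small strategy lookup --
    # the synchronize_session value ("evaluate", "fetch", or False) selects
    # the BulkUD subclass that handles session-state synchronization, e.g.:
    #
    #     BulkUD._factory({"evaluate": BulkUpdateEvaluate,
    #                      "fetch": BulkUpdateFetch,
    #                      False: BulkUpdate},
    #                     synchronize_session, query, values, update_kwargs)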
def exec_(self):
self._do_pre()
self._do_pre_synchronize()
self._do_exec()
self._do_post_synchronize()
self._do_post()
@util.dependencies("sqlalchemy.orm.query")
def _do_pre(self, querylib):
query = self.query
self.context = querylib.QueryContext(query)
if isinstance(query._entities[0], querylib._ColumnEntity):
# check for special case of query(table)
tables = set()
for ent in query._entities:
if not isinstance(ent, querylib._ColumnEntity):
tables.clear()
break
else:
tables.update(_from_objects(ent.column))
if len(tables) != 1:
raise sa_exc.InvalidRequestError(
"This operation requires only one Table or "
"entity be specified as the target."
)
else:
self.primary_table = tables.pop()
else:
self.primary_table = query._only_entity_zero(
"This operation requires only one Table or "
"entity be specified as the target."
).mapper.local_table
session = query.session
if query._autoflush:
session._autoflush()
def _do_pre_synchronize(self):
pass
def _do_post_synchronize(self):
pass
class BulkEvaluate(BulkUD):
"""BulkUD which does the 'evaluate' method of session state resolution."""
def _additional_evaluators(self, evaluator_compiler):
pass
def _do_pre_synchronize(self):
query = self.query
target_cls = query._mapper_zero().class_
try:
evaluator_compiler = evaluator.EvaluatorCompiler(target_cls)
if query.whereclause is not None:
eval_condition = evaluator_compiler.process(
query.whereclause)
else:
def eval_condition(obj):
return True
self._additional_evaluators(evaluator_compiler)
except evaluator.UnevaluatableError:
raise sa_exc.InvalidRequestError(
"Could not evaluate current criteria in Python. "
"Specify 'fetch' or False for the "
"synchronize_session parameter.")
# TODO: detect when the where clause is a trivial primary key match
self.matched_objects = [
obj for (cls, pk), obj in
query.session.identity_map.items()
if issubclass(cls, target_cls) and
eval_condition(obj)]
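# Editorial sketch (added): the "evaluate" strategy re-expresses the UPDATE or
# DELETE criteria as a plain Python predicate and applies it to every
# compatible object already in the session's identity map, as above. Toy form:
def _example_evaluate_scan(identity_map, target_cls, eval_condition):
    # identity_map: {(cls, pk): obj}, the shape iterated by the code above
    return [obj for (cls, pk), obj in identity_map.items()
            if issubclass(cls, target_cls) and eval_condition(obj)]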
class BulkFetch(BulkUD):
"""BulkUD which does the 'fetch' method of session state resolution."""
def _do_pre_synchronize(self):
query = self.query
session = query.session
context = query._compile_context()
select_stmt = context.statement.with_only_columns(
self.primary_table.primary_key)
self.matched_rows = session.execute(
select_stmt,
mapper=self.mapper,
params=query._params).fetchall()
class BulkUpdate(BulkUD):
"""BulkUD which handles UPDATEs."""
def __init__(self, query, values, update_kwargs):
super(BulkUpdate, self).__init__(query)
self.values = values
self.update_kwargs = update_kwargs
@classmethod
def factory(cls, query, synchronize_session, values, update_kwargs):
return BulkUD._factory({
"evaluate": BulkUpdateEvaluate,
"fetch": BulkUpdateFetch,
False: BulkUpdate
}, synchronize_session, query, values, update_kwargs)
def _resolve_string_to_expr(self, key):
if self.mapper and isinstance(key, util.string_types):
attr = _entity_descriptor(self.mapper, key)
return attr.__clause_element__()
else:
return key
def _resolve_key_to_attrname(self, key):
if self.mapper and isinstance(key, util.string_types):
attr = _entity_descriptor(self.mapper, key)
return attr.property.key
elif isinstance(key, attributes.InstrumentedAttribute):
return key.key
elif hasattr(key, '__clause_element__'):
key = key.__clause_element__()
if self.mapper and isinstance(key, expression.ColumnElement):
try:
attr = self.mapper._columntoproperty[key]
except orm_exc.UnmappedColumnError:
return None
else:
return attr.key
else:
raise sa_exc.InvalidRequestError(
"Invalid expression type: %r" % key)
def _do_exec(self):
values = [
(self._resolve_string_to_expr(k), v)
for k, v in (
self.values.items() if hasattr(self.values, 'items')
else self.values)
]
if not self.update_kwargs.get('preserve_parameter_order', False):
values = dict(values)
update_stmt = sql.update(self.primary_table,
self.context.whereclause, values,
**self.update_kwargs)
self.result = self.query.session.execute(
update_stmt, params=self.query._params,
mapper=self.mapper)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_update(self)
class BulkDelete(BulkUD):
"""BulkUD which handles DELETEs."""
def __init__(self, query):
super(BulkDelete, self).__init__(query)
@classmethod
def factory(cls, query, synchronize_session):
return BulkUD._factory({
"evaluate": BulkDeleteEvaluate,
"fetch": BulkDeleteFetch,
False: BulkDelete
}, synchronize_session, query)
def _do_exec(self):
delete_stmt = sql.delete(self.primary_table,
self.context.whereclause)
self.result = self.query.session.execute(
delete_stmt,
params=self.query._params,
mapper=self.mapper)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_delete(self)
class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
"""BulkUD which handles UPDATEs using the "evaluate"
method of session resolution."""
def _additional_evaluators(self, evaluator_compiler):
self.value_evaluators = {}
values = (self.values.items() if hasattr(self.values, 'items')
else self.values)
for key, value in values:
key = self._resolve_key_to_attrname(key)
if key is not None:
self.value_evaluators[key] = evaluator_compiler.process(
expression._literal_as_binds(value))
def _do_post_synchronize(self):
session = self.query.session
states = set()
evaluated_keys = list(self.value_evaluators.keys())
for obj in self.matched_objects:
state, dict_ = attributes.instance_state(obj),\
attributes.instance_dict(obj)
# only evaluate unmodified attributes
to_evaluate = state.unmodified.intersection(
evaluated_keys)
for key in to_evaluate:
dict_[key] = self.value_evaluators[key](obj)
state._commit(dict_, list(to_evaluate))
# expire attributes with pending changes
# (there was no autoflush, so they are overwritten)
state._expire_attributes(dict_,
set(evaluated_keys).
difference(to_evaluate))
states.add(state)
session._register_altered(states)
class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
"""BulkUD which handles DELETEs using the "evaluate"
method of session resolution."""
def _do_post_synchronize(self):
self.query.session._remove_newly_deleted(
[attributes.instance_state(obj)
for obj in self.matched_objects])
class BulkUpdateFetch(BulkFetch, BulkUpdate):
"""BulkUD which handles UPDATEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
states = set([
attributes.instance_state(session.identity_map[identity_key])
for identity_key in [
target_mapper.identity_key_from_primary_key(
list(primary_key))
for primary_key in self.matched_rows
]
if identity_key in session.identity_map
])
attrib = [_attr_as_key(k) for k in self.values]
for state in states:
session._expire_state(state, attrib)
session._register_altered(states)
class BulkDeleteFetch(BulkFetch, BulkDelete):
"""BulkUD which handles DELETEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
for primary_key in self.matched_rows:
# TODO: inline this and call remove_newly_deleted
# once
identity_key = target_mapper.identity_key_from_primary_key(
list(primary_key))
if identity_key in session.identity_map:
session._remove_newly_deleted(
[attributes.instance_state(
session.identity_map[identity_key]
)]
)
| gpl-3.0 |
ahmetcemturan/SFACT | fabmetheus_utilities/geometry/creation/linear_bearing_cage.py | 12 | 12862 | """
Linear bearing cage.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.creation import extrude
from fabmetheus_utilities.geometry.creation import lineation
from fabmetheus_utilities.geometry.creation import peg
from fabmetheus_utilities.geometry.creation import solid
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.geometry.geometry_utilities import matrix
from fabmetheus_utilities.geometry.manipulation_matrix import translate
from fabmetheus_utilities.geometry.solids import cylinder
from fabmetheus_utilities.geometry.solids import sphere
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import euclidean
import math
__author__ = 'Enrique Perez ([email protected])'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def addAssemblyCage(derivation, negatives, positives):
'Add assembly linear bearing cage.'
addCageGroove(derivation, negatives, positives)
for pegCenterX in derivation.pegCenterXs:
addPositivePeg(derivation, positives, pegCenterX, -derivation.pegY)
addPositivePeg(derivation, positives, pegCenterX, derivation.pegY)
translate.translateNegativesPositives(negatives, positives, Vector3(0.0, -derivation.halfSeparationWidth))
femaleNegatives = []
femalePositives = []
addCageGroove(derivation, femaleNegatives, femalePositives)
for pegCenterX in derivation.pegCenterXs:
addNegativePeg(derivation, femaleNegatives, pegCenterX, -derivation.pegY)
addNegativePeg(derivation, femaleNegatives, pegCenterX, derivation.pegY)
translate.translateNegativesPositives(femaleNegatives, femalePositives, Vector3(0.0, derivation.halfSeparationWidth))
negatives += femaleNegatives
positives += femalePositives
def addCage(derivation, height, negatives, positives):
'Add linear bearing cage.'
copyShallow = derivation.elementNode.getCopyShallow()
copyShallow.attributes['path'] = [Vector3(), Vector3(0.0, 0.0, height)]
extrudeDerivation = extrude.ExtrudeDerivation(copyShallow)
roundedExtendedRectangle = getRoundedExtendedRectangle(derivation.demiwidth, derivation.rectangleCenterX, 14)
outsidePath = euclidean.getVector3Path(roundedExtendedRectangle)
extrude.addPositives(extrudeDerivation, [outsidePath], positives)
for bearingCenterX in derivation.bearingCenterXs:
addNegativeSphere(derivation, negatives, bearingCenterX)
def addCageGroove(derivation, negatives, positives):
'Add cage and groove.'
addCage(derivation, derivation.demiheight, negatives, positives)
addGroove(derivation, negatives)
def addGroove(derivation, negatives):
'Add groove on each side of cage.'
copyShallow = derivation.elementNode.getCopyShallow()
extrude.setElementNodeToEndStart(copyShallow, Vector3(-derivation.demilength), Vector3(derivation.demilength))
extrudeDerivation = extrude.ExtrudeDerivation(copyShallow)
bottom = derivation.demiheight - 0.5 * derivation.grooveWidth
outside = derivation.demiwidth
top = derivation.demiheight
leftGroove = [
complex(-outside, bottom),
complex(-derivation.innerDemiwidth, derivation.demiheight),
complex(-outside, top)]
rightGroove = [
complex(outside, top),
complex(derivation.innerDemiwidth, derivation.demiheight),
complex(outside, bottom)]
extrude.addNegatives(extrudeDerivation, negatives, euclidean.getVector3Paths([leftGroove, rightGroove]))
def addNegativePeg(derivation, negatives, x, y):
'Add negative cylinder at x and y.'
negativePegRadius = derivation.pegRadiusArealized + derivation.halfPegClearance
inradius = complex(negativePegRadius, negativePegRadius)
copyShallow = derivation.elementNode.getCopyShallow()
start = Vector3(x, y, derivation.height)
sides = evaluate.getSidesMinimumThreeBasedOnPrecision(copyShallow, negativePegRadius)
cylinder.addCylinderOutputByEndStart(0.0, inradius, negatives, sides, start, derivation.topOverBottom)
def addNegativeSphere(derivation, negatives, x):
'Add negative sphere at x.'
radius = Vector3(derivation.radiusPlusClearance, derivation.radiusPlusClearance, derivation.radiusPlusClearance)
sphereOutput = sphere.getGeometryOutput(derivation.elementNode.getCopyShallow(), radius)
euclidean.translateVector3Path(matrix.getVertexes(sphereOutput), Vector3(x, 0.0, derivation.demiheight))
negatives.append(sphereOutput)
def addPositivePeg(derivation, positives, x, y):
'Add positive cylinder at x and y.'
positivePegRadius = derivation.pegRadiusArealized - derivation.halfPegClearance
radiusArealized = complex(positivePegRadius, positivePegRadius)
copyShallow = derivation.elementNode.getCopyShallow()
start = Vector3(x, y, derivation.demiheight)
endZ = derivation.height
peg.addPegOutput(derivation.pegBevel, endZ, positives, radiusArealized, derivation.sides, start, derivation.topOverBottom)
def getBearingCenterXs(bearingCenterX, numberOfSteps, stepX):
'Get the bearing center x list.'
bearingCenterXs = []
for stepIndex in xrange(numberOfSteps + 1):
bearingCenterXs.append(bearingCenterX)
bearingCenterX += stepX
return bearingCenterXs
def getGeometryOutput(elementNode):
'Get vector3 vertexes from attribute dictionary.'
derivation = LinearBearingCageDerivation(elementNode)
negatives = []
positives = []
if derivation.typeStringFirstCharacter == 'a':
addAssemblyCage(derivation, negatives, positives)
else:
addCage(derivation, derivation.height, negatives, positives)
return extrude.getGeometryOutputByNegativesPositives(elementNode, negatives, positives)
def getGeometryOutputByArguments(arguments, elementNode):
'Get vector3 vertexes from attribute dictionary by arguments.'
evaluate.setAttributesByArguments(['length', 'radius'], arguments, elementNode)
return getGeometryOutput(elementNode)
def getNewDerivation(elementNode):
'Get new derivation.'
return LinearBearingCageDerivation(elementNode)
def getPegCenterXs(numberOfSteps, pegCenterX, stepX):
'Get the peg center x list.'
pegCenterXs = []
for stepIndex in xrange(numberOfSteps):
pegCenterXs.append(pegCenterX)
pegCenterX += stepX
return pegCenterXs
def getRoundedExtendedRectangle(radius, rectangleCenterX, sides):
'Get the rounded extended rectangle.'
roundedExtendedRectangle = []
halfSides = int(sides / 2)
halfSidesPlusOne = abs(halfSides + 1)
sideAngle = math.pi / float(halfSides)
extensionMultiplier = 1.0 / math.cos(0.5 * sideAngle)
center = complex(rectangleCenterX, 0.0)
startAngle = 0.5 * math.pi
for halfSide in xrange(halfSidesPlusOne):
unitPolar = euclidean.getWiddershinsUnitPolar(startAngle)
unitPolarExtended = complex(unitPolar.real * extensionMultiplier, unitPolar.imag)
roundedExtendedRectangle.append(unitPolarExtended * radius + center)
startAngle += sideAngle
center = complex(-rectangleCenterX, 0.0)
startAngle = -0.5 * math.pi
for halfSide in xrange(halfSidesPlusOne):
unitPolar = euclidean.getWiddershinsUnitPolar(startAngle)
unitPolarExtended = complex(unitPolar.real * extensionMultiplier, unitPolar.imag)
roundedExtendedRectangle.append(unitPolarExtended * radius + center)
startAngle += sideAngle
return roundedExtendedRectangle
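# Editorial sketch (added for illustration, not original fabmetheus code):
# the outline is a stadium shape -- two semicircular arc fans joined by the
# straight sides implied by consecutive points, with each arc x stretched by
# 1/cos(sideAngle/2) so the inscribed polygon still clears the true radius.
def _exampleRoundedExtendedRectangle():
    'Hedged usage: 14 sides gives 2 * (7 + 1) = 16 outline points.'
    points = getRoundedExtendedRectangle(5.0, 20.0, 14)
    return len(points), points[0], points[-1]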
def processElementNode(elementNode):
'Process the xml element.'
solid.processElementNodeByGeometry(elementNode, getGeometryOutput(elementNode))
class LinearBearingCageDerivation:
'Class to hold linear bearing cage variables.'
def __init__(self, elementNode):
'Set defaults.'
self.length = evaluate.getEvaluatedFloat(50.0, elementNode, 'length')
self.demilength = 0.5 * self.length
self.elementNode = elementNode
self.radius = lineation.getFloatByPrefixBeginEnd(elementNode, 'radius', 'diameter', 5.0)
self.cageClearanceOverRadius = evaluate.getEvaluatedFloat(0.05, elementNode, 'cageClearanceOverRadius')
self.cageClearance = self.cageClearanceOverRadius * self.radius
self.cageClearance = evaluate.getEvaluatedFloat(self.cageClearance, elementNode, 'cageClearance')
self.racewayClearanceOverRadius = evaluate.getEvaluatedFloat(0.1, elementNode, 'racewayClearanceOverRadius')
self.racewayClearance = self.racewayClearanceOverRadius * self.radius
self.racewayClearance = evaluate.getEvaluatedFloat(self.racewayClearance, elementNode, 'racewayClearance')
self.typeMenuRadioStrings = 'assembly integral'.split()
self.typeString = evaluate.getEvaluatedString('assembly', elementNode, 'type')
self.typeStringFirstCharacter = self.typeString[: 1 ].lower()
self.wallThicknessOverRadius = evaluate.getEvaluatedFloat(0.5, elementNode, 'wallThicknessOverRadius')
self.wallThickness = self.wallThicknessOverRadius * self.radius
self.wallThickness = evaluate.getEvaluatedFloat(self.wallThickness, elementNode, 'wallThickness')
self.zenithAngle = evaluate.getEvaluatedFloat(45.0, elementNode, 'zenithAngle')
self.zenithRadian = math.radians(self.zenithAngle)
self.demiheight = self.radius * math.cos(self.zenithRadian) - self.racewayClearance
self.height = self.demiheight + self.demiheight
self.radiusPlusClearance = self.radius + self.cageClearance
self.cageRadius = self.radiusPlusClearance + self.wallThickness
self.demiwidth = self.cageRadius
self.bearingCenterX = self.cageRadius - self.demilength
separation = self.cageRadius + self.radiusPlusClearance
bearingLength = -self.bearingCenterX - self.bearingCenterX
self.numberOfSteps = int(math.floor(bearingLength / separation))
self.stepX = bearingLength / float(self.numberOfSteps)
self.bearingCenterXs = getBearingCenterXs(self.bearingCenterX, self.numberOfSteps, self.stepX)
if self.typeStringFirstCharacter == 'a':
self.setAssemblyCage()
self.rectangleCenterX = self.demiwidth - self.demilength
def setAssemblyCage(self):
'Set two piece assembly parameters.'
self.grooveDepthOverRadius = evaluate.getEvaluatedFloat(0.15, self.elementNode, 'grooveDepthOverRadius')
self.grooveDepth = self.grooveDepthOverRadius * self.radius
self.grooveDepth = evaluate.getEvaluatedFloat(self.grooveDepth, self.elementNode, 'grooveDepth')
self.grooveWidthOverRadius = evaluate.getEvaluatedFloat(0.6, self.elementNode, 'grooveWidthOverRadius')
self.grooveWidth = self.grooveWidthOverRadius * self.radius
self.grooveWidth = evaluate.getEvaluatedFloat(self.grooveWidth, self.elementNode, 'grooveWidth')
self.pegClearanceOverRadius = evaluate.getEvaluatedFloat(0.0, self.elementNode, 'pegClearanceOverRadius')
self.pegClearance = self.pegClearanceOverRadius * self.radius
self.pegClearance = evaluate.getEvaluatedFloat(self.pegClearance, self.elementNode, 'pegClearance')
self.halfPegClearance = 0.5 * self.pegClearance
self.pegRadiusOverRadius = evaluate.getEvaluatedFloat(0.5, self.elementNode, 'pegRadiusOverRadius')
self.pegRadius = self.pegRadiusOverRadius * self.radius
self.pegRadius = evaluate.getEvaluatedFloat(self.pegRadius, self.elementNode, 'pegRadius')
self.sides = evaluate.getSidesMinimumThreeBasedOnPrecision(self.elementNode, self.pegRadius)
self.pegRadiusArealized = evaluate.getRadiusArealizedBasedOnAreaRadius(self.elementNode, self.pegRadius, self.sides)
self.pegBevelOverPegRadius = evaluate.getEvaluatedFloat(0.25, self.elementNode, 'pegBevelOverPegRadius')
self.pegBevel = self.pegBevelOverPegRadius * self.pegRadiusArealized
self.pegBevel = evaluate.getEvaluatedFloat(self.pegBevel, self.elementNode, 'pegBevel')
self.pegMaximumRadius = self.pegRadiusArealized + abs(self.halfPegClearance)
self.separationOverRadius = evaluate.getEvaluatedFloat(0.5, self.elementNode, 'separationOverRadius')
self.separation = self.separationOverRadius * self.radius
self.separation = evaluate.getEvaluatedFloat(self.separation, self.elementNode, 'separation')
self.topOverBottom = evaluate.getEvaluatedFloat(0.8, self.elementNode, 'topOverBottom')
peg.setTopOverBottomByRadius(self, 0.0, self.pegRadiusArealized, self.height)
self.quarterHeight = 0.5 * self.demiheight
self.pegY = 0.5 * self.wallThickness + self.pegMaximumRadius
cagePegRadius = self.cageRadius + self.pegMaximumRadius
halfStepX = 0.5 * self.stepX
pegHypotenuse = math.sqrt(self.pegY * self.pegY + halfStepX * halfStepX)
if cagePegRadius > pegHypotenuse:
self.pegY = math.sqrt(cagePegRadius * cagePegRadius - halfStepX * halfStepX)
self.demiwidth = max(self.pegY + self.pegMaximumRadius + self.wallThickness, self.demiwidth)
self.innerDemiwidth = self.demiwidth
self.demiwidth += self.grooveDepth
self.halfSeparationWidth = self.demiwidth + 0.5 * self.separation
if self.pegRadiusArealized <= 0.0:
self.pegCenterXs = []
else:
self.pegCenterXs = getPegCenterXs(self.numberOfSteps, self.bearingCenterX + halfStepX, self.stepX)
| agpl-3.0 |
apache/airflow | tests/providers/amazon/aws/hooks/test_kinesis.py | 3 | 2515 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
import uuid
from airflow.providers.amazon.aws.hooks.kinesis import AwsFirehoseHook
try:
from moto import mock_kinesis
except ImportError:
mock_kinesis = None
class TestAwsFirehoseHook(unittest.TestCase):
@unittest.skipIf(mock_kinesis is None, 'mock_kinesis package not present')
@mock_kinesis
def test_get_conn_returns_a_boto3_connection(self):
hook = AwsFirehoseHook(
aws_conn_id='aws_default', delivery_stream="test_airflow", region_name="us-east-1"
)
assert hook.get_conn() is not None
@unittest.skipIf(mock_kinesis is None, 'mock_kinesis package not present')
@mock_kinesis
def test_insert_batch_records_kinesis_firehose(self):
hook = AwsFirehoseHook(
aws_conn_id='aws_default', delivery_stream="test_airflow", region_name="us-east-1"
)
response = hook.get_conn().create_delivery_stream(
DeliveryStreamName="test_airflow",
S3DestinationConfiguration={
'RoleARN': 'arn:aws:iam::123456789012:role/firehose_delivery_role',
'BucketARN': 'arn:aws:s3:::kinesis-test',
'Prefix': 'airflow/',
'BufferingHints': {'SizeInMBs': 123, 'IntervalInSeconds': 124},
'CompressionFormat': 'UNCOMPRESSED',
},
)
stream_arn = response['DeliveryStreamARN']
assert stream_arn == "arn:aws:firehose:us-east-1:123456789012:deliverystream/test_airflow"
records = [{"Data": str(uuid.uuid4())} for _ in range(100)]
response = hook.put_records(records)
assert response['FailedPutCount'] == 0
assert response['ResponseMetadata']['HTTPStatusCode'] == 200
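# Editorial usage sketch (hedged; the stream name below is an assumption):
# outside of moto-backed tests the hook is driven the same way -- each record
# is a dict with a "Data" payload and put_records() forwards the batch to
# Firehose.
#
#   hook = AwsFirehoseHook(aws_conn_id='aws_default',
#                          delivery_stream='my_stream',
#                          region_name='us-east-1')
#   response = hook.put_records([{'Data': b'hello'}])
#   assert response['FailedPutCount'] == 0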
| apache-2.0 |
sbalde/edx-platform | common/lib/xmodule/xmodule/tests/test_stringify.py | 187 | 1256 | """
Tests stringify functions used in xmodule html
"""
from nose.tools import assert_equals # pylint: disable=no-name-in-module
from lxml import etree
from xmodule.stringify import stringify_children
def test_stringify():
text = 'Hi <div x="foo">there <span>Bruce</span><b>!</b></div>'
html = '''<html a="b" foo="bar">{0}</html>'''.format(text)
xml = etree.fromstring(html)
out = stringify_children(xml)
assert_equals(out, text)
def test_stringify_again():
html = r"""<html name="Voltage Source Answer" >A voltage source is non-linear!
<div align="center">
<img src="/static/images/circuits/voltage-source.png"/>
\(V=V_C\)
</div>
But it is <a href="http://mathworld.wolfram.com/AffineFunction.html">affine</a>,
which means linear except for an offset.
</html>
"""
html = """<html>A voltage source is non-linear!
<div align="center">
</div>
But it is <a href="http://mathworld.wolfram.com/AffineFunction.html">affine</a>,
which means linear except for an offset.
</html>
"""
xml = etree.fromstring(html)
out = stringify_children(xml)
print "output:"
print out
# Tracking strange content repeating bug
# Should appear once
assert_equals(out.count("But it is "), 1)
| agpl-3.0 |
interedition/collatex | collatex-pythonport/docs/conf.py | 4 | 8473 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# collatex documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import collatex
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CollateX-Python'
copyright = u'2014, Ronald Haentjens Dekker'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = collatex.__version__
# The full version, including alpha/beta/rc tags.
release = collatex.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'collatexdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'collatex.tex',
u'CollateX-Python Documentation',
u'Ronald Haentjens Dekker', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'collatex',
u'CollateX-Python Documentation',
[u'Ronald Haentjens Dekker'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'collatex',
u'CollateX-Python Documentation',
u'Ronald Haentjens Dekker',
'Collatex',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | gpl-3.0 |
Learning-from-our-past/Kaira | books/karelians/extraction/extractors/weddingextractor.py | 2 | 1845 | # -*- coding: utf-8 -*-
import re
from books.karelians.extraction.extractors.baseExtractor import BaseExtractor
from books.karelians.extraction.extractionExceptions import *
from books.karelians.extraction.extractors.dateExtractor import DateExtractor
from shared import textUtils
from books.karelians.extractionkeys import KEYS
from interface.valuewrapper import ValueWrapper
from shared import regexUtils
class WeddingExtractor(BaseExtractor):
def extract(self, text, entry):
super(WeddingExtractor, self).extract(text)
self.PATTERN = r"(?:avioit)\.?\s?-(?P<year>\d{2,4})"
self.OPTIONS = (re.UNICODE | re.IGNORECASE)
self.REQUIRES_MATCH_POSITION = True
self.SUBSTRING_WIDTH = 100
self.weddingYear = ""
self.preparedText = ""
self.initVars(text)
self._findDate(self.preparedText)
return self._constructReturnDict()
def initVars(self,text):
self.preparedText = self._prepareTextForExtraction(text)
def _prepareTextForExtraction(self, text):
t = textUtils.takeSubStrBasedOnPos(text, self.matchStartPosition, self.SUBSTRING_WIDTH)
t = textUtils.removeSpacesFromText(t)
return t
def _findDate(self, text):
try:
wedding = regexUtils.safeSearch(self.PATTERN, text, self.OPTIONS)
self._setFinalMatchPosition(wedding.end())
self.weddingYear = "19" + wedding.group("year")
except regexUtils.RegexNoneMatchException as e:
self.weddingYear = ""
def _setFinalMatchPosition(self, end):
#Dirty fix for inaccuracy in positions which would screw the Location extraction
self.matchFinalPosition = end + self.matchStartPosition - 4
def _constructReturnDict(self):
return {KEYS["weddingYear"]: ValueWrapper(self.weddingYear)}
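# Editorial sketch (not part of the original extractor): PATTERN above matches
# forms such as "avioit. -39", and the captured two-digit year is prefixed
# with "19", so the input below yields "1939". Standalone illustration:
def _example_wedding_pattern():
    import re
    match = re.search(r"(?:avioit)\.?\s?-(?P<year>\d{2,4})",
                      "avioit. -39", re.UNICODE | re.IGNORECASE)
    return "19" + match.group("year") if match else ""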
| gpl-2.0 |
psiorx/drake | drake/thirdParty/xacro.py | 19 | 18904 | #! /usr/bin/env python
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author: Stuart Glaser
import os.path, sys, os, getopt
import subprocess
from xml.dom.minidom import parse, parseString
import xml.dom
import re
import string
class XacroException(Exception): pass
def isnumber(x):
return hasattr(x, '__int__')
#import roslib; roslib.load_manifest('xacro')
#import roslib.substitution_args
def eval_extension(str):
return str; #roslib.substitution_args.resolve_args(str, resolve_anon=False)
# Better pretty printing of xml
# Taken from http://ronrothman.com/public/leftbraned/xml-dom-minidom-toprettyxml-and-silly-whitespace/
def fixed_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
xml.dom.minidom._write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
if len(self.childNodes) == 1 \
and self.childNodes[0].nodeType == xml.dom.minidom.Node.TEXT_NODE:
writer.write(">")
self.childNodes[0].writexml(writer, "", "", "")
writer.write("</%s>%s" % (self.tagName, newl))
return
writer.write(">%s"%(newl))
for node in self.childNodes:
            if node.nodeType is not xml.dom.minidom.Node.TEXT_NODE:  # skip text nodes (TEXT_NODE == 3)
node.writexml(writer,indent+addindent,addindent,newl)
#node.writexml(writer,indent+addindent,addindent,newl)
writer.write("%s</%s>%s" % (indent,self.tagName,newl))
else:
writer.write("/>%s"%(newl))
# replace minidom's function with ours
xml.dom.minidom.Element.writexml = fixed_writexml
class Table:
def __init__(self, parent = None):
self.parent = parent
self.table = {}
def __getitem__(self, key):
if key in self.table:
return self.table[key]
elif self.parent:
return self.parent[key]
else:
raise KeyError(key)
def __setitem__(self, key, value):
self.table[key] = value
def __contains__(self, key):
return \
key in self.table or \
(self.parent and key in self.parent)
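# Editorial sketch (added): Table implements lexical scoping for xacro
# properties -- lookups fall through to the parent scope, while assignments
# stay local to the child scope.
def _example_table_scoping():
    outer = Table()
    outer['radius'] = 1.0
    inner = Table(outer)
    inner['radius'] = 2.0   # shadows outer without mutating it
    # returns (2.0, 1.0, True)
    return inner['radius'], outer['radius'], 'radius' in inner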
class QuickLexer(object):
def __init__(self, **res):
self.str = ""
self.top = None
self.res = []
for k,v in res.items():
self.__setattr__(k, len(self.res))
self.res.append(v)
def lex(self, str):
self.str = str
self.top = None
self.next()
def peek(self):
return self.top
def next(self):
result = self.top
self.top = None
for i in range(len(self.res)):
m = re.match(self.res[i], self.str)
if m:
self.top = (i, m.group(0))
self.str = self.str[m.end():]
break
return result
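# Editorial sketch (added): QuickLexer tries each regex against the head of
# the remaining string; token ids are the attribute values set in __init__.
# Hedged miniature:
def _example_quicklexer():
    lex = QuickLexer(IGNORE=r"\s+", NUMBER=r"\d+", SYMBOL=r"[a-zA-Z_]\w*")
    lex.lex("foo 42")
    tokens = []
    while lex.peek():
        tokens.append(lex.next())
    # roughly [(SYMBOL, 'foo'), (IGNORE, ' '), (NUMBER, '42')]
    return tokens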
def first_child_element(elt):
c = elt.firstChild
while c:
if c.nodeType == xml.dom.Node.ELEMENT_NODE:
return c
c = c.nextSibling
return None
def next_sibling_element(elt):
c = elt.nextSibling
while c:
if c.nodeType == xml.dom.Node.ELEMENT_NODE:
return c
c = c.nextSibling
return None
# Pre-order traversal of the elements
def next_element(elt):
child = first_child_element(elt)
if child: return child
while elt and elt.nodeType == xml.dom.Node.ELEMENT_NODE:
next = next_sibling_element(elt)
if next:
return next
elt = elt.parentNode
return None
# Pre-order traversal of all the nodes
def next_node(node):
if node.firstChild:
return node.firstChild
while node:
if node.nextSibling:
return node.nextSibling
node = node.parentNode
return None
def child_elements(elt):
c = elt.firstChild
while c:
if c.nodeType == xml.dom.Node.ELEMENT_NODE:
yield c
c = c.nextSibling
all_includes = []
## @throws XacroException if a parsing error occurs with an included document
def process_includes(doc, base_dir):
namespaces = {}
previous = doc.documentElement
elt = next_element(previous)
while elt:
if elt.tagName == 'include' or elt.tagName == 'xacro:include':
filename = eval_text(elt.getAttribute('filename'), {})
if not os.path.isabs(filename):
filename = os.path.join(base_dir, filename)
f = None
try:
try:
f = open(filename)
except IOError, e:
print elt
raise XacroException("included file \"%s\" could not be opened: %s" % (filename, str(e)))
try:
global all_includes
all_includes.append(filename)
included = parse(f)
except Exception, e:
raise XacroException("included file [%s] generated an error during XML parsing: %s"%(filename, str(e)))
finally:
if f: f.close()
# Replaces the include tag with the elements of the included file
for c in child_elements(included.documentElement):
elt.parentNode.insertBefore(c.cloneNode(1), elt)
elt.parentNode.removeChild(elt)
elt = None
# Grabs all the declared namespaces of the included document
for name, value in included.documentElement.attributes.items():
if name.startswith('xmlns:'):
namespaces[name] = value
else:
previous = elt
elt = next_element(previous)
# Makes sure the final document declares all the namespaces of the included documents.
for k,v in namespaces.items():
doc.documentElement.setAttribute(k, v)
# Returns a dictionary: { macro_name => macro_xml_block }
def grab_macros(doc):
macros = {}
previous = doc.documentElement
elt = next_element(previous)
while elt:
if elt.tagName == 'macro' or elt.tagName == 'xacro:macro':
name = elt.getAttribute('name')
macros[name] = elt
macros['xacro:' + name] = elt
elt.parentNode.removeChild(elt)
elt = None
else:
previous = elt
elt = next_element(previous)
return macros
# Returns a Table of the properties
def grab_properties(doc):
table = Table()
previous = doc.documentElement
elt = next_element(previous)
while elt:
if elt.tagName == 'property' or elt.tagName == 'xacro:property':
name = elt.getAttribute('name')
value = None
if elt.hasAttribute('value'):
value = elt.getAttribute('value')
else:
name = '**' + name
value = elt #debug
bad = string.whitespace + "${}"
has_bad = False
for b in bad:
if b in name:
has_bad = True
break
if has_bad:
sys.stderr.write('Property names may not have whitespace, ' +
'"{", "}", or "$" : "' + name + '"')
else:
table[name] = value
elt.parentNode.removeChild(elt)
elt = None
else:
previous = elt
elt = next_element(previous)
return table
def eat_ignore(lex):
while lex.peek() and lex.peek()[0] == lex.IGNORE:
lex.next()
def eval_lit(lex, symbols):
eat_ignore(lex)
if lex.peek()[0] == lex.NUMBER:
return float(lex.next()[1])
if lex.peek()[0] == lex.SYMBOL:
try:
value = symbols[lex.next()[1]]
except KeyError, ex:
#sys.stderr.write("Could not find symbol: %s\n" % str(ex))
raise XacroException("Property wasn't defined: %s" % str(ex))
if not (isnumber(value) or isinstance(value,(str,unicode))):
            print [value], isinstance(value, str), type(value)
            raise XacroException(
                "Property value is not a number or string: %s" % str(value))
try:
return int(value)
except:
try:
return float(value)
except:
return value
raise XacroException("Bad literal")
def eval_factor(lex, symbols):
eat_ignore(lex)
neg = 1;
if lex.peek()[1] == '-':
lex.next()
neg = -1
if lex.peek()[0] in [lex.NUMBER, lex.SYMBOL]:
return neg * eval_lit(lex, symbols)
if lex.peek()[0] == lex.LPAREN:
lex.next()
eat_ignore(lex)
result = eval_expr(lex, symbols)
eat_ignore(lex)
if lex.next()[0] != lex.RPAREN:
raise XacroException("Unmatched left paren")
eat_ignore(lex)
return neg * result
raise XacroException("Misplaced operator")
def eval_term(lex, symbols):
eat_ignore(lex)
result = 0
if lex.peek()[0] in [lex.NUMBER, lex.SYMBOL, lex.LPAREN] \
or lex.peek()[1] == '-':
result = eval_factor(lex, symbols)
eat_ignore(lex)
while lex.peek() and lex.peek()[1] in ['*', '/']:
op = lex.next()[1]
n = eval_factor(lex, symbols)
if op == '*':
result = float(result) * float(n)
elif op == '/':
result = float(result) / float(n)
else:
raise XacroException("WTF")
eat_ignore(lex)
return result
def eval_expr(lex, symbols):
eat_ignore(lex)
op = None
if lex.peek()[0] == lex.OP:
op = lex.next()[1]
if not op in ['+', '-']:
raise XacroException("Invalid operation. Must be '+' or '-'")
result = eval_term(lex, symbols)
if op == '-':
result = -float(result)
eat_ignore(lex)
while lex.peek() and lex.peek()[1] in ['+', '-']:
op = lex.next()[1]
n = eval_term(lex, symbols)
if op == '+':
result = float(result) + float(n)
if op == '-':
result = float(result) - float(n)
eat_ignore(lex)
return result
def eval_text(text, symbols):
def handle_expr(s):
lex = QuickLexer(IGNORE = r"\s+",
NUMBER = r"(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?",
SYMBOL = r"[a-zA-Z_]\w*",
OP = r"[\+\-\*/^]",
LPAREN = r"\(",
RPAREN = r"\)")
lex.lex(s)
return eval_expr(lex, symbols)
def handle_extension(s):
return eval_extension("$(%s)" % s)
results = []
lex = QuickLexer(DOLLAR_DOLLAR_BRACE = r"\$\$+\{",
EXPR = r"\$\{[^\}]*\}",
EXTENSION = r"\$\([^\)]*\)",
TEXT = r"([^\$]|\$[^{(]|\$$)+")
lex.lex(text)
while lex.peek():
if lex.peek()[0] == lex.EXPR:
results.append(handle_expr(lex.next()[1][2:-1]))
elif lex.peek()[0] == lex.EXTENSION:
results.append(handle_extension(lex.next()[1][2:-1]))
elif lex.peek()[0] == lex.TEXT:
results.append(lex.next()[1])
elif lex.peek()[0] == lex.DOLLAR_DOLLAR_BRACE:
results.append(lex.next()[1][1:])
return ''.join(map(str, results))
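# Editorial sketch (hedged): eval_text stitches literal text, ${...} blocks
# evaluated against the symbol table, and $(...) extensions back together.
# Arithmetic runs through floats, so integer-looking results render as floats:
def _example_eval_text():
    return eval_text("width is ${2 * r}", {'r': 3})  # roughly "width is 6.0"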
# Expands macros, replaces properties, and evaluates expressions
def eval_all(root, macros, symbols):
# Evaluates the attributes for the root node
for at in root.attributes.items():
result = eval_text(at[1], symbols)
root.setAttribute(at[0], result)
previous = root
node = next_node(previous)
while node:
if node.nodeType == xml.dom.Node.ELEMENT_NODE:
if node.tagName in macros:
body = macros[node.tagName].cloneNode(deep = True)
params = body.getAttribute('params').split()
# Expands the macro
scoped = Table(symbols)
for name,value in node.attributes.items():
if not name in params:
raise XacroException("Invalid parameter \"%s\" while expanding macro \"%s\"" % \
(str(name), str(node.tagName)))
params.remove(name)
scoped[name] = eval_text(value, symbols)
# Pulls out the block arguments, in order
cloned = node.cloneNode(deep = True)
eval_all(cloned, macros, symbols)
block = cloned.firstChild
for param in params[:]:
if param[0] == '*':
while block and block.nodeType != xml.dom.Node.ELEMENT_NODE:
block = block.nextSibling
if not block:
raise XacroException("Not enough blocks while evaluating macro %s" % str(node.tagName))
params.remove(param)
scoped[param] = block
block = block.nextSibling
if params:
raise XacroException("Some parameters were not set for macro %s" % \
str(node.tagName))
eval_all(body, macros, scoped)
# Replaces the macro node with the expansion
for e in list(child_elements(body)): # Ew
node.parentNode.insertBefore(e, node)
node.parentNode.removeChild(node)
node = None
elif node.tagName == 'insert_block' or node.tagName == 'xacro:insert_block':
name = node.getAttribute('name')
if ("**" + name) in symbols:
# Multi-block
block = symbols['**' + name]
for e in list(child_elements(block)):
node.parentNode.insertBefore(e.cloneNode(deep=True), node)
node.parentNode.removeChild(node)
elif ("*" + name) in symbols:
# Single block
block = symbols['*' + name]
node.parentNode.insertBefore(block.cloneNode(deep=True), node)
node.parentNode.removeChild(node)
else:
raise XacroException("Block \"%s\" was never declared" % name)
node = None
else:
# Evals the attributes
for at in node.attributes.items():
result = eval_text(at[1], symbols)
node.setAttribute(at[0], result)
previous = node
elif node.nodeType == xml.dom.Node.TEXT_NODE:
node.data = eval_text(node.data, symbols)
previous = node
else:
previous = node
node = next_node(previous)
return macros
# Expands everything except includes
def eval_self_contained(doc):
macros = grab_macros(doc)
symbols = grab_properties(doc)
eval_all(doc.documentElement, macros, symbols)
def print_usage(exit_code = 0):
print "Usage: %s [-o <output>] <input>" % 'xacro.py'
print " %s --deps Prints dependencies" % 'xacro.py'
print " %s --includes Only evalutes includes" % 'xacro.py'
sys.exit(exit_code)
def main():
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "ho:", ['deps', 'includes'])
except getopt.GetoptError, err:
print str(err)
print_usage(2)
just_deps = False
just_includes = False
output = sys.stdout
for o, a in opts:
if o == '-h':
print_usage(0)
elif o == '-o':
output = open(a, 'w')
elif o == '--deps':
just_deps = True
elif o == '--includes':
just_includes = True
if len(args) < 1:
print "No input given"
print_usage(2)
f = open(args[0])
doc = None
try:
doc = parse(f)
except xml.parsers.expat.ExpatError:
sys.stderr.write("Expat parsing error. Check that:\n")
sys.stderr.write(" - Your XML is correctly formed\n")
sys.stderr.write(" - You have the xacro xmlns declaration: " +
"xmlns:xacro=\"http://www.ros.org/wiki/xacro\"\n")
sys.stderr.write("\n")
raise
finally:
f.close()
process_includes(doc, os.path.dirname(sys.argv[1]))
if just_deps:
for inc in all_includes:
sys.stdout.write(inc + " ")
sys.stdout.write("\n")
elif just_includes:
doc.writexml(output)
print
else:
eval_self_contained(doc)
banner = [xml.dom.minidom.Comment(c) for c in
[" %s " % ('='*83),
" | This document was autogenerated by xacro from %-30s | " % args[0],
" | EDITING THIS FILE BY HAND IS NOT RECOMMENDED %-30s | " % "",
" %s " % ('='*83)]]
first = doc.firstChild
for comment in banner:
doc.insertBefore(comment, first)
output.write(doc.toprettyxml(indent = ' '))
#doc.writexml(output, newl = "\n")
print
if __name__ == '__main__':
    main()
| bsd-3-clause |
toshywoshy/ansible | lib/ansible/modules/network/fortios/fortios_firewall_ldb_monitor.py | 7 | 12613 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_ldb_monitor
short_description: Configure server load balancing health monitors in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and ldb_monitor category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
              This attribute was already present in previous versions at a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_ldb_monitor:
description:
- Configure server load balancing health monitors.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
http_get:
description:
- URL used to send a GET request to check the health of an HTTP server.
type: str
http_match:
description:
- String to match the value expected in response to an HTTP-GET request.
type: str
http_max_redirects:
description:
- The maximum number of HTTP redirects to be allowed (0 - 5).
type: int
interval:
description:
                - Time between health checks (5 - 65535 sec).
type: int
name:
description:
- Monitor name.
required: true
type: str
port:
description:
                - Service port used to perform the health check. If 0, health check monitor inherits port configured for the server (0 - 65535).
type: int
retry:
description:
                - Number of health check attempts before the server is considered down (1 - 255).
type: int
timeout:
description:
- Time to wait to receive response to a health check from a server. Reaching the timeout means the health check failed (1 - 255 sec).
type: int
type:
description:
- Select the Monitor type used by the health check monitor to check the health of the server (PING | TCP | HTTP).
type: str
choices:
- ping
- tcp
- http
- passive-sip
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure server load balancing health monitors.
fortios_firewall_ldb_monitor:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_ldb_monitor:
http_get: "<your_own_value>"
http_match: "<your_own_value>"
http_max_redirects: "5"
interval: "6"
name: "default_name_7"
port: "8"
retry: "9"
timeout: "10"
type: "ping"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_ldb_monitor_data(json):
option_list = ['http_get', 'http_match', 'http_max_redirects',
'interval', 'name', 'port',
'retry', 'timeout', 'type']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
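# Illustrative sketch (not executed by the module): the filter keeps only the
# ldb-monitor options that were actually supplied, e.g.
#   filter_firewall_ldb_monitor_data({'name': 'mon1', 'port': 80,
#                                     'retry': None, 'extra': 1})
#   returns {'name': 'mon1', 'port': 80}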
def underscore_to_hyphen(data):
if isinstance(data, list):
for i, elem in enumerate(data):
data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
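# Illustrative sketch: keys are renamed recursively so Ansible-style option
# names match the hyphenated FortiOS API fields, e.g.
#   underscore_to_hyphen({'http_get': '/', 'monitor': {'http_match': 'ok'}})
#   returns {'http-get': '/', 'monitor': {'http-match': 'ok'}}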
def firewall_ldb_monitor(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
    elif data['firewall_ldb_monitor'] and 'state' in data['firewall_ldb_monitor']:
state = data['firewall_ldb_monitor']['state']
else:
state = True
firewall_ldb_monitor_data = data['firewall_ldb_monitor']
filtered_data = underscore_to_hyphen(filter_firewall_ldb_monitor_data(firewall_ldb_monitor_data))
if state == "present":
return fos.set('firewall',
'ldb-monitor',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'ldb-monitor',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_firewall(data, fos):
if data['firewall_ldb_monitor']:
resp = firewall_ldb_monitor(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_ldb_monitor": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"http_get": {"required": False, "type": "str"},
"http_match": {"required": False, "type": "str"},
"http_max_redirects": {"required": False, "type": "int"},
"interval": {"required": False, "type": "int"},
"name": {"required": True, "type": "str"},
"port": {"required": False, "type": "int"},
"retry": {"required": False, "type": "int"},
"timeout": {"required": False, "type": "int"},
"type": {"required": False, "type": "str",
"choices": ["ping", "tcp", "http",
"passive-sip"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
wfxiang08/django190 | django/contrib/gis/gdal/__init__.py | 130 | 2610 | """
This module houses ctypes interfaces for GDAL objects. The following GDAL
objects are supported:
CoordTransform: Used for coordinate transformations from one spatial
reference system to another.
Driver: Wraps an OGR data source driver.
DataSource: Wrapper for the OGR data source object, supports
OGR-supported data sources.
Envelope: A ctypes structure for bounding boxes (GDAL library
not required).
OGRGeometry: Object for accessing OGR Geometry functionality.
OGRGeomType: A class for representing the different OGR Geometry
types (GDAL library not required).
SpatialReference: Represents OSR Spatial Reference objects.
The GDAL library will be imported from the system path using the default
library name for the current OS. The default library path may be overridden
by setting `GDAL_LIBRARY_PATH` in your settings with the path to the GDAL C
library on your system.
GDAL links to a large number of external libraries that consume RAM when
loaded. Thus, it may be desirable to disable GDAL on systems with limited
RAM resources -- this may be accomplished by setting `GDAL_LIBRARY_PATH`
to a non-existent file location (e.g., `GDAL_LIBRARY_PATH='/null/path'`;
setting to None/False/'' will not work as a string must be given).
"""
from django.contrib.gis.gdal.error import (check_err, GDALException,
OGRException, OGRIndexError, SRSException) # NOQA
from django.contrib.gis.gdal.geomtype import OGRGeomType # NOQA
__all__ = [
'check_err', 'GDALException', 'OGRException', 'OGRIndexError',
'SRSException', 'OGRGeomType', 'HAS_GDAL',
]
# Attempting to import objects that depend on the GDAL library. The
# HAS_GDAL flag will be set to True if the library is present on
# the system.
try:
from django.contrib.gis.gdal.driver import Driver # NOQA
from django.contrib.gis.gdal.datasource import DataSource # NOQA
from django.contrib.gis.gdal.libgdal import gdal_version, gdal_full_version, GDAL_VERSION # NOQA
from django.contrib.gis.gdal.raster.source import GDALRaster # NOQA
from django.contrib.gis.gdal.srs import SpatialReference, CoordTransform # NOQA
from django.contrib.gis.gdal.geometries import OGRGeometry # NOQA
HAS_GDAL = True
__all__ += [
'Driver', 'DataSource', 'gdal_version', 'gdal_full_version',
'GDAL_VERSION', 'SpatialReference', 'CoordTransform', 'OGRGeometry',
]
except GDALException:
HAS_GDAL = False
try:
from django.contrib.gis.gdal.envelope import Envelope
__all__ += ['Envelope']
except ImportError:
# No ctypes, but don't raise an exception.
pass
| bsd-3-clause |
double-y/django | django/conf/locale/mk/formats.py | 504 | 1742 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
]
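# Illustrative check (standard strptime semantics, shown for clarity): the
# first date input format above parses a plain dotted date, e.g.
#   datetime.strptime('25.10.2006', '%d.%m.%Y').date() == date(2006, 10, 25)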
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
pombredanne/python-ptrace | examples/itrace.py | 1 | 1770 | #!/usr/bin/env python
"""
Here is a tool which I have been using to debug libc startup code where I
didn't find gdb very helpful. It single steps the process and prints each
instruction pointer address. To go faster, it allows a number of syscalls to
run before starting single-stepping.
It's possible to pipe the addresses through addr2line to get a very
simple tracing debugger. :-)
I couldn't see a way to catch syscalls and single step at the same
time. As a consequence the tool can't handle multiple threads.
Mark
"""
import signal
from ptrace.debugger import ProcessExit, ProcessSignal
import strace
class Tracer(strace.SyscallTracer):
def createCommonOptions(self, parser):
parser.add_option(
"-n", dest="syscall_limit", type="int", default=None,
help="Number of syscalls before switching to single step")
super(Tracer, self).createCommonOptions(parser)
def syscallTrace(self, process):
syscall_limit = self.options.syscall_limit
i = 0
while i < syscall_limit or syscall_limit is None:
print i
i += 1
process.syscall()
self.debugger.waitSyscall()
i = 0
while self.debugger:
eip = process.getInstrPointer()
print i, process.pid, "[%08x]" % eip
i += 1
process.singleStep()
event = self.debugger.waitProcessEvent()
if isinstance(event, ProcessExit):
print "process exit"
return
if (isinstance(event, ProcessSignal) and
event.signum & ~128 != signal.SIGTRAP):
print "died with signal %i" % event.signum
return
if __name__ == "__main__":
Tracer().main()
| gpl-2.0 |
maaaks/andreas | andreas/commands/dbcommands.py | 1 | 1291 | from typing import List, Type
from andreas.db.database import db
from andreas.db.model import Model
from andreas.models.event import Event
from andreas.models.keypair import KeyPair
from andreas.models.post import Post
from andreas.models.relations import PostPostRelation, UserPostRelation
from andreas.models.server import Server
from andreas.models.signature import Signature, UnverifiedSignature
from andreas.models.user import User
models: List[Type[Model]] = [
Event,
KeyPair,
Post,
PostPostRelation,
Server,
Signature,
UnverifiedSignature,
User,
UserPostRelation,
]
@db.atomic()
def updatedb():
db.create_tables(models)
@db.atomic()
def populatedb():
updatedb()
# Local server
if not Server.select().where(Server.is_local).count():
server = Server()
server.is_local = True
server.name = 'localhost'
server.engine_name = 'Andreas'
server.engine_version = '0.0.1'
server.save()
# Local user
if not User.select().count():
user = User()
user.server = Server.get(Server.is_local)
user.name = 'root'
user.save()
@db.atomic()
def dropdb():
db.drop_tables(models, safe=True)
@db.atomic()
def resetdb():
dropdb()
populatedb() | mit |
aliyun/oss-ftp | python27/win32/Lib/distutils/tests/test_register.py | 39 | 8839 | # -*- encoding: utf8 -*-
"""Tests for distutils.command.register."""
import os
import unittest
import getpass
import urllib2
import warnings
from test.test_support import check_warnings, run_unittest
from distutils.command import register as register_module
from distutils.command.register import register
from distutils.errors import DistutilsSetupError
from distutils.tests.test_config import PyPIRCCommandTestCase
try:
import docutils
except ImportError:
docutils = None
PYPIRC_NOPASSWORD = """\
[distutils]
index-servers =
server1
[server1]
username:me
"""
WANTED_PYPIRC = """\
[distutils]
index-servers =
pypi
[pypi]
username:tarek
password:password
"""
class RawInputs(object):
"""Fakes user inputs."""
def __init__(self, *answers):
self.answers = answers
self.index = 0
def __call__(self, prompt=''):
try:
return self.answers[self.index]
finally:
self.index += 1
class FakeOpener(object):
"""Fakes a PyPI server"""
def __init__(self):
self.reqs = []
def __call__(self, *args):
return self
def open(self, req):
self.reqs.append(req)
return self
def read(self):
return 'xxx'
class RegisterTestCase(PyPIRCCommandTestCase):
def setUp(self):
super(RegisterTestCase, self).setUp()
# patching the password prompt
self._old_getpass = getpass.getpass
def _getpass(prompt):
return 'password'
getpass.getpass = _getpass
self.old_opener = urllib2.build_opener
self.conn = urllib2.build_opener = FakeOpener()
def tearDown(self):
getpass.getpass = self._old_getpass
urllib2.build_opener = self.old_opener
super(RegisterTestCase, self).tearDown()
def _get_cmd(self, metadata=None):
if metadata is None:
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx'}
pkg_info, dist = self.create_dist(**metadata)
return register(dist)
def test_create_pypirc(self):
# this test makes sure a .pypirc file
# is created when requested.
# let's create a register instance
cmd = self._get_cmd()
# we shouldn't have a .pypirc file yet
self.assertFalse(os.path.exists(self.rc))
# patching raw_input and getpass.getpass
# so register gets happy
#
        # Here's what we are faking:
# use your existing login (choice 1.)
# Username : 'tarek'
# Password : 'password'
# Save your login (y/N)? : 'y'
inputs = RawInputs('1', 'tarek', 'y')
register_module.raw_input = inputs.__call__
# let's run the command
try:
cmd.run()
finally:
del register_module.raw_input
# we should have a brand new .pypirc file
self.assertTrue(os.path.exists(self.rc))
# with the content similar to WANTED_PYPIRC
f = open(self.rc)
try:
content = f.read()
self.assertEqual(content, WANTED_PYPIRC)
finally:
f.close()
# now let's make sure the .pypirc file generated
        # really works: we shouldn't be asked anything
# if we run the command again
def _no_way(prompt=''):
raise AssertionError(prompt)
register_module.raw_input = _no_way
cmd.show_response = 1
cmd.run()
        # let's see what the server received: we should
# have 2 similar requests
self.assertEqual(len(self.conn.reqs), 2)
req1 = dict(self.conn.reqs[0].headers)
req2 = dict(self.conn.reqs[1].headers)
self.assertEqual(req2['Content-length'], req1['Content-length'])
self.assertIn('xxx', self.conn.reqs[1].data)
def test_password_not_in_file(self):
self.write_file(self.rc, PYPIRC_NOPASSWORD)
cmd = self._get_cmd()
cmd._set_config()
cmd.finalize_options()
cmd.send_metadata()
# dist.password should be set
# therefore used afterwards by other commands
self.assertEqual(cmd.distribution.password, 'password')
def test_registering(self):
# this test runs choice 2
cmd = self._get_cmd()
inputs = RawInputs('2', 'tarek', '[email protected]')
register_module.raw_input = inputs.__call__
try:
# let's run the command
cmd.run()
finally:
del register_module.raw_input
        # we should have sent a request
self.assertEqual(len(self.conn.reqs), 1)
req = self.conn.reqs[0]
headers = dict(req.headers)
self.assertEqual(headers['Content-length'], '608')
self.assertIn('tarek', req.data)
def test_password_reset(self):
# this test runs choice 3
cmd = self._get_cmd()
inputs = RawInputs('3', '[email protected]')
register_module.raw_input = inputs.__call__
try:
# let's run the command
cmd.run()
finally:
del register_module.raw_input
        # we should have sent a request
self.assertEqual(len(self.conn.reqs), 1)
req = self.conn.reqs[0]
headers = dict(req.headers)
self.assertEqual(headers['Content-length'], '290')
self.assertIn('tarek', req.data)
@unittest.skipUnless(docutils is not None, 'needs docutils')
def test_strict(self):
# testing the script option
# when on, the register command stops if
# the metadata is incomplete or if
# long_description is not reSt compliant
# empty metadata
cmd = self._get_cmd({})
cmd.ensure_finalized()
cmd.strict = 1
self.assertRaises(DistutilsSetupError, cmd.run)
# metadata are OK but long_description is broken
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': u'éxéxé',
'name': 'xxx', 'version': 'xxx',
'long_description': 'title\n==\n\ntext'}
cmd = self._get_cmd(metadata)
cmd.ensure_finalized()
cmd.strict = 1
self.assertRaises(DistutilsSetupError, cmd.run)
# now something that works
metadata['long_description'] = 'title\n=====\n\ntext'
cmd = self._get_cmd(metadata)
cmd.ensure_finalized()
cmd.strict = 1
inputs = RawInputs('1', 'tarek', 'y')
register_module.raw_input = inputs.__call__
# let's run the command
try:
cmd.run()
finally:
del register_module.raw_input
        # strict is off by default
cmd = self._get_cmd()
cmd.ensure_finalized()
inputs = RawInputs('1', 'tarek', 'y')
register_module.raw_input = inputs.__call__
# let's run the command
try:
cmd.run()
finally:
del register_module.raw_input
# and finally a Unicode test (bug #12114)
metadata = {'url': u'xxx', 'author': u'\u00c9ric',
'author_email': u'xxx', u'name': 'xxx',
'version': u'xxx',
'description': u'Something about esszet \u00df',
'long_description': u'More things about esszet \u00df'}
cmd = self._get_cmd(metadata)
cmd.ensure_finalized()
cmd.strict = 1
inputs = RawInputs('1', 'tarek', 'y')
register_module.raw_input = inputs.__call__
# let's run the command
try:
cmd.run()
finally:
del register_module.raw_input
@unittest.skipUnless(docutils is not None, 'needs docutils')
def test_register_invalid_long_description(self):
description = ':funkie:`str`' # mimic Sphinx-specific markup
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx',
'long_description': description}
cmd = self._get_cmd(metadata)
cmd.ensure_finalized()
cmd.strict = True
inputs = RawInputs('2', 'tarek', '[email protected]')
register_module.raw_input = inputs
self.addCleanup(delattr, register_module, 'raw_input')
self.assertRaises(DistutilsSetupError, cmd.run)
def test_check_metadata_deprecated(self):
# makes sure make_metadata is deprecated
cmd = self._get_cmd()
with check_warnings() as w:
warnings.simplefilter("always")
cmd.check_metadata()
self.assertEqual(len(w.warnings), 1)
def test_suite():
return unittest.makeSuite(RegisterTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| mit |
Adriwr/Clinica | vendor/doctrine/orm/docs/en/_exts/configurationblock.py | 2577 | 3506 | #Copyright (c) 2010 Fabien Potencier
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from docutils.parsers.rst import Directive, directives
from docutils import nodes
from string import upper
class configurationblock(nodes.General, nodes.Element):
pass
class ConfigurationBlock(Directive):
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
option_spec = {}
formats = {
'html': 'HTML',
'xml': 'XML',
'php': 'PHP',
'yaml': 'YAML',
'jinja': 'Twig',
'html+jinja': 'Twig',
'jinja+html': 'Twig',
'php+html': 'PHP',
'html+php': 'PHP',
'ini': 'INI',
'php-annotations': 'Annotations',
}
def run(self):
env = self.state.document.settings.env
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
entries = []
for i, child in enumerate(node):
if isinstance(child, nodes.literal_block):
# add a title (the language name) before each block
#targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
#targetnode = nodes.target('', '', ids=[targetid])
#targetnode.append(child)
innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])
para = nodes.paragraph()
para += [innernode, child]
entry = nodes.list_item('')
entry.append(para)
entries.append(entry)
resultnode = configurationblock()
resultnode.append(nodes.bullet_list('', *entries))
return [resultnode]
def visit_configurationblock_html(self, node):
self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))
def depart_configurationblock_html(self, node):
self.body.append('</div>\n')
def visit_configurationblock_latex(self, node):
pass
def depart_configurationblock_latex(self, node):
pass
def setup(app):
app.add_node(configurationblock,
html=(visit_configurationblock_html, depart_configurationblock_html),
latex=(visit_configurationblock_latex, depart_configurationblock_latex))
app.add_directive('configuration-block', ConfigurationBlock)
| mit |
laborautonomo/opps | opps/views/generic/json_views.py | 1 | 2941 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from django.http import HttpResponse
from django.views.generic import TemplateView
from django.views.generic.detail import BaseDetailView
from django.views.generic.detail import SingleObjectTemplateResponseMixin
def response_mimetype(request):
if "application/json" in request.META['HTTP_ACCEPT']:
return "application/json"
return "text/plain"
class JSONResponse(HttpResponse):
"""JSON response class."""
def __init__(self, obj='', json_opts={}, mimetype="application/json",
*args, **kwargs):
content = json.dumps(obj, **json_opts)
super(JSONResponse, self).__init__(content, mimetype, *args, **kwargs)
class JSONPResponse(HttpResponse):
"""JSONP response class."""
def __init__(self, obj='', json_opts={}, mimetype="application/jsonp",
jsonp_callback='jsonpCallback', *args, **kwargs):
_json_content = json.dumps(obj, **json_opts)
content = "{}({})".format(jsonp_callback, _json_content)
super(JSONPResponse, self).__init__(content, mimetype, *args,
**kwargs)
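# Minimal usage sketch (illustrative only; the 'callback' query parameter is an
# assumption): both classes are ordinary Django responses, so a view may return
# them directly:
#   def my_view(request):
#       data = {'ok': True}
#       if 'callback' in request.GET:
#           return JSONPResponse(data, jsonp_callback=request.GET['callback'])
#       return JSONResponse(data, mimetype=response_mimetype(request))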
class JSONResponseMixin(object):
"""
A mixin that can be used to render a JSON response.
"""
HEADERS = {}
def render_to_json_response(self, context, **response_kwargs):
"""
Returns a JSON response, transforming 'context' to make the payload.
"""
response = HttpResponse(
self.convert_context_to_json(context),
content_type='application/json',
**response_kwargs
)
for key, value in self.HEADERS.items():
response[key] = value
return response
def convert_context_to_json(self, context):
"Convert the context dictionary into a JSON object"
# Note: This is *EXTREMELY* naive; in reality, you'll need
# to do much more complex handling to ensure that arbitrary
# objects -- such as Django model instances or querysets
# -- can be serialized as JSON.
return json.dumps(context)
class JSONView(JSONResponseMixin, TemplateView):
def render_to_response(self, context, **response_kwargs):
return self.render_to_json_response(context, **response_kwargs)
class JSONDetailView(JSONResponseMixin, BaseDetailView):
def render_to_response(self, context, **response_kwargs):
return self.render_to_json_response(context, **response_kwargs)
class HybridDetailView(JSONResponseMixin,
SingleObjectTemplateResponseMixin,
BaseDetailView):
def render_to_response(self, context):
# Look for a 'format=json' GET argument
if self.request.GET.get('format') == 'json':
return self.render_to_json_response(context)
else:
return super(HybridDetailView, self).render_to_response(context)
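# Usage sketch (illustrative URLs): with HybridDetailView wired to a model, the
# same endpoint serves both renderings:
#   GET /post/1/              -> normal template response
#   GET /post/1/?format=json  -> JSON payload via render_to_json_response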
| mit |
AntonioJBT/CGATPipeline_core | CGATPipelines/Pipeline/Utils.py | 2 | 2223 | """Utils.py - Utilities for ruffus pipelines
============================================
Reference
---------
"""
import inspect
import sys
def isTest():
"""return True if the pipeline is run in a "testing" mode.
This method checks if ``-is-test`` has been given as a
command line option.
"""
return "--is-test" in sys.argv
def getCallerLocals(decorators=0):
'''returns the locals of the calling function.
from http://pylab.blogspot.com/2009/02/python-accessing-caller-locals-from.html
Arguments
---------
decorators : int
Number of contexts to go up to reach calling function
of interest.
Returns
-------
locals : dict
Dictionary of variable defined in the context of the
calling function.
'''
f = sys._getframe(2 + decorators)
args = inspect.getargvalues(f)
return args[3]
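# Illustrative sketch: a helper can inspect the locals of its caller's caller,
# e.g.
#   def report():
#       print(getCallerLocals())    # prints {'x': 1} when invoked as below
#   def work():
#       x = 1
#       report()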
def getCaller(decorators=0):
"""return the name of the calling module.
Arguments
---------
decorators : int
Number of contexts to go up to reach calling function
of interest.
Returns
-------
mod : object
The calling module
"""
frm = inspect.stack()[2 + decorators]
mod = inspect.getmodule(frm[0])
return mod
def add_doc(value, replace=False):
"""add doc string of value to function that is decorated.
The original doc-string is added as the first paragraph(s)
inside the new doc-string.
    Parameters
    ----------
replace : bool
If True, replace documentation rather than appending
"""
def _doc(func):
if func.__doc__:
lines = value.__doc__.split("\n")
for x, line in enumerate(lines):
if line.strip() == "":
break
            # insert appropriate indentation
# currently hard-coded, can be derived
# from doc string?
if not replace:
lines.insert(x+1, " " * 4 +
func.__doc__)
func.__doc__ = "\n".join(lines)
else:
func.__doc__ = value.__doc__
else:
func.__doc__ = value.__doc__
return func
return _doc
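# Usage sketch (illustrative): add_doc rebuilds the decorated function's
# docstring from `template`, splicing the original docstring in after the
# first paragraph (or overwriting it entirely when replace=True):
#   def template():
#       """Shared documentation.
#
#       Details follow here."""
#   @add_doc(template)
#   def implementation():
#       """Extra notes specific to this implementation."""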
| mit |
joshmoore/zeroc-ice | java/test/Ice/operations/run.py | 1 | 1403 | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2011 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
raise "can't find toplevel directory!"
sys.path.append(os.path.join(path[0]))
from scripts import *
print "tests with regular server."
TestUtil.clientServerTest(additionalClientOptions = "--Ice.Warn.AMICallback=0")
print "tests with AMD server."
TestUtil.clientServerTest(additionalClientOptions = "--Ice.Warn.AMICallback=0", server="test.Ice.operations.AMDServer")
print "tests with TIE server."
TestUtil.clientServerTest(additionalClientOptions = "--Ice.Warn.AMICallback=0", server="test.Ice.operations.TieServer")
print "tests with AMD TIE server."
TestUtil.clientServerTest(additionalClientOptions = "--Ice.Warn.AMICallback=0", server="test.Ice.operations.AMDTieServer")
print "tests with collocated server."
TestUtil.collocatedTest()
| gpl-2.0 |
EliteTK/qutebrowser | qutebrowser/browser/webelem.py | 2 | 12858 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Generic web element related code.
Module attributes:
Group: Enum for different kinds of groups.
SELECTORS: CSS selectors for different groups of elements.
FILTERS: A dictionary of filter functions for the modes.
The filter for "links" filters javascript:-links and a-tags
without "href".
"""
import collections.abc
from PyQt5.QtCore import QUrl, Qt, QEvent, QTimer
from PyQt5.QtGui import QMouseEvent
from qutebrowser.config import config
from qutebrowser.utils import log, usertypes, utils, qtutils
Group = usertypes.enum('Group', ['all', 'links', 'images', 'url', 'prevnext',
'inputs'])
SELECTORS = {
Group.all: ('a, area, textarea, select, input:not([type=hidden]), button, '
'frame, iframe, link, [onclick], [onmousedown], [role=link], '
'[role=option], [role=button], img'),
Group.links: 'a, area, link, [role=link]',
Group.images: 'img',
Group.url: '[src], [href]',
Group.prevnext: 'a, area, button, link, [role=button]',
Group.inputs: ('input[type=text], input[type=email], input[type=url], '
'input[type=tel], input[type=number], '
'input[type=password], input[type=search], '
'input:not([type]), textarea'),
}
def filter_links(elem):
return 'href' in elem and QUrl(elem['href']).scheme() != 'javascript'
FILTERS = {
Group.links: filter_links,
Group.prevnext: filter_links,
}
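# Illustrative sketch (hypothetical helper, not part of this module): hinting
# code is expected to combine SELECTORS and FILTERS roughly like this:
#   def find_hintable(frame, group):
#       elems = frame.findAllElements(SELECTORS[group])   # QtWebKit-style API
#       keep = FILTERS.get(group, lambda e: True)
#       return [e for e in elems if keep(e)]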
class Error(Exception):
"""Base class for WebElement errors."""
pass
class AbstractWebElement(collections.abc.MutableMapping):
"""A wrapper around QtWebKit/QtWebEngine web element.
Attributes:
tab: The tab associated with this element.
"""
def __init__(self, tab):
self._tab = tab
def __eq__(self, other):
raise NotImplementedError
def __str__(self):
return self.text()
def __getitem__(self, key):
raise NotImplementedError
def __setitem__(self, key, val):
raise NotImplementedError
def __delitem__(self, key):
raise NotImplementedError
def __iter__(self):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __repr__(self):
try:
html = utils.compact_text(self.outer_xml(), 500)
except Error:
html = None
return utils.get_repr(self, html=html)
def has_frame(self):
"""Check if this element has a valid frame attached."""
raise NotImplementedError
def geometry(self):
"""Get the geometry for this element."""
raise NotImplementedError
def style_property(self, name, *, strategy):
"""Get the element style resolved with the given strategy."""
raise NotImplementedError
def classes(self):
"""Get a list of classes assigned to this element."""
raise NotImplementedError
def tag_name(self):
"""Get the tag name of this element.
The returned name will always be lower-case.
"""
raise NotImplementedError
def outer_xml(self):
"""Get the full HTML representation of this element."""
raise NotImplementedError
def text(self, *, use_js=False):
"""Get the plain text content for this element.
Args:
use_js: Whether to use javascript if the element isn't
content-editable.
"""
# FIXME:qtwebengine what to do about use_js with WebEngine?
raise NotImplementedError
def set_text(self, text, *, use_js=False):
"""Set the given plain text.
Args:
use_js: Whether to use javascript if the element isn't
content-editable.
"""
# FIXME:qtwebengine what to do about use_js with WebEngine?
raise NotImplementedError
def insert_text(self, text):
"""Insert the given text into the element."""
raise NotImplementedError
def rect_on_view(self, *, elem_geometry=None, no_js=False):
"""Get the geometry of the element relative to the webview.
Uses the getClientRects() JavaScript method to obtain the collection of
rectangles containing the element and returns the first rectangle which
is large enough (larger than 1px times 1px). If all rectangles returned
by getClientRects() are too small, falls back to elem.rect_on_view().
Skipping of small rectangles is due to <a> elements containing other
elements with "display:block" style, see
https://github.com/The-Compiler/qutebrowser/issues/1298
Args:
elem_geometry: The geometry of the element, or None.
Calling QWebElement::geometry is rather expensive so
we want to avoid doing it twice.
no_js: Fall back to the Python implementation
"""
raise NotImplementedError
def is_writable(self):
"""Check whether an element is writable."""
return not ('disabled' in self or 'readonly' in self)
def is_content_editable(self):
"""Check if an element has a contenteditable attribute.
Args:
elem: The QWebElement to check.
Return:
True if the element has a contenteditable attribute,
False otherwise.
"""
try:
return self['contenteditable'].lower() not in ['false', 'inherit']
except KeyError:
return False
def _is_editable_object(self):
"""Check if an object-element is editable."""
if 'type' not in self:
log.webelem.debug("<object> without type clicked...")
return False
objtype = self['type'].lower()
if objtype.startswith('application/') or 'classid' in self:
# Let's hope flash/java stuff has an application/* mimetype OR
# at least a classid attribute. Oh, and let's hope images/...
# DON'T have a classid attribute. HTML sucks.
log.webelem.debug("<object type='{}'> clicked.".format(objtype))
return config.get('input', 'insert-mode-on-plugins')
else:
# Image/Audio/...
return False
def _is_editable_input(self):
"""Check if an input-element is editable.
Return:
True if the element is editable, False otherwise.
"""
try:
objtype = self['type'].lower()
except KeyError:
return self.is_writable()
else:
if objtype in ['text', 'email', 'url', 'tel', 'number', 'password',
'search']:
return self.is_writable()
else:
return False
def _is_editable_div(self):
"""Check if a div-element is editable.
Return:
True if the element is editable, False otherwise.
"""
# Beginnings of div-classes which are actually some kind of editor.
div_classes = ('CodeMirror', # Javascript editor over a textarea
'kix-', # Google Docs editor
'ace_') # http://ace.c9.io/
for klass in self.classes():
if any([klass.startswith(e) for e in div_classes]):
return True
return False
def is_editable(self, strict=False):
"""Check whether we should switch to insert mode for this element.
Args:
strict: Whether to do stricter checking so only fields where we can
get the value match, for use with the :editor command.
Return:
True if we should switch to insert mode, False otherwise.
"""
roles = ('combobox', 'textbox')
log.webelem.debug("Checking if element is editable: {}".format(
repr(self)))
tag = self.tag_name()
if self.is_content_editable() and self.is_writable():
return True
elif self.get('role', None) in roles and self.is_writable():
return True
elif tag == 'input':
return self._is_editable_input()
elif tag == 'textarea':
return self.is_writable()
elif tag in ['embed', 'applet']:
# Flash/Java/...
return config.get('input', 'insert-mode-on-plugins') and not strict
elif tag == 'object':
return self._is_editable_object() and not strict
elif tag == 'div':
return self._is_editable_div() and not strict
else:
return False
def is_text_input(self):
"""Check if this element is some kind of text box."""
roles = ('combobox', 'textbox')
tag = self.tag_name()
return self.get('role', None) in roles or tag in ['input', 'textarea']
def remove_blank_target(self):
"""Remove target from link."""
raise NotImplementedError
def resolve_url(self, baseurl):
"""Resolve the URL in the element's src/href attribute.
Args:
baseurl: The URL to base relative URLs on as QUrl.
Return:
A QUrl with the absolute URL, or None.
"""
if baseurl.isRelative():
raise ValueError("Need an absolute base URL!")
for attr in ['href', 'src']:
if attr in self:
text = self[attr].strip()
break
else:
return None
url = QUrl(text)
if not url.isValid():
return None
if url.isRelative():
url = baseurl.resolved(url)
qtutils.ensure_valid(url)
return url
def _mouse_pos(self):
"""Get the position to click/hover."""
# Click the center of the largest square fitting into the top/left
# corner of the rectangle, this will help if part of the <a> element
# is hidden behind other elements
# https://github.com/The-Compiler/qutebrowser/issues/1005
rect = self.rect_on_view()
if rect.width() > rect.height():
rect.setWidth(rect.height())
else:
rect.setHeight(rect.width())
pos = rect.center()
if pos.x() < 0 or pos.y() < 0:
raise Error("Element position is out of view!")
return pos
def click(self, click_target):
"""Simulate a click on the element."""
# FIXME:qtwebengine do we need this?
# self._widget.setFocus()
self._tab.data.override_target = click_target
pos = self._mouse_pos()
log.webelem.debug("Sending fake click to {!r} at position {} with "
"target {}".format(self, pos, click_target))
if click_target in [usertypes.ClickTarget.tab,
usertypes.ClickTarget.tab_bg,
usertypes.ClickTarget.window]:
modifiers = Qt.ControlModifier
else:
modifiers = Qt.NoModifier
events = [
QMouseEvent(QEvent.MouseMove, pos, Qt.NoButton, Qt.NoButton,
Qt.NoModifier),
QMouseEvent(QEvent.MouseButtonPress, pos, Qt.LeftButton,
Qt.LeftButton, modifiers),
QMouseEvent(QEvent.MouseButtonRelease, pos, Qt.LeftButton,
Qt.NoButton, modifiers),
]
for evt in events:
self._tab.send_event(evt)
def after_click():
"""Move cursor to end and reset override_target after clicking."""
if self.is_text_input() and self.is_editable():
self._tab.caret.move_to_end_of_document()
self._tab.data.override_target = None
QTimer.singleShot(0, after_click)
def hover(self):
"""Simulate a mouse hover over the element."""
pos = self._mouse_pos()
event = QMouseEvent(QEvent.MouseMove, pos, Qt.NoButton, Qt.NoButton,
Qt.NoModifier)
self._tab.send_event(event)
| gpl-3.0 |
mdavid/horizon | openstack_dashboard/dashboards/project/stacks/api.py | 95 | 2863 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from openstack_dashboard.api import heat
from openstack_dashboard.dashboards.project.stacks import mappings
from openstack_dashboard.dashboards.project.stacks import sro
class Stack(object):
pass
def d3_data(request, stack_id=''):
try:
stack = heat.stack_get(request, stack_id)
except Exception:
stack = Stack()
stack.id = stack_id
stack.stack_name = request.session.get('stack_name', '')
stack.stack_status = 'DELETE_COMPLETE'
stack.stack_status_reason = 'DELETE_COMPLETE'
try:
resources = heat.resources_list(request, stack.stack_name)
except Exception:
resources = []
d3_data = {"nodes": [], "stack": {}}
if stack:
stack_image = mappings.get_resource_image(stack.stack_status, 'stack')
stack_node = {
'stack_id': stack.id,
'name': stack.stack_name,
'status': stack.stack_status,
'image': stack_image,
'image_size': 60,
'image_x': -30,
'image_y': -30,
'text_x': 40,
'text_y': ".35em",
'in_progress': (stack.status == 'IN_PROGRESS'),
'info_box': sro.stack_info(stack, stack_image)
}
d3_data['stack'] = stack_node
if resources:
for resource in resources:
resource_image = mappings.get_resource_image(
resource.resource_status,
resource.resource_type)
resource_status = mappings.get_resource_status(
resource.resource_status)
if resource_status in ('IN_PROGRESS', 'INIT'):
in_progress = True
else:
in_progress = False
resource_node = {
'name': resource.resource_name,
'status': resource.resource_status,
'image': resource_image,
'required_by': resource.required_by,
'image_size': 50,
'image_x': -25,
'image_y': -25,
'text_x': 35,
'text_y': ".35em",
'in_progress': in_progress,
'info_box': sro.resource_info(resource)
}
d3_data['nodes'].append(resource_node)
return json.dumps(d3_data)
| apache-2.0 |
huziyizero/godot | platform/osx/detect.py | 12 | 3271 |
import os
import sys
def is_active():
return True
def get_name():
return "OSX"
def can_build():
if (sys.platform == "darwin" or os.environ.has_key("OSXCROSS_ROOT")):
return True
return False
def get_opts():
return [
('force_64_bits','Force 64 bits binary','no'),
('osxcross_sdk','OSXCross SDK version','darwin14'),
]
def get_flags():
return [
('legacygl', 'yes'),
('builtin_zlib', 'no'),
('glew', 'yes'),
]
def configure(env):
env.Append(CPPPATH=['#platform/osx'])
if (env["bits"]=="default"):
env["bits"]="32"
if (env["target"]=="release"):
env.Append(CCFLAGS=['-O2','-ffast-math','-fomit-frame-pointer','-ftree-vectorize','-msse2'])
elif (env["target"]=="release_debug"):
env.Append(CCFLAGS=['-O2','-DDEBUG_ENABLED'])
elif (env["target"]=="debug"):
env.Append(CCFLAGS=['-g3', '-Wall','-DDEBUG_ENABLED','-DDEBUG_MEMORY_ENABLED'])
if (not os.environ.has_key("OSXCROSS_ROOT")):
#regular native build
if (env["bits"]=="64"):
env.Append(CCFLAGS=['-arch', 'x86_64'])
env.Append(LINKFLAGS=['-arch', 'x86_64'])
elif (env["bits"]=="32"):
env.Append(CCFLAGS=['-arch', 'i386'])
env.Append(LINKFLAGS=['-arch', 'i386'])
else:
env.Append(CCFLAGS=['-arch', 'i386', '-arch', 'x86_64'])
env.Append(LINKFLAGS=['-arch', 'i386', '-arch', 'x86_64'])
else:
#osxcross build
root=os.environ.get("OSXCROSS_ROOT",0)
if env["bits"]=="64":
basecmd=root+"/target/bin/x86_64-apple-"+env["osxcross_sdk"]+"-"
else:
basecmd=root+"/target/bin/i386-apple-"+env["osxcross_sdk"]+"-"
env['CC'] = basecmd+"cc"
env['CXX'] = basecmd+"c++"
env['AR'] = basecmd+"ar"
env['RANLIB'] = basecmd+"ranlib"
env['AS'] = basecmd+"as"
# env.Append(CPPPATH=['#platform/osx/include/freetype2', '#platform/osx/include'])
# env.Append(LIBPATH=['#platform/osx/lib'])
env.Append(CPPFLAGS=["-DAPPLE_STYLE_KEYS"])
env.Append(CPPFLAGS=['-DUNIX_ENABLED','-DGLES2_ENABLED','-DOSX_ENABLED'])
env.Append(LIBS=['pthread'])
#env.Append(CPPFLAGS=['-F/Developer/SDKs/MacOSX10.4u.sdk/System/Library/Frameworks', '-isysroot', '/Developer/SDKs/MacOSX10.4u.sdk', '-mmacosx-version-min=10.4'])
#env.Append(LINKFLAGS=['-mmacosx-version-min=10.4', '-isysroot', '/Developer/SDKs/MacOSX10.4u.sdk', '-Wl,-syslibroot,/Developer/SDKs/MacOSX10.4u.sdk'])
env.Append(LINKFLAGS=['-framework', 'Cocoa', '-framework', 'Carbon', '-framework', 'OpenGL', '-framework', 'AGL', '-framework', 'AudioUnit','-lz'])
if (env["CXX"]=="clang++"):
env.Append(CPPFLAGS=['-DTYPED_METHOD_BIND'])
env["CC"]="clang"
env["LD"]="clang++"
if (env["colored"]=="yes"):
if sys.stdout.isatty():
env.Append(CPPFLAGS=["-fcolor-diagnostics"])
import methods
env.Append( BUILDERS = { 'GLSL120' : env.Builder(action = methods.build_legacygl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
env.Append( BUILDERS = { 'GLSL' : env.Builder(action = methods.build_glsl_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
env.Append( BUILDERS = { 'GLSL120GLES' : env.Builder(action = methods.build_gles2_headers, suffix = 'glsl.h',src_suffix = '.glsl') } )
#env.Append( BUILDERS = { 'HLSL9' : env.Builder(action = methods.build_hlsl_dx9_headers, suffix = 'hlsl.h',src_suffix = '.hlsl') } )
env["x86_opt_gcc"]=True
| mit |
dmoliveira/networkx | networkx/algorithms/tree/tests/test_recognition.py | 54 | 4061 |
from nose.tools import *
import networkx as nx
class TestTreeRecognition(object):
graph = nx.Graph
multigraph = nx.MultiGraph
def setUp(self):
self.T1 = self.graph()
self.T2 = self.graph()
self.T2.add_node(1)
self.T3 = self.graph()
self.T3.add_nodes_from(range(5))
edges = [(i,i+1) for i in range(4)]
self.T3.add_edges_from(edges)
self.T5 = self.multigraph()
self.T5.add_nodes_from(range(5))
edges = [(i,i+1) for i in range(4)]
self.T5.add_edges_from(edges)
self.T6 = self.graph()
self.T6.add_nodes_from([6,7])
self.T6.add_edge(6,7)
self.F1 = nx.compose(self.T6, self.T3)
self.N4 = self.graph()
self.N4.add_node(1)
self.N4.add_edge(1,1)
self.N5 = self.graph()
self.N5.add_nodes_from(range(5))
self.N6 = self.graph()
self.N6.add_nodes_from(range(3))
self.N6.add_edges_from([(0,1),(1,2),(2,0)])
self.NF1 = nx.compose(self.T6,self.N6)
@raises(nx.NetworkXPointlessConcept)
def test_null_tree(self):
nx.is_tree(self.graph())
nx.is_tree(self.multigraph())
@raises(nx.NetworkXPointlessConcept)
def test_null_forest(self):
nx.is_forest(self.graph())
nx.is_forest(self.multigraph())
def test_is_tree(self):
assert_true(nx.is_tree(self.T2))
assert_true(nx.is_tree(self.T3))
assert_true(nx.is_tree(self.T5))
def test_is_not_tree(self):
assert_false(nx.is_tree(self.N4))
assert_false(nx.is_tree(self.N5))
assert_false(nx.is_tree(self.N6))
def test_is_forest(self):
assert_true(nx.is_forest(self.T2))
assert_true(nx.is_forest(self.T3))
assert_true(nx.is_forest(self.T5))
assert_true(nx.is_forest(self.F1))
assert_true(nx.is_forest(self.N5))
def test_is_not_forest(self):
assert_false(nx.is_forest(self.N4))
assert_false(nx.is_forest(self.N6))
assert_false(nx.is_forest(self.NF1))
class TestDirectedTreeRecognition(TestTreeRecognition):
graph = nx.DiGraph
multigraph = nx.MultiDiGraph
def test_disconnected_graph():
# https://github.com/networkx/networkx/issues/1144
G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)])
assert_false(nx.is_tree(G))
G = nx.DiGraph()
G.add_edges_from([(0, 1), (1, 2), (2, 0), (3, 4)])
assert_false(nx.is_tree(G))
def test_dag_nontree():
G = nx.DiGraph()
G.add_edges_from([(0,1), (0,2), (1,2)])
assert_false(nx.is_tree(G))
assert_true(nx.is_directed_acyclic_graph(G))
def test_multicycle():
G = nx.MultiDiGraph()
G.add_edges_from([(0,1), (0,1)])
assert_false(nx.is_tree(G))
assert_true(nx.is_directed_acyclic_graph(G))
def test_emptybranch():
G = nx.DiGraph()
G.add_nodes_from(range(10))
assert_true(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_path():
G = nx.DiGraph()
G.add_path(range(5))
assert_true(nx.is_branching(G))
assert_true(nx.is_arborescence(G))
def test_notbranching1():
# Acyclic violation.
G = nx.MultiDiGraph()
G.add_nodes_from(range(10))
G.add_edges_from([(0,1),(1,0)])
assert_false(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_notbranching2():
# In-degree violation.
G = nx.MultiDiGraph()
G.add_nodes_from(range(10))
G.add_edges_from([(0,1),(0,2),(3,2)])
assert_false(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_notarborescence1():
# Not an arborescence due to not spanning.
G = nx.MultiDiGraph()
G.add_nodes_from(range(10))
G.add_edges_from([(0,1),(0,2),(1,3),(5,6)])
assert_true(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
def test_notarborescence2():
# Not an arborescence due to in-degree violation.
G = nx.MultiDiGraph()
G.add_path(range(5))
G.add_edge(6, 4)
assert_false(nx.is_branching(G))
assert_false(nx.is_arborescence(G))
| bsd-3-clause |
vivekanand1101/neutron | neutron/agent/rpc.py | 22 | 8851 | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from datetime import datetime
import itertools
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import uuidutils
from neutron.common import constants
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.i18n import _LW
LOG = logging.getLogger(__name__)
def create_consumers(endpoints, prefix, topic_details, start_listening=True):
"""Create agent RPC consumers.
:param endpoints: The list of endpoints to process the incoming messages.
:param prefix: Common prefix for the plugin/agent message queues.
:param topic_details: A list of topics. Each topic has a name, an
operation, and an optional host param keying the
subscription to topic.host for plugin calls.
:param start_listening: if True, it starts the processing loop
:returns: A common Connection.
"""
connection = n_rpc.create_connection(new=True)
for details in topic_details:
topic, operation, node_name = itertools.islice(
itertools.chain(details, [None]), 3)
topic_name = topics.get_topic_name(prefix, topic, operation)
connection.create_consumer(topic_name, endpoints, fanout=True)
if node_name:
node_topic_name = '%s.%s' % (topic_name, node_name)
connection.create_consumer(node_topic_name,
endpoints,
fanout=False)
if start_listening:
connection.consume_in_threads()
return connection
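# Usage sketch (illustrative topic values): an L2 agent could subscribe to port
# and network events, producing queues such as "q-agent-notifier-port-update":
#   consumers = [[topics.PORT, topics.UPDATE],
#                [topics.NETWORK, topics.DELETE]]
#   connection = create_consumers(endpoints, topics.AGENT, consumers)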
class PluginReportStateAPI(object):
"""RPC client used to report state back to plugin.
This class implements the client side of an rpc interface. The server side
can be found in neutron.db.agents_db.AgentExtRpcCallback. For more
information on changing rpc interfaces, see doc/source/devref/rpc_api.rst.
"""
def __init__(self, topic):
target = oslo_messaging.Target(topic=topic, version='1.0',
namespace=constants.RPC_NAMESPACE_STATE)
self.client = n_rpc.get_client(target)
def report_state(self, context, agent_state, use_call=False):
cctxt = self.client.prepare()
# add unique identifier to a report
# that can be logged on server side.
        # This creates a visible correspondence between events on
# the agent and on the server
agent_state['uuid'] = uuidutils.generate_uuid()
kwargs = {
'agent_state': {'agent_state': agent_state},
'time': datetime.utcnow().strftime(constants.ISO8601_TIME_FORMAT),
}
method = cctxt.call if use_call else cctxt.cast
return method(context, 'report_state', **kwargs)
class PluginApi(object):
'''Agent side of the rpc API.
API version history:
1.0 - Initial version.
1.3 - get_device_details rpc signature upgrade to obtain 'host' and
return value to include fixed_ips and device_owner for
the device port
1.4 - tunnel_sync rpc signature upgrade to obtain 'host'
1.5 - Support update_device_list and
get_devices_details_list_and_failed_devices
'''
def __init__(self, topic):
target = oslo_messaging.Target(topic=topic, version='1.0')
self.client = n_rpc.get_client(target)
def get_device_details(self, context, device, agent_id, host=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'get_device_details', device=device,
agent_id=agent_id, host=host)
def get_devices_details_list(self, context, devices, agent_id, host=None):
try:
cctxt = self.client.prepare(version='1.3')
res = cctxt.call(context, 'get_devices_details_list',
devices=devices, agent_id=agent_id, host=host)
except oslo_messaging.UnsupportedVersion:
# If the server has not been upgraded yet, a DVR-enabled agent
            # may not work correctly; however, it can function in 'degraded'
            # mode, in that DVR routers may not be in the system yet, and
            # it might not be necessary to retrieve info about the host.
LOG.warn(_LW('DVR functionality requires a server upgrade.'))
res = [
self.get_device_details(context, device, agent_id, host)
for device in devices
]
return res
def get_devices_details_list_and_failed_devices(self, context, devices,
agent_id, host=None):
"""Get devices details and the list of devices that failed.
This method returns the devices details. If an error is thrown when
retrieving the devices details, the device is put in a list of
failed devices.
"""
try:
cctxt = self.client.prepare(version='1.5')
res = cctxt.call(
context,
'get_devices_details_list_and_failed_devices',
devices=devices, agent_id=agent_id, host=host)
except oslo_messaging.UnsupportedVersion:
#TODO(rossella_s): Remove this failback logic in M
res = self._device_list_rpc_call_with_failed_dev(
self.get_device_details, context, agent_id, host, devices)
return res
def update_device_down(self, context, device, agent_id, host=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'update_device_down', device=device,
agent_id=agent_id, host=host)
def update_device_up(self, context, device, agent_id, host=None):
cctxt = self.client.prepare()
return cctxt.call(context, 'update_device_up', device=device,
agent_id=agent_id, host=host)
def _device_list_rpc_call_with_failed_dev(self, rpc_call, context,
agent_id, host, devices):
succeeded_devices = []
failed_devices = []
for device in devices:
try:
rpc_device = rpc_call(context, device, agent_id, host)
except Exception:
failed_devices.append(device)
else:
# update_device_up doesn't return the device
succeeded_dev = rpc_device or device
succeeded_devices.append(succeeded_dev)
return {'devices': succeeded_devices, 'failed_devices': failed_devices}
def update_device_list(self, context, devices_up, devices_down,
agent_id, host):
try:
cctxt = self.client.prepare(version='1.5')
res = cctxt.call(context, 'update_device_list',
devices_up=devices_up, devices_down=devices_down,
agent_id=agent_id, host=host)
except oslo_messaging.UnsupportedVersion:
            # TODO(rossella_s): Remove this fallback logic in M
dev_up = self._device_list_rpc_call_with_failed_dev(
self.update_device_up, context, agent_id, host, devices_up)
dev_down = self._device_list_rpc_call_with_failed_dev(
self.update_device_down, context, agent_id, host, devices_down)
res = {'devices_up': dev_up.get('devices'),
'failed_devices_up': dev_up.get('failed_devices'),
'devices_down': dev_down.get('devices'),
'failed_devices_down': dev_down.get('failed_devices')}
return res
def tunnel_sync(self, context, tunnel_ip, tunnel_type=None, host=None):
try:
cctxt = self.client.prepare(version='1.4')
res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
tunnel_type=tunnel_type, host=host)
except oslo_messaging.UnsupportedVersion:
LOG.warn(_LW('Tunnel synchronization requires a server upgrade.'))
cctxt = self.client.prepare()
res = cctxt.call(context, 'tunnel_sync', tunnel_ip=tunnel_ip,
tunnel_type=tunnel_type)
return res
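# Hedged usage sketch for PluginApi (the topic constant, context and device
# ids below are illustrative assumptions, not defined in this module):
#
#     plugin_rpc = PluginApi(topics.PLUGIN)
#     res = plugin_rpc.get_devices_details_list_and_failed_devices(
#         context, devices=['port-uuid-1', 'port-uuid-2'],
#         agent_id='ovs-agent-host1', host='host1')
#     # res['devices'] holds the details; res['failed_devices'] holds the
#     # ids whose lookup raised, mirroring the pre-1.5 fallback path above.
#
# Because every batched method falls back to per-device calls on
# oslo_messaging.UnsupportedVersion, callers may use the newest signatures
# unconditionally against older servers.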
| apache-2.0 |
gbosh/mezzanine | mezzanine/urls.py | 1 | 3462 | """
This is the main ``urlconf`` for Mezzanine - it sets up patterns for
all the various Mezzanine apps, as well as third-party apps like Grappelli
and filebrowser.
"""
from django.conf.urls.defaults import patterns, include
from django.contrib import admin
from django.contrib.admin.sites import NotRegistered
from django.http import HttpResponse
from mezzanine.conf import settings
from mezzanine.core.sitemaps import DisplayableSitemap
# Remove unwanted models from the admin that are installed by default with
# third-party apps.
for model in settings.ADMIN_REMOVAL:
try:
model = tuple(model.rsplit(".", 1))
exec "from %s import %s" % model
except ImportError:
pass
else:
try:
admin.site.unregister(eval(model[1]))
except NotRegistered:
pass
urlpatterns = []
# Django's sitemap app.
if "django.contrib.sitemaps" in settings.INSTALLED_APPS:
sitemaps = {"sitemaps": {"all": DisplayableSitemap}}
urlpatterns += patterns("django.contrib.sitemaps.views",
("^sitemap\.xml$", "sitemap", sitemaps)
)
# Return a robots.txt that disallows all spiders when DEBUG is True.
if getattr(settings, "DEBUG", False):
urlpatterns += patterns("",
("^robots.txt$", lambda r: HttpResponse("User-agent: *\nDisallow: /",
mimetype="text/plain")),
)
# Filebrowser admin media library.
if getattr(settings, "PACKAGE_NAME_FILEBROWSER") in settings.INSTALLED_APPS:
urlpatterns += patterns("",
("^admin/media-library/", include("%s.urls" %
settings.PACKAGE_NAME_FILEBROWSER)),
)
# Miscellaneous Mezzanine patterns.
urlpatterns += patterns("",
("^", include("mezzanine.core.urls")),
("^", include("mezzanine.generic.urls")),
)
# Mezzanine's Blog app.
blog_installed = "mezzanine.blog" in settings.INSTALLED_APPS
if blog_installed:
BLOG_SLUG = settings.BLOG_SLUG.rstrip("/")
blog_patterns = patterns("",
("^%s" % BLOG_SLUG, include("mezzanine.blog.urls")),
)
urlpatterns += blog_patterns
# Mezzanine's Accounts app
_old_accounts_enabled = getattr(settings, "ACCOUNTS_ENABLED", False)
if _old_accounts_enabled:
import warnings
warnings.warn("The setting ACCOUNTS_ENABLED is deprecated. Please "
"add mezzanine.accounts to INSTALLED_APPS.")
if _old_accounts_enabled or "mezzanine.accounts" in settings.INSTALLED_APPS:
    # We don't define a URL prefix here, such as /account/, since we want
    # to honour the LOGIN_* settings, which Django has prefixed with
    # /account/ by default. So those settings are used in accounts.urls.
urlpatterns += patterns("",
("^", include("mezzanine.accounts.urls")),
)
# Mezzanine's Pages app.
PAGES_SLUG = ""
if "mezzanine.pages" in settings.INSTALLED_APPS:
# No BLOG_SLUG means catch-all patterns belong to the blog,
# so give pages their own prefix and inject them before the
# blog urlpatterns.
if blog_installed and not BLOG_SLUG:
PAGES_SLUG = getattr(settings, "PAGES_SLUG", "pages").strip("/") + "/"
blog_patterns_start = urlpatterns.index(blog_patterns[0])
urlpatterns[blog_patterns_start:len(blog_patterns)] = patterns("",
("^%s" % unicode(PAGES_SLUG), include("mezzanine.pages.urls")),
)
else:
urlpatterns += patterns("",
("^", include("mezzanine.pages.urls")),
)
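# A minimal project-level urls.py sketch showing how this module is usually
# mounted (hedged: the surrounding project configuration is an assumption):
#
#     from django.conf.urls.defaults import patterns, include
#
#     urlpatterns = patterns("",
#         ("^", include("mezzanine.urls")),
#     )
#
# Mounting at "^" lets the BLOG_SLUG/PAGES_SLUG logic above decide which app
# owns the catch-all patterns.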
| bsd-2-clause |
bgxavier/nova | nova/virt/ironic/ironic_states.py | 36 | 4259 | # Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Mapping of bare metal node states.
Setting the node `power_state` is handled by the conductor's power
synchronization thread. Based on the power state retrieved from the driver
for the node, the state is set to POWER_ON or POWER_OFF, accordingly.
Should this fail, the `power_state` value is left unchanged, and the node
is placed into maintenance mode.
The `power_state` can also be set manually via the API. A failure to change
the state leaves the current state unchanged. The node is NOT placed into
maintenance mode in this case.
"""
#####################
# Provisioning states
#####################
NOSTATE = None
""" No state information.
This state is used with power_state to represent a lack of knowledge of
power state, and in target_*_state fields when there is no target.
Prior to the Kilo release, Ironic set node.provision_state to NOSTATE
when the node was available for provisioning. During the Kilo cycle, this was
changed to the AVAILABLE state.
"""
MANAGEABLE = 'manageable'
""" Node is in a manageable state.
This state indicates that Ironic has verified, at least once, that it had
sufficient information to manage the hardware. While in this state, the node
is not available for provisioning (it must be in the AVAILABLE state for that).
"""
AVAILABLE = 'available'
""" Node is available for use and scheduling.
This state is replacing the NOSTATE state used prior to Kilo.
"""
ACTIVE = 'active'
""" Node is successfully deployed and associated with an instance. """
DEPLOYWAIT = 'wait call-back'
""" Node is waiting to be deployed.
This will be the node `provision_state` while the node is waiting for
the driver to finish deployment.
"""
DEPLOYING = 'deploying'
""" Node is ready to receive a deploy request, or is currently being deployed.
A node will have its `provision_state` set to DEPLOYING briefly before it
receives its initial deploy request. It will also move to this state from
DEPLOYWAIT after the callback is triggered and deployment is continued
(disk partitioning and image copying).
"""
DEPLOYFAIL = 'deploy failed'
""" Node deployment failed. """
DEPLOYDONE = 'deploy complete'
""" Node was successfully deployed.
This is mainly a target provision state used during deployment. A successfully
deployed node should go to ACTIVE status.
"""
DELETING = 'deleting'
""" Node is actively being torn down. """
DELETED = 'deleted'
""" Node tear down was successful.
In Juno, target_provision_state was set to this value during node tear down.
In Kilo, this will be a transitory value of provision_state, and never
represented in target_provision_state.
"""
CLEANING = 'cleaning'
""" Node is being automatically cleaned to prepare it for provisioning. """
CLEANFAIL = 'clean failed'
""" Node failed cleaning. This requires operator intervention to resolve. """
ERROR = 'error'
""" An error occurred during node processing.
The `last_error` attribute of the node details should contain an error message.
"""
REBUILD = 'rebuild'
""" Node is to be rebuilt.
This is not used as a state, but rather as a "verb" when changing the node's
provision_state via the REST API.
"""
INSPECTING = 'inspecting'
""" Node is under inspection.
This is the provision state used when inspection is started. A successfully
inspected node shall transition to MANAGEABLE status.
"""
INSPECTFAIL = 'inspect failed'
""" Node inspection failed. """
##############
# Power states
##############
POWER_ON = 'power on'
""" Node is powered on. """
POWER_OFF = 'power off'
""" Node is powered off. """
REBOOT = 'rebooting'
""" Node is rebooting. """
| apache-2.0 |
un33k/CouchPotatoServer | libs/suds/builder.py | 197 | 4220 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{builder} module provides a wsdl/xsd defined types factory
"""
from logging import getLogger
from suds import *
from suds.sudsobject import Factory
log = getLogger(__name__)
class Builder:
""" Builder used to construct an object for types defined in the schema """
def __init__(self, resolver):
"""
@param resolver: A schema object name resolver.
@type resolver: L{resolver.Resolver}
"""
self.resolver = resolver
def build(self, name):
""" build a an object for the specified typename as defined in the schema """
if isinstance(name, basestring):
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
else:
type = name
cls = type.name
if type.mixed():
data = Factory.property(cls)
else:
data = Factory.object(cls)
resolved = type.resolve()
md = data.__metadata__
md.sxtype = resolved
md.ordering = self.ordering(resolved)
history = []
self.add_attributes(data, resolved)
for child, ancestry in type.children():
if self.skip_child(child, ancestry):
continue
self.process(data, child, history[:])
return data
def process(self, data, type, history):
""" process the specified type then process its children """
if type in history:
return
if type.enum():
return
history.append(type)
resolved = type.resolve()
value = None
if type.unbounded():
value = []
else:
if len(resolved) > 0:
if resolved.mixed():
value = Factory.property(resolved.name)
md = value.__metadata__
md.sxtype = resolved
else:
value = Factory.object(resolved.name)
md = value.__metadata__
md.sxtype = resolved
md.ordering = self.ordering(resolved)
setattr(data, type.name, value)
if value is not None:
data = value
if not isinstance(data, list):
self.add_attributes(data, resolved)
for child, ancestry in resolved.children():
if self.skip_child(child, ancestry):
continue
self.process(data, child, history[:])
def add_attributes(self, data, type):
""" add required attributes """
for attr, ancestry in type.attributes():
name = '_%s' % attr.name
value = attr.get_default()
setattr(data, name, value)
def skip_child(self, child, ancestry):
""" get whether or not to skip the specified child """
if child.any(): return True
for x in ancestry:
if x.choice():
return True
return False
def ordering(self, type):
""" get the ordering """
result = []
for child, ancestry in type.resolve():
name = child.name
if child.name is None:
continue
if child.isattr():
name = '_%s' % child.name
result.append(name)
return result
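# Hedged usage sketch: in normal use the Builder is driven indirectly through
# suds.client.Client's factory (the URL and type name below are assumptions):
#
#     from suds.client import Client
#     client = Client('http://example.com/service?wsdl')
#     person = client.factory.create('Person')
#
# factory.create() resolves 'Person' in the WSDL/XSD schema via a Builder,
# returning a sudsobject whose attributes mirror the schema definition.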
| gpl-3.0 |
great-expectations/great_expectations | tests/test_ge_utils.py | 1 | 11911 | import copy
import os
import pytest
import great_expectations as ge
from great_expectations.core.util import nested_update
from great_expectations.dataset.util import check_sql_engine_dialect
from great_expectations.util import (
filter_properties_dict,
get_currently_executing_function_call_arguments,
lint_code,
)
def test_validate_non_dataset(file_data_asset, empty_expectation_suite):
with pytest.raises(
ValueError, match=r"The validate util method only supports dataset validations"
):
with pytest.warns(
Warning,
match="No great_expectations version found in configuration object.",
):
ge.validate(
file_data_asset,
empty_expectation_suite,
data_asset_class=ge.data_asset.FileDataAsset,
)
def test_validate_dataset(dataset, basic_expectation_suite):
res = ge.validate(dataset, basic_expectation_suite)
    # assert res.success is True  # will not be true for mysql, where the "infinities" column is missing
assert res["statistics"]["evaluated_expectations"] == 4
if isinstance(dataset, ge.dataset.PandasDataset):
res = ge.validate(
dataset,
expectation_suite=basic_expectation_suite,
data_asset_class=ge.dataset.PandasDataset,
)
assert res.success is True
assert res["statistics"]["evaluated_expectations"] == 4
with pytest.raises(
ValueError,
match=r"The validate util method only supports validation for subtypes of the provided data_asset_type",
):
ge.validate(
dataset,
basic_expectation_suite,
data_asset_class=ge.dataset.SqlAlchemyDataset,
)
elif (
isinstance(dataset, ge.dataset.SqlAlchemyDataset)
and dataset.sql_engine_dialect.name.lower() != "mysql"
):
res = ge.validate(
dataset,
expectation_suite=basic_expectation_suite,
data_asset_class=ge.dataset.SqlAlchemyDataset,
)
assert res.success is True
assert res["statistics"]["evaluated_expectations"] == 4
with pytest.raises(
ValueError,
match=r"The validate util method only supports validation for subtypes of the provided data_asset_type",
):
ge.validate(
dataset,
expectation_suite=basic_expectation_suite,
data_asset_class=ge.dataset.PandasDataset,
)
elif (
isinstance(dataset, ge.dataset.SqlAlchemyDataset)
and dataset.sql_engine_dialect.name.lower() == "mysql"
):
# mysql cannot use the infinities column
res = ge.validate(
dataset,
expectation_suite=basic_expectation_suite,
data_asset_class=ge.dataset.SqlAlchemyDataset,
)
assert res.success is False
assert res["statistics"]["evaluated_expectations"] == 4
with pytest.raises(
ValueError,
match=r"The validate util method only supports validation for subtypes of the provided data_asset_type",
):
ge.validate(
dataset,
expectation_suite=basic_expectation_suite,
data_asset_class=ge.dataset.PandasDataset,
)
elif isinstance(dataset, ge.dataset.SparkDFDataset):
res = ge.validate(
dataset, basic_expectation_suite, data_asset_class=ge.dataset.SparkDFDataset
)
assert res.success is True
assert res["statistics"]["evaluated_expectations"] == 4
with pytest.raises(
ValueError,
match=r"The validate util method only supports validation for subtypes of the provided data_asset_type",
):
ge.validate(
dataset,
expectation_suite=basic_expectation_suite,
data_asset_class=ge.dataset.PandasDataset,
)
def test_validate_using_data_context(
dataset, data_context_parameterized_expectation_suite
):
# Before running, the data context should not have compiled parameters
assert (
data_context_parameterized_expectation_suite._evaluation_parameter_dependencies_compiled
is False
)
res = ge.validate(
dataset,
expectation_suite_name="my_dag_node.default",
data_context=data_context_parameterized_expectation_suite,
)
    # Since the handling of evaluation parameters no longer happens without an
    # action, the context should still not be compiled after validation.
assert (
data_context_parameterized_expectation_suite._evaluation_parameter_dependencies_compiled
is False
)
# And, we should have validated the right number of expectations from the context-provided config
assert res.success is False
assert res.statistics["evaluated_expectations"] == 2
def test_validate_using_data_context_path(
dataset, data_context_parameterized_expectation_suite
):
data_context_path = data_context_parameterized_expectation_suite.root_directory
res = ge.validate(
dataset,
expectation_suite_name="my_dag_node.default",
data_context=data_context_path,
)
# We should have now found the right suite with expectations to evaluate
assert res.success is False
assert res["statistics"]["evaluated_expectations"] == 2
def test_validate_invalid_parameters(
dataset, basic_expectation_suite, data_context_parameterized_expectation_suite
):
with pytest.raises(
ValueError,
match="Either an expectation suite or a DataContext is required for validation.",
):
ge.validate(dataset)
def test_gen_directory_tree_str(tmp_path_factory):
project_dir = str(tmp_path_factory.mktemp("project_dir"))
os.mkdir(os.path.join(project_dir, "BBB"))
with open(os.path.join(project_dir, "BBB", "bbb.txt"), "w") as f:
f.write("hello")
with open(os.path.join(project_dir, "BBB", "aaa.txt"), "w") as f:
f.write("hello")
os.mkdir(os.path.join(project_dir, "AAA"))
print(ge.util.gen_directory_tree_str(project_dir))
    # Note: files and directories are sorted alphabetically, so that this method can be used for testing.
assert (
ge.util.gen_directory_tree_str(project_dir)
== """\
project_dir0/
AAA/
BBB/
aaa.txt
bbb.txt
"""
)
def test_nested_update():
    # nested_update is useful for updating nested dictionaries (such as batch_kwargs with reader_options as a dictionary)
batch_kwargs = {
"path": "/a/path",
"reader_method": "read_csv",
"reader_options": {"header": 0},
}
nested_update(batch_kwargs, {"reader_options": {"nrows": 1}})
assert batch_kwargs == {
"path": "/a/path",
"reader_method": "read_csv",
"reader_options": {"header": 0, "nrows": 1},
}
def test_nested_update_lists():
    # nested_update also extends lists found at matching keys (here, lists of metric dependencies)
dependencies = {
"suite.warning": {"metric.name": ["column=foo"]},
"suite.failure": {"metric.blarg": [""]},
}
new_dependencies = {
"suite.warning": {
"metric.other_name": ["column=foo"],
"metric.name": ["column=bar"],
}
}
nested_update(dependencies, new_dependencies)
assert dependencies == {
"suite.warning": {
"metric.name": ["column=foo", "column=bar"],
"metric.other_name": ["column=foo"],
},
"suite.failure": {"metric.blarg": [""]},
}
def test_linter_raises_error_on_non_string_input():
with pytest.raises(TypeError):
lint_code(99)
def test_linter_changes_dirty_code():
code = "foo = [1,2,3]"
assert lint_code(code) == "foo = [1, 2, 3]\n"
def test_linter_leaves_clean_code():
code = "foo = [1, 2, 3]\n"
assert lint_code(code) == "foo = [1, 2, 3]\n"
def test_get_currently_executing_function_call_arguments(a=None, *args, **kwargs):
if a is None:
test_get_currently_executing_function_call_arguments(0, 1, 2, 3, b=5)
else:
assert a == 0
assert args == (1, 2, 3)
assert kwargs == {"b": 5}
params = get_currently_executing_function_call_arguments(
**{
"additional_param_0": "xyz_0",
"additional_param_1": "xyz_1",
"additional_param_2": "xyz_2",
}
)
assert params["a"] == 0
assert params["args"] == (1, 2, 3)
assert params["b"] == 5
assert params["additional_param_0"] == "xyz_0"
assert params["additional_param_1"] == "xyz_1"
assert params["additional_param_2"] == "xyz_2"
def test_filter_properties_dict():
source_dict: dict = {
"integer_zero": 0,
"null": None,
"string": "xyz_0",
"integer_one": 1,
"scientific_notation_floating_point_number": 9.8e1,
}
d0_begin: dict = copy.deepcopy(source_dict)
with pytest.raises(ValueError):
# noinspection PyUnusedLocal
d0_end: dict = filter_properties_dict(
properties=d0_begin,
keep_fields=["string"],
delete_fields=["integer_zero", "scientific_notation_floating_point_number"],
clean_falsy=True,
)
d0_end: dict = filter_properties_dict(properties=d0_begin, clean_falsy=True)
d0_end_expected = copy.deepcopy(d0_begin)
d0_end_expected.pop("null")
assert d0_end == d0_end_expected
d1_begin: dict = copy.deepcopy(source_dict)
d1_end: dict = filter_properties_dict(
properties=d1_begin,
clean_nulls=False,
)
d1_end_expected = copy.deepcopy(d1_begin)
assert d1_end == d1_end_expected
d2_begin: dict = copy.deepcopy(source_dict)
d2_end: dict = filter_properties_dict(
properties=d2_begin,
clean_nulls=True,
clean_falsy=False,
)
d2_end_expected = copy.deepcopy(d2_begin)
d2_end_expected.pop("null")
assert d2_end == d2_end_expected
d3_begin: dict = copy.deepcopy(source_dict)
d3_end: dict = filter_properties_dict(
properties=d3_begin,
keep_fields=["null"],
clean_falsy=True,
)
d3_end_expected = {"null": None}
assert d3_end == d3_end_expected
d4_begin: dict = copy.deepcopy(source_dict)
d4_end: dict = filter_properties_dict(
properties=d4_begin,
clean_falsy=True,
keep_falsy_numerics=False,
)
d4_end_expected = copy.deepcopy(d4_begin)
d4_end_expected.pop("integer_zero")
d4_end_expected.pop("null")
assert d4_end == d4_end_expected
d5_begin: dict = copy.deepcopy(source_dict)
d5_end: dict = filter_properties_dict(
properties=d5_begin,
keep_fields=["integer_zero", "scientific_notation_floating_point_number"],
clean_falsy=True,
)
d5_end_expected = {
"integer_zero": 0,
"scientific_notation_floating_point_number": 9.8e1,
}
assert d5_end == d5_end_expected
d6_begin: dict = copy.deepcopy(source_dict)
d6_end: dict = filter_properties_dict(
properties=d6_begin,
delete_fields=["integer_zero", "scientific_notation_floating_point_number"],
clean_falsy=True,
)
d6_end_expected = {"string": "xyz_0", "integer_one": 1}
assert d6_end == d6_end_expected
d7_begin: dict = copy.deepcopy(source_dict)
filter_properties_dict(
properties=d7_begin,
delete_fields=["integer_zero", "scientific_notation_floating_point_number"],
clean_falsy=True,
inplace=True,
)
d7_end = copy.deepcopy(d7_begin)
d7_end_expected = {"string": "xyz_0", "integer_one": 1}
assert d7_end == d7_end_expected
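# Summary of filter_properties_dict behavior as exercised above (a reading of
# these tests, not a documented API contract):
#   - keep_fields and delete_fields are mutually exclusive (ValueError).
#   - clean_nulls drops None values; clean_falsy drops None and other falsy
#     values, but keeps falsy numerics like 0 unless keep_falsy_numerics is
#     False.
#   - keep_fields wins over cleaning: a kept key survives even when falsy
#     (d3 retains its None value).
#   - inplace=True mutates the passed dict instead of returning a new one.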
| apache-2.0 |
lgeiger/ide-python | dist/debugger/VendorLib/vs-py-debugger/pythonFiles/experimental/ptvsd/ptvsd/_vendored/pydevd/pydev_ipython/inputhookqt5.py | 7 | 7286 | # -*- coding: utf-8 -*-
"""
Qt5's inputhook support function
Author: Christian Boos
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import signal
import threading
from pydev_ipython.qt_for_kernel import QtCore, QtGui
from pydev_ipython.inputhook import allow_CTRL_C, ignore_CTRL_C, stdin_ready
# To minimise future merging complexity, rather than edit the entire code base
# below, we fake InteractiveShell here
class InteractiveShell:
_instance = None
@classmethod
def instance(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
def set_hook(self, *args, **kwargs):
# We don't consider the pre_prompt_hook because we don't have
# KeyboardInterrupts to consider since we are running under PyDev
pass
#-----------------------------------------------------------------------------
# Module Globals
#-----------------------------------------------------------------------------
got_kbdint = False
sigint_timer = None
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def create_inputhook_qt5(mgr, app=None):
"""Create an input hook for running the Qt5 application event loop.
Parameters
----------
mgr : an InputHookManager
app : Qt Application, optional.
Running application to use. If not given, we probe Qt for an
existing application object, and create a new one if none is found.
Returns
-------
A pair consisting of a Qt Application (either the one given or the
one found or created) and a inputhook.
Notes
-----
We use a custom input hook instead of PyQt5's default one, as it
interacts better with the readline packages (issue #481).
The inputhook function works in tandem with a 'pre_prompt_hook'
which automatically restores the hook as an inputhook in case the
latter has been temporarily disabled after having intercepted a
KeyboardInterrupt.
"""
if app is None:
app = QtCore.QCoreApplication.instance()
if app is None:
from PyQt5 import QtWidgets
app = QtWidgets.QApplication([" "])
# Re-use previously created inputhook if any
ip = InteractiveShell.instance()
if hasattr(ip, '_inputhook_qt5'):
return app, ip._inputhook_qt5
# Otherwise create the inputhook_qt5/preprompthook_qt5 pair of
# hooks (they both share the got_kbdint flag)
def inputhook_qt5():
"""PyOS_InputHook python hook for Qt5.
        Process pending Qt events and, if there's no pending keyboard
input, spend a short slice of time (50ms) running the Qt event
loop.
As a Python ctypes callback can't raise an exception, we catch
the KeyboardInterrupt and temporarily deactivate the hook,
which will let a *second* CTRL+C be processed normally and go
back to a clean prompt line.
"""
try:
allow_CTRL_C()
app = QtCore.QCoreApplication.instance()
if not app: # shouldn't happen, but safer if it happens anyway...
return 0
app.processEvents(QtCore.QEventLoop.AllEvents, 300)
if not stdin_ready():
# Generally a program would run QCoreApplication::exec()
# from main() to enter and process the Qt event loop until
# quit() or exit() is called and the program terminates.
#
# For our input hook integration, we need to repeatedly
# enter and process the Qt event loop for only a short
# amount of time (say 50ms) to ensure that Python stays
# responsive to other user inputs.
#
# A naive approach would be to repeatedly call
# QCoreApplication::exec(), using a timer to quit after a
# short amount of time. Unfortunately, QCoreApplication
# emits an aboutToQuit signal before stopping, which has
# the undesirable effect of closing all modal windows.
#
# To work around this problem, we instead create a
# QEventLoop and call QEventLoop::exec(). Other than
# setting some state variables which do not seem to be
# used anywhere, the only thing QCoreApplication adds is
# the aboutToQuit signal which is precisely what we are
# trying to avoid.
timer = QtCore.QTimer()
event_loop = QtCore.QEventLoop()
timer.timeout.connect(event_loop.quit)
while not stdin_ready():
timer.start(50)
event_loop.exec_()
timer.stop()
except KeyboardInterrupt:
global got_kbdint, sigint_timer
ignore_CTRL_C()
got_kbdint = True
mgr.clear_inputhook()
# This generates a second SIGINT so the user doesn't have to
# press CTRL+C twice to get a clean prompt.
#
# Since we can't catch the resulting KeyboardInterrupt here
# (because this is a ctypes callback), we use a timer to
# generate the SIGINT after we leave this callback.
#
# Unfortunately this doesn't work on Windows (SIGINT kills
# Python and CTRL_C_EVENT doesn't work).
            if os.name == 'posix':
pid = os.getpid()
                if not sigint_timer:
sigint_timer = threading.Timer(.01, os.kill,
args=[pid, signal.SIGINT] )
sigint_timer.start()
else:
print("\nKeyboardInterrupt - Ctrl-C again for new prompt")
except: # NO exceptions are allowed to escape from a ctypes callback
ignore_CTRL_C()
from traceback import print_exc
print_exc()
print("Got exception from inputhook_qt5, unregistering.")
mgr.clear_inputhook()
finally:
allow_CTRL_C()
return 0
def preprompthook_qt5(ishell):
"""'pre_prompt_hook' used to restore the Qt5 input hook
(in case the latter was temporarily deactivated after a
CTRL+C)
"""
global got_kbdint, sigint_timer
        if sigint_timer:
sigint_timer.cancel()
sigint_timer = None
if got_kbdint:
mgr.set_inputhook(inputhook_qt5)
got_kbdint = False
ip._inputhook_qt5 = inputhook_qt5
ip.set_hook('pre_prompt_hook', preprompthook_qt5)
return app, inputhook_qt5
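# Hedged usage sketch (the manager wiring below is an assumption based on the
# pydev_ipython.inputhook conventions referenced above):
#
#     from pydev_ipython.inputhook import InputHookManager
#
#     mgr = InputHookManager()
#     app, hook = create_inputhook_qt5(mgr)
#     mgr.set_inputhook(hook)
#
# After registration, pending Qt events are processed whenever the
# interpreter waits for user input, keeping Qt windows responsive.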
| mit |