| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
| stringlengths 5–100 | stringlengths 4–375 | stringclasses 991 values | stringlengths 4–7 | stringlengths 666–1M | stringclasses 15 values |
iohannez/gnuradio | gr-blocks/python/blocks/stream_to_vector_decimator.py | 6 | 3246 | from __future__ import unicode_literals
#
# Copyright 2008 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from . import blocks_swig as blocks
class stream_to_vector_decimator(gr.hier_block2):
"""
Convert a stream to a vector and decimate the vector stream to achieve the desired vector rate.
"""
def __init__(self, item_size, sample_rate, vec_rate, vec_len):
"""
Create the block chain.
Args:
item_size: the number of bytes per sample
sample_rate: the rate of incoming samples
vec_rate: the rate of outgoing vectors (same units as sample_rate)
vec_len: the length of the outgoing vectors in items
"""
self._vec_rate = vec_rate
self._vec_len = vec_len
self._sample_rate = sample_rate
gr.hier_block2.__init__(self, "stream_to_vector_decimator",
gr.io_signature(1, 1, item_size), # Input signature
gr.io_signature(1, 1, item_size*vec_len)) # Output signature
s2v = blocks.stream_to_vector(item_size, vec_len)
self.one_in_n = blocks.keep_one_in_n(item_size*vec_len, 1)
self._update_decimator()
self.connect(self, s2v, self.one_in_n, self)
def set_sample_rate(self, sample_rate):
"""
Set the new sampling rate and update the decimator.
Args:
sample_rate: the new rate
"""
self._sample_rate = sample_rate
self._update_decimator()
def set_vec_rate(self, vec_rate):
"""
Set the new vector rate and update the decimator.
Args:
vec_rate: the new rate
"""
self._vec_rate = vec_rate
self._update_decimator()
def set_decimation(self, decim):
"""
Set the decimation parameter directly.
Args:
decim: the new decimation
"""
self._decim = max(1, int(round(decim)))
self.one_in_n.set_n(self._decim)
def _update_decimator(self):
self.set_decimation(self._sample_rate/self._vec_len/self._vec_rate)
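# Worked example (illustrative numbers, not from the original source):
# with sample_rate=48000, vec_len=512 and vec_rate=30 the decimation is
# 48000 / 512 / 30 = 3.125, which set_decimation() rounds to 3, keeping
# one vector in every 3 for an effective vector rate of ~31.25 vectors/s.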
def decimation(self):
"""
Returns the actual decimation.
"""
return self._decim
def sample_rate(self):
"""
Returns configured sample rate.
"""
return self._sample_rate
def frame_rate(self):
"""
Returns the actual frame rate.
"""
return self._sample_rate/self._vec_len/self._decim
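# Hedged usage sketch (not part of the original module): one way this hier
# block might be wired into a flowgraph. The helper blocks referenced
# (analog.sig_source_f, blocks.head, blocks.vector_sink_f, gr.top_block) are
# standard GNU Radio API; the numeric parameters are illustrative assumptions.
#
#   from gnuradio import gr, blocks, analog
#   tb = gr.top_block()
#   src = analog.sig_source_f(48e3, analog.GR_SIN_WAVE, 1e3, 1.0)
#   head = blocks.head(gr.sizeof_float, 48000)   # bound the run to 1 s of samples
#   s2v = blocks.stream_to_vector_decimator(
#       item_size=gr.sizeof_float, sample_rate=48e3, vec_rate=30.0, vec_len=512)
#   snk = blocks.vector_sink_f(512)
#   tb.connect(src, head, s2v, snk)
#   tb.run()
#   # snk.data() now holds the concatenated decimated vectors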
| gpl-3.0 |
raiden-network/raiden | raiden/raiden_service.py | 1 | 69488 | # pylint: disable=too-many-lines
import os
import random
import time
from collections import defaultdict
from enum import Enum
from typing import Any, Dict, List, NamedTuple, Set, Tuple, cast
from uuid import UUID
import click
import filelock
import gevent
import structlog
from eth_utils import to_hex
from gevent import Greenlet
from gevent.event import AsyncResult, Event
from web3.types import BlockData
from raiden import routing
from raiden.api.objects import Notification
from raiden.api.python import RaidenAPI
from raiden.api.rest import APIServer, RestAPI
from raiden.blockchain.decode import blockchainevent_to_statechange
from raiden.blockchain.events import BlockchainEvents, DecodedEvent
from raiden.blockchain.filters import RaidenContractFilter
from raiden.constants import (
ABSENT_SECRET,
BLOCK_ID_LATEST,
GENESIS_BLOCK_NUMBER,
SECRET_LENGTH,
SNAPSHOT_STATE_CHANGES_COUNT,
Environment,
RoutingMode,
)
from raiden.exceptions import (
BrokenPreconditionError,
InvalidDBData,
InvalidSecret,
InvalidSecretHash,
InvalidSettleTimeout,
PaymentConflict,
RaidenRecoverableError,
RaidenUnrecoverableError,
SerializationError,
)
from raiden.message_handler import MessageHandler
from raiden.messages.abstract import Message, SignedMessage
from raiden.messages.encode import message_from_sendevent
from raiden.network.proxies.proxy_manager import ProxyManager
from raiden.network.proxies.secret_registry import SecretRegistry
from raiden.network.proxies.service_registry import ServiceRegistry
from raiden.network.proxies.token_network_registry import TokenNetworkRegistry
from raiden.network.proxies.user_deposit import UserDeposit
from raiden.network.rpc.client import JSONRPCClient
from raiden.network.transport import populate_services_addresses
from raiden.network.transport.matrix.transport import MatrixTransport, MessagesQueue
from raiden.raiden_event_handler import EventHandler
from raiden.services import send_pfs_update, update_monitoring_service_from_balance_proof
from raiden.settings import RaidenConfig
from raiden.storage import sqlite, wal
from raiden.storage.serialization import DictSerializer, JSONSerializer
from raiden.storage.sqlite import HIGH_STATECHANGE_ULID, Range
from raiden.storage.wal import WriteAheadLog
from raiden.tasks import AlarmTask
from raiden.transfer import node, views
from raiden.transfer.architecture import (
BalanceProofSignedState,
ContractSendEvent,
Event as RaidenEvent,
StateChange,
)
from raiden.transfer.channel import get_capacity
from raiden.transfer.events import (
EventPaymentSentFailed,
EventPaymentSentSuccess,
EventWrapper,
RequestMetadata,
SendWithdrawExpired,
SendWithdrawRequest,
)
from raiden.transfer.identifiers import CanonicalIdentifier
from raiden.transfer.mediated_transfer.events import (
EventRouteFailed,
SendLockedTransfer,
SendSecretRequest,
SendUnlock,
)
from raiden.transfer.mediated_transfer.mediation_fee import (
FeeScheduleState,
calculate_imbalance_fees,
)
from raiden.transfer.mediated_transfer.state import TransferDescriptionWithSecretState
from raiden.transfer.mediated_transfer.state_change import (
ActionInitInitiator,
ReceiveLockExpired,
ReceiveTransferCancelRoute,
ReceiveTransferRefund,
)
from raiden.transfer.mediated_transfer.tasks import InitiatorTask
from raiden.transfer.state import ChainState, RouteState, TokenNetworkRegistryState
from raiden.transfer.state_change import (
ActionChannelSetRevealTimeout,
ActionChannelWithdraw,
BalanceProofStateChange,
Block,
ContractReceiveChannelDeposit,
ReceiveUnlock,
ReceiveWithdrawExpired,
ReceiveWithdrawRequest,
)
from raiden.ui.startup import RaidenBundle, ServicesBundle
from raiden.utils.formatting import lpex, to_checksum_address
from raiden.utils.gevent import spawn_named
from raiden.utils.logging import redact_secret
from raiden.utils.runnable import Runnable
from raiden.utils.secrethash import sha256_secrethash
from raiden.utils.signer import LocalSigner, Signer
from raiden.utils.transfers import random_secret
from raiden.utils.typing import (
MYPY_ANNOTATION,
Address,
AddressMetadata,
BlockNumber,
BlockTimeout,
InitiatorAddress,
MonitoringServiceAddress,
OneToNAddress,
Optional,
PaymentAmount,
PaymentID,
PrivateKey,
Secret,
SecretHash,
SecretRegistryAddress,
TargetAddress,
TokenNetworkAddress,
WithdrawAmount,
typecheck,
)
from raiden.utils.upgrades import UpgradeManager
from raiden_contracts.constants import ChannelEvent
from raiden_contracts.contract_manager import ContractManager
log = structlog.get_logger(__name__)
StatusesDict = Dict[TargetAddress, Dict[PaymentID, "PaymentStatus"]]
PFS_UPDATE_CAPACITY_STATE_CHANGES = (
ContractReceiveChannelDeposit,
ReceiveUnlock,
ReceiveWithdrawRequest,
ReceiveWithdrawExpired,
ReceiveTransferCancelRoute,
ReceiveLockExpired,
ReceiveTransferRefund,
# State change | Reason why update is not needed
# ActionInitInitiator | Update triggered by SendLockedTransfer
# ActionInitMediator | Update triggered by SendLockedTransfer
# ActionInitTarget | Update triggered by SendLockedTransfer
# ActionTransferReroute | Update triggered by SendLockedTransfer
# ActionChannelWithdraw | Upd. triggered by ReceiveWithdrawConfirmation/ReceiveWithdrawExpired
)
PFS_UPDATE_CAPACITY_EVENTS = (
SendUnlock,
SendLockedTransfer,
SendWithdrawRequest,
SendWithdrawExpired,
)
# Assume lower capacity for fees when in doubt, see
# https://raiden-network-specification.readthedocs.io/en/latest/pathfinding_service.html
# #when-to-send-pfsfeeupdates
PFS_UPDATE_FEE_STATE_CHANGES = (
ContractReceiveChannelDeposit,
ReceiveWithdrawRequest,
ReceiveWithdrawExpired,
)
PFS_UPDATE_FEE_EVENTS = (SendWithdrawRequest, SendWithdrawExpired)
assert not set(PFS_UPDATE_FEE_STATE_CHANGES) - set(
PFS_UPDATE_CAPACITY_STATE_CHANGES
), "No fee updates without capacity updates possible"
assert not set(PFS_UPDATE_FEE_EVENTS) - set(
PFS_UPDATE_CAPACITY_EVENTS
), "No fee updates without capacity updates possible"
def initiator_init(
raiden: "RaidenService",
transfer_identifier: PaymentID,
transfer_amount: PaymentAmount,
transfer_secret: Secret,
transfer_secrethash: SecretHash,
token_network_address: TokenNetworkAddress,
target_address: TargetAddress,
lock_timeout: BlockTimeout = None,
route_states: List[RouteState] = None,
) -> Tuple[Optional[str], ActionInitInitiator]:
transfer_state = TransferDescriptionWithSecretState(
token_network_registry_address=raiden.default_registry.address,
payment_identifier=transfer_identifier,
amount=transfer_amount,
token_network_address=token_network_address,
initiator=InitiatorAddress(raiden.address),
target=target_address,
secret=transfer_secret,
secrethash=transfer_secrethash,
lock_timeout=lock_timeout,
)
error_msg = None
if route_states is None:
our_address_metadata = raiden.transport.address_metadata
msg = "Transport is not initialized with raiden-service"
assert our_address_metadata is not None, msg
error_msg, route_states, feedback_token = routing.get_best_routes(
chain_state=views.state_from_raiden(raiden),
token_network_address=token_network_address,
one_to_n_address=raiden.default_one_to_n_address,
from_address=InitiatorAddress(raiden.address),
to_address=target_address,
amount=transfer_amount,
previous_address=None,
pfs_config=raiden.config.pfs_config,
privkey=raiden.privkey,
our_address_metadata=our_address_metadata,
)
# Only prepare feedback when token is available
if feedback_token is not None:
for route_state in route_states:
raiden.route_to_feedback_token[tuple(route_state.route)] = feedback_token
return error_msg, ActionInitInitiator(transfer_state, route_states)
def smart_contract_filters_from_node_state(
chain_state: ChainState,
secret_registry_address: SecretRegistryAddress,
service_registry: Optional[ServiceRegistry],
) -> RaidenContractFilter:
token_network_registries = chain_state.identifiers_to_tokennetworkregistries.values()
token_networks = [tn for tnr in token_network_registries for tn in tnr.token_network_list]
channels_of_token_network = {
tn.address: set(tn.channelidentifiers_to_channels.keys())
for tn in token_networks
if tn.channelidentifiers_to_channels
}
return RaidenContractFilter(
secret_registry_address=secret_registry_address,
token_network_registry_addresses={tnr.address for tnr in token_network_registries},
token_network_addresses={tn.address for tn in token_networks},
channels_of_token_network=channels_of_token_network,
ignore_secret_registry_until_channel_found=not channels_of_token_network,
service_registry=service_registry,
)
class PaymentStatus(NamedTuple):
"""Value type for RaidenService.targets_to_identifiers_to_statuses.
Contains the necessary information to tell conflicting transfers from
retries as well as the status of a transfer that is retried.
"""
payment_identifier: PaymentID
amount: PaymentAmount
token_network_address: TokenNetworkAddress
payment_done: AsyncResult
lock_timeout: Optional[BlockTimeout]
def matches(self, token_network_address: TokenNetworkAddress, amount: PaymentAmount) -> bool:
return token_network_address == self.token_network_address and amount == self.amount
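# Hedged illustration (values are made up): ``matches`` is what separates a
# retry of an in-flight payment from a conflicting payment that reuses the
# same identifier (see ``mediated_transfer_async`` further down).
#
#   status = PaymentStatus(
#       payment_identifier=PaymentID(1),
#       amount=PaymentAmount(10),
#       token_network_address=token_network_address,
#       payment_done=AsyncResult(),
#       lock_timeout=None,
#   )
#   status.matches(token_network_address, PaymentAmount(10))  # True  -> retry
#   status.matches(token_network_address, PaymentAmount(11))  # False -> PaymentConflict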
class SyncTimeout:
"""Helper to determine if the sync should halt or continue.
The goal of this helper is to stop syncing before the block
`current_confirmed_head` is pruned, otherwise JSON-RPC requests will start
to fail.
"""
def __init__(self, current_confirmed_head: BlockNumber, timeout: float) -> None:
self.sync_start = time.monotonic()
self.timeout = timeout
self.current_confirmed_head = current_confirmed_head
def time_elapsed(self) -> float:
delta = time.monotonic() - self.sync_start
return delta
def should_continue(self, last_fetched_block: BlockNumber) -> bool:
has_time = self.timeout >= self.time_elapsed()
has_blocks_unsynched = self.current_confirmed_head > last_fetched_block
return has_time and has_blocks_unsynched
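# Hedged sketch of the intended usage (it mirrors the sync loop in
# ``_best_effort_synchronize_with_confirmed_head`` below); ``fetch_next_batch``
# is a hypothetical placeholder for the event fetching step.
#
#   guard = SyncTimeout(current_confirmed_head=head, timeout=180.0)
#   last_fetched = last_known_block
#   while guard.should_continue(last_fetched):
#       last_fetched = fetch_next_batch(upper_bound=head)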
class SynchronizationState(Enum):
FULLY_SYNCED = "fully_synced"
PARTIALLY_SYNCED = "partially_synced"
class RaidenService(Runnable):
"""A Raiden node."""
def __init__(
self,
rpc_client: JSONRPCClient,
proxy_manager: ProxyManager,
query_start_block: BlockNumber,
raiden_bundle: RaidenBundle,
services_bundle: Optional[ServicesBundle],
transport: MatrixTransport,
raiden_event_handler: EventHandler,
message_handler: MessageHandler,
routing_mode: RoutingMode,
config: RaidenConfig,
api_server: Optional[APIServer] = None,
) -> None:
super().__init__()
# check that the settlement timeout fits the limits of the contract
settlement_timeout_min = raiden_bundle.token_network_registry.settlement_timeout_min(
BLOCK_ID_LATEST
)
settlement_timeout_max = raiden_bundle.token_network_registry.settlement_timeout_max(
BLOCK_ID_LATEST
)
invalid_settle_timeout = (
config.settle_timeout < settlement_timeout_min
or config.settle_timeout > settlement_timeout_max
or config.settle_timeout < config.reveal_timeout * 2
)
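# Worked example (illustrative numbers, not from the original source): with
# reveal_timeout=50 the settle_timeout must be at least 100 blocks, and it
# must additionally fall within the [settlement_timeout_min,
# settlement_timeout_max] range reported by the TokenNetworkRegistry contract.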
if invalid_settle_timeout:
contract = to_checksum_address(raiden_bundle.token_network_registry.address)
raise InvalidSettleTimeout(
(
f"Settlement timeout for Registry contract {contract} must "
f"be in range [{settlement_timeout_min}, {settlement_timeout_max}], "
f"is {config.settle_timeout}"
)
)
self.targets_to_identifiers_to_statuses: StatusesDict = defaultdict(dict)
one_to_n_address = None
monitoring_service_address = None
service_registry: Optional[ServiceRegistry] = None
user_deposit: Optional[UserDeposit] = None
if services_bundle:
if services_bundle.one_to_n:
one_to_n_address = services_bundle.one_to_n.address
if services_bundle.monitoring_service:
monitoring_service_address = services_bundle.monitoring_service.address
service_registry = services_bundle.service_registry
user_deposit = services_bundle.user_deposit
self.rpc_client: JSONRPCClient = rpc_client
self.proxy_manager: ProxyManager = proxy_manager
self.default_registry: TokenNetworkRegistry = raiden_bundle.token_network_registry
self.query_start_block = query_start_block
self.default_services_bundle = services_bundle
self.default_one_to_n_address: Optional[OneToNAddress] = one_to_n_address
self.default_secret_registry: SecretRegistry = raiden_bundle.secret_registry
self.default_service_registry = service_registry
self.default_user_deposit: Optional[UserDeposit] = user_deposit
self.default_msc_address: Optional[MonitoringServiceAddress] = monitoring_service_address
self.routing_mode: RoutingMode = routing_mode
self.config: RaidenConfig = config
self.notifications: Dict = {} # notifications are unique (and indexed) by id.
self.signer: Signer = LocalSigner(self.rpc_client.privkey)
self.address: Address = self.signer.address
self.transport: MatrixTransport = transport
self.alarm = AlarmTask(
proxy_manager=proxy_manager, sleep_time=self.config.blockchain.query_interval
)
self.raiden_event_handler = raiden_event_handler
self.message_handler = message_handler
self.blockchain_events: Optional[BlockchainEvents] = None
self.api_server: Optional[APIServer] = api_server
self.raiden_api: Optional[RaidenAPI] = None
self.rest_api: Optional[RestAPI] = None
if api_server is not None:
self.raiden_api = RaidenAPI(self)
self.rest_api = api_server.rest_api
self.stop_event = Event()
self.stop_event.set() # inits as stopped
self.greenlets: List[Greenlet] = list()
self.last_log_time = time.monotonic()
self.last_log_block = BlockNumber(0)
self.contract_manager: ContractManager = ContractManager(config.contracts_path)
self.wal: Optional[WriteAheadLog] = None
self.db_lock: Optional[filelock.UnixFileLock] = None
if self.config.database_path != ":memory:":
database_dir = os.path.dirname(config.database_path)
os.makedirs(database_dir, exist_ok=True)
self.database_dir: Optional[str] = database_dir
# Two raiden processes must not write to the same database. Even
# though it's possible the database itself would not be corrupt,
# the node's state could. If a database was shared among multiple
# nodes, the database WAL would be the union of multiple nodes'
# WALs. During a restart a single node can't distinguish its state
# changes from the others' and would apply them all, meaning that
# a node would execute the actions of itself and the others.
#
# Additionally the database snapshots would be corrupt, because it
# would not represent the effects of applying all the state changes
# in order.
lock_file = os.path.join(self.database_dir, ".lock")
self.db_lock = filelock.FileLock(lock_file)
else:
self.database_dir = None
self.serialization_file = None
self.db_lock = None
self.payment_identifier_lock = gevent.lock.Semaphore()
# A list is not hashable, so use tuple as key here
self.route_to_feedback_token: Dict[Tuple[Address, ...], UUID] = dict()
# Flag used to skip the processing of all Raiden events during the
# startup.
#
# Rationale: At the startup, the latest snapshot is restored and all
# state changes which are not 'part' of it are applied. The criterion to
# re-apply the state changes is their 'absence' from the snapshot, /not/
# their completeness. Because these state changes are re-executed
# in-order and some of their side-effects will already have been
# completed, the events should be delayed until the state is
# synchronized (e.g. an open channel state change, which has already
# been mined).
#
# Incomplete events, i.e. the ones which don't have their side-effects
# applied, will be executed once the blockchain state is synchronized
# because of the node's queues.
self.ready_to_process_events = False
# Counters used for state snapshotting
self.state_change_qty_snapshot = 0
self.state_change_qty = 0
def start(self) -> None:
"""Start the node synchronously. Raises directly if anything went wrong on startup"""
assert self.stop_event.ready(), f"Node already started. node:{self!r}"
self.stop_event.clear()
self.greenlets = list()
self.ready_to_process_events = False # set to False because of restarts
self._initialize_wal()
self._synchronize_with_blockchain()
chain_state = views.state_from_raiden(self)
self._initialize_payment_statuses(chain_state)
self._initialize_transactions_queues(chain_state)
self._initialize_messages_queues(chain_state)
self._initialize_channel_fees()
self._initialize_monitoring_services_queue(chain_state)
self._initialize_ready_to_process_events()
# Start the side-effects:
# - React to blockchain events
# - React to incoming messages
# - Send pending transactions
# - Send pending message
self.alarm.greenlet.link_exception(self.on_error)
self.transport.greenlet.link_exception(self.on_error)
if self.api_server:
self.api_server.greenlet.link_exception(self.on_error)
self._start_transport()
self._start_alarm_task()
log.debug("Raiden Service started", node=to_checksum_address(self.address))
super().start()
self._set_rest_api_service_available()
def _run(self, *args: Any, **kwargs: Any) -> None: # pylint: disable=method-hidden
"""Busy-wait on long-lived subtasks/greenlets, re-raise if any error occurs"""
self.greenlet.name = f"RaidenService._run node:{to_checksum_address(self.address)}"
try:
self.stop_event.wait()
except gevent.GreenletExit: # killed without exception
self.stop_event.set()
gevent.killall([self.alarm, self.transport]) # kill children
raise # re-raise to keep killed status
except Exception:
self.stop()
raise
def stop(self) -> None:
"""Stop the node gracefully. Raise if any stop-time error occurred on any subtask"""
if self.stop_event.ready(): # not started
return
# Needs to come before any greenlets joining
self.stop_event.set()
# Filters must be uninstalled after the alarm task has stopped. Since
# the events are polled by an alarm task callback, if the filters are
# uninstalled before the alarm task is fully stopped the callback will
# fail.
#
# We need a timeout to prevent an endless loop from trying to
# contact the disconnected client
if self.api_server is not None:
self.api_server.stop()
self.transport.stop()
self.alarm.stop()
if self.api_server is not None:
self.api_server.greenlet.join()
self.transport.greenlet.join()
self.alarm.greenlet.join()
assert (
self.blockchain_events
), f"The blockchain_events has to be set by the start. node:{self!r}"
self.blockchain_events.stop()
# Close storage DB to release internal DB lock
assert (
self.wal
), f"The Service must have been started before it can be stopped. node:{self!r}"
self.wal.storage.close()
self.wal = None
if self.db_lock is not None:
self.db_lock.release()
log.debug("Raiden Service stopped", node=to_checksum_address(self.address))
def add_notification(
self,
notification: Notification,
log_opts: Optional[Dict] = None,
click_opts: Optional[Dict] = None,
) -> None:
log_opts = log_opts or {}
click_opts = click_opts or {}
log.info(notification.summary, **log_opts)
click.secho(notification.body, **click_opts)
self.notifications[notification.id] = notification
@property
def confirmation_blocks(self) -> BlockTimeout:
return self.config.blockchain.confirmation_blocks
@property
def privkey(self) -> PrivateKey:
return self.rpc_client.privkey
def add_pending_greenlet(self, greenlet: Greenlet) -> None:
"""Ensures an error on the passed greenlet crashes self/main greenlet."""
def remove(_: Any) -> None:
self.greenlets.remove(greenlet)
self.greenlets.append(greenlet)
greenlet.link_exception(self.on_error)
greenlet.link_value(remove)
def __repr__(self) -> str:
return f"<{self.__class__.__name__} node:{to_checksum_address(self.address)}>"
def _start_transport(self) -> None:
"""Initialize the transport and related facilities.
Note:
The node has first to `_synchronize_with_blockchain` before
starting the transport. This synchronization includes the on-chain
channel state and is necessary to reject new messages for closed
channels.
"""
assert self.ready_to_process_events, f"Event processing disabled. node:{self!r}"
msg = (
"`self.blockchain_events` is `None`. "
"Seems like `_synchronize_with_blockchain` wasn't called before `_start_transport`."
)
assert self.blockchain_events is not None, msg
if self.default_service_registry is not None:
populate_services_addresses(
self.transport, self.default_service_registry, BLOCK_ID_LATEST
)
self.transport.start(raiden_service=self, prev_auth_data=None)
def _make_initial_state(self) -> ChainState:
# On first run Raiden needs to fetch all events for the payment
# network, to reconstruct all token network graphs and find opened
# channels
#
# The value `self.query_start_block` is an optimization, because
# Raiden has to poll all events until the last confirmed block,
# using the genesis block would result in fetches of a few million
# unnecessary blocks. Instead of querying all these unnecessary
# blocks, the configuration variable `query_start_block` is used to
# start at the block at which the `TokenNetworkRegistry` was deployed.
last_log_block_number = self.query_start_block
last_log_block_hash = self.rpc_client.blockhash_from_blocknumber(last_log_block_number)
initial_state = ChainState(
pseudo_random_generator=random.Random(),
block_number=last_log_block_number,
block_hash=last_log_block_hash,
our_address=self.address,
chain_id=self.rpc_client.chain_id,
)
token_network_registry_address = self.default_registry.address
token_network_registry = TokenNetworkRegistryState(
token_network_registry_address,
[], # empty list of token network states as it's the node's startup
)
initial_state.identifiers_to_tokennetworkregistries[
token_network_registry_address
] = token_network_registry
return initial_state
def _initialize_wal(self) -> None:
if self.database_dir is not None:
try:
assert (
self.db_lock is not None
), "If a database_dir is present, a lock for the database has to exist"
self.db_lock.acquire(timeout=0)
assert self.db_lock.is_locked, f"Database not locked. node:{self!r}"
except (filelock.Timeout, AssertionError) as ex:
raise RaidenUnrecoverableError(
"Could not aquire database lock. Maybe a Raiden node for this account "
f"({to_checksum_address(self.address)}) is already running?"
) from ex
self.maybe_upgrade_db()
storage = sqlite.SerializedSQLiteStorage(
database_path=self.config.database_path, serializer=JSONSerializer()
)
storage.update_version()
storage.log_run()
try:
initial_state = self._make_initial_state()
(
state_snapshot,
state_change_start,
state_change_qty_snapshot,
) = wal.restore_or_init_snapshot(
storage=storage, node_address=self.address, initial_state=initial_state
)
state, state_change_qty_unapplied = wal.replay_state_changes(
node_address=self.address,
state=state_snapshot,
state_change_range=Range(state_change_start, HIGH_STATECHANGE_ULID),
storage=storage,
transition_function=node.state_transition, # type: ignore
)
except SerializationError:
raise RaidenUnrecoverableError(
"Could not restore state. "
"It seems like the existing database is incompatible with "
"the current version of Raiden. Consider using a stable "
"version of the Raiden client."
)
if state_change_qty_snapshot == 0:
print(
"This is the first time Raiden is being used with this address. "
"Processing all the events may take some time. Please wait ..."
)
self.state_change_qty_snapshot = state_change_qty_snapshot
self.state_change_qty = state_change_qty_snapshot + state_change_qty_unapplied
msg = "The state must be a ChainState instance."
assert isinstance(state, ChainState), msg
self.wal = WriteAheadLog(state, storage, node.state_transition)
# The `Block` state change is dispatched only after all the events
# for that given block have been processed, filters can be safely
# installed starting from this position without losing events.
last_log_block_number = views.block_number(self.wal.get_current_state())
log.debug(
"Querying blockchain from block",
last_restored_block=last_log_block_number,
node=to_checksum_address(self.address),
)
known_networks = views.get_token_network_registry_address(views.state_from_raiden(self))
if known_networks and self.default_registry.address not in known_networks:
configured_registry = to_checksum_address(self.default_registry.address)
known_registries = lpex(known_networks)
raise RuntimeError(
f"Token network address mismatch.\n"
f"Raiden is configured to use the smart contract "
f"{configured_registry}, which conflicts with the current known "
f"smart contracts {known_registries}"
)
def _log_sync_progress(
self, polled_block_number: BlockNumber, target_block: BlockNumber
) -> None:
"""Print a message if there are many blocks to be fetched, or if the
time in-between polls is high.
"""
now = time.monotonic()
blocks_until_target = target_block - polled_block_number
polled_block_count = polled_block_number - self.last_log_block
elapsed = now - self.last_log_time
if blocks_until_target > 100 or elapsed > 15.0:
log.info(
"Synchronizing blockchain events",
remaining_blocks_to_sync=blocks_until_target,
blocks_per_second=polled_block_count / elapsed,
to_block=target_block,
elapsed=elapsed,
)
self.last_log_time = time.monotonic()
self.last_log_block = polled_block_number
def _synchronize_with_blockchain(self) -> None:
"""Prepares the alarm task callback and synchronize with the blockchain
since the last run.
Notes about setup order:
- The filters must be polled after the node state has been primed,
otherwise the state changes won't have effect.
- The synchronization must be done before the transport is started, to
reject messages for closed/settled channels.
"""
msg = (
f"Transport must not be started before the node has synchronized "
f"with the blockchain, otherwise the node may accept transfers to a "
f"closed channel. node:{self!r}"
)
assert not self.transport, msg
assert self.wal, f"The database must have been initialized. node:{self!r}"
chain_state = views.state_from_raiden(self)
# The `Block` state change is dispatched only after all the events for
# that given block have been processed, filters can be safely installed
# starting from this position without missing events.
last_block_number = views.block_number(chain_state)
event_filter = smart_contract_filters_from_node_state(
chain_state,
self.default_secret_registry.address,
self.default_service_registry,
)
log.debug("initial filter", event_filter=event_filter, node=self.address)
blockchain_events = BlockchainEvents(
web3=self.rpc_client.web3,
chain_id=chain_state.chain_id,
contract_manager=self.contract_manager,
last_fetched_block=last_block_number,
event_filter=event_filter,
block_batch_size_config=self.config.blockchain.block_batch_size_config,
node_address=self.address,
)
blockchain_events.register_listener(self._blockchain_event_listener)
self.last_log_block = last_block_number
self.last_log_time = time.monotonic()
# `blockchain_events` is a requirement for
# `_best_effort_synchronize_with_confirmed_head`, so it must be set
# before calling it
self.blockchain_events = blockchain_events
synchronization_state = SynchronizationState.PARTIALLY_SYNCED
while synchronization_state is SynchronizationState.PARTIALLY_SYNCED:
latest_block = self.rpc_client.get_block(block_identifier=BLOCK_ID_LATEST)
synchronization_state = self._best_effort_synchronize(latest_block)
self.alarm.register_callback(self._best_effort_synchronize)
def _blockchain_event_listener(self, events: List[DecodedEvent]) -> None:
for event in events:
args = event.event_data["args"]
if event.event_data["event"] == ChannelEvent.OPENED:
other = (
args["participant1"]
if args["participant1"] != self.address
else args["participant2"]
)
self.transport.health_check_web_rtc(other)
def _start_alarm_task(self) -> None:
"""Start the alarm task.
Note:
The alarm task must be started only when processing events is
allowed, otherwise side-effects of blockchain events will be
ignored.
"""
assert self.ready_to_process_events, f"Event processing disabled. node:{self!r}"
self.alarm.start()
def _set_rest_api_service_available(self) -> None:
if self.raiden_api:
assert self.rest_api, "api enabled in config but self.rest_api not initialized"
self.rest_api.raiden_api = self.raiden_api
print("Synchronization complete, REST API services now available.")
def _initialize_ready_to_process_events(self) -> None:
"""Mark the node as ready to start processing raiden events that may
send messages or transactions.
This flag /must/ be set to true before both the transport and the
alarm are started.
"""
msg = (
f"The transport must not be initialized before the "
f"`ready_to_process_events` flag is set, since this is a requirement "
f"for the alarm task and the alarm task should be started before the "
f"transport to avoid race conditions. node:{self!r}"
)
assert not self.transport, msg
msg = (
f"Alarm task must not be started before the "
f"`ready_to_process_events` flag is set, otherwise events may be "
f"missed. node:{self!r}"
)
assert not self.alarm, msg
self.ready_to_process_events = True
def get_block_number(self) -> BlockNumber:
assert self.wal, f"WAL object not yet initialized. node:{self!r}"
return views.block_number(self.wal.get_current_state())
def on_messages(self, messages: List[Message]) -> None:
self.message_handler.on_messages(self, messages)
def handle_and_track_state_changes(self, state_changes: List[StateChange]) -> None:
"""Dispatch the state change and does not handle the exceptions.
When the method is used the exceptions are tracked and re-raised in the
raiden service thread.
"""
if len(state_changes) == 0:
return
# It's important to /not/ block here, because this function can
# be called from the alarm task greenlet, which should not
# starve. This was a problem when the node decided to send a new
# transaction, since the proxies block until the transaction is
# mined and confirmed (e.g. the settle window is over and the
# node sends the settle transaction).
for greenlet in self.handle_state_changes(state_changes):
self.add_pending_greenlet(greenlet)
def handle_state_changes(self, state_changes: List[StateChange]) -> List[Greenlet]:
"""Dispatch the state change and return the processing threads.
Use this for error reporting, failures in the returned greenlets,
should be re-raised using `gevent.joinall` with `raise_error=True`.
"""
assert self.wal, f"WAL not restored. node:{self!r}"
log.debug(
"State changes",
node=to_checksum_address(self.address),
state_changes=[
redact_secret(DictSerializer.serialize(state_change))
for state_change in state_changes
],
)
raiden_events = []
with self.wal.process_state_change_atomically() as dispatcher:
for state_change in state_changes:
events = dispatcher.dispatch(state_change)
raiden_events.extend(events)
return self._trigger_state_change_effects(
new_state=views.state_from_raiden(self),
state_changes=state_changes,
events=raiden_events,
)
def _trigger_state_change_effects(
self,
new_state: ChainState,
state_changes: List[StateChange],
events: List[RaidenEvent],
) -> List[Greenlet]:
"""Trigger effects that are based on processed state changes.
Examples are MS/PFS updates, transport communication channel updates
and presence checks.
"""
# For safety of the mediation the monitoring service must be updated
# before the balance proof is sent. Otherwise a timing attack would be
# possible, where an attacker would mediate a transfer through a node,
# and try to DoS it, with the expectation that the victim would
# forward the payment, but wouldn't be able to send a transaction to
# the blockchain nor update a MS.
# Since several state_changes in one batch of state_changes can trigger
# the same PFSCapacityUpdate or MonitoringUpdate we want to iterate over
# all state changes to produce and send only unique messages. The assumption
# is that the latest related state_change defines the correct messages.
# The goal is to reduce the number of messages sent.
monitoring_updates: Dict[CanonicalIdentifier, BalanceProofStateChange] = dict()
pfs_fee_updates: Set[CanonicalIdentifier] = set()
pfs_capacity_updates: Set[CanonicalIdentifier] = set()
for state_change in state_changes:
if self.config.services.monitoring_enabled and isinstance(
state_change, BalanceProofStateChange
):
monitoring_updates[state_change.balance_proof.canonical_identifier] = state_change
if isinstance(state_change, PFS_UPDATE_CAPACITY_STATE_CHANGES):
if isinstance(state_change, BalanceProofStateChange):
canonical_identifier = state_change.balance_proof.canonical_identifier
else:
canonical_identifier = state_change.canonical_identifier
if isinstance(state_change, PFS_UPDATE_FEE_STATE_CHANGES):
pfs_fee_updates.add(canonical_identifier)
else:
pfs_capacity_updates.add(canonical_identifier)
if isinstance(state_change, Block):
self.transport.expire_services_addresses(
self.rpc_client.get_block(state_change.block_hash)["timestamp"],
state_change.block_number,
)
for event in events:
if isinstance(event, PFS_UPDATE_FEE_EVENTS):
pfs_fee_updates.add(event.canonical_identifier)
elif isinstance(event, PFS_UPDATE_CAPACITY_EVENTS):
pfs_capacity_updates.add(event.canonical_identifier)
for monitoring_update in monitoring_updates.values():
update_monitoring_service_from_balance_proof(
raiden=self,
chain_state=new_state,
new_balance_proof=monitoring_update.balance_proof,
non_closing_participant=self.address,
)
for canonical_identifier in pfs_capacity_updates:
send_pfs_update(raiden=self, canonical_identifier=canonical_identifier)
for canonical_identifier in pfs_fee_updates:
send_pfs_update(
raiden=self, canonical_identifier=canonical_identifier, update_fee_schedule=True
)
log.debug(
"Raiden events",
node=to_checksum_address(self.address),
raiden_events=[redact_secret(DictSerializer.serialize(event)) for event in events],
)
self.state_change_qty += len(state_changes)
self._maybe_snapshot()
if self.ready_to_process_events:
return self.async_handle_events(chain_state=new_state, raiden_events=events)
else:
return list()
def _maybe_snapshot(self) -> None:
if self.state_change_qty > self.state_change_qty_snapshot + SNAPSHOT_STATE_CHANGES_COUNT:
assert self.wal, "WAL must be set."
log.debug("Storing snapshot")
self.wal.snapshot(self.state_change_qty)
self.state_change_qty_snapshot = self.state_change_qty
def async_handle_events(
self, chain_state: ChainState, raiden_events: List[RaidenEvent]
) -> List[Greenlet]:
"""Spawn a new thread to handle a Raiden event.
This will spawn a new greenlet to handle each event, which is
important for two reasons:
- Blockchain transactions can be queued without interfering with each
other.
- The calling thread is free to do more work. This is especially
important for the AlarmTask thread, which will eventually cause the
node to send transactions when a given Block is reached (e.g.
registering a secret or settling a channel).
Important:
This is spawning a new greenlet for /each/ transaction. It's
therefore /required/ that there is *NO* order among these.
"""
typecheck(chain_state, ChainState)
event_wrapper = EventWrapper(raiden_events)
parsed_events = event_wrapper.wrap_events()
fast_events = list()
greenlets: List[Greenlet] = list()
# These events are slow to process, and they will add extra delay to the protocol messages.
# To avoid unnecessary delays and weird edge cases, every event that can lead to a blocking
# operation is handled in a separated thread.
#
# - ContractSend* events will send transactions that can take multiple minutes to be
# processed, since that will wait for the transaction to be mined and confirmed.
# - SendSecretRequest events may take a long time if a resolver is used, which can be as
# high as the lock expiration (couple of minutes).
# - Payment related events may block on the PFS. (see `PFSFeedbackEventHandler`)
blocking_events = (
EventRouteFailed,
EventPaymentSentSuccess,
SendSecretRequest,
ContractSendEvent,
RequestMetadata,
)
for event in parsed_events:
if isinstance(event, blocking_events):
greenlets.append(
spawn_named(
"rs-handle_blocking_events", self._handle_events, chain_state, [event]
)
)
else:
fast_events.append(event)
if fast_events:
greenlets.append(
spawn_named("rs-handle_events", self._handle_events, chain_state, fast_events)
)
return greenlets
def _handle_events(self, chain_state: ChainState, raiden_events: List[RaidenEvent]) -> None:
try:
self.raiden_event_handler.on_raiden_events(
raiden=self, chain_state=chain_state, events=raiden_events
)
except RaidenRecoverableError as e:
log.info(str(e))
except InvalidDBData:
raise
except (RaidenUnrecoverableError, BrokenPreconditionError) as e:
log_unrecoverable = (
self.config.environment_type == Environment.PRODUCTION
and not self.config.unrecoverable_error_should_crash
)
if log_unrecoverable:
log.error(str(e))
else:
raise
def _best_effort_synchronize(self, latest_block: BlockData) -> SynchronizationState:
"""Called with the current latest block, tries to synchronize with the
*confirmed* head of the chain in a best-effort manner; it is not
guaranteed to succeed in a single call since `latest_block` may become
pruned.
Note:
This should be called only once per block, otherwise there will be
duplicated `Block` state changes in the log.
"""
latest_block_number = latest_block["number"]
# Handle testing with private chains. The block number can be
# smaller than confirmation_blocks
current_confirmed_head = BlockNumber(
max(GENESIS_BLOCK_NUMBER, latest_block_number - self.confirmation_blocks)
)
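# Worked example (illustrative numbers): with confirmation_blocks=5 and
# latest_block_number=1000 the confirmed head is 995; on a fresh private
# chain with latest_block_number=3 the max() clamps the head to the
# genesis block instead of a negative number.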
return self._best_effort_synchronize_with_confirmed_head(
current_confirmed_head, self.config.blockchain.timeout_before_block_pruned
)
def _best_effort_synchronize_with_confirmed_head(
self, current_confirmed_head: BlockNumber, timeout: float
) -> SynchronizationState:
"""Tries to synchronize with the blockchain events up to
`current_confirmed_head`. This may stop before being fully synchronized
if the block `current_confirmed_head` is close to being pruned.
Multiple queries may be necessary on restarts, because the node may
have been offline for an extended period of time. During normal
operation this must not happen, because in that case the node may have
missed important events, like a channel close, while the transport
layer was running, which can lead to loss of funds.
It is very important for `current_confirmed_head` to be a confirmed
block that has not been pruned. Unconfirmed blocks are a problem
because of reorgs: some operations performed based on the events are
irreversible, namely sending a balance proof after a channel deposit.
Once a node accepts a deposit, those tokens can be used to do mediated
transfers, and if a reorg removes the deposit the tokens could be lost.
Using older blocks is a problem because of data availability: in some
cases it is necessary to query the blockchain to fetch data which is
not available in an event. The original event block is used, however
that block may have been pruned if the synchronization is considerably
lagging behind (which happens after long restarts), so a newer block
number is needed as a fallback; `latest` is not a valid option because
of reorgs.
This function takes care of fetching blocks in batches and confirming
their result. This is important to keep memory usage low and to speed
up restarts. Memory usage can get a hit if the node is asleep for a
long period of time, since all the missing confirmed blocks have to be
fetched before the node is in a working state. Restarts would take a
hit if the node were closed while synchronizing without regularly
saving that work; since progress is persisted per batch, a node killed
mid-synchronization only loses a little progress instead of starting over.
"""
msg = (
f"The blockchain event handler has to be instantiated before the "
f"alarm task is started. node:{self!r}"
)
assert self.blockchain_events, msg
state_changes = []
raiden_events = []
guard = SyncTimeout(current_confirmed_head, timeout)
while guard.should_continue(self.blockchain_events.last_fetched_block):
poll_result = self.blockchain_events.fetch_logs_in_batch(current_confirmed_head)
if poll_result is None:
# No blocks could be fetched (due to timeout), retry
continue
assert self.wal, "raiden.wal not set"
with self.wal.process_state_change_atomically() as dispatcher:
for event in poll_result.events:
# Important: `blockchainevent_to_statechange` has to be called
# with the block of the current confirmed head! An unconfirmed
# block could lead to the wrong state being dispatched because
# of reorgs, and older blocks are not sufficient to fix
# problems with pruning, the `SyncTimeout` is used to ensure
# the `current_confirmed_head` stays valid.
maybe_state_change = blockchainevent_to_statechange(
raiden_config=self.config,
proxy_manager=self.proxy_manager,
raiden_storage=self.wal.storage, # FIXME: use more recent
chain_state=dispatcher.latest_state(),
event=event,
current_confirmed_head=current_confirmed_head,
)
if maybe_state_change is not None:
events = dispatcher.dispatch(maybe_state_change)
state_changes.append(maybe_state_change)
raiden_events.extend(events)
# On restarts the node has to pick up all events generated since the
# last run. To do this the node will set the filters' from_block to
# the value of the latest block number known to have *all* events
# processed.
#
# To guarantee the above the node must either:
#
# - Dispatch the state changes individually, leaving the Block
# state change last, so that it knows all the events for the
# given block have been processed. On restarts this can result in
# the same event being processed twice.
# - Dispatch all the smart contract events together with the Block
# state change in a single transaction, either all or nothing will
# be applied, and on a restart the node picks up from where it
# left.
#
# The approach used below is to dispatch the Block and the
# blockchain events in a single transaction. This is the preferred
# approach because it guarantees that no events will be missed and
# it fixes race conditions on the value of the block number value,
# that can lead to crashes.
#
# Example: The user creates a new channel with an initial deposit
# of X tokens. This is done with two operations, the first is to
# open the new channel, the second is to deposit the requested
# tokens in it. Once the node fetches the event for the new channel,
# it will immediately request the deposit, which leaves a window for
# a race condition. If the Block state change was not yet
# processed, the block hash used as the triggering block for the
# deposit will be off-by-one, and it will point to the block
# immediately before the channel existed. This breaks a proxy
# precondition which crashes the client.
block_state_change = Block(
block_number=poll_result.polled_block_number,
gas_limit=poll_result.polled_block_gas_limit,
block_hash=poll_result.polled_block_hash,
)
events = dispatcher.dispatch(block_state_change)
state_changes.append(block_state_change)
raiden_events.extend(events)
self._log_sync_progress(poll_result.polled_block_number, current_confirmed_head)
log.debug(
"State changes",
node=to_checksum_address(self.address),
state_changes=[
redact_secret(DictSerializer.serialize(state_change))
for state_change in state_changes
],
)
event_greenlets = self._trigger_state_change_effects(
new_state=views.state_from_raiden(self),
state_changes=state_changes,
events=raiden_events,
)
for greenlet in event_greenlets:
self.add_pending_greenlet(greenlet)
current_synched_block_number = self.get_block_number()
log.debug(
"Synchronized to a new confirmed block",
sync_elapsed=guard.time_elapsed(),
block_number=current_synched_block_number,
)
msg = "current_synched_block_number is larger than current_confirmed_head"
assert current_synched_block_number <= current_confirmed_head, msg
if current_synched_block_number < current_confirmed_head:
return SynchronizationState.PARTIALLY_SYNCED
return SynchronizationState.FULLY_SYNCED
def _initialize_transactions_queues(self, chain_state: ChainState) -> None:
"""Initialize the pending transaction queue from the previous run.
Note:
This will only send the transactions which don't have their
side-effects applied. Transactions which another node may have sent
already will be detected by the alarm task's first run and cleared
from the queue (e.g. A monitoring service update transfer).
"""
msg = (
f"Initializing the transaction queue requires the state to be restored. node:{self!r}"
)
assert self.wal, msg
msg = (
f"Initializing the transaction queue must be done after the "
f"blockchain has be synched. This removes invalidated transactions from "
f"the queue. node:{self!r}"
)
assert self.blockchain_events, msg
pending_transactions = cast(List[RaidenEvent], views.get_pending_transactions(chain_state))
log.debug(
"Initializing transaction queues",
num_pending_transactions=len(pending_transactions),
node=to_checksum_address(self.address),
)
transaction_greenlets = self.async_handle_events(
chain_state=chain_state, raiden_events=pending_transactions
)
for greenlet in transaction_greenlets:
self.add_pending_greenlet(greenlet)
def _initialize_payment_statuses(self, chain_state: ChainState) -> None:
"""Re-initialize targets_to_identifiers_to_statuses.
Restore the PaymentStatus for any pending payment. This is not tied to
a specific protocol message but to the lifecycle of a payment, i.e.
the status is re-created if a payment itself has not completed.
"""
with self.payment_identifier_lock:
secret_hashes = [
to_hex(secrethash)
for secrethash in chain_state.payment_mapping.secrethashes_to_task
]
log.debug(
"Initializing payment statuses",
secret_hashes=secret_hashes,
node=to_checksum_address(self.address),
)
for task in chain_state.payment_mapping.secrethashes_to_task.values():
if not isinstance(task, InitiatorTask):
continue
# Every transfer in the transfers_list must have the same target
# and payment_identifier, so using the first transfer is
# sufficient.
initiator = next(iter(task.manager_state.initiator_transfers.values()))
transfer = initiator.transfer
transfer_description = initiator.transfer_description
target = transfer.target
identifier = transfer.payment_identifier
balance_proof = transfer.balance_proof
self.targets_to_identifiers_to_statuses[target][identifier] = PaymentStatus(
payment_identifier=identifier,
amount=transfer_description.amount,
token_network_address=balance_proof.token_network_address,
payment_done=AsyncResult(),
lock_timeout=initiator.transfer_description.lock_timeout,
)
def _initialize_messages_queues(self, chain_state: ChainState) -> None:
"""Initialize all the message queues with the transport.
Note:
All messages from the state queues must be pushed to the transport
before it's started. This is necessary to avoid a race where the
transport processes network messages too quickly, queueing new
messages before any of the previous messages, resulting in new
messages being out-of-order.
The Alarm task must be started before this method is called,
otherwise queues for channels closed while the node was offline
won't be properly cleared. This is not harmful, but it is suboptimal.
"""
assert not self.transport, f"Transport is running. node:{self!r}"
msg = f"Node must be synchronized with the blockchain. node:{self!r}"
assert self.blockchain_events, msg
events_queues = views.get_all_messagequeues(chain_state)
log.debug(
"Initializing message queues",
queues_identifiers=list(events_queues.keys()),
node=to_checksum_address(self.address),
)
all_messages: List[MessagesQueue] = list()
for queue_identifier, event_queue in events_queues.items():
queue_messages: List[Tuple[Message, Optional[AddressMetadata]]] = list()
for event in event_queue:
message = message_from_sendevent(event)
self.sign(message)
# FIXME: this will load the recipient's metadata from the persisted
# state. If the recipient roamed during the offline time of our node,
# the message will never reach the recipient,
# especially since we don't have a WebRTC connection with the recipient at
# startup.
# Depending on the time our node is offline, roaming of the recipient
# can become more likely.
queue_messages.append((message, event.recipient_metadata))
all_messages.append(MessagesQueue(queue_identifier, queue_messages))
self.transport.send_async(all_messages)
def _initialize_monitoring_services_queue(self, chain_state: ChainState) -> None:
"""Send the monitoring requests for all current balance proofs.
Note:
The node must always send the *received* balance proof to the
monitoring service, *before* sending its own locked transfer
forward. If the monitoring service is updated after, then the
following can happen:
For a transfer A-B-C where this node is B
- B receives T1 from A and processes it
- B forwards its T2 to C
* B crashes (the monitoring service is not updated)
For the above scenario, the monitoring service would not have the
latest balance proof received by B from A available with the lock
for T1, but C would. If the channel B-C is closed and B does not
come back online in time, the funds for the lock L1 can be lost.
During restarts the rationale from above has to be replicated,
because the initialization code *is not* the same as the event
handler. This means the balance proof updates must be done prior to
the processing of the message queues.
"""
msg = (
f"Transport was started before the monitoring service queue was updated. "
f"This can lead to safety issue. node:{self!r}"
)
assert not self.transport, msg
msg = f"The node state was not yet recovered, cant read balance proofs. node:{self!r}"
assert self.wal, msg
# Fetch all balance proofs from the chain_state
current_balance_proofs: List[BalanceProofSignedState] = []
for tn_registry in chain_state.identifiers_to_tokennetworkregistries.values():
for tn in tn_registry.tokennetworkaddresses_to_tokennetworks.values():
for channel in tn.channelidentifiers_to_channels.values():
balance_proof = channel.partner_state.balance_proof
if not balance_proof:
continue
assert isinstance(balance_proof, BalanceProofSignedState), MYPY_ANNOTATION
current_balance_proofs.append(balance_proof)
log.debug(
"Initializing monitoring services",
num_of_balance_proofs=len(current_balance_proofs),
node=to_checksum_address(self.address),
)
for balance_proof in current_balance_proofs:
update_monitoring_service_from_balance_proof(
self,
chain_state=chain_state,
new_balance_proof=balance_proof,
non_closing_participant=self.address,
)
def _initialize_channel_fees(self) -> None:
"""Initializes the fees of all open channels to the latest set values.
This includes a recalculation of the dynamic rebalancing fees.
"""
chain_state = views.state_from_raiden(self)
fee_config = self.config.mediation_fees
token_addresses = views.get_token_identifiers(
chain_state=chain_state, token_network_registry_address=self.default_registry.address
)
for token_address in token_addresses:
channels = views.get_channelstate_open(
chain_state=chain_state,
token_network_registry_address=self.default_registry.address,
token_address=token_address,
)
for channel in channels:
# get the flat fee for this network if set, otherwise the default
flat_fee = fee_config.get_flat_fee(channel.token_address)
proportional_fee = fee_config.get_proportional_fee(channel.token_address)
proportional_imbalance_fee = fee_config.get_proportional_imbalance_fee(
channel.token_address
)
log.info(
"Updating channel fees",
channel=channel.canonical_identifier,
cap_mediation_fees=fee_config.cap_meditation_fees,
flat_fee=flat_fee,
proportional_fee=proportional_fee,
proportional_imbalance_fee=proportional_imbalance_fee,
)
imbalance_penalty = calculate_imbalance_fees(
channel_capacity=get_capacity(channel),
proportional_imbalance_fee=proportional_imbalance_fee,
)
channel.fee_schedule = FeeScheduleState(
cap_fees=fee_config.cap_meditation_fees,
flat=flat_fee,
proportional=proportional_fee,
imbalance_penalty=imbalance_penalty,
)
send_pfs_update(
raiden=self,
canonical_identifier=channel.canonical_identifier,
update_fee_schedule=True,
)
def sign(self, message: Message) -> None:
"""Sign message inplace."""
if not isinstance(message, SignedMessage):
raise ValueError("{} is not signable.".format(repr(message)))
message.sign(self.signer)
def mediated_transfer_async(
self,
token_network_address: TokenNetworkAddress,
amount: PaymentAmount,
target: TargetAddress,
identifier: PaymentID,
secret: Secret = None,
secrethash: SecretHash = None,
lock_timeout: BlockTimeout = None,
route_states: List[RouteState] = None,
) -> PaymentStatus:
"""Transfer `amount` between this node and `target`.
This method will start an asynchronous transfer, the transfer might fail
or succeed depending on a couple of factors:
- Existence of a path that can be used, through the usage of direct
or intermediary channels.
- Network speed, making the transfer sufficiently fast so it doesn't
expire.
"""
if secret is None:
if secrethash is None:
secret = random_secret()
else:
secret = ABSENT_SECRET
if secrethash is None:
secrethash = sha256_secrethash(secret)
elif secret != ABSENT_SECRET:
if secrethash != sha256_secrethash(secret):
raise InvalidSecretHash("provided secret and secret_hash do not match.")
if len(secret) != SECRET_LENGTH:
raise InvalidSecret("secret of invalid length.")
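# Hedged illustration of the defaulting above (values made up): calling
# mediated_transfer_async(..., secret=None, secrethash=None) generates a
# random 32-byte secret and derives secrethash = sha256(secret); passing
# only a secrethash keeps the secret at ABSENT_SECRET, which is how a
# payment whose secret is known externally (e.g. to a resolver) is started.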
log.debug(
"Mediated transfer",
node=to_checksum_address(self.address),
target=to_checksum_address(target),
amount=amount,
identifier=identifier,
token_network_address=to_checksum_address(token_network_address),
)
# We must check if the secret was registered against the latest block,
# even if the block is forked away and the transaction that registers
# the secret is removed from the blockchain. The rationale here is that
# someone else does know the secret, regardless of the chain state, so
# the node must not use it to start a payment.
#
# For this particular case, it's preferable to use `latest` instead of
# a specific block_hash, because knowing whether the secret was ever
# known matters more than having a consistent view of the blockchain.
secret_registered = self.default_secret_registry.is_secret_registered(
secrethash=secrethash, block_identifier=BLOCK_ID_LATEST
)
if secret_registered:
raise RaidenUnrecoverableError(
f"Attempted to initiate a locked transfer with secrethash {to_hex(secrethash)}."
f" That secret is already registered onchain."
)
# Checks if there is a payment in flight with the same payment_id and
# target. If there is such a payment and the details match, instead of
# starting a new payment this will give the caller the existing
# details. This prevents Raiden from having concurrently identical
# payments, which would likely mean paying more than once for the same
# thing.
with self.payment_identifier_lock:
payment_status = self.targets_to_identifiers_to_statuses[target].get(identifier)
if payment_status:
payment_status_matches = payment_status.matches(token_network_address, amount)
if not payment_status_matches:
raise PaymentConflict("Another payment with the same id is in flight")
return payment_status
payment_status = PaymentStatus(
payment_identifier=identifier,
amount=amount,
token_network_address=token_network_address,
payment_done=AsyncResult(),
lock_timeout=lock_timeout,
)
self.targets_to_identifiers_to_statuses[target][identifier] = payment_status
error_msg, init_initiator_statechange = initiator_init(
raiden=self,
transfer_identifier=identifier,
transfer_amount=amount,
transfer_secret=secret,
transfer_secrethash=secrethash,
token_network_address=token_network_address,
target_address=target,
lock_timeout=lock_timeout,
route_states=route_states,
)
# FIXME: Dispatch the state change even if there are no routes to
# create the WAL entry.
if error_msg is None:
self.handle_and_track_state_changes([init_initiator_statechange])
else:
failed = EventPaymentSentFailed(
token_network_registry_address=self.default_registry.address,
token_network_address=token_network_address,
identifier=identifier,
target=target,
reason=error_msg,
)
payment_status.payment_done.set(failed)
return payment_status
def withdraw(
self,
canonical_identifier: CanonicalIdentifier,
total_withdraw: WithdrawAmount,
recipient_metadata: AddressMetadata = None,
) -> None:
init_withdraw = ActionChannelWithdraw(
canonical_identifier=canonical_identifier,
total_withdraw=total_withdraw,
recipient_metadata=recipient_metadata,
)
self.handle_and_track_state_changes([init_withdraw])
def set_channel_reveal_timeout(
self, canonical_identifier: CanonicalIdentifier, reveal_timeout: BlockTimeout
) -> None:
action_set_channel_reveal_timeout = ActionChannelSetRevealTimeout(
canonical_identifier=canonical_identifier, reveal_timeout=reveal_timeout
)
self.handle_and_track_state_changes([action_set_channel_reveal_timeout])
def maybe_upgrade_db(self) -> None:
manager = UpgradeManager(
db_filename=self.config.database_path, raiden=self, web3=self.rpc_client.web3
)
manager.run()
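# --- Added usage sketch; not part of the upstream module. ---
# The docstring of `mediated_transfer_async` above describes a fire-and-forget
# API: the call returns a PaymentStatus whose `payment_done` AsyncResult is
# resolved later by the initiator state machine. The uncalled helper below
# shows, assuming a fully initialised RaidenService instance, how a caller
# might drive that API and wait for the outcome; the amount, identifier and
# target values are hypothetical placeholders.
def _example_mediated_payment(
    raiden: "RaidenService",
    token_network_address: TokenNetworkAddress,
    target: TargetAddress,
) -> bool:
    payment_status = raiden.mediated_transfer_async(
        token_network_address=token_network_address,
        amount=PaymentAmount(10),
        target=target,
        identifier=PaymentID(1),
    )
    # payment_done is a gevent AsyncResult; block until the state machine
    # resolves the payment (a timeout argument could be passed to bound this).
    result = payment_status.payment_done.get()
    # EventPaymentSentFailed marks failure; anything else is treated as success.
    return not isinstance(result, EventPaymentSentFailed)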
| mit |
Cojacfar/Maker | comm/lib/python2.7/site-packages/django/contrib/gis/tests/relatedapp/tests.py | 58 | 14918 | from __future__ import absolute_import
from django.contrib.gis.geos import HAS_GEOS
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB, mysql, oracle, no_mysql, no_oracle, no_spatialite
from django.test import TestCase
from django.utils.unittest import skipUnless
if HAS_GEOS:
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from .models import City, Location, DirectoryEntry, Parcel, Book, Author, Article
@skipUnless(HAS_GEOS and HAS_SPATIAL_DB, "Geos and spatial db are required.")
class RelatedGeoModelTest(TestCase):
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.all()
qs2 = City.objects.select_related()
qs3 = City.objects.select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@no_mysql
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
# Doing this implicitly sets `select_related` to select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@no_mysql
@no_spatialite
def test04a_related_extent_aggregate(self):
"Testing the `extent` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.extent(field_name='location__point')
e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol)
@no_mysql
def test04b_related_union_aggregate(self):
"Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
# union that is returned. Each point # corresponds to City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# Creating the reference union geometry depending on the spatial backend,
# as Oracle will have a different internal ordering of the component
# geometries than PostGIS. The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.unionagg()`).
if oracle:
ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326)
ref_u2 = MultiPoint(p3, p2, srid=4326)
else:
# Looks like PostGIS orders points by longitude value.
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.unionagg(field_name='location__point')
u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point')
u3 = aggs['location__point__union']
self.assertEqual(ref_u1, u1)
self.assertEqual(ref_u2, u2)
self.assertEqual(ref_u1, u3)
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
l = list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if not mysql:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if not mysql:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
# GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Iterating through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.assertTrue(isinstance(d['point'], Geometry))
self.assertTrue(isinstance(t[1], Geometry))
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
# ID column is selected instead of the ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.assertTrue('Aurora' in names)
self.assertTrue('Kecksburg' in names)
def test11_geoquery_pickle(self):
"Ensuring GeoQuery objects are unpickled correctly. See #10839."
import pickle
from django.contrib.gis.db.models.sql import GeoQuery
qs = City.objects.all()
q_str = pickle.dumps(qs.query)
q = pickle.loads(q_str)
self.assertEqual(GeoQuery, q.__class__)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# The City, 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a `GeoValuesQuerySet`, see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
def test13c_count(self):
"Testing `Count` aggregate with `.values()`. See #15305."
qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
self.assertEqual(1, len(qs))
self.assertEqual(2, qs[0]['num_cities'])
self.assertTrue(isinstance(qs[0]['point'], GEOSGeometry))
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
no_author = Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertEqual(None, b.author)
@no_mysql
@no_oracle
@no_spatialite
def test14_collect(self):
"Testing the `collect` GeoQuerySet method and `Collect` aggregate."
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')
c1 = City.objects.filter(state='TX').collect(field_name='location__point')
c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
for coll in (c1, c2):
# Even though Dallas and Ft. Worth share the same point, Collect doesn't
# consolidate -- that's why 4 points in MultiPoint.
self.assertEqual(4, len(coll))
self.assertEqual(ref_geom, coll)
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers TypeError when `get_default_columns` has no `local_only`
# keyword. The TypeError is swallowed if QuerySet is actually
# evaluated, as list generation swallows TypeError in CPython.
sql = str(qs.query)
def test16_annotated_date_queryset(self):
"Ensure annotated date querysets work if spatial backend is used. See #14648."
birth_years = [dt.year for dt in
list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
birth_years.sort()
self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.
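# --- Added note (sketch, not part of the upstream test suite). ---
# The tests above exercise both API styles for spatial aggregation available in
# this Django version: the GeoQuerySet methods (`.extent()`, `.unionagg()`,
# `.collect()`) and the equivalent aggregate classes passed to `.aggregate()`.
# The aggregate form used in test04a, test04b and test14 looks like:
#
#     City.objects.aggregate(Extent('location__point'))
#     City.objects.aggregate(Union('location__point'))
#     City.objects.filter(state='TX').aggregate(Collect('location__point'))
#
# and each call returns a dict keyed '<lookup>__<aggregate name>', e.g.
# 'location__point__extent', which is exactly how the assertions read them.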
| gpl-2.0 |
azov3902/linguinesim | linguineglobals.py | 1 | 2551 | ################################################################################
#
# File: linguineglobals.py
# Author: Anna Zovaro
# Email: [email protected]
#
# Description:
# Some useful constants.
#
# Copyright (C) 2016 Anna Zovaro
#
################################################################################
#
# This file is part of linguinesim.
#
# linguinesim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# linguinesim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with linguinesim. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
from __future__ import division, print_function
import astropy.constants
################################################################################
# Vega band magnitudes calculated using data from
# https://www.astro.umd.edu/~ssm/ASTR620/mags.html
# See also http://www.astronomy.ohio-state.edu/~martini/usefuldata.html
VEGA_MAGNITUDE_ZEROPOINT = {
'J' : 49.46953099,
'H' : 49.95637318,
'K' : 50.47441871
}
AB_MAGNITUDE_ZEROPOINT = 48.6
FILTER_BANDS_M = {
# [centre wavelength_m, width, min, max]
# Bands U through I taken from https://en.wikipedia.org/wiki/Photometric_system.
'U' : [365e-9, 66e-9, 0, 0],
'B' : [445e-9, 94e-9, 0, 0],
'V' : [551e-9, 88e-9, 0, 0],
'R' : [658e-9, 138e-9, 0, 0],
'I' : [806e-9, 149e-9, 0, 0],
'J' : [1.250e-6, 0.160e-6, 0, 0], # GMTIFS
'H' : [1.635e-6, 0.290e-6, 0, 0], # GMTIFS
'K' : [2.200e-6, 0.340e-6, 0, 0] # GMTIFS
}
# Calculating filter endpoints
for key in FILTER_BANDS_M:
FILTER_BANDS_M[key][2] = FILTER_BANDS_M[key][0] - 0.5 * FILTER_BANDS_M[key][1]
FILTER_BANDS_M[key][3] = FILTER_BANDS_M[key][0] + 0.5 * FILTER_BANDS_M[key][1]
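# Worked example (added note): after the loop above each entry holds
# [centre, width, min, max]. For the J band listed above that evaluates to
# approximately [1.250e-6, 0.160e-6, 1.170e-6, 1.330e-6], i.e.
# min = 1.250e-6 - 0.5 * 0.160e-6 and max = 1.250e-6 + 0.5 * 0.160e-6.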
AL_REFLECTIVITY = 0.909
AL_EMISSIVITY = 1 - AL_REFLECTIVITY
# TENTH_AIRY_RING = 10.25 # multiples of N_os corresponding to the 10th Airy ring in a diffraction-limited PSF
# Solar properties
T_SUN_K = 5777 # Temperature (K)
R_SUN_M = astropy.constants.R_sun.value # Radius (m)
DIST_SUN_M = astropy.constants.au.value # 1 AU (Distance from Earth's centre (m)) | gpl-3.0 |
fraxen/nordpil_arcpy | python/extractwoa.py | 1 | 2118 | import sys, os
# {{{ HANDLE PARAMETERS
try:
if len(sys.argv) < 2: raise
inFile = sys.argv[1]
if (len(sys.argv) == 2):
outFile = 'outgrid'
else:
outFile = sys.argv[2]
if (not os.path.exists(inFile)): raise
if (os.path.exists(outFile)): raise
except:
print "\nPlease verify the input arguments:\n" + sys.argv[0] + " <input ascii-file> <output ascii grid>\n\n"
sys.exit()
if (outFile[(len(outFile)-4):len(outFile)] <> ".asc"): outFile = outFile + '.asc'
print "\nParsing file:\n....." + inFile + "\nand creating ArcInfo raster ascii grid\n....." + outFile + "\n"
# }}}
# Need to revise the script to extract other depth horizons, or for other cell sizes...
gridCols = 360
gridRows = 180
inLevels = 33
inputFile = open(inFile, "r")
outputFile = open(outFile, "w")
# {{{ READING
print 'Reading all values'
thisData = inputFile.readline()
allData ={}
for row in range(1, gridRows+1):
allData[row] = []
for col in range(0, gridCols+1):
allData[row].append(-9999)
row = 181
col = 180
# }}}
# {{{ PARSING
while not (row == 1 and col == 180):
for rowVal in range(0,10):
col = col + 1
if (col == 361): col = 1
if (col == 181): row = row-1
thisVal = float(thisData[8*rowVal:7+8*rowVal].strip())
if (thisVal == -99.999):
thisVal = -9999
else:
thisVal = int(round(thisVal * 100,0))
#thisVal = round(thisVal,3)
allData[row][col-1] = thisVal
#print str(row) + ' - ' + str(col) + ' - ' + str(thisVal)
if (thisVal == 20.307): exit()
thisData = inputFile.readline()
print 'Parsed values'
# }}}
# {{{ WRITING
# {{{ HEADER
outputFile.write('NCOLS ' + str(gridCols) + '\n')
outputFile.write('NROWS ' + str(gridRows) + '\n')
outputFile.write('XLLCORNER -180\n')
outputFile.write('YLLCORNER -90\n')
outputFile.write('CELLSIZE ' + str(gridCols/360) + '\n')
outputFile.write('NODATA_VALUE -9999\n')
print 'Header written'
# }}}
for row in range(1,gridRows+1):
outQueue = ''
for col in range(1,gridCols+1):
outQueue = outQueue + str(allData[row][col-1]) + ' '
outputFile.write(outQueue + '\n')
print 'Values written'
# }}}
print 'Script finished'
inputFile.close()
outputFile.close()
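# --- Added note (sketch, not part of the original script). ---
# The output follows the ESRI ASCII raster layout: the six header lines written
# above, then 180 rows of 360 integer values, scaled by 100 and using -9999 as
# the nodata marker. One way to read the grid back for checking (assuming
# numpy is available) would be:
#
#     import numpy as np
#     grid = np.loadtxt('outgrid.asc', skiprows=6)
#     grid = np.where(grid == -9999, np.nan, grid / 100.0)  # undo the x100 scaling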
| apache-2.0 |
girving/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/batch_reshape_test.py | 25 | 21843 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for BatchReshape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import batch_reshape as batch_reshape_lib
from tensorflow.contrib.distributions.python.ops import mvn_diag as mvn_lib
from tensorflow.contrib.distributions.python.ops import poisson as poisson_lib
from tensorflow.contrib.distributions.python.ops import wishart as wishart_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.platform import test
class _BatchReshapeTest(object):
def make_wishart(self, dims, new_batch_shape, old_batch_shape):
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = self.dtype([
[[1., 0.5],
[0.5, 1.]],
[[0.5, 0.25],
[0.25, 0.75]],
])
scale = np.reshape(np.concatenate([scale, scale], axis=0),
old_batch_shape + [dims, dims])
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
wishart = wishart_lib.WishartFull(df=5, scale=scale_ph)
reshape_wishart = batch_reshape_lib.BatchReshape(
distribution=wishart,
batch_shape=new_batch_shape_ph,
validate_args=True)
return wishart, reshape_wishart
def test_matrix_variate_sample_and_log_prob(self):
dims = 2
new_batch_shape = [4]
old_batch_shape = [2, 2]
wishart, reshape_wishart = self.make_wishart(
dims, new_batch_shape, old_batch_shape)
batch_shape = reshape_wishart.batch_shape_tensor()
event_shape = reshape_wishart.event_shape_tensor()
expected_sample_shape = [3, 1] + new_batch_shape + [dims, dims]
x = wishart.sample([3, 1], seed=42)
expected_sample = array_ops.reshape(x, expected_sample_shape)
actual_sample = reshape_wishart.sample([3, 1], seed=42)
expected_log_prob_shape = [3, 1] + new_batch_shape
expected_log_prob = array_ops.reshape(
wishart.log_prob(x), expected_log_prob_shape)
actual_log_prob = reshape_wishart.log_prob(expected_sample)
with self.cached_session() as sess:
[
batch_shape_,
event_shape_,
expected_sample_, actual_sample_,
expected_log_prob_, actual_log_prob_,
] = sess.run([
batch_shape,
event_shape,
expected_sample, actual_sample,
expected_log_prob, actual_log_prob,
])
self.assertAllEqual(new_batch_shape, batch_shape_)
self.assertAllEqual([dims, dims], event_shape_)
self.assertAllClose(expected_sample_, actual_sample_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_log_prob_, actual_log_prob_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(new_batch_shape, reshape_wishart.batch_shape)
self.assertAllEqual([dims, dims], reshape_wishart.event_shape)
self.assertAllEqual(expected_sample_shape, actual_sample.shape)
self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
def test_matrix_variate_stats(self):
dims = 2
new_batch_shape = [4]
old_batch_shape = [2, 2]
wishart, reshape_wishart = self.make_wishart(
dims, new_batch_shape, old_batch_shape)
expected_scalar_stat_shape = new_batch_shape
expected_matrix_stat_shape = new_batch_shape + [dims, dims]
expected_entropy = array_ops.reshape(
wishart.entropy(), expected_scalar_stat_shape)
actual_entropy = reshape_wishart.entropy()
expected_mean = array_ops.reshape(
wishart.mean(), expected_matrix_stat_shape)
actual_mean = reshape_wishart.mean()
expected_mode = array_ops.reshape(
wishart.mode(), expected_matrix_stat_shape)
actual_mode = reshape_wishart.mode()
expected_stddev = array_ops.reshape(
wishart.stddev(), expected_matrix_stat_shape)
actual_stddev = reshape_wishart.stddev()
expected_variance = array_ops.reshape(
wishart.variance(), expected_matrix_stat_shape)
actual_variance = reshape_wishart.variance()
with self.cached_session() as sess:
[
expected_entropy_, actual_entropy_,
expected_mean_, actual_mean_,
expected_mode_, actual_mode_,
expected_stddev_, actual_stddev_,
expected_variance_, actual_variance_,
] = sess.run([
expected_entropy, actual_entropy,
expected_mean, actual_mean,
expected_mode, actual_mode,
expected_stddev, actual_stddev,
expected_variance, actual_variance,
])
self.assertAllClose(expected_entropy_, actual_entropy_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mean_, actual_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mode_, actual_mode_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_stddev_, actual_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_variance_, actual_variance_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_mean.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_mode.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_stddev.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_variance.shape)
def make_normal(self, new_batch_shape, old_batch_shape):
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = self.dtype(0.5 + np.arange(
np.prod(old_batch_shape)).reshape(old_batch_shape))
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
normal = normal_lib.Normal(loc=self.dtype(0), scale=scale_ph)
reshape_normal = batch_reshape_lib.BatchReshape(
distribution=normal,
batch_shape=new_batch_shape_ph,
validate_args=True)
return normal, reshape_normal
def test_scalar_variate_sample_and_log_prob(self):
new_batch_shape = [2, 2]
old_batch_shape = [4]
normal, reshape_normal = self.make_normal(
new_batch_shape, old_batch_shape)
batch_shape = reshape_normal.batch_shape_tensor()
event_shape = reshape_normal.event_shape_tensor()
expected_sample_shape = new_batch_shape
x = normal.sample(seed=52)
expected_sample = array_ops.reshape(x, expected_sample_shape)
actual_sample = reshape_normal.sample(seed=52)
expected_log_prob_shape = new_batch_shape
expected_log_prob = array_ops.reshape(
normal.log_prob(x), expected_log_prob_shape)
actual_log_prob = reshape_normal.log_prob(expected_sample)
with self.cached_session() as sess:
[
batch_shape_,
event_shape_,
expected_sample_, actual_sample_,
expected_log_prob_, actual_log_prob_,
] = sess.run([
batch_shape,
event_shape,
expected_sample, actual_sample,
expected_log_prob, actual_log_prob,
])
self.assertAllEqual(new_batch_shape, batch_shape_)
self.assertAllEqual([], event_shape_)
self.assertAllClose(expected_sample_, actual_sample_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_log_prob_, actual_log_prob_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(new_batch_shape, reshape_normal.batch_shape)
self.assertAllEqual([], reshape_normal.event_shape)
self.assertAllEqual(expected_sample_shape, actual_sample.shape)
self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
def test_scalar_variate_stats(self):
new_batch_shape = [2, 2]
old_batch_shape = [4]
normal, reshape_normal = self.make_normal(new_batch_shape, old_batch_shape)
expected_scalar_stat_shape = new_batch_shape
expected_entropy = array_ops.reshape(
normal.entropy(), expected_scalar_stat_shape)
actual_entropy = reshape_normal.entropy()
expected_mean = array_ops.reshape(
normal.mean(), expected_scalar_stat_shape)
actual_mean = reshape_normal.mean()
expected_mode = array_ops.reshape(
normal.mode(), expected_scalar_stat_shape)
actual_mode = reshape_normal.mode()
expected_stddev = array_ops.reshape(
normal.stddev(), expected_scalar_stat_shape)
actual_stddev = reshape_normal.stddev()
expected_variance = array_ops.reshape(
normal.variance(), expected_scalar_stat_shape)
actual_variance = reshape_normal.variance()
with self.cached_session() as sess:
[
expected_entropy_, actual_entropy_,
expected_mean_, actual_mean_,
expected_mode_, actual_mode_,
expected_stddev_, actual_stddev_,
expected_variance_, actual_variance_,
] = sess.run([
expected_entropy, actual_entropy,
expected_mean, actual_mean,
expected_mode, actual_mode,
expected_stddev, actual_stddev,
expected_variance, actual_variance,
])
self.assertAllClose(expected_entropy_, actual_entropy_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mean_, actual_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mode_, actual_mode_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_stddev_, actual_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_variance_, actual_variance_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_mean.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_mode.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_stddev.shape)
self.assertAllEqual(expected_scalar_stat_shape, actual_variance.shape)
def make_mvn(self, dims, new_batch_shape, old_batch_shape):
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
reshape_mvn = batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
return mvn, reshape_mvn
def test_vector_variate_sample_and_log_prob(self):
dims = 3
new_batch_shape = [2, 1]
old_batch_shape = [2]
mvn, reshape_mvn = self.make_mvn(
dims, new_batch_shape, old_batch_shape)
batch_shape = reshape_mvn.batch_shape_tensor()
event_shape = reshape_mvn.event_shape_tensor()
expected_sample_shape = [3] + new_batch_shape + [dims]
x = mvn.sample(3, seed=62)
expected_sample = array_ops.reshape(x, expected_sample_shape)
actual_sample = reshape_mvn.sample(3, seed=62)
expected_log_prob_shape = [3] + new_batch_shape
expected_log_prob = array_ops.reshape(
mvn.log_prob(x), expected_log_prob_shape)
actual_log_prob = reshape_mvn.log_prob(expected_sample)
with self.cached_session() as sess:
[
batch_shape_,
event_shape_,
expected_sample_, actual_sample_,
expected_log_prob_, actual_log_prob_,
] = sess.run([
batch_shape,
event_shape,
expected_sample, actual_sample,
expected_log_prob, actual_log_prob,
])
self.assertAllEqual(new_batch_shape, batch_shape_)
self.assertAllEqual([dims], event_shape_)
self.assertAllClose(expected_sample_, actual_sample_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_log_prob_, actual_log_prob_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(new_batch_shape, reshape_mvn.batch_shape)
self.assertAllEqual([dims], reshape_mvn.event_shape)
self.assertAllEqual(expected_sample_shape, actual_sample.shape)
self.assertAllEqual(expected_log_prob_shape, actual_log_prob.shape)
def test_vector_variate_stats(self):
dims = 3
new_batch_shape = [2, 1]
old_batch_shape = [2]
mvn, reshape_mvn = self.make_mvn(
dims, new_batch_shape, old_batch_shape)
expected_scalar_stat_shape = new_batch_shape
expected_entropy = array_ops.reshape(
mvn.entropy(), expected_scalar_stat_shape)
actual_entropy = reshape_mvn.entropy()
expected_vector_stat_shape = new_batch_shape + [dims]
expected_mean = array_ops.reshape(
mvn.mean(), expected_vector_stat_shape)
actual_mean = reshape_mvn.mean()
expected_mode = array_ops.reshape(
mvn.mode(), expected_vector_stat_shape)
actual_mode = reshape_mvn.mode()
expected_stddev = array_ops.reshape(
mvn.stddev(), expected_vector_stat_shape)
actual_stddev = reshape_mvn.stddev()
expected_variance = array_ops.reshape(
mvn.variance(), expected_vector_stat_shape)
actual_variance = reshape_mvn.variance()
expected_matrix_stat_shape = new_batch_shape + [dims, dims]
expected_covariance = array_ops.reshape(
mvn.covariance(), expected_matrix_stat_shape)
actual_covariance = reshape_mvn.covariance()
with self.cached_session() as sess:
[
expected_entropy_, actual_entropy_,
expected_mean_, actual_mean_,
expected_mode_, actual_mode_,
expected_stddev_, actual_stddev_,
expected_variance_, actual_variance_,
expected_covariance_, actual_covariance_,
] = sess.run([
expected_entropy, actual_entropy,
expected_mean, actual_mean,
expected_mode, actual_mode,
expected_stddev, actual_stddev,
expected_variance, actual_variance,
expected_covariance, actual_covariance,
])
self.assertAllClose(expected_entropy_, actual_entropy_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mean_, actual_mean_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_mode_, actual_mode_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_stddev_, actual_stddev_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_variance_, actual_variance_,
atol=0., rtol=1e-6)
self.assertAllClose(expected_covariance_, actual_covariance_,
atol=0., rtol=1e-6)
if not self.is_static_shape:
return
self.assertAllEqual(expected_scalar_stat_shape, actual_entropy.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_mean.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_mode.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_stddev.shape)
self.assertAllEqual(expected_vector_stat_shape, actual_variance.shape)
self.assertAllEqual(expected_matrix_stat_shape, actual_covariance.shape)
def test_bad_reshape_size(self):
dims = 2
new_batch_shape = [2, 3]
old_batch_shape = [2] # 2 != 2*3
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape:
with self.assertRaisesRegexp(
ValueError, (r"`batch_shape` size \(6\) must match "
r"`distribution\.batch_shape` size \(2\)")):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.cached_session():
with self.assertRaisesOpError(r"Shape sizes do not match."):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample().eval()
def test_non_positive_shape(self):
dims = 2
old_batch_shape = [4]
if self.is_static_shape:
# Unknown first dimension does not trigger size check. Note that
# any dimension < 0 is treated statically as unknown.
new_batch_shape = [-1, 0]
else:
new_batch_shape = [-2, -2] # -2 * -2 = 4, same size as the old shape.
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape:
with self.assertRaisesRegexp(ValueError, r".*must be >=-1.*"):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.cached_session():
with self.assertRaisesOpError(r".*must be >=-1.*"):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample().eval()
def test_non_vector_shape(self):
dims = 2
new_batch_shape = 2
old_batch_shape = [2]
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
scale = np.ones(old_batch_shape + [dims], self.dtype)
scale_ph = array_ops.placeholder_with_default(
scale, shape=scale.shape if self.is_static_shape else None)
mvn = mvn_lib.MultivariateNormalDiag(scale_diag=scale_ph)
if self.is_static_shape:
with self.assertRaisesRegexp(ValueError, r".*must be a vector.*"):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True)
else:
with self.cached_session():
with self.assertRaisesOpError(r".*must be a vector.*"):
batch_reshape_lib.BatchReshape(
distribution=mvn,
batch_shape=new_batch_shape_ph,
validate_args=True).sample().eval()
def test_broadcasting_explicitly_unsupported(self):
old_batch_shape = [4]
new_batch_shape = [1, 4, 1]
rate_ = self.dtype([1, 10, 2, 20])
rate = array_ops.placeholder_with_default(
rate_,
shape=old_batch_shape if self.is_static_shape else None)
poisson_4 = poisson_lib.Poisson(rate)
new_batch_shape_ph = (
constant_op.constant(np.int32(new_batch_shape)) if self.is_static_shape
else array_ops.placeholder_with_default(
np.int32(new_batch_shape), shape=None))
poisson_141_reshaped = batch_reshape_lib.BatchReshape(
poisson_4, new_batch_shape_ph, validate_args=True)
x_4 = self.dtype([2, 12, 3, 23])
x_114 = self.dtype([2, 12, 3, 23]).reshape(1, 1, 4)
if self.is_static_shape:
with self.assertRaisesRegexp(NotImplementedError,
"too few batch and event dims"):
poisson_141_reshaped.log_prob(x_4)
with self.assertRaisesRegexp(NotImplementedError,
"unexpected batch and event shape"):
poisson_141_reshaped.log_prob(x_114)
return
with self.assertRaisesOpError("too few batch and event dims"):
with self.cached_session():
poisson_141_reshaped.log_prob(x_4).eval()
with self.assertRaisesOpError("unexpected batch and event shape"):
with self.cached_session():
poisson_141_reshaped.log_prob(x_114).eval()
class BatchReshapeStaticTest(_BatchReshapeTest, test.TestCase):
dtype = np.float32
is_static_shape = True
class BatchReshapeDynamicTest(_BatchReshapeTest, test.TestCase):
dtype = np.float64
is_static_shape = False
if __name__ == "__main__":
test.main()
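# --- Added usage sketch; not part of the upstream test file. ---
# Every test above follows the same pattern: build a base distribution with
# some batch shape, wrap it in BatchReshape with a new batch shape of the same
# total size, and check that sample/log_prob/statistics are plain reshapes.
# The uncalled helper below restates that pattern in one place; the shapes and
# dtype are illustrative only.
def _batch_reshape_example():
  base = mvn_lib.MultivariateNormalDiag(
      scale_diag=np.ones([4, 3], np.float32))  # batch_shape=[4], event_shape=[3]
  reshaped = batch_reshape_lib.BatchReshape(
      distribution=base,
      batch_shape=constant_op.constant(np.int32([2, 2])),  # 2 * 2 == 4
      validate_args=True)
  samples = reshaped.sample(5)       # shape [5, 2, 2, 3]
  return reshaped.log_prob(samples)  # shape [5, 2, 2]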
| apache-2.0 |
iandd0824/ri-app | web/django_mongoengine/mongo_auth/models.py | 1 | 14304 | from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.auth.models import _user_has_perm, _user_get_all_permissions, _user_has_module_perms
from django.db import models
from django.contrib.contenttypes.models import ContentTypeManager
from django.contrib import auth
from bson.objectid import ObjectId
from mongoengine import ImproperlyConfigured
from django_mongoengine import document
from django_mongoengine import fields
from .managers import MongoUserManager
try:
from django.contrib.auth.hashers import check_password, make_password
except ImportError:
"""Handle older versions of Django"""
from django.utils.hashcompat import md5_constructor, sha_constructor
def get_hexdigest(algorithm, salt, raw_password):
raw_password, salt = smart_str(raw_password), smart_str(salt)
if algorithm == 'md5':
return md5_constructor(salt + raw_password).hexdigest()
elif algorithm == 'sha1':
return sha_constructor(salt + raw_password).hexdigest()
raise ValueError('Got unknown password algorithm type in password')
def check_password(raw_password, password):
algo, salt, hash = password.split('$')
return hash == get_hexdigest(algo, salt, raw_password)
def make_password(raw_password):
from random import random
algo = 'sha1'
salt = get_hexdigest(algo, str(random()), str(random()))[:5]
hash = get_hexdigest(algo, salt, raw_password)
return '%s$%s$%s' % (algo, salt, hash)
class ContentType(document.Document):
name = fields.StringField(max_length=100)
app_label = fields.StringField(max_length=100)
model = fields.StringField(max_length=100, verbose_name=_('python model class name'),
unique_with='app_label')
objects = ContentTypeManager()
class Meta:
verbose_name = _('content type')
verbose_name_plural = _('content types')
# db_table = 'django_content_type'
# ordering = ('name',)
# unique_together = (('app_label', 'model'),)
def __unicode__(self):
return self.name
def model_class(self):
"Returns the Python model class for this type of content."
from django.db import models
return models.get_model(self.app_label, self.model)
def get_object_for_this_type(self, **kwargs):
"""
Returns an object of this type for the keyword arguments given.
Basically, this is a proxy around this object_type's get_object() model
method. The ObjectNotExist exception, if thrown, will not be caught,
so code that calls this method should catch it.
"""
return self.model_class()._default_manager.using(self._state.db).get(**kwargs)
def natural_key(self):
return (self.app_label, self.model)
class SiteProfileNotAvailable(Exception):
pass
class PermissionManager(models.Manager):
def get_by_natural_key(self, codename, app_label, model):
return self.get(
codename=codename,
content_type=ContentType.objects.get_by_natural_key(app_label, model)
)
class Permission(document.Document):
"""The permissions system provides a way to assign permissions to specific
users and groups of users.
The permission system is used by the Django admin site, but may also be
useful in your own code. The Django admin site uses permissions as follows:
- The "add" permission limits the user's ability to view the "add"
form and add an object.
- The "change" permission limits a user's ability to view the change
list, view the "change" form and change an object.
- The "delete" permission limits the ability to delete an object.
Permissions are set globally per type of object, not per specific object
instance. It is possible to say "Mary may change news stories," but it's
not currently possible to say "Mary may change news stories, but only the
ones she created herself" or "Mary may only change news stories that have
a certain status or publication date."
Three basic permissions -- add, change and delete -- are automatically
created for each Django model.
"""
name = fields.StringField(max_length=50, verbose_name=_('username'))
content_type = fields.ReferenceField(ContentType)
codename = fields.StringField(max_length=100, verbose_name=_('codename'))
# FIXME: don't access field of the other class
# unique_with=['content_type__app_label', 'content_type__model'])
objects = PermissionManager()
class Meta:
verbose_name = _('permission')
verbose_name_plural = _('permissions')
# unique_together = (('content_type', 'codename'),)
# ordering = ('content_type__app_label', 'content_type__model', 'codename')
def __unicode__(self):
return u"%s | %s | %s" % (
unicode(self.content_type.app_label),
unicode(self.content_type),
unicode(self.name))
def natural_key(self):
return (self.codename,) + self.content_type.natural_key()
natural_key.dependencies = ['contenttypes.contenttype']
class Group(document.Document):
"""Groups are a generic way of categorizing users to apply permissions,
or some other label, to those users. A user can belong to any number of
groups.
A user in a group automatically has all the permissions granted to that
group. For example, if the group Site editors has the permission
can_edit_home_page, any user in that group will have that permission.
Beyond permissions, groups are a convenient way to categorize users to
apply some label, or extended functionality, to them. For example, you
could create a group 'Special users', and you could write code that would
do special things to those users -- such as giving them access to a
members-only portion of your site, or sending them members-only
e-mail messages.
"""
name = fields.StringField(max_length=80, unique=True, verbose_name=_('name'))
permissions = fields.ListField(fields.ReferenceField(Permission, verbose_name=_('permissions'), required=False))
class Meta:
verbose_name = _('group')
verbose_name_plural = _('groups')
def __unicode__(self):
return self.name
class User(document.Document):
"""A User document that aims to mirror most of the API specified by Django
at http://docs.djangoproject.com/en/dev/topics/auth/#users
"""
username = fields.StringField(
max_length=254, verbose_name=_('username'),
help_text=_("Required. 254 characters or fewer. Letters, numbers and @/./+/-/_ characters"),
)
first_name = fields.StringField(
max_length=30, blank=True, verbose_name=_('first name'),
)
last_name = fields.StringField(
max_length=30, blank=True, verbose_name=_('last name'))
email = fields.EmailField(verbose_name=_('e-mail address'), blank=True)
password = fields.StringField(
max_length=128,
verbose_name=_('password'),
help_text=_("Use '[algo]$[iterations]$[salt]$[hexdigest]' or use the <a href=\"password/\">change password form</a>."))
is_staff = fields.BooleanField(
default=False,
verbose_name=_('staff status'),
help_text=_("Designates whether the user can log into this admin site."))
is_active = fields.BooleanField(
default=True,
verbose_name=_('active'),
help_text=_("Designates whether this user should be treated as active. Unselect this instead of deleting accounts."))
is_superuser = fields.BooleanField(
default=False,
verbose_name=_('superuser status'),
help_text=_("Designates that this user has all permissions without explicitly assigning them."))
last_login = fields.DateTimeField(
default=timezone.now,
verbose_name=_('last login'))
date_joined = fields.DateTimeField(
default=timezone.now,
verbose_name=_('date joined'))
user_permissions = fields.ListField(
fields.ReferenceField(Permission), verbose_name=_('user permissions'),
blank=True, help_text=_('Permissions for the user.'))
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
meta = {
'allow_inheritance': True,
'indexes': [
{'fields': ['username'], 'unique': True, 'sparse': True}
]
}
def __unicode__(self):
return self.username
def get_full_name(self):
"""Returns the users first and last names, separated by a space.
"""
full_name = u'%s %s' % (self.first_name or '', self.last_name or '')
return full_name.strip()
def is_anonymous(self):
return False
def is_authenticated(self):
return True
def set_password(self, raw_password):
"""Sets the user's password - always use this rather than directly
assigning to :attr:`~mongoengine.django.auth.User.password` as the
password is hashed before storage.
"""
self.password = make_password(raw_password)
self.save()
return self
def check_password(self, raw_password):
"""Checks the user's password against a provided password - always use
this rather than directly comparing to
:attr:`~mongoengine.django.auth.User.password` as the password is
hashed before storage.
"""
return check_password(raw_password, self.password)
@classmethod
def create_user(cls, username, password, email=None):
"""Create (and save) a new user with the given username, password and
email address.
"""
now = timezone.now()
# Normalize the address by lowercasing the domain part of the email
# address.
if email is not None:
try:
email_name, domain_part = email.strip().split('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
user = cls(username=username, email=email, date_joined=now)
user.set_password(password)
user.save()
return user
def get_group_permissions(self, obj=None):
"""
Returns a list of permission strings that this user has through his/her
groups. This method queries all available auth backends. If an object
is passed in, only permissions matching this object are returned.
"""
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_group_permissions"):
permissions.update(backend.get_group_permissions(self, obj))
return permissions
def get_all_permissions(self, obj=None):
return _user_get_all_permissions(self, obj)
def has_perm(self, perm, obj=None):
"""
Returns True if the user has the specified permission. This method
queries all available auth backends, but returns immediately if any
backend returns True. Thus, a user who has permission from a single
auth backend is assumed to have permission in general. If an object is
provided, permissions for this specific object are checked.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
# Otherwise we need to check the backends.
return _user_has_perm(self, perm, obj)
def has_module_perms(self, app_label):
"""
Returns True if the user has any permissions in the given app label.
Uses pretty much the same logic as has_perm, above.
"""
# Active superusers have all permissions.
if self.is_active and self.is_superuser:
return True
return _user_has_module_perms(self, app_label)
def email_user(self, subject, message, from_email=None):
"Sends an e-mail to this User."
from django.core.mail import send_mail
send_mail(subject, message, from_email, [self.email])
def get_profile(self):
"""
Returns site-specific profile for this user. Raises
SiteProfileNotAvailable if this site does not allow profiles.
"""
if not hasattr(self, '_profile_cache'):
from django.conf import settings
if not getattr(settings, 'AUTH_PROFILE_MODULE', False):
raise SiteProfileNotAvailable('You need to set AUTH_PROFILE_MO'
'DULE in your project settings')
try:
app_label, model_name = settings.AUTH_PROFILE_MODULE.split('.')
except ValueError:
raise SiteProfileNotAvailable('app_label and model_name should'
' be separated by a dot in the AUTH_PROFILE_MODULE set'
'ting')
try:
model = models.get_model(app_label, model_name)
if model is None:
raise SiteProfileNotAvailable('Unable to load the profile '
'model, check AUTH_PROFILE_MODULE in your project sett'
'ings')
self._profile_cache = model._default_manager.using(self._state.db).get(user__id__exact=self.id)
self._profile_cache.user = self
except (ImportError, ImproperlyConfigured):
raise SiteProfileNotAvailable
return self._profile_cache
class MongoUser(models.Model):
""""Dummy user model for Django.
MongoUser is used to replace Django's UserManager with MongoUserManager.
The actual user document class is django_mongoengine.auth.models.User or any
other document class specified in MONGOENGINE_USER_DOCUMENT.
To get the user document class, use `get_user_document()`.
"""
objects = MongoUserManager()
class Meta:
app_label = 'mongo_auth'
def set_password(self, password):
"""Doesn't do anything, but works around the issue with Django 1.6."""
make_password(password)
MongoUser._meta.pk.to_python = ObjectId
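# --- Added usage sketch; not part of the upstream module. ---
# The User document above mirrors Django's user API on top of MongoEngine. A
# minimal round trip, assuming a MongoEngine connection is already configured,
# could look like the uncalled helper below (all values are placeholders).
def _example_user_roundtrip():
    user = User.create_user('alice', 's3cret', email='alice@example.com')
    assert user.check_password('s3cret')
    # set_password hashes and saves; assigning to .password directly would
    # store the raw string instead of a hash.
    user.set_password('new-s3cret')
    return user.get_full_name()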
| apache-2.0 |
MikeAmy/django | django/contrib/sessions/backends/cached_db.py | 231 | 2861 | """
Cached, database-backed sessions.
"""
import logging
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DBStore
from django.core.cache import caches
from django.core.exceptions import SuspiciousOperation
from django.utils import timezone
from django.utils.encoding import force_text
KEY_PREFIX = "django.contrib.sessions.cached_db"
class SessionStore(DBStore):
"""
Implements cached, database backed sessions.
"""
cache_key_prefix = KEY_PREFIX
def __init__(self, session_key=None):
self._cache = caches[settings.SESSION_CACHE_ALIAS]
super(SessionStore, self).__init__(session_key)
@property
def cache_key(self):
return self.cache_key_prefix + self._get_or_create_session_key()
def load(self):
try:
data = self._cache.get(self.cache_key)
except Exception:
# Some backends (e.g. memcache) raise an exception on invalid
# cache keys. If this happens, reset the session. See #17810.
data = None
if data is None:
# Duplicate DBStore.load, because we need to keep track
# of the expiry date to set it properly in the cache.
try:
s = self.model.objects.get(
session_key=self.session_key,
expire_date__gt=timezone.now()
)
data = self.decode(s.session_data)
self._cache.set(self.cache_key, data,
self.get_expiry_age(expiry=s.expire_date))
except (self.model.DoesNotExist, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
self._session_key = None
data = {}
return data
def exists(self, session_key):
if session_key and (self.cache_key_prefix + session_key) in self._cache:
return True
return super(SessionStore, self).exists(session_key)
def save(self, must_create=False):
super(SessionStore, self).save(must_create)
self._cache.set(self.cache_key, self._session, self.get_expiry_age())
def delete(self, session_key=None):
super(SessionStore, self).delete(session_key)
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
self._cache.delete(self.cache_key_prefix + session_key)
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete(self.session_key)
self._session_key = None
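# --- Added configuration note (sketch, not part of the upstream module). ---
# To use this backend, point SESSION_ENGINE at this module and, optionally,
# choose which cache alias it writes through to via SESSION_CACHE_ALIAS, e.g.
# in settings.py:
#
#     SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
#     SESSION_CACHE_ALIAS = "default"
#     CACHES = {
#         "default": {
#             "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
#         }
#     }
#
# Reads are then served from the cache when possible (see load() above) and
# fall back to the database, while save()/delete()/flush() keep both stores in
# sync.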
| bsd-3-clause |
40223246/w16b_test | static/Brython3.1.1-20150328-091302/Lib/xml/dom/__init__.py | 873 | 4019 | """W3C Document Object Model implementation for Python.
The Python mapping of the Document Object Model is documented in the
Python Library Reference in the section on the xml.dom package.
This package contains the following modules:
minidom -- A simple implementation of the Level 1 DOM with namespace
support added (based on the Level 2 specification) and other
minor Level 2 functionality.
pulldom -- DOM builder supporting on-demand tree-building for selected
subtrees of the document.
"""
class Node:
"""Class giving the NodeType constants."""
__slots__ = ()
# DOM implementations may use this as a base class for their own
# Node implementations. If they don't, the constants defined here
# should still be used as the canonical definitions as they match
# the values given in the W3C recommendation. Client code can
# safely refer to these values in all tests of Node.nodeType
# values.
ELEMENT_NODE = 1
ATTRIBUTE_NODE = 2
TEXT_NODE = 3
CDATA_SECTION_NODE = 4
ENTITY_REFERENCE_NODE = 5
ENTITY_NODE = 6
PROCESSING_INSTRUCTION_NODE = 7
COMMENT_NODE = 8
DOCUMENT_NODE = 9
DOCUMENT_TYPE_NODE = 10
DOCUMENT_FRAGMENT_NODE = 11
NOTATION_NODE = 12
#ExceptionCode
INDEX_SIZE_ERR = 1
DOMSTRING_SIZE_ERR = 2
HIERARCHY_REQUEST_ERR = 3
WRONG_DOCUMENT_ERR = 4
INVALID_CHARACTER_ERR = 5
NO_DATA_ALLOWED_ERR = 6
NO_MODIFICATION_ALLOWED_ERR = 7
NOT_FOUND_ERR = 8
NOT_SUPPORTED_ERR = 9
INUSE_ATTRIBUTE_ERR = 10
INVALID_STATE_ERR = 11
SYNTAX_ERR = 12
INVALID_MODIFICATION_ERR = 13
NAMESPACE_ERR = 14
INVALID_ACCESS_ERR = 15
VALIDATION_ERR = 16
class DOMException(Exception):
"""Abstract base class for DOM exceptions.
Exceptions with specific codes are specializations of this class."""
def __init__(self, *args, **kw):
if self.__class__ is DOMException:
raise RuntimeError(
"DOMException should not be instantiated directly")
Exception.__init__(self, *args, **kw)
def _get_code(self):
return self.code
class IndexSizeErr(DOMException):
code = INDEX_SIZE_ERR
class DomstringSizeErr(DOMException):
code = DOMSTRING_SIZE_ERR
class HierarchyRequestErr(DOMException):
code = HIERARCHY_REQUEST_ERR
class WrongDocumentErr(DOMException):
code = WRONG_DOCUMENT_ERR
class InvalidCharacterErr(DOMException):
code = INVALID_CHARACTER_ERR
class NoDataAllowedErr(DOMException):
code = NO_DATA_ALLOWED_ERR
class NoModificationAllowedErr(DOMException):
code = NO_MODIFICATION_ALLOWED_ERR
class NotFoundErr(DOMException):
code = NOT_FOUND_ERR
class NotSupportedErr(DOMException):
code = NOT_SUPPORTED_ERR
class InuseAttributeErr(DOMException):
code = INUSE_ATTRIBUTE_ERR
class InvalidStateErr(DOMException):
code = INVALID_STATE_ERR
class SyntaxErr(DOMException):
code = SYNTAX_ERR
class InvalidModificationErr(DOMException):
code = INVALID_MODIFICATION_ERR
class NamespaceErr(DOMException):
code = NAMESPACE_ERR
class InvalidAccessErr(DOMException):
code = INVALID_ACCESS_ERR
class ValidationErr(DOMException):
code = VALIDATION_ERR
class UserDataHandler:
"""Class giving the operation constants for UserDataHandler.handle()."""
# Based on DOM Level 3 (WD 9 April 2002)
NODE_CLONED = 1
NODE_IMPORTED = 2
NODE_DELETED = 3
NODE_RENAMED = 4
XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
EMPTY_NAMESPACE = None
EMPTY_PREFIX = None
from .domreg import getDOMImplementation, registerDOMImplementation
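# --- Added usage sketch; not part of the upstream module. ---
# The Node class above only supplies the shared nodeType constants; concrete
# DOM implementations such as xml.dom.minidom reuse them. The uncalled helper
# below shows node types being checked against those constants.
def _example_count_elements(xml_text="<root><a/><b/>text</root>"):
    from xml.dom import minidom
    doc = minidom.parseString(xml_text)
    root = doc.documentElement
    # Text and element children are distinguished via the canonical constants.
    return sum(1 for child in root.childNodes
               if child.nodeType == Node.ELEMENT_NODE)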
| agpl-3.0 |
Kiiv/Sick-Beard | lib/requests/packages/chardet2/euctwprober.py | 2994 | 1676 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCTWSMModel)
self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-TW"
| gpl-3.0 |
maxdeliso/elevatorSim | Lib/functools.py | 15 | 12285 | """functools.py - Tools for working with functions and callable objects
"""
# Python module wrapper for _functools C module
# to allow utilities written in Python to be added
# to the functools module.
# Written by Nick Coghlan <ncoghlan at gmail.com>
# and Raymond Hettinger <python at rcn.com>
# Copyright (C) 2006-2010 Python Software Foundation.
# See C source code for _functools credits/copyright
__all__ = ['update_wrapper', 'wraps', 'WRAPPER_ASSIGNMENTS', 'WRAPPER_UPDATES',
'total_ordering', 'cmp_to_key', 'lru_cache', 'reduce', 'partial']
from _functools import partial, reduce
from collections import namedtuple
try:
from _thread import allocate_lock as Lock
except:
from _dummy_thread import allocate_lock as Lock
################################################################################
### update_wrapper() and wraps() decorator
################################################################################
# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__qualname__', '__doc__',
'__annotations__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Update a wrapper function to look like the wrapped function
wrapper is the function to be updated
wrapped is the original function
assigned is a tuple naming the attributes assigned directly
from the wrapped function to the wrapper function (defaults to
functools.WRAPPER_ASSIGNMENTS)
updated is a tuple naming the attributes of the wrapper that
are updated with the corresponding attribute from the wrapped
function (defaults to functools.WRAPPER_UPDATES)
"""
wrapper.__wrapped__ = wrapped
for attr in assigned:
try:
value = getattr(wrapped, attr)
except AttributeError:
pass
else:
setattr(wrapper, attr, value)
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
# Return the wrapper so this can be used as a decorator via partial()
return wrapper
def wraps(wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Decorator factory to apply update_wrapper() to a wrapper function
Returns a decorator that invokes update_wrapper() with the decorated
function as the wrapper argument and the arguments to wraps() as the
remaining arguments. Default arguments are as for update_wrapper().
This is a convenience function to simplify applying partial() to
update_wrapper().
"""
return partial(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
################################################################################
### total_ordering class decorator
################################################################################
def total_ordering(cls):
"""Class decorator that fills in missing ordering methods"""
convert = {
'__lt__': [('__gt__', lambda self, other: not (self < other or self == other)),
('__le__', lambda self, other: self < other or self == other),
('__ge__', lambda self, other: not self < other)],
'__le__': [('__ge__', lambda self, other: not self <= other or self == other),
('__lt__', lambda self, other: self <= other and not self == other),
('__gt__', lambda self, other: not self <= other)],
'__gt__': [('__lt__', lambda self, other: not (self > other or self == other)),
('__ge__', lambda self, other: self > other or self == other),
('__le__', lambda self, other: not self > other)],
'__ge__': [('__le__', lambda self, other: (not self >= other) or self == other),
('__gt__', lambda self, other: self >= other and not self == other),
('__lt__', lambda self, other: not self >= other)]
}
# Find user-defined comparisons (not those inherited from object).
roots = [op for op in convert if getattr(cls, op, None) is not getattr(object, op, None)]
if not roots:
raise ValueError('must define at least one ordering operation: < > <= >=')
root = max(roots) # prefer __lt__ to __le__ to __gt__ to __ge__
for opname, opfunc in convert[root]:
if opname not in roots:
opfunc.__name__ = opname
opfunc.__doc__ = getattr(int, opname).__doc__
setattr(cls, opname, opfunc)
return cls
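# --- Illustrative sketch (editorial addition, not part of the original functools module) ---
# total_ordering only needs __eq__ plus one of __lt__/__le__/__gt__/__ge__ on the
# decorated class; the remaining comparisons are filled in from the table above.
# The class below is a hypothetical example, not library code.
@total_ordering
class _Version:
    def __init__(self, major, minor):
        self.major, self.minor = major, minor
    def __eq__(self, other):
        return (self.major, self.minor) == (other.major, other.minor)
    def __lt__(self, other):
        return (self.major, self.minor) < (other.major, other.minor)
# After decoration, _Version(1, 2) <= _Version(1, 3) and _Version(2, 0) > _Version(1, 9)
# both work even though only __eq__ and __lt__ were written by hand.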
################################################################################
### cmp_to_key() function converter
################################################################################
def cmp_to_key(mycmp):
"""Convert a cmp= function into a key= function"""
class K(object):
__slots__ = ['obj']
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
__hash__ = None
return K
try:
from _functools import cmp_to_key
except ImportError:
pass
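# --- Illustrative sketch (editorial addition, not part of the original functools module) ---
# cmp_to_key adapts an old-style three-way comparison function into a key object
# for sorted()/min()/max().  The comparison function below is a hypothetical example.
def _compare_by_length_then_text(a, b):
    """Return a negative, zero or positive number, like an old-style cmp() function."""
    if len(a) != len(b):
        return len(a) - len(b)
    return (a > b) - (a < b)
# sorted(["pear", "fig", "apple"], key=cmp_to_key(_compare_by_length_then_text))
# yields ['fig', 'pear', 'apple'] under this ordering.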
################################################################################
### LRU Cache function decorator
################################################################################
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
__slots__ = 'hashvalue'
def __init__(self, tup, hash=hash):
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(args, kwds, typed,
kwd_mark = (object(),),
fasttypes = {int, str, frozenset, type(None)},
sorted=sorted, tuple=tuple, type=type, len=len):
'Make a cache key from optionally typed positional and keyword arguments'
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for k, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
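# (Editorial note) For example, _make_key((1, 2), {'a': 3}, False) returns the flat
# tuple (1, 2, <kwd_mark sentinel>, 'a', 3) wrapped in _HashedSeq so its hash is
# computed only once; with typed=True the argument types are appended as well.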
def lru_cache(maxsize=128, typed=False):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize)
with f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: http://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C version).
# Constants shared by all lru cache instances:
sentinel = object() # unique object used to signal cache misses
make_key = _make_key # build a key from the function arguments
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
def decorating_function(user_function):
cache = {}
hits = misses = currsize = 0
full = False
cache_get = cache.get # bound method to lookup a key or return None
lock = Lock() # because linkedlist updates aren't threadsafe
root = [] # root of the circular doubly linked list
root[:] = [root, root, None, None] # initialize by pointing to self
if maxsize == 0:
def wrapper(*args, **kwds):
# no caching, just a statistics update after a successful call
nonlocal misses
result = user_function(*args, **kwds)
misses += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# simple caching without ordering or size limit
nonlocal hits, misses, currsize
key = make_key(args, kwds, typed)
result = cache_get(key, sentinel)
if result is not sentinel:
hits += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
misses += 1
currsize += 1
return result
else:
def wrapper(*args, **kwds):
# size limited caching that tracks accesses by recency
nonlocal root, hits, misses, currsize, full
key = make_key(args, kwds, typed)
with lock:
link = cache_get(key)
if link is not None:
# move the link to the front of the circular queue
link_prev, link_next, key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
hits += 1
return result
result = user_function(*args, **kwds)
with lock:
if key in cache:
# getting here means that this same key was added to the
# cache while the lock was released. since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif full:
# use root to store the new key and result
root[KEY] = key
root[RESULT] = result
cache[key] = root
# empty the oldest link and make it the new root
root = root[NEXT]
del cache[root[KEY]]
root[KEY] = root[RESULT] = None
else:
# put result in a new link at the front of the queue
last = root[PREV]
link = [last, root, key, result]
cache[key] = last[NEXT] = root[PREV] = link
currsize += 1
full = (currsize == maxsize)
misses += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(hits, misses, maxsize, currsize)
def cache_clear():
"""Clear the cache and cache statistics"""
nonlocal hits, misses, currsize, full
with lock:
cache.clear()
root[:] = [root, root, None, None]
hits = misses = currsize = 0
full = False
wrapper.cache_info = cache_info
wrapper.cache_clear = cache_clear
return update_wrapper(wrapper, user_function)
return decorating_function
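# --- Illustrative sketch (editorial addition, not part of the original functools module) ---
# Typical use of the decorator defined above; the Fibonacci function is a
# hypothetical example.
@lru_cache(maxsize=None)
def _fib(n):
    """Naively recursive Fibonacci, made linear-time by the unbounded cache."""
    return n if n < 2 else _fib(n - 1) + _fib(n - 2)
# _fib(30) fills the cache with 31 entries; _fib.cache_info() then reports the
# hit/miss counts and _fib.cache_clear() empties the cache again.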
| bsd-2-clause |
kamenim/samba | third_party/waf/wafadmin/Tools/unittestw.py | 32 | 9789 | #!/usr/bin/env python
# encoding: utf-8
# Carlos Rafael Giani, 2006
"""
Unit tests run in the shutdown() method, and for c/c++ programs
One should NOT have to give parameters to programs to execute
In the shutdown method, add the following code:
>>> def shutdown():
... ut = UnitTest.unit_test()
... ut.run()
... ut.print_results()
Each object to use as a unit test must be a program and must have X{obj.unit_test=1}
"""
import os, sys
import Build, TaskGen, Utils, Options, Logs, Task
from TaskGen import before, after, feature
from Constants import *
class unit_test(object):
"Unit test representation"
def __init__(self):
self.returncode_ok = 0 # Unit test returncode considered OK. All returncodes differing from this one
# will cause the unit test to be marked as "FAILED".
# The following variables are filled with data by run().
# print_results() uses these for printing the unit test summary,
# but if there is need for direct access to the results,
# they can be retrieved here, after calling run().
self.num_tests_ok = 0 # Number of successful unit tests
self.num_tests_failed = 0 # Number of failed unit tests
self.num_tests_err = 0 # Tests that have not even run
self.total_num_tests = 0 # Total amount of unit tests
self.max_label_length = 0 # Maximum label length (pretty-print the output)
self.unit_tests = Utils.ordered_dict() # Unit test dictionary. Key: the label (unit test filename relative
# to the build dir), value: unit test filename with absolute path
self.unit_test_results = {} # Dictionary containing the unit test results.
# Key: the label, value: result (true = success false = failure)
self.unit_test_erroneous = {} # Dictionary indicating erroneous unit tests.
# Key: the label, value: true = unit test has an error false = unit test is ok
self.change_to_testfile_dir = False #True if the test file needs to be executed from the same dir
self.want_to_see_test_output = False #True to see the stdout from the testfile (for example check suites)
self.want_to_see_test_error = False #True to see the stderr from the testfile (for example check suites)
self.run_if_waf_does = 'check' #build was the old default
def run(self):
"Run the unit tests and gather results (note: no output here)"
self.num_tests_ok = 0
self.num_tests_failed = 0
self.num_tests_err = 0
self.total_num_tests = 0
self.max_label_length = 0
self.unit_tests = Utils.ordered_dict()
self.unit_test_results = {}
self.unit_test_erroneous = {}
ld_library_path = []
# If waf is not building, don't run anything
if not Options.commands[self.run_if_waf_does]: return
# Get the paths for the shared libraries, and obtain the unit tests to execute
for obj in Build.bld.all_task_gen:
try:
link_task = obj.link_task
except AttributeError:
pass
else:
lib_path = link_task.outputs[0].parent.abspath(obj.env)
if lib_path not in ld_library_path:
ld_library_path.append(lib_path)
unit_test = getattr(obj, 'unit_test', '')
if unit_test and 'cprogram' in obj.features:
try:
output = obj.path
filename = os.path.join(output.abspath(obj.env), obj.target)
srcdir = output.abspath()
label = os.path.join(output.bldpath(obj.env), obj.target)
self.max_label_length = max(self.max_label_length, len(label))
self.unit_tests[label] = (filename, srcdir)
except KeyError:
pass
self.total_num_tests = len(self.unit_tests)
# Now run the unit tests
Utils.pprint('GREEN', 'Running the unit tests')
count = 0
result = 1
for label in self.unit_tests.allkeys:
file_and_src = self.unit_tests[label]
filename = file_and_src[0]
srcdir = file_and_src[1]
count += 1
line = Build.bld.progress_line(count, self.total_num_tests, Logs.colors.GREEN, Logs.colors.NORMAL)
if Options.options.progress_bar and line:
sys.stderr.write(line)
sys.stderr.flush()
try:
kwargs = {}
kwargs['env'] = os.environ.copy()
if self.change_to_testfile_dir:
kwargs['cwd'] = srcdir
if not self.want_to_see_test_output:
kwargs['stdout'] = Utils.pproc.PIPE # PIPE for ignoring output
if not self.want_to_see_test_error:
kwargs['stderr'] = Utils.pproc.PIPE # PIPE for ignoring output
if ld_library_path:
v = kwargs['env']
def add_path(dct, path, var):
dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])
if sys.platform == 'win32':
add_path(v, ld_library_path, 'PATH')
elif sys.platform == 'darwin':
add_path(v, ld_library_path, 'DYLD_LIBRARY_PATH')
add_path(v, ld_library_path, 'LD_LIBRARY_PATH')
else:
add_path(v, ld_library_path, 'LD_LIBRARY_PATH')
pp = Utils.pproc.Popen(filename, **kwargs)
(out, err) = pp.communicate() # uh, and the output is ignored?? - fortunately this is going to disappear
result = int(pp.returncode == self.returncode_ok)
if result:
self.num_tests_ok += 1
else:
self.num_tests_failed += 1
self.unit_test_results[label] = result
self.unit_test_erroneous[label] = 0
except OSError:
self.unit_test_erroneous[label] = 1
self.num_tests_err += 1
except KeyboardInterrupt:
pass
if Options.options.progress_bar: sys.stdout.write(Logs.colors.cursor_on)
def print_results(self):
"Pretty-prints a summary of all unit tests, along with some statistics"
# If waf is not building, don't output anything
if not Options.commands[self.run_if_waf_does]: return
p = Utils.pprint
# Early quit if no tests were performed
if self.total_num_tests == 0:
p('YELLOW', 'No unit tests present')
return
for label in self.unit_tests.allkeys:
filename = self.unit_tests[label]
err = 0
result = 0
try: err = self.unit_test_erroneous[label]
except KeyError: pass
try: result = self.unit_test_results[label]
except KeyError: pass
n = self.max_label_length - len(label)
if err: n += 4
elif result: n += 7
else: n += 3
line = '%s %s' % (label, '.' * n)
if err: p('RED', '%sERROR' % line)
elif result: p('GREEN', '%sOK' % line)
else: p('YELLOW', '%sFAILED' % line)
percentage_ok = float(self.num_tests_ok) / float(self.total_num_tests) * 100.0
percentage_failed = float(self.num_tests_failed) / float(self.total_num_tests) * 100.0
percentage_erroneous = float(self.num_tests_err) / float(self.total_num_tests) * 100.0
p('NORMAL', '''
Successful tests: %i (%.1f%%)
Failed tests: %i (%.1f%%)
Erroneous tests: %i (%.1f%%)
Total number of tests: %i
''' % (self.num_tests_ok, percentage_ok, self.num_tests_failed, percentage_failed,
self.num_tests_err, percentage_erroneous, self.total_num_tests))
p('GREEN', 'Unit tests finished')
############################################################################################
"""
New unit test system
The targets with feature 'test' are executed after they are built
bld(features='cprogram cc test', ...)
To display the results:
import UnitTest
bld.add_post_fun(UnitTest.summary)
"""
import threading
testlock = threading.Lock()
def set_options(opt):
opt.add_option('--alltests', action='store_true', default=True, help='Exec all unit tests', dest='all_tests')
@feature('test')
@after('apply_link', 'vars_target_cprogram')
def make_test(self):
if not 'cprogram' in self.features:
Logs.error('test cannot be executed %s' % self)
return
self.default_install_path = None
self.create_task('utest', self.link_task.outputs)
def exec_test(self):
status = 0
variant = self.env.variant()
filename = self.inputs[0].abspath(self.env)
self.ut_exec = getattr(self, 'ut_exec', [filename])
if getattr(self.generator, 'ut_fun', None):
self.generator.ut_fun(self)
try:
fu = getattr(self.generator.bld, 'all_test_paths')
except AttributeError:
fu = os.environ.copy()
self.generator.bld.all_test_paths = fu
lst = []
for obj in self.generator.bld.all_task_gen:
link_task = getattr(obj, 'link_task', None)
if link_task and link_task.env.variant() == variant:
lst.append(link_task.outputs[0].parent.abspath(obj.env))
def add_path(dct, path, var):
dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])
if sys.platform == 'win32':
add_path(fu, lst, 'PATH')
elif sys.platform == 'darwin':
add_path(fu, lst, 'DYLD_LIBRARY_PATH')
add_path(fu, lst, 'LD_LIBRARY_PATH')
else:
add_path(fu, lst, 'LD_LIBRARY_PATH')
cwd = getattr(self.generator, 'ut_cwd', '') or self.inputs[0].parent.abspath(self.env)
proc = Utils.pproc.Popen(self.ut_exec, cwd=cwd, env=fu, stderr=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE)
(stdout, stderr) = proc.communicate()
tup = (filename, proc.returncode, stdout, stderr)
self.generator.utest_result = tup
testlock.acquire()
try:
bld = self.generator.bld
Logs.debug("ut: %r", tup)
try:
bld.utest_results.append(tup)
except AttributeError:
bld.utest_results = [tup]
finally:
testlock.release()
cls = Task.task_type_from_func('utest', func=exec_test, color='PINK', ext_in='.bin')
old = cls.runnable_status
def test_status(self):
ret = old(self)
if ret == SKIP_ME and getattr(Options.options, 'all_tests', False):
return RUN_ME
return ret
cls.runnable_status = test_status
cls.quiet = 1
def summary(bld):
lst = getattr(bld, 'utest_results', [])
if lst:
Utils.pprint('CYAN', 'execution summary')
total = len(lst)
tfail = len([x for x in lst if x[1]])
Utils.pprint('CYAN', ' tests that pass %d/%d' % (total-tfail, total))
for (f, code, out, err) in lst:
if not code:
Utils.pprint('CYAN', ' %s' % f)
Utils.pprint('CYAN', ' tests that fail %d/%d' % (tfail, total))
for (f, code, out, err) in lst:
if code:
Utils.pprint('CYAN', ' %s' % f)
| gpl-3.0 |
neilLasrado/erpnext | erpnext/regional/report/datev/datev.py | 1 | 14705 | # coding: utf-8
"""
Provide a report and downloadable CSV according to the German DATEV format.
- Query report showing only the columns that contain data, formatted nicely for
  display to the user.
- CSV download functionality `download_datev_csv` that provides a CSV file with
all required columns. Used to import the data into the DATEV Software.
"""
from __future__ import unicode_literals
import datetime
import json
import six
from six import string_types
from csv import QUOTE_NONNUMERIC
import frappe
from frappe import _
import pandas as pd
def execute(filters=None):
"""Entry point for frappe."""
validate(filters)
result = get_gl_entries(filters, as_dict=0)
columns = get_columns()
return columns, result
def validate(filters):
"""Make sure all mandatory filters and settings are present."""
if not filters.get('company'):
frappe.throw(_('<b>Company</b> is a mandatory filter.'))
if not filters.get('from_date'):
frappe.throw(_('<b>From Date</b> is a mandatory filter.'))
if not filters.get('to_date'):
frappe.throw(_('<b>To Date</b> is a mandatory filter.'))
try:
frappe.get_doc('DATEV Settings', filters.get('company'))
except frappe.DoesNotExistError:
frappe.throw(_('Please create <b>DATEV Settings</b> for Company <b>{}</b>.').format(filters.get('company')))
def get_columns():
"""Return the list of columns that will be shown in query report."""
columns = [
{
"label": "Umsatz (ohne Soll/Haben-Kz)",
"fieldname": "Umsatz (ohne Soll/Haben-Kz)",
"fieldtype": "Currency",
},
{
"label": "Soll/Haben-Kennzeichen",
"fieldname": "Soll/Haben-Kennzeichen",
"fieldtype": "Data",
},
{
"label": "Konto",
"fieldname": "Konto",
"fieldtype": "Data",
},
{
"label": "Gegenkonto (ohne BU-SchlΓΌssel)",
"fieldname": "Gegenkonto (ohne BU-SchlΓΌssel)",
"fieldtype": "Data",
},
{
"label": "Belegdatum",
"fieldname": "Belegdatum",
"fieldtype": "Date",
},
{
"label": "Belegfeld 1",
"fieldname": "Belegfeld 1",
"fieldtype": "Data",
},
{
"label": "Buchungstext",
"fieldname": "Buchungstext",
"fieldtype": "Text",
},
{
"label": "Beleginfo - Art 1",
"fieldname": "Beleginfo - Art 1",
"fieldtype": "Link",
"options": "DocType"
},
{
"label": "Beleginfo - Inhalt 1",
"fieldname": "Beleginfo - Inhalt 1",
"fieldtype": "Dynamic Link",
"options": "Beleginfo - Art 1"
},
{
"label": "Beleginfo - Art 2",
"fieldname": "Beleginfo - Art 2",
"fieldtype": "Link",
"options": "DocType"
},
{
"label": "Beleginfo - Inhalt 2",
"fieldname": "Beleginfo - Inhalt 2",
"fieldtype": "Dynamic Link",
"options": "Beleginfo - Art 2"
}
]
return columns
def get_gl_entries(filters, as_dict):
"""
Get a list of accounting entries.
Select GL Entries joined with Account and Party Account in order to get the
account numbers. Returns a list of accounting entries.
Arguments:
filters -- dict of filters to be passed to the sql query
as_dict -- return as list of dicts [0,1]
"""
gl_entries = frappe.db.sql("""
select
/* either debit or credit amount; always positive */
case gl.debit when 0 then gl.credit else gl.debit end as 'Umsatz (ohne Soll/Haben-Kz)',
/* 'H' when credit, 'S' when debit */
case gl.debit when 0 then 'H' else 'S' end as 'Soll/Haben-Kennzeichen',
/* account number or, if empty, party account number */
coalesce(acc.account_number, acc_pa.account_number) as 'Konto',
/* against number or, if empty, party against number */
			coalesce(acc_against.account_number, acc_against_pa.account_number) as 'Gegenkonto (ohne BU-Schlüssel)',
gl.posting_date as 'Belegdatum',
gl.voucher_no as 'Belegfeld 1',
LEFT(gl.remarks, 60) as 'Buchungstext',
gl.voucher_type as 'Beleginfo - Art 1',
gl.voucher_no as 'Beleginfo - Inhalt 1',
gl.against_voucher_type as 'Beleginfo - Art 2',
gl.against_voucher as 'Beleginfo - Inhalt 2'
from `tabGL Entry` gl
/* Statistisches Konto (Debitoren/Kreditoren) */
left join `tabParty Account` pa
on gl.against = pa.parent
and gl.company = pa.company
/* Kontonummer */
left join `tabAccount` acc
on gl.account = acc.name
/* Gegenkonto-Nummer */
left join `tabAccount` acc_against
on gl.against = acc_against.name
/* Statistische Kontonummer */
left join `tabAccount` acc_pa
on pa.account = acc_pa.name
/* Statistische Gegenkonto-Nummer */
left join `tabAccount` acc_against_pa
on pa.account = acc_against_pa.name
where gl.company = %(company)s
and DATE(gl.posting_date) >= %(from_date)s
and DATE(gl.posting_date) <= %(to_date)s
order by 'Belegdatum', gl.voucher_no""", filters, as_dict=as_dict)
return gl_entries
def get_datev_csv(data, filters):
"""
Fill in missing columns and return a CSV in DATEV Format.
For automatic processing, DATEV requires the first line of the CSV file to
	hold metadata such as the length of account numbers or the category of
the data.
Arguments:
data -- array of dictionaries
filters -- dict
"""
coa = frappe.get_value("Company", filters.get("company"), "chart_of_accounts")
coa_used = "04" if "SKR04" in coa else ("03" if "SKR03" in coa else "")
header = [
# A = DATEV-Format-KZ
# DTVF = created by DATEV software,
# EXTF = created by other software
'"EXTF"',
# B = version of the DATEV format
# 141 = 1.41,
# 510 = 5.10,
# 720 = 7.20
"700",
# C = Data category
# 21 = Transaction batch (Buchungsstapel),
# 67 = Buchungstextkonstanten,
# 16 = Debitors/Creditors,
# 20 = Account names (Kontenbeschriftungen)
"21",
# D = Format name
# Buchungsstapel,
# Buchungstextkonstanten,
# Debitoren/Kreditoren,
# Kontenbeschriftungen
"Buchungsstapel",
# E = Format version (regarding format name)
"9",
# F = Generated on
datetime.datetime.now().strftime("%Y%m%d%H%M%S") + '000',
# G = Imported on -- stays empty
"",
# H = Herkunfts-Kennzeichen (Origin)
# Any two letters
'"EN"',
# I = Exported by
'"%s"' % frappe.session.user,
# J = Imported by -- stays empty
"",
# K = Tax consultant number (Beraternummer)
frappe.get_value("DATEV Settings", filters.get("company"), "consultant_number") or "",
# L = Tax client number (Mandantennummer)
frappe.get_value("DATEV Settings", filters.get("company"), "client_number") or "",
# M = Start of the fiscal year (Wirtschaftsjahresbeginn)
frappe.utils.formatdate(frappe.defaults.get_user_default("year_start_date"), "yyyyMMdd"),
		# N = Length of account numbers (Sachkontenlänge)
"4",
# O = Transaction batch start date (YYYYMMDD)
frappe.utils.formatdate(filters.get('from_date'), "yyyyMMdd"),
# P = Transaction batch end date (YYYYMMDD)
frappe.utils.formatdate(filters.get('to_date'), "yyyyMMdd"),
# Q = Description (for example, "January - February 2019 Transactions")
"Buchungsstapel",
		# R = Diktatkürzel
"",
# S = Buchungstyp
# 1 = Transaction batch (Buchungsstapel),
# 2 = Annual financial statement (Jahresabschluss)
"1",
# T = Rechnungslegungszweck
"0", # vom Rechnungslegungszweck unabhΓ€ngig
# U = Festschreibung
"0", # keine Festschreibung
		# V = Kontoführungs-Währungskennzeichen des Geldkontos
frappe.get_value("Company", filters.get("company"), "default_currency"),
# reserviert
'',
# Derivatskennzeichen
'',
# reserviert
'',
# reserviert
'',
# SKR
'"%s"' % coa_used,
		# Branchen-Lösungs-ID
'',
# reserviert
'',
# reserviert
'',
# Anwendungsinformation (Verarbeitungskennzeichen der abgebenden Anwendung)
''
]
columns = [
		# All possible columns must be listed here, because DATEV requires them to
# be present in the CSV.
# ---
# Umsatz
"Umsatz (ohne Soll/Haben-Kz)",
"Soll/Haben-Kennzeichen",
"WKZ Umsatz",
"Kurs",
"Basis-Umsatz",
"WKZ Basis-Umsatz",
# Konto/Gegenkonto
"Konto",
"Gegenkonto (ohne BU-SchlΓΌssel)",
"BU-SchlΓΌssel",
# Datum
"Belegdatum",
# Rechnungs- / Belegnummer
"Belegfeld 1",
		# z.B. Fälligkeitsdatum Format: TTMMJJ
		"Belegfeld 2",
		# Skonto-Betrag / -Abzug (Der Wert 0 ist unzulässig)
"Skonto",
# Beschreibung des Buchungssatzes
"Buchungstext",
# Mahn- / Zahl-Sperre (1 = Postensperre)
"Postensperre",
"Diverse Adressnummer",
"GeschΓ€ftspartnerbank",
"Sachverhalt",
# Keine Mahnzinsen
"Zinssperre",
		# Link auf den Buchungsbeleg (Programmkürzel + GUID)
"Beleglink",
# Beleginfo
"Beleginfo - Art 1",
"Beleginfo - Inhalt 1",
"Beleginfo - Art 2",
"Beleginfo - Inhalt 2",
"Beleginfo - Art 3",
"Beleginfo - Inhalt 3",
"Beleginfo - Art 4",
"Beleginfo - Inhalt 4",
"Beleginfo - Art 5",
"Beleginfo - Inhalt 5",
"Beleginfo - Art 6",
"Beleginfo - Inhalt 6",
"Beleginfo - Art 7",
"Beleginfo - Inhalt 7",
"Beleginfo - Art 8",
"Beleginfo - Inhalt 8",
		# Zuordnung des Geschäftsvorfalls für die Kostenrechnung
"KOST1 - Kostenstelle",
"KOST2 - Kostenstelle",
"KOST-Menge",
# USt-ID-Nummer (Beispiel: DE133546770)
"EU-Mitgliedstaat u. USt-IdNr.",
		# Der im EU-Bestimmungsland gültige Steuersatz
"EU-Steuersatz",
# I = Ist-Versteuerung,
# K = keine Umsatzsteuerrechnung
		# P = Pauschalierung (z. B. für Land- und Forstwirtschaft),
# S = Soll-Versteuerung
"Abw. Versteuerungsart",
		# Sachverhalte gem. § 13b Abs. 1 Satz 1 Nrn. 1.-5. UStG
		"Sachverhalt L+L",
		# Steuersatz / Funktion zum L+L-Sachverhalt (Beispiel: Wert 190 für 19%)
		"Funktionsergänzung L+L",
		# Bei Verwendung des BU-Schlüssels 49 für „andere Steuersätze“ muss der
		# steuerliche Sachverhalt mitgegeben werden
		"BU 49 Hauptfunktionstyp",
		"BU 49 Hauptfunktionsnummer",
		"BU 49 Funktionsergänzung",
		# Zusatzinformationen, besitzen den Charakter eines Notizzettels und können
# frei erfasst werden.
"Zusatzinformation - Art 1",
"Zusatzinformation - Inhalt 1",
"Zusatzinformation - Art 2",
"Zusatzinformation - Inhalt 2",
"Zusatzinformation - Art 3",
"Zusatzinformation - Inhalt 3",
"Zusatzinformation - Art 4",
"Zusatzinformation - Inhalt 4",
"Zusatzinformation - Art 5",
"Zusatzinformation - Inhalt 5",
"Zusatzinformation - Art 6",
"Zusatzinformation - Inhalt 6",
"Zusatzinformation - Art 7",
"Zusatzinformation - Inhalt 7",
"Zusatzinformation - Art 8",
"Zusatzinformation - Inhalt 8",
"Zusatzinformation - Art 9",
"Zusatzinformation - Inhalt 9",
"Zusatzinformation - Art 10",
"Zusatzinformation - Inhalt 10",
"Zusatzinformation - Art 11",
"Zusatzinformation - Inhalt 11",
"Zusatzinformation - Art 12",
"Zusatzinformation - Inhalt 12",
"Zusatzinformation - Art 13",
"Zusatzinformation - Inhalt 13",
"Zusatzinformation - Art 14",
"Zusatzinformation - Inhalt 14",
"Zusatzinformation - Art 15",
"Zusatzinformation - Inhalt 15",
"Zusatzinformation - Art 16",
"Zusatzinformation - Inhalt 16",
"Zusatzinformation - Art 17",
"Zusatzinformation - Inhalt 17",
"Zusatzinformation - Art 18",
"Zusatzinformation - Inhalt 18",
"Zusatzinformation - Art 19",
"Zusatzinformation - Inhalt 19",
"Zusatzinformation - Art 20",
"Zusatzinformation - Inhalt 20",
# Wirkt sich nur bei Sachverhalt mit SKR 14 Land- und Forstwirtschaft aus,
		# für andere SKR werden die Felder beim Import / Export überlesen bzw.
		# leer exportiert.
		"Stück",
"Gewicht",
# 1 = Lastschrift
# 2 = Mahnung
# 3 = Zahlung
"Zahlweise",
"Forderungsart",
# JJJJ
"Veranlagungsjahr",
# TTMMJJJJ
"Zugeordnete FΓ€lligkeit",
# 1 = Einkauf von Waren
# 2 = Erwerb von Roh-Hilfs- und Betriebsstoffen
"Skontotyp",
# Allgemeine Bezeichnung, des Auftrags / Projekts.
"Auftragsnummer",
# AA = Angeforderte Anzahlung / Abschlagsrechnung
# AG = Erhaltene Anzahlung (Geldeingang)
# AV = Erhaltene Anzahlung (Verbindlichkeit)
# SR = Schlussrechnung
# SU = Schlussrechnung (Umbuchung)
# SG = Schlussrechnung (Geldeingang)
# SO = Sonstige
"Buchungstyp",
"USt-SchlΓΌssel (Anzahlungen)",
"EU-Mitgliedstaat (Anzahlungen)",
"Sachverhalt L+L (Anzahlungen)",
"EU-Steuersatz (Anzahlungen)",
"ErlΓΆskonto (Anzahlungen)",
# Wird beim Import durch SV (Stapelverarbeitung) ersetzt.
"Herkunft-Kz",
# Wird von DATEV verwendet.
"Leerfeld",
# Format TTMMJJJJ
"KOST-Datum",
		# Vom Zahlungsempfänger individuell vergebenes Kennzeichen eines Mandats
# (z.B. Rechnungs- oder Kundennummer).
"SEPA-Mandatsreferenz",
# 1 = Skontosperre
# 0 = Keine Skontosperre
"Skontosperre",
# Gesellschafter und Sonderbilanzsachverhalt
"Gesellschaftername",
		# Amtliche Nummer aus der Feststellungserklärung
"Beteiligtennummer",
"Identifikationsnummer",
"Zeichnernummer",
# Format TTMMJJJJ
"Postensperre bis",
# Gesellschafter und Sonderbilanzsachverhalt
"Bezeichnung SoBil-Sachverhalt",
"Kennzeichen SoBil-Buchung",
# 0 = keine Festschreibung
# 1 = Festschreibung
"Festschreibung",
# Format TTMMJJJJ
"Leistungsdatum",
# Format TTMMJJJJ
"Datum Zuord. Steuerperiode",
# OPOS-Informationen, Format TTMMJJJJ
"FΓ€lligkeit",
# G oder 1 = Generalumkehr
# 0 = keine Generalumkehr
"Generalumkehr (GU)",
# Steuersatz fΓΌr SteuerschlΓΌssel
"Steuersatz",
# Beispiel: DE fΓΌr Deutschland
"Land"
]
empty_df = pd.DataFrame(columns=columns)
data_df = pd.DataFrame.from_records(data)
result = empty_df.append(data_df)
result['Belegdatum'] = pd.to_datetime(result['Belegdatum'])
header = ';'.join(header).encode('latin_1')
data = result.to_csv(
# Reason for str(';'): https://github.com/pandas-dev/pandas/issues/6035
sep=str(';'),
		# European decimal separator
decimal=',',
# Windows "ANSI" encoding
encoding='latin_1',
# format date as DDMM
date_format='%d%m',
# Windows line terminator
line_terminator='\r\n',
# Do not number rows
index=False,
# Use all columns defined above
columns=columns,
# Quote most fields, even currency values with "," separator
quoting=QUOTE_NONNUMERIC
)
if not six.PY2:
data = data.encode('latin_1')
return header + b'\r\n' + data
@frappe.whitelist()
def download_datev_csv(filters=None):
"""
Provide accounting entries for download in DATEV format.
Validate the filters, get the data, produce the CSV file and provide it for
download. Can be called like this:
GET /api/method/erpnext.regional.report.datev.datev.download_datev_csv
Arguments / Params:
filters -- dict of filters to be passed to the sql query
"""
if isinstance(filters, string_types):
filters = json.loads(filters)
validate(filters)
data = get_gl_entries(filters, as_dict=1)
frappe.response['result'] = get_datev_csv(data, filters)
frappe.response['doctype'] = 'EXTF_Buchungsstapel'
frappe.response['type'] = 'csv'
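# --- Illustrative sketch (editorial addition, not part of the original report) ---
# The DATEV-specific CSV conventions used by get_datev_csv() above, shown on a tiny
# stand-alone DataFrame.  The sample row is hypothetical; pd and QUOTE_NONNUMERIC are
# the imports already made at the top of this module.
def _example_datev_style_csv():
	"""Return a semicolon-separated, comma-decimal CSV string in the DATEV style."""
	df = pd.DataFrame([{"Umsatz (ohne Soll/Haben-Kz)": 119.0, "Soll/Haben-Kennzeichen": "S"}])
	return df.to_csv(
		sep=str(';'),              # semicolon field separator
		decimal=',',               # European decimal separator
		index=False,               # no row numbers
		quoting=QUOTE_NONNUMERIC   # quote every non-numeric field
	)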
| gpl-3.0 |
LCOGT/valhalla | valhalla/userrequests/target_helpers.py | 1 | 4432 | from django.utils.translation import ugettext as _
from numbers import Number
class BaseTargetHelper(object):
"""
These helper classes take a dictionary representation of a target
and performs validation specific to the target type Sidereal,
NonSidereal, Satellite. The dictionary it returns will also only contain
fields relevant to the specific type. These models should only be used in
TargetSerializer
"""
def __init__(self, target):
self.error_dict = {}
self._data = {}
for field in self.fields:
self._data[field] = target.get(field)
for field in self.defaults:
if not target.get(field):
self._data[field] = self.defaults[field]
for field in self.required_fields:
if not self._data.get(field) and not isinstance(self._data.get(field), Number):
self.error_dict[field] = ['This field is required']
self.validate()
def validate(self):
pass
def is_valid(self):
return not bool(self.error_dict)
@property
def data(self):
# Only return data that is not none so model defaults can take effect
return {k: v for k, v in self._data.items() if v is not None}
class SiderealTargetHelper(BaseTargetHelper):
def __init__(self, target):
self.fields = (
'type', 'name', 'ra', 'dec', 'proper_motion_ra', 'proper_motion_dec', 'parallax', 'radvel',
'coordinate_system', 'equinox', 'epoch', 'acquire_mode', 'rot_mode', 'rot_angle', 'vmag'
)
self.required_fields = ('ra', 'dec')
self.defaults = {
'coordinate_system': 'ICRS',
'equinox': 'J2000',
'parallax': 0.0,
'proper_motion_ra': 0.0,
'proper_motion_dec': 0.0,
'radvel': 0.0,
'epoch': 2000.0
}
super().__init__(target)
class NonSiderealTargetHelper(BaseTargetHelper):
def __init__(self, target):
self.defaults = {}
self.fields = ('acquire_mode', 'rot_mode', 'rot_angle', 'vmag')
self.required_fields = (
'type', 'name', 'epochofel', 'orbinc', 'longascnode', 'eccentricity', 'scheme'
)
if target.get('scheme') == 'ASA_MAJOR_PLANET':
self.required_fields += ('longofperih', 'meandist', 'meanlong', 'dailymot')
elif target.get('scheme') == 'ASA_MINOR_PLANET':
self.required_fields += ('argofperih', 'meandist', 'meananom')
elif target.get('scheme') == 'ASA_COMET':
self.required_fields += ('argofperih', 'perihdist', 'epochofperih')
elif target.get('scheme') == 'JPL_MAJOR_PLANET':
self.required_fields += ('argofperih', 'meandist', 'meananom', 'dailymot')
elif target.get('scheme') == 'JPL_MINOR_PLANET':
self.required_fields += ('argofperih', 'perihdist', 'epochofperih')
elif target.get('scheme') == 'MPC_MINOR_PLANET':
self.required_fields += ('argofperih', 'meandist', 'meananom')
elif target.get('scheme') == 'MPC_COMET':
self.required_fields += ('argofperih', 'perihdist', 'epochofperih')
self.fields += self.required_fields
super().__init__(target)
def validate(self):
ECCENTRICITY_LIMIT = 0.9
if self.is_valid() and 'COMET' not in self._data['scheme'] and self._data['eccentricity'] > ECCENTRICITY_LIMIT:
msg = _("Non sidereal pointing of scheme {} requires eccentricity to be lower than {}. ").format(
self._data['scheme'], ECCENTRICITY_LIMIT
)
msg += _("Submit with scheme MPC_COMET to use your eccentricity of {}.").format(
self._data['eccentricity']
)
self.error_dict['scheme'] = msg
class SatelliteTargetHelper(BaseTargetHelper):
def __init__(self, target):
self.fields = (
'name', 'type', 'altitude', 'azimuth', 'diff_pitch_rate', 'diff_roll_rate',
'diff_epoch_rate', 'diff_pitch_acceleration', 'diff_roll_acceleration'
)
self.required_fields = self.fields
self.fields += ('vmag',)
self.defaults = {}
super().__init__(target)
TARGET_TYPE_HELPER_MAP = {
'SIDEREAL': SiderealTargetHelper,
'NON_SIDEREAL': NonSiderealTargetHelper,
'SATELLITE': SatelliteTargetHelper,
'STATIC': SiderealTargetHelper,
}
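# --- Illustrative sketch (editorial addition, not part of the original module) ---
# How a serializer might use the map above: pick the helper matching the target's
# type, run it, and inspect the result.  The target dict below is hypothetical.
def _example_validate_target():
    """Validate a hypothetical sidereal target using TARGET_TYPE_HELPER_MAP."""
    target = {'type': 'SIDEREAL', 'name': 'M31', 'ra': 10.684, 'dec': 41.269}
    helper = TARGET_TYPE_HELPER_MAP[target['type']](target)
    if helper.is_valid():
        return helper.data        # only the fields relevant to sidereal targets
    return helper.error_dict      # field name -> validation error message(s)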
| gpl-3.0 |
pinball1973b/git_work_slide | node_modules/node-gyp/gyp/pylib/gyp/win_tool.py | 1417 | 12751 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for Windows builds.
These functions are executed via gyp-win-tool when using the ninja generator.
"""
import os
import re
import shutil
import subprocess
import stat
import string
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# A regex matching an argument corresponding to the output filename passed to
# link.exe.
_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
def main(args):
executor = WinTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class WinTool(object):
"""This class performs all the Windows tooling steps. The methods can either
be executed directly, or dispatched from an argument list."""
def _UseSeparateMspdbsrv(self, env, args):
"""Allows to use a unique instance of mspdbsrv.exe per linker instead of a
shared one."""
if len(args) < 1:
raise Exception("Not enough arguments")
if args[0] != 'link.exe':
return
# Use the output filename passed to the linker to generate an endpoint name
# for mspdbsrv.exe.
endpoint_name = None
for arg in args:
m = _LINK_EXE_OUT_ARG.match(arg)
if m:
endpoint_name = re.sub(r'\W+', '',
'%s_%d' % (m.group('out'), os.getpid()))
break
if endpoint_name is None:
return
# Adds the appropriate environment variable. This will be read by link.exe
# to know which instance of mspdbsrv.exe it should connect to (if it's
# not set then the default endpoint is used).
env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like recursive-mirror to RecursiveMirror."""
return name_string.title().replace('-', '')
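  # (Editorial note, not in the original) For example, 'recursive-mirror' becomes
  # 'RecursiveMirror', which is how Dispatch() above resolves a command-line verb
  # to the ExecRecursiveMirror method defined below.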
def _GetEnv(self, arch):
"""Gets the saved environment from a file for a given architecture."""
# The environment is saved as an "environment block" (see CreateProcess
# and msvs_emulation for details). We convert to a dict here.
# Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
pairs = open(arch).read()[:-2].split('\0')
kvs = [item.split('=', 1) for item in pairs]
return dict(kvs)
def ExecStamp(self, path):
"""Simple stamp command."""
open(path, 'w').close()
def ExecRecursiveMirror(self, source, dest):
"""Emulation of rm -rf out && cp -af in out."""
if os.path.exists(dest):
if os.path.isdir(dest):
def _on_error(fn, path, excinfo):
# The operation failed, possibly because the file is set to
# read-only. If that's why, make it writable and try the op again.
if not os.access(path, os.W_OK):
os.chmod(path, stat.S_IWRITE)
fn(path)
shutil.rmtree(dest, onerror=_on_error)
else:
if not os.access(dest, os.W_OK):
# Attempt to make the file writable before deleting it.
os.chmod(dest, stat.S_IWRITE)
os.unlink(dest)
if os.path.isdir(source):
shutil.copytree(source, dest)
else:
shutil.copy2(source, dest)
def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
"""Filter diagnostic output from link that looks like:
' Creating library ui.dll.lib and object ui.dll.exp'
This happens when there are exports from the dll or exe.
"""
env = self._GetEnv(arch)
if use_separate_mspdbsrv == 'True':
self._UseSeparateMspdbsrv(env, args)
link = subprocess.Popen([args[0].replace('/', '\\')] + list(args[1:]),
shell=True,
env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, _ = link.communicate()
for line in out.splitlines():
if (not line.startswith(' Creating library ') and
not line.startswith('Generating code') and
not line.startswith('Finished generating code')):
print line
return link.returncode
def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
mt, rc, intermediate_manifest, *manifests):
"""A wrapper for handling creating a manifest resource and then executing
a link command."""
# The 'normal' way to do manifests is to have link generate a manifest
# based on gathering dependencies from the object files, then merge that
# manifest with other manifests supplied as sources, convert the merged
# manifest to a resource, and then *relink*, including the compiled
# version of the manifest resource. This breaks incremental linking, and
# is generally overly complicated. Instead, we merge all the manifests
# provided (along with one that includes what would normally be in the
# linker-generated one, see msvs_emulation.py), and include that into the
# first and only link. We still tell link to generate a manifest, but we
# only use that to assert that our simpler process did not miss anything.
variables = {
'python': sys.executable,
'arch': arch,
'out': out,
'ldcmd': ldcmd,
'resname': resname,
'mt': mt,
'rc': rc,
'intermediate_manifest': intermediate_manifest,
'manifests': ' '.join(manifests),
}
add_to_ld = ''
if manifests:
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(manifests)s -out:%(out)s.manifest' % variables)
if embed_manifest == 'True':
subprocess.check_call(
'%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
' %(out)s.manifest.rc %(resname)s' % variables)
subprocess.check_call(
'%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
'%(out)s.manifest.rc' % variables)
add_to_ld = ' %(out)s.manifest.res' % variables
subprocess.check_call(ldcmd + add_to_ld)
# Run mt.exe on the theoretically complete manifest we generated, merging
# it with the one the linker generated to confirm that the linker
# generated one does not add anything. This is strictly unnecessary for
# correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
# used in a #pragma comment.
if manifests:
# Merge the intermediate one with ours to .assert.manifest, then check
# that .assert.manifest is identical to ours.
subprocess.check_call(
'%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
'-manifest %(out)s.manifest %(intermediate_manifest)s '
'-out:%(out)s.assert.manifest' % variables)
assert_manifest = '%(out)s.assert.manifest' % variables
our_manifest = '%(out)s.manifest' % variables
# Load and normalize the manifests. mt.exe sometimes removes whitespace,
# and sometimes doesn't unfortunately.
with open(our_manifest, 'rb') as our_f:
with open(assert_manifest, 'rb') as assert_f:
our_data = our_f.read().translate(None, string.whitespace)
assert_data = assert_f.read().translate(None, string.whitespace)
if our_data != assert_data:
os.unlink(out)
def dump(filename):
sys.stderr.write('%s\n-----\n' % filename)
with open(filename, 'rb') as f:
sys.stderr.write(f.read() + '\n-----\n')
dump(intermediate_manifest)
dump(our_manifest)
dump(assert_manifest)
sys.stderr.write(
'Linker generated manifest "%s" added to final manifest "%s" '
'(result in "%s"). '
'Were /MANIFEST switches used in #pragma statements? ' % (
intermediate_manifest, our_manifest, assert_manifest))
return 1
def ExecManifestWrapper(self, arch, *args):
"""Run manifest tool with environment set. Strip out undesirable warning
(some XML blocks are recognized by the OS loader, but not the manifest
tool)."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if line and 'manifest authoring warning 81010002' not in line:
print line
return popen.returncode
def ExecManifestToRc(self, arch, *args):
"""Creates a resource file pointing a SxS assembly manifest.
|args| is tuple containing path to resource file, path to manifest file
and resource name which can be "1" (for executables) or "2" (for DLLs)."""
manifest_path, resource_path, resource_name = args
with open(resource_path, 'wb') as output:
output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
resource_name,
os.path.abspath(manifest_path).replace('\\', '/')))
def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
*flags):
"""Filter noisy filenames output from MIDL compile step that isn't
quietable via command line flags.
"""
args = ['midl', '/nologo'] + list(flags) + [
'/out', outdir,
'/tlb', tlb,
'/h', h,
'/dlldata', dlldata,
'/iid', iid,
'/proxy', proxy,
idl]
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
# Filter junk out of stdout, and write filtered versions. Output we want
# to filter is pairs of lines that look like this:
# Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
# objidl.idl
lines = out.splitlines()
prefixes = ('Processing ', '64 bit Processing ')
processing = set(os.path.basename(x)
for x in lines if x.startswith(prefixes))
for line in lines:
if not line.startswith(prefixes) and line not in processing:
print line
return popen.returncode
def ExecAsmWrapper(self, arch, *args):
"""Filter logo banner from invocations of asm.exe."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Copyright (C) Microsoft Corporation') and
not line.startswith('Microsoft (R) Macro Assembler') and
not line.startswith(' Assembling: ') and
line):
print line
return popen.returncode
def ExecRcWrapper(self, arch, *args):
"""Filter logo banner from invocations of rc.exe. Older versions of RC
don't support the /nologo flag."""
env = self._GetEnv(arch)
popen = subprocess.Popen(args, shell=True, env=env,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = popen.communicate()
for line in out.splitlines():
if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
not line.startswith('Copyright (C) Microsoft Corporation') and
line):
print line
return popen.returncode
def ExecActionWrapper(self, arch, rspfile, *dir):
"""Runs an action command line from a response file using the environment
for |arch|. If |dir| is supplied, use that as the working directory."""
env = self._GetEnv(arch)
# TODO(scottmg): This is a temporary hack to get some specific variables
# through to actions that are set after gyp-time. http://crbug.com/333738.
for k, v in os.environ.iteritems():
if k not in env:
env[k] = v
args = open(rspfile).read()
dir = dir[0] if dir else None
return subprocess.call(args, shell=True, env=env, cwd=dir)
def ExecClCompile(self, project_dir, selected_files):
"""Executed by msvs-ninja projects when the 'ClCompile' target is used to
build selected C/C++ files."""
project_dir = os.path.relpath(project_dir, BASE_DIR)
selected_files = selected_files.split(';')
ninja_targets = [os.path.join(project_dir, filename) + '^^'
for filename in selected_files]
cmd = ['ninja.exe']
cmd.extend(ninja_targets)
return subprocess.call(cmd, shell=True, cwd=BASE_DIR)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit |
cpennington/edx-platform | common/djangoapps/student/tests/test_enrollment.py | 4 | 14815 | """
Tests for student enrollment.
"""
import unittest
import ddt
import six
from django.conf import settings
from django.urls import reverse
from mock import patch
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from openedx.core.djangoapps.embargo.test_utils import restrict_course
from student.models import (
SCORE_RECALCULATION_DELAY_ON_ENROLLMENT_UPDATE,
CourseEnrollment,
CourseFullError,
EnrollmentClosedError
)
from student.roles import CourseInstructorRole, CourseStaffRole
from student.tests.factories import CourseEnrollmentAllowedFactory, UserFactory
from util.testing import UrlResetMixin
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class EnrollmentTest(UrlResetMixin, SharedModuleStoreTestCase):
"""
Test student enrollment, especially with different course modes.
"""
USERNAME = "Bob"
EMAIL = "[email protected]"
PASSWORD = "edx"
URLCONF_MODULES = ['openedx.core.djangoapps.embargo']
@classmethod
def setUpClass(cls):
super(EnrollmentTest, cls).setUpClass()
cls.course = CourseFactory.create()
cls.course_limited = CourseFactory.create()
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
""" Create a course and user, then log in. """
super(EnrollmentTest, self).setUp()
self.user = UserFactory.create(username=self.USERNAME, email=self.EMAIL, password=self.PASSWORD)
self.client.login(username=self.USERNAME, password=self.PASSWORD)
self.course_limited.max_student_enrollments_allowed = 1
self.store.update_item(self.course_limited, self.user.id)
self.urls = [
reverse('course_modes_choose', kwargs={'course_id': six.text_type(self.course.id)})
]
@ddt.data(
# Default (no course modes in the database)
# Expect that we're redirected to the dashboard
# and automatically enrolled
([], '', CourseMode.DEFAULT_MODE_SLUG),
# Audit / Verified
# We should always go to the "choose your course" page.
# We should also be enrolled as the default mode.
(['verified', 'audit'], 'course_modes_choose', CourseMode.DEFAULT_MODE_SLUG),
# Audit / Verified / Honor
# We should always go to the "choose your course" page.
# We should also be enrolled as the honor mode.
# Since honor and audit are currently offered together this precedence must
# be maintained.
(['honor', 'verified', 'audit'], 'course_modes_choose', CourseMode.HONOR),
# Professional ed
# Expect that we're sent to the "choose your track" page
# (which will, in turn, redirect us to a page where we can verify/pay)
# We should NOT be auto-enrolled, because that would be giving
# away an expensive course for free :)
(['professional'], 'course_modes_choose', None),
(['no-id-professional'], 'course_modes_choose', None),
)
@ddt.unpack
def test_enroll(self, course_modes, next_url, enrollment_mode):
# Create the course modes (if any) required for this test case
for mode_slug in course_modes:
CourseModeFactory.create(
course_id=self.course.id,
mode_slug=mode_slug,
mode_display_name=mode_slug,
)
# Reverse the expected next URL, if one is provided
# (otherwise, use an empty string, which the JavaScript client
# interprets as a redirect to the dashboard)
full_url = (
reverse(next_url, kwargs={'course_id': six.text_type(self.course.id)})
if next_url else next_url
)
# Enroll in the course and verify the URL we get sent to
resp = self._change_enrollment('enroll')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content.decode('utf-8'), full_url)
# If we're not expecting to be enrolled, verify that this is the case
if enrollment_mode is None:
self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course.id))
# Otherwise, verify that we're enrolled with the expected course mode
else:
self.assertTrue(CourseEnrollment.is_enrolled(self.user, self.course.id))
course_mode, is_active = CourseEnrollment.enrollment_mode_for_user(self.user, self.course.id)
self.assertTrue(is_active)
self.assertEqual(course_mode, enrollment_mode)
def test_unenroll(self):
# Enroll the student in the course
CourseEnrollment.enroll(self.user, self.course.id, mode="honor")
# Attempt to unenroll the student
resp = self._change_enrollment('unenroll')
self.assertEqual(resp.status_code, 200)
# Expect that we're no longer enrolled
self.assertFalse(CourseEnrollment.is_enrolled(self.user, self.course.id))
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_EMAIL_OPT_IN': True})
@patch('openedx.core.djangoapps.user_api.preferences.api.update_email_opt_in')
@ddt.data(
([], 'true'),
([], 'false'),
([], None),
(['honor', 'verified'], 'true'),
(['honor', 'verified'], 'false'),
(['honor', 'verified'], None),
(['professional'], 'true'),
(['professional'], 'false'),
(['professional'], None),
(['no-id-professional'], 'true'),
(['no-id-professional'], 'false'),
(['no-id-professional'], None),
)
@ddt.unpack
def test_enroll_with_email_opt_in(self, course_modes, email_opt_in, mock_update_email_opt_in):
# Create the course modes (if any) required for this test case
for mode_slug in course_modes:
CourseModeFactory.create(
course_id=self.course.id,
mode_slug=mode_slug,
mode_display_name=mode_slug,
)
# Enroll in the course
self._change_enrollment('enroll', email_opt_in=email_opt_in)
# Verify that the profile API has been called as expected
if email_opt_in is not None:
opt_in = email_opt_in == 'true'
mock_update_email_opt_in.assert_called_once_with(self.user, self.course.org, opt_in)
else:
self.assertFalse(mock_update_email_opt_in.called)
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def test_embargo_restrict(self):
# When accessing the course from an embargoed country,
# we should be blocked.
with restrict_course(self.course.id) as redirect_url:
response = self._change_enrollment('enroll')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.decode('utf-8'), redirect_url)
# Verify that we weren't enrolled
is_enrolled = CourseEnrollment.is_enrolled(self.user, self.course.id)
self.assertFalse(is_enrolled)
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def test_embargo_allow(self):
response = self._change_enrollment('enroll')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.content.decode('utf-8'), '')
# Verify that we were enrolled
is_enrolled = CourseEnrollment.is_enrolled(self.user, self.course.id)
self.assertTrue(is_enrolled)
def test_user_not_authenticated(self):
# Log out, so we're no longer authenticated
self.client.logout()
# Try to enroll, expecting a forbidden response
resp = self._change_enrollment('enroll')
self.assertEqual(resp.status_code, 403)
def test_missing_course_id_param(self):
resp = self.client.post(
reverse('change_enrollment'),
{'enrollment_action': 'enroll'}
)
self.assertEqual(resp.status_code, 400)
def test_unenroll_not_enrolled_in_course(self):
# Try unenroll without first enrolling in the course
resp = self._change_enrollment('unenroll')
self.assertEqual(resp.status_code, 400)
def test_invalid_enrollment_action(self):
resp = self._change_enrollment('not_an_action')
self.assertEqual(resp.status_code, 400)
def test_with_invalid_course_id(self):
CourseEnrollment.enroll(self.user, self.course.id, mode="honor")
resp = self._change_enrollment('unenroll', course_id="edx/")
self.assertEqual(resp.status_code, 400)
def test_enrollment_limit(self):
"""
Assert that in a course with max student limit set to 1, we can enroll staff and instructor along with
student. To make sure course full check excludes staff and instructors.
"""
self.assertEqual(self.course_limited.max_student_enrollments_allowed, 1)
user1 = UserFactory.create(username="tester1", email="[email protected]", password="test")
user2 = UserFactory.create(username="tester2", email="[email protected]", password="test")
# create staff on course.
staff = UserFactory.create(username="staff", email="[email protected]", password="test")
role = CourseStaffRole(self.course_limited.id)
role.add_users(staff)
# create instructor on course.
instructor = UserFactory.create(username="instructor", email="[email protected]", password="test")
role = CourseInstructorRole(self.course_limited.id)
role.add_users(instructor)
CourseEnrollment.enroll(staff, self.course_limited.id, check_access=True)
CourseEnrollment.enroll(instructor, self.course_limited.id, check_access=True)
self.assertTrue(
CourseEnrollment.objects.filter(course_id=self.course_limited.id, user=staff).exists()
)
self.assertTrue(
CourseEnrollment.objects.filter(course_id=self.course_limited.id, user=instructor).exists()
)
CourseEnrollment.enroll(user1, self.course_limited.id, check_access=True)
self.assertTrue(
CourseEnrollment.objects.filter(course_id=self.course_limited.id, user=user1).exists()
)
with self.assertRaises(CourseFullError):
CourseEnrollment.enroll(user2, self.course_limited.id, check_access=True)
self.assertFalse(
CourseEnrollment.objects.filter(course_id=self.course_limited.id, user=user2).exists()
)
def _change_enrollment(self, action, course_id=None, email_opt_in=None):
"""Change the student's enrollment status in a course.
Args:
action (str): The action to perform (either "enroll" or "unenroll")
Keyword Args:
course_id (unicode): If provided, use this course ID. Otherwise, use the
course ID created in the setup for this test.
            email_opt_in (unicode): If provided, pass this value along as
                an additional parameter in the POST request.
Returns:
Response
"""
if course_id is None:
course_id = six.text_type(self.course.id)
params = {
'enrollment_action': action,
'course_id': course_id
}
if email_opt_in:
params['email_opt_in'] = email_opt_in
return self.client.post(reverse('change_enrollment'), params)
def test_cea_enrolls_only_one_user(self):
"""
Tests that a CourseEnrollmentAllowed can be used by just one user.
If the user changes e-mail and then a second user tries to enroll with the same accepted e-mail,
the second enrollment should fail.
However, the original user can reuse the CEA many times.
"""
cea = CourseEnrollmentAllowedFactory(
email='[email protected]',
course_id=self.course.id,
auto_enroll=False,
)
# Still unlinked
self.assertIsNone(cea.user)
user1 = UserFactory.create(username="tester1", email="[email protected]", password="test")
user2 = UserFactory.create(username="tester2", email="[email protected]", password="test")
self.assertFalse(
CourseEnrollment.objects.filter(course_id=self.course.id, user=user1).exists()
)
user1.email = '[email protected]'
user1.save()
CourseEnrollment.enroll(user1, self.course.id, check_access=True)
self.assertTrue(
CourseEnrollment.objects.filter(course_id=self.course.id, user=user1).exists()
)
# The CEA is now linked
cea.refresh_from_db()
self.assertEqual(cea.user, user1)
# user2 wants to enroll too, (ab)using the same allowed e-mail, but cannot
user1.email = '[email protected]'
user1.save()
user2.email = '[email protected]'
user2.save()
with self.assertRaises(EnrollmentClosedError):
CourseEnrollment.enroll(user2, self.course.id, check_access=True)
# CEA still linked to user1. Also after unenrolling
cea.refresh_from_db()
self.assertEqual(cea.user, user1)
CourseEnrollment.unenroll(user1, self.course.id)
cea.refresh_from_db()
self.assertEqual(cea.user, user1)
# Enroll user1 again. Because it's the original owner of the CEA, the enrollment is allowed
CourseEnrollment.enroll(user1, self.course.id, check_access=True)
# Still same
cea.refresh_from_db()
self.assertEqual(cea.user, user1)
def test_score_recalculation_on_enrollment_update(self):
"""
        Test that an update in enrollment causes score recalculation.
Note:
Score recalculation task must be called with a delay of SCORE_RECALCULATION_DELAY_ON_ENROLLMENT_UPDATE
"""
course_modes = ['verified', 'audit']
for mode_slug in course_modes:
CourseModeFactory.create(
course_id=self.course.id,
mode_slug=mode_slug,
mode_display_name=mode_slug,
)
CourseEnrollment.enroll(self.user, self.course.id, mode="audit")
local_task_args = dict(
user_id=self.user.id,
course_key=str(self.course.id)
)
with patch(
'lms.djangoapps.grades.tasks.recalculate_course_and_subsection_grades_for_user.apply_async',
return_value=None
) as mock_task_apply:
CourseEnrollment.enroll(self.user, self.course.id, mode="verified")
mock_task_apply.assert_called_once_with(
countdown=SCORE_RECALCULATION_DELAY_ON_ENROLLMENT_UPDATE,
kwargs=local_task_args
)
| agpl-3.0 |
gfyoung/pandas | pandas/tests/indexes/test_engines.py | 4 | 8656 | import re
import numpy as np
import pytest
from pandas._libs import algos as libalgos, index as libindex
import pandas as pd
import pandas._testing as tm
@pytest.fixture(
params=[
(libindex.Int64Engine, np.int64),
(libindex.Int32Engine, np.int32),
(libindex.Int16Engine, np.int16),
(libindex.Int8Engine, np.int8),
(libindex.UInt64Engine, np.uint64),
(libindex.UInt32Engine, np.uint32),
(libindex.UInt16Engine, np.uint16),
(libindex.UInt8Engine, np.uint8),
(libindex.Float64Engine, np.float64),
(libindex.Float32Engine, np.float32),
],
ids=lambda x: x[0].__name__,
)
def numeric_indexing_engine_type_and_dtype(request):
return request.param
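# For orientation: each parametrized case supplies an (engine class, numpy dtype)
# pair, and the tests below construct engines as engine_type(lambda: arr, len(arr)),
# i.e. from a callable returning the backing array together with its length.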
class TestDatetimeEngine:
@pytest.mark.parametrize(
"scalar",
[
pd.Timedelta(pd.Timestamp("2016-01-01").asm8.view("m8[ns]")),
pd.Timestamp("2016-01-01").value,
pd.Timestamp("2016-01-01").to_pydatetime(),
pd.Timestamp("2016-01-01").to_datetime64(),
],
)
def test_not_contains_requires_timestamp(self, scalar):
dti1 = pd.date_range("2016-01-01", periods=3)
dti2 = dti1.insert(1, pd.NaT) # non-monotonic
dti3 = dti1.insert(3, dti1[0]) # non-unique
dti4 = pd.date_range("2016-01-01", freq="ns", periods=2_000_000)
dti5 = dti4.insert(0, dti4[0]) # over size threshold, not unique
msg = "|".join([re.escape(str(scalar)), re.escape(repr(scalar))])
for dti in [dti1, dti2, dti3, dti4, dti5]:
with pytest.raises(TypeError, match=msg):
scalar in dti._engine
with pytest.raises(KeyError, match=msg):
dti._engine.get_loc(scalar)
class TestTimedeltaEngine:
@pytest.mark.parametrize(
"scalar",
[
pd.Timestamp(pd.Timedelta(days=42).asm8.view("datetime64[ns]")),
pd.Timedelta(days=42).value,
pd.Timedelta(days=42).to_pytimedelta(),
pd.Timedelta(days=42).to_timedelta64(),
],
)
def test_not_contains_requires_timestamp(self, scalar):
tdi1 = pd.timedelta_range("42 days", freq="9h", periods=1234)
tdi2 = tdi1.insert(1, pd.NaT) # non-monotonic
tdi3 = tdi1.insert(3, tdi1[0]) # non-unique
tdi4 = pd.timedelta_range("42 days", freq="ns", periods=2_000_000)
tdi5 = tdi4.insert(0, tdi4[0]) # over size threshold, not unique
msg = "|".join([re.escape(str(scalar)), re.escape(repr(scalar))])
for tdi in [tdi1, tdi2, tdi3, tdi4, tdi5]:
with pytest.raises(TypeError, match=msg):
scalar in tdi._engine
with pytest.raises(KeyError, match=msg):
tdi._engine.get_loc(scalar)
class TestNumericEngine:
def test_is_monotonic(self, numeric_indexing_engine_type_and_dtype):
engine_type, dtype = numeric_indexing_engine_type_and_dtype
num = 1000
arr = np.array([1] * num + [2] * num + [3] * num, dtype=dtype)
# monotonic increasing
engine = engine_type(lambda: arr, len(arr))
assert engine.is_monotonic_increasing is True
assert engine.is_monotonic_decreasing is False
# monotonic decreasing
engine = engine_type(lambda: arr[::-1], len(arr))
assert engine.is_monotonic_increasing is False
assert engine.is_monotonic_decreasing is True
        # neither monotonic increasing nor decreasing
arr = np.array([1] * num + [2] * num + [1] * num, dtype=dtype)
engine = engine_type(lambda: arr[::-1], len(arr))
assert engine.is_monotonic_increasing is False
assert engine.is_monotonic_decreasing is False
def test_is_unique(self, numeric_indexing_engine_type_and_dtype):
engine_type, dtype = numeric_indexing_engine_type_and_dtype
# unique
arr = np.array([1, 3, 2], dtype=dtype)
engine = engine_type(lambda: arr, len(arr))
assert engine.is_unique is True
# not unique
arr = np.array([1, 2, 1], dtype=dtype)
engine = engine_type(lambda: arr, len(arr))
assert engine.is_unique is False
def test_get_loc(self, numeric_indexing_engine_type_and_dtype):
engine_type, dtype = numeric_indexing_engine_type_and_dtype
# unique
arr = np.array([1, 2, 3], dtype=dtype)
engine = engine_type(lambda: arr, len(arr))
assert engine.get_loc(2) == 1
# monotonic
num = 1000
arr = np.array([1] * num + [2] * num + [3] * num, dtype=dtype)
engine = engine_type(lambda: arr, len(arr))
assert engine.get_loc(2) == slice(1000, 2000)
# not monotonic
arr = np.array([1, 2, 3] * num, dtype=dtype)
engine = engine_type(lambda: arr, len(arr))
expected = np.array([False, True, False] * num, dtype=bool)
result = engine.get_loc(2)
assert (result == expected).all()
def test_get_backfill_indexer(self, numeric_indexing_engine_type_and_dtype):
engine_type, dtype = numeric_indexing_engine_type_and_dtype
arr = np.array([1, 5, 10], dtype=dtype)
engine = engine_type(lambda: arr, len(arr))
new = np.arange(12, dtype=dtype)
result = engine.get_backfill_indexer(new)
expected = libalgos.backfill(arr, new)
tm.assert_numpy_array_equal(result, expected)
def test_get_pad_indexer(self, numeric_indexing_engine_type_and_dtype):
engine_type, dtype = numeric_indexing_engine_type_and_dtype
arr = np.array([1, 5, 10], dtype=dtype)
engine = engine_type(lambda: arr, len(arr))
new = np.arange(12, dtype=dtype)
result = engine.get_pad_indexer(new)
expected = libalgos.pad(arr, new)
tm.assert_numpy_array_equal(result, expected)
class TestObjectEngine:
engine_type = libindex.ObjectEngine
dtype = np.object_
values = list("abc")
def test_is_monotonic(self):
num = 1000
arr = np.array(["a"] * num + ["a"] * num + ["c"] * num, dtype=self.dtype)
# monotonic increasing
engine = self.engine_type(lambda: arr, len(arr))
assert engine.is_monotonic_increasing is True
assert engine.is_monotonic_decreasing is False
# monotonic decreasing
engine = self.engine_type(lambda: arr[::-1], len(arr))
assert engine.is_monotonic_increasing is False
assert engine.is_monotonic_decreasing is True
        # neither monotonic increasing nor decreasing
arr = np.array(["a"] * num + ["b"] * num + ["a"] * num, dtype=self.dtype)
engine = self.engine_type(lambda: arr[::-1], len(arr))
assert engine.is_monotonic_increasing is False
assert engine.is_monotonic_decreasing is False
def test_is_unique(self):
# unique
arr = np.array(self.values, dtype=self.dtype)
engine = self.engine_type(lambda: arr, len(arr))
assert engine.is_unique is True
# not unique
arr = np.array(["a", "b", "a"], dtype=self.dtype)
engine = self.engine_type(lambda: arr, len(arr))
assert engine.is_unique is False
def test_get_loc(self):
# unique
arr = np.array(self.values, dtype=self.dtype)
engine = self.engine_type(lambda: arr, len(arr))
assert engine.get_loc("b") == 1
# monotonic
num = 1000
arr = np.array(["a"] * num + ["b"] * num + ["c"] * num, dtype=self.dtype)
engine = self.engine_type(lambda: arr, len(arr))
assert engine.get_loc("b") == slice(1000, 2000)
# not monotonic
arr = np.array(self.values * num, dtype=self.dtype)
engine = self.engine_type(lambda: arr, len(arr))
expected = np.array([False, True, False] * num, dtype=bool)
result = engine.get_loc("b")
assert (result == expected).all()
def test_get_backfill_indexer(self):
arr = np.array(["a", "e", "j"], dtype=self.dtype)
engine = self.engine_type(lambda: arr, len(arr))
new = np.array(list("abcdefghij"), dtype=self.dtype)
result = engine.get_backfill_indexer(new)
expected = libalgos.backfill["object"](arr, new)
tm.assert_numpy_array_equal(result, expected)
def test_get_pad_indexer(self):
arr = np.array(["a", "e", "j"], dtype=self.dtype)
engine = self.engine_type(lambda: arr, len(arr))
new = np.array(list("abcdefghij"), dtype=self.dtype)
result = engine.get_pad_indexer(new)
expected = libalgos.pad["object"](arr, new)
tm.assert_numpy_array_equal(result, expected)
| bsd-3-clause |
GraemeFulton/job-search | docutils-0.12/build/lib/docutils/languages/nl.py | 200 | 1865 | # $Id: nl.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Martijn Pieters <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Dutch-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
labels = {
# fixed: language-dependent
'author': 'Auteur',
'authors': 'Auteurs',
'organization': 'Organisatie',
'address': 'Adres',
'contact': 'Contact',
'version': 'Versie',
'revision': 'Revisie',
'status': 'Status',
'date': 'Datum',
'copyright': 'Copyright',
'dedication': 'Toewijding',
'abstract': 'Samenvatting',
'attention': 'Attentie!',
'caution': 'Let op!',
'danger': '!GEVAAR!',
'error': 'Fout',
'hint': 'Hint',
'important': 'Belangrijk',
'note': 'Opmerking',
'tip': 'Tip',
'warning': 'Waarschuwing',
'contents': 'Inhoud'}
"""Mapping of node class name to label text."""
bibliographic_fields = {
# language-dependent: fixed
'auteur': 'author',
'auteurs': 'authors',
'organisatie': 'organization',
'adres': 'address',
'contact': 'contact',
'versie': 'version',
'revisie': 'revision',
'status': 'status',
'datum': 'date',
'copyright': 'copyright',
'toewijding': 'dedication',
'samenvatting': 'abstract'}
"""Dutch (lowcased) to canonical name mapping for bibliographic fields."""
author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| gpl-2.0 |
hurricup/intellij-community | python/lib/Lib/site-packages/django/contrib/sites/models.py | 387 | 2867 | from django.db import models
from django.utils.translation import ugettext_lazy as _
SITE_CACHE = {}
class SiteManager(models.Manager):
def get_current(self):
"""
Returns the current ``Site`` based on the SITE_ID in the
project's settings. The ``Site`` object is cached the first
time it's retrieved from the database.
"""
from django.conf import settings
try:
sid = settings.SITE_ID
except AttributeError:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You're using the Django \"sites framework\" without having set the SITE_ID setting. Create a site in your database and set the SITE_ID setting to fix this error.")
try:
current_site = SITE_CACHE[sid]
except KeyError:
current_site = self.get(pk=sid)
SITE_CACHE[sid] = current_site
return current_site
def clear_cache(self):
"""Clears the ``Site`` object cache."""
global SITE_CACHE
SITE_CACHE = {}
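# Illustrative usage sketch (assuming a project where settings.SITE_ID = 1 and a
# matching Site row exists):
#
#     current = Site.objects.get_current()   # first call hits the database
#     current = Site.objects.get_current()   # subsequent calls come from SITE_CACHE
#     Site.objects.clear_cache()             # forces the next call to query again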
class Site(models.Model):
domain = models.CharField(_('domain name'), max_length=100)
name = models.CharField(_('display name'), max_length=50)
objects = SiteManager()
class Meta:
db_table = 'django_site'
verbose_name = _('site')
verbose_name_plural = _('sites')
ordering = ('domain',)
def __unicode__(self):
return self.domain
def save(self, *args, **kwargs):
super(Site, self).save(*args, **kwargs)
# Cached information will likely be incorrect now.
if self.id in SITE_CACHE:
del SITE_CACHE[self.id]
def delete(self):
pk = self.pk
super(Site, self).delete()
try:
del SITE_CACHE[pk]
except KeyError:
pass
class RequestSite(object):
"""
A class that shares the primary interface of Site (i.e., it has
``domain`` and ``name`` attributes) but gets its data from a Django
HttpRequest object rather than from a database.
The save() and delete() methods raise NotImplementedError.
"""
def __init__(self, request):
self.domain = self.name = request.get_host()
def __unicode__(self):
return self.domain
def save(self, force_insert=False, force_update=False):
raise NotImplementedError('RequestSite cannot be saved.')
def delete(self):
raise NotImplementedError('RequestSite cannot be deleted.')
def get_current_site(request):
"""
Checks if contrib.sites is installed and returns either the current
``Site`` object or a ``RequestSite`` object based on the request.
"""
if Site._meta.installed:
current_site = Site.objects.get_current()
else:
current_site = RequestSite(request)
return current_site
| apache-2.0 |
rgommers/statsmodels | statsmodels/sandbox/regression/tests/test_gmm_poisson.py | 31 | 13338 | '''
TestGMMMultTwostepDefault() has lower precision
'''
from statsmodels.compat.python import lmap
import numpy as np
from numpy.testing.decorators import skipif
import pandas
import scipy
from scipy import stats
from statsmodels.regression.linear_model import OLS
from statsmodels.sandbox.regression import gmm
from numpy.testing import assert_allclose, assert_equal
from statsmodels.compat.scipy import NumpyVersion
def get_data():
import os
curdir = os.path.split(__file__)[0]
dt = pandas.read_csv(os.path.join(curdir, 'racd10data_with_transformed.csv'))
# Transformations compared to original data
##dt3['income'] /= 10.
##dt3['aget'] = (dt3['age'] - dt3['age'].min()) / 5.
##dt3['aget2'] = dt3['aget']**2
# How do we do this with pandas
mask = ~((np.asarray(dt['private']) == 1) & (dt['medicaid'] == 1))
mask = mask & (dt['docvis'] <= 70)
dt3 = dt[mask]
dt3['const'] = 1 # add constant
return dt3
DATA = get_data()
#------------- moment conditions for example
def moment_exponential_add(params, exog, exp=True):
if not np.isfinite(params).all():
print("invalid params", params)
# moment condition without instrument
if exp:
predicted = np.exp(np.dot(exog, params))
#if not np.isfinite(predicted).all():
#print "invalid predicted", predicted
#raise RuntimeError('invalid predicted')
predicted = np.clip(predicted, 0, 1e100) # try to avoid inf
else:
predicted = np.dot(exog, params)
return predicted
def moment_exponential_mult(params, data, exp=True):
# multiplicative error model
endog = data[:,0]
exog = data[:,1:]
if not np.isfinite(params).all():
print("invalid params", params)
# moment condition without instrument
if exp:
predicted = np.exp(np.dot(exog, params))
predicted = np.clip(predicted, 0, 1e100) # avoid inf
resid = endog / predicted - 1
if not np.isfinite(resid).all():
print("invalid resid", resid)
else:
resid = endog - np.dot(exog, params)
return resid
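# For illustration: the moment function above expects a single array whose first
# column is the dependent variable. The test classes below therefore pass
#     np.column_stack((endog, exog))
# as the model's exog, so that data[:, 0] recovers the dependent variable and
# data[:, 1:] the regressors inside moment_exponential_mult.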
#------------------- test classes
# copied from test_gmm.py, with changes
class CheckGMM(object):
# default tolerance, overwritten by subclasses
params_tol = [5e-6, 5e-6]
bse_tol = [5e-7, 5e-7]
q_tol = [5e-6, 1e-9]
j_tol = [5e-5, 1e-9]
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
rtol, atol = self.params_tol
assert_allclose(res1.params, res2.params, rtol=rtol, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=atol)
rtol, atol = self.bse_tol
assert_allclose(res1.bse, res2.bse, rtol=rtol, atol=0)
assert_allclose(res1.bse, res2.bse, rtol=0, atol=atol)
def test_other(self):
res1, res2 = self.res1, self.res2
        rtol, atol = self.q_tol
        assert_allclose(res1.q, res2.Q, rtol=rtol, atol=atol)
        rtol, atol = self.j_tol
        assert_allclose(res1.jval, res2.J, rtol=rtol, atol=atol)
j, jpval, jdf = res1.jtest()
# j and jval should be the same
assert_allclose(res1.jval, res2.J, rtol=13, atol=13)
#pvalue is not saved in Stata results
pval = stats.chi2.sf(res2.J, res2.J_df)
#assert_allclose(jpval, pval, rtol=1e-4, atol=1e-6)
assert_allclose(jpval, pval, rtol=rtol, atol=atol)
assert_equal(jdf, res2.J_df)
def test_smoke(self):
res1 = self.res1
res1.summary()
class TestGMMAddOnestep(CheckGMM):
@classmethod
def setup_class(self):
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
self.bse_tol = [5e-6, 5e-7]
        self.q_tol = [0.04, 0]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(np.log(endog+1), exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog, exog, instrument, moment_exponential_add)
res0 = mod.fit(start, maxiter=0, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False})
self.res1 = res0
from .results_gmm_poisson import results_addonestep as results
self.res2 = results
class TestGMMAddTwostep(CheckGMM):
@classmethod
def setup_class(self):
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
self.bse_tol = [5e-6, 5e-7]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(np.log(endog+1), exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog, exog, instrument, moment_exponential_add)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res0
from .results_gmm_poisson import results_addtwostep as results
self.res2 = results
class TestGMMMultOnestep(CheckGMM):
#compares has_optimal_weights=True with Stata's has_optimal_weights=False
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [5e-6, 5e-7]
self.q_tol = [0.04, 0]
self.j_tol = [0.04, 0]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=0, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res0
from .results_gmm_poisson import results_multonestep as results
self.res2 = results
class TestGMMMultTwostep(CheckGMM):
#compares has_optimal_weights=True with Stata's has_optimal_weights=False
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [5e-6, 5e-7]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res0
from .results_gmm_poisson import results_multtwostep as results
self.res2 = results
class TestGMMMultTwostepDefault(CheckGMM):
# compares my defaults with the same options in Stata
# agreement is not very high, maybe vce(unadjusted) is different after all
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [0.004, 5e-4]
self.params_tol = [5e-5, 5e-5]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
#wargs={'centered':True}, has_optimal_weights=True
)
self.res1 = res0
from .results_gmm_poisson import results_multtwostepdefault as results
self.res2 = results
class TestGMMMultTwostepCenter(CheckGMM):
#compares my defaults with the same options in Stata
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [5e-4, 5e-5]
self.params_tol = [5e-5, 5e-5]
        self.q_tol = [5e-5, 1e-8]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':True}, has_optimal_weights=False
)
self.res1 = res0
from .results_gmm_poisson import results_multtwostepcenter as results
self.res2 = results
def test_more(self):
# from Stata `overid`
J_df = 1
J_p = 0.332254330027383
J = 0.940091427212973
j, jpval, jdf = self.res1.jtest()
assert_allclose(jpval, J_p, rtol=5e-5, atol=0)
if __name__ == '__main__':
tt = TestGMMAddOnestep()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMAddTwostep()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMMultOnestep()
tt.setup_class()
tt.test_basic()
#tt.test_other()
tt = TestGMMMultTwostep()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMMultTwostepDefault()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMMultTwostepCenter()
tt.setup_class()
tt.test_basic()
tt.test_other()
| bsd-3-clause |
Louiiiss/ros_asr | src/grammar/mit_g2p_tools/g2p/Evaluation.py | 4 | 6540 | from __future__ import division
__author__ = 'Maximilian Bisani'
__version__ = '$LastChangedRevision: 1668 $'
__date__ = '$LastChangedDate: 2007-06-02 18:14:47 +0200 (Sat, 02 Jun 2007) $'
__copyright__ = 'Copyright (c) 2004-2005 RWTH Aachen University'
__license__ = """
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License Version 2 (June
1991) as published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, you will find it at
http://www.gnu.org/licenses/gpl.html, or write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
USA.
Should a provision of no. 9 and 10 of the GNU General Public License
be invalid or become invalid, a valid provision is deemed to have been
agreed upon which comes closest to what the parties intended
commercially. In any case guarantee/warranty shall be limited to gross
negligent actions or intended actions or fraudulent concealment.
"""
from sequitur_ import align
class Result:
def __init__(self, name = None, tableFile = None):
self.name = name
self.tableFile = tableFile
self.nStringsTranslated = 0
self.nStringsFailed = 0
self.nSymbolsTranslated = 0
self.nSymbolsFailed = 0
self.nInsertions = 0
self.nDeletions = 0
self.nSubstitutions = 0
self.nStringErrors = 0
if self.tableFile:
row = [ column for column, var in self.tableFormat if column is not None ]
print >> self.tableFile, u'\t'.join(row)
tableFormat = [
(None, '"".join(source)'),
('weight', 'weight'),
('symbols', 'nSymbols'),
('ins', 'nInsertions'),
('del', 'nDeletions'),
('sub', 'nSubstitutions'),
('err', 'nStringErrors')]
def accu(self, source, reference, candidate, alignment, errors, weight = 1):
self.nStringsTranslated += weight
if errors > 0:
self.nStringErrors += weight
nStringErrors = weight
else:
nStringErrors = 0
nSymbols = len(reference) * weight
self.nSymbolsTranslated += nSymbols
nInsertions = 0
nDeletions = 0
nSubstitutions = 0
for ss, rr in alignment:
if ss is None:
assert rr is not None
nInsertions += weight
elif rr is None:
assert ss is not None
nDeletions += weight
elif ss == rr:
pass
else:
nSubstitutions += weight
self.nInsertions += nInsertions
self.nDeletions += nDeletions
self.nSubstitutions += nSubstitutions
if self.tableFile:
row = [ unicode(eval(var)) for column, var in self.tableFormat ]
print >> self.tableFile, u'\t'.join(row)
def accuFailure(self, reference, weight = 1):
self.nStringsFailed += weight
self.nSymbolsFailed += len(reference) * weight
def relativeCount(self, n, total):
if total:
return '%d (%1.2f%%)' % (n, 100.0 * float(n) / float(total))
else:
return '%d (n/a)' % n
stringError = property(
lambda self: self.relativeCount(self.nStringsIncorrect, self.nStrings))
symbolError = property(
lambda self: self.relativeCount(self.nSymbolsIncorrect, self.nSymbols))
def __getattr__(self, attr):
if attr.startswith('rc:'):
n, m = attr[3:].split('/')
return self.relativeCount(getattr(self, n), getattr(self, m))
elif attr == 'nStrings':
return self.nStringsTranslated + self.nStringsFailed
elif attr == 'nStringsIncorrect':
return self.nStringErrors + self.nStringsFailed
elif attr == 'nSymbols':
return self.nSymbolsTranslated + self.nSymbolsFailed
elif attr == 'nSymbolErrors':
return self.nInsertions + self.nDeletions + self.nSubstitutions
elif attr == 'nSymbolsIncorrect':
return self.nSymbolErrors + self.nSymbolsFailed
else:
raise AttributeError(attr)
def __getitem__(self, key):
return getattr(self, key)
template = """%(name)s
total: %(nStrings)d strings, %(nSymbols)d symbols
successfully translated: %(rc:nStringsTranslated/nStrings)s strings, %(rc:nSymbolsTranslated/nSymbols)s symbols
string errors: %(rc:nStringErrors/nStringsTranslated)s
symbol errors: %(rc:nSymbolErrors/nSymbolsTranslated)s
insertions: %(rc:nInsertions/nSymbolsTranslated)s
deletions: %(rc:nDeletions/nSymbolsTranslated)s
substitutions: %(rc:nSubstitutions/nSymbolsTranslated)s
translation failed: %(rc:nStringsFailed/nStrings)s strings, %(rc:nSymbolsFailed/nSymbols)s symbols
total string errors: %(rc:nStringsIncorrect/nStrings)s
total symbol errors: %(rc:nSymbolsIncorrect/nSymbols)s
"""
def __str__(self):
return self.template % self
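# Illustration of the statistics interface: keys of the form
# 'rc:<numerator>/<denominator>' are resolved by __getattr__ into formatted
# relative counts, e.g. with nStringErrors == 2 and nStringsTranslated == 100,
#     result['rc:nStringErrors/nStringsTranslated']   # -> '2 (2.00%)'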
def showAlignedResult(source, alignment, errors, out):
vis = []
for ss, rr in alignment:
if ss is None:
vis.append('\033[0;32m%s\033[0m' % rr)
elif rr is None:
vis.append('\033[0;31m[%s]\033[0m' % ss)
elif ss == rr:
vis.append('%s' % rr)
else:
vis.append('\033[0;31m%s/%s\033[0m' % (rr, ss))
print >> out, u'%s\t%s\t(%d errors)' % (''.join(source), ' '.join(vis), errors)
def collateSample(sample):
sources = []
references = {}
for source, reference in sample:
if source in references:
references[source].append(reference)
else:
sources.append(source)
references[source] = [reference]
return sources, references
class Evaluator(object):
resultFile = None
compareFilter = None
verboseLog = None
def setSample(self, sample):
self.sources, self.references = collateSample(sample)
def evaluate(self, translator):
result = Result(tableFile = self.resultFile)
for source in self.sources:
references = self.references[source]
if self.compareFilter:
references = map(self.compareFilter, references)
try:
candidate = translator(source)
except translator.TranslationFailure:
result.accuFailure(references[0])
continue
if self.compareFilter:
candidate = self.compareFilter(candidate)
eval = []
for reference in references:
alignment, errors = align(reference, candidate)
eval.append((errors, reference, alignment))
eval.sort()
errors, reference, alignment = eval[0]
result.accu(source, reference, candidate, alignment, errors)
if self.verboseLog:
showAlignedResult(source, alignment, errors, self.verboseLog)
return result
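# A hedged usage sketch ('translator' stands for any callable object exposing a
# TranslationFailure exception attribute, as assumed by evaluate() above):
#
#     evaluator = Evaluator()
#     evaluator.setSample([(source, reference), ...])
#     result = evaluator.evaluate(translator)
#     print result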
| gpl-2.0 |
sergio-incaser/bank-payment | account_banking_pain_base/__init__.py | 11 | 1104 | # -*- encoding: utf-8 -*-
##############################################################################
#
# PAIN Base module for Odoo
# Copyright (C) 2013 Akretion (http://www.akretion.com)
# @author: Alexis de Lattre <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
from .post_install import set_default_initiating_party
| agpl-3.0 |
TeamBliss-LP/android_external_skia | gm/rebaseline_server/results.py | 66 | 11097 | #!/usr/bin/python
"""
Copyright 2013 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Repackage expected/actual GM results as needed by our HTML rebaseline viewer.
"""
# System-level imports
import fnmatch
import os
import re
# Imports from within Skia
import fix_pythonpath # must do this first
import gm_json
import imagepairset
# Keys used to link an image to a particular GM test.
# NOTE: Keep these in sync with static/constants.js
VALUE__HEADER__SCHEMA_VERSION = 3
KEY__EXPECTATIONS__BUGS = gm_json.JSONKEY_EXPECTEDRESULTS_BUGS
KEY__EXPECTATIONS__IGNOREFAILURE = gm_json.JSONKEY_EXPECTEDRESULTS_IGNOREFAILURE
KEY__EXPECTATIONS__REVIEWED = gm_json.JSONKEY_EXPECTEDRESULTS_REVIEWED
KEY__EXTRACOLUMNS__BUILDER = 'builder'
KEY__EXTRACOLUMNS__CONFIG = 'config'
KEY__EXTRACOLUMNS__RESULT_TYPE = 'resultType'
KEY__EXTRACOLUMNS__TEST = 'test'
KEY__HEADER__DATAHASH = 'dataHash'
KEY__HEADER__IS_EDITABLE = 'isEditable'
KEY__HEADER__IS_EXPORTED = 'isExported'
KEY__HEADER__IS_STILL_LOADING = 'resultsStillLoading'
KEY__HEADER__RESULTS_ALL = 'all'
KEY__HEADER__RESULTS_FAILURES = 'failures'
KEY__HEADER__SCHEMA_VERSION = 'schemaVersion'
KEY__HEADER__TIME_NEXT_UPDATE_AVAILABLE = 'timeNextUpdateAvailable'
KEY__HEADER__TIME_UPDATED = 'timeUpdated'
KEY__HEADER__TYPE = 'type'
KEY__RESULT_TYPE__FAILED = gm_json.JSONKEY_ACTUALRESULTS_FAILED
KEY__RESULT_TYPE__FAILUREIGNORED = gm_json.JSONKEY_ACTUALRESULTS_FAILUREIGNORED
KEY__RESULT_TYPE__NOCOMPARISON = gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON
KEY__RESULT_TYPE__SUCCEEDED = gm_json.JSONKEY_ACTUALRESULTS_SUCCEEDED
IMAGE_FILENAME_RE = re.compile(gm_json.IMAGE_FILENAME_PATTERN)
IMAGE_FILENAME_FORMATTER = '%s_%s.png' # pass in (testname, config)
PARENT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
DEFAULT_ACTUALS_DIR = '.gm-actuals'
DEFAULT_GENERATED_IMAGES_ROOT = os.path.join(
PARENT_DIRECTORY, '.generated-images')
# Define the default set of builders we will process expectations/actuals for.
# This allows us to ignore builders for which we don't maintain expectations
# (trybots, Valgrind, ASAN, TSAN), and avoid problems like
# https://code.google.com/p/skia/issues/detail?id=2036 ('rebaseline_server
# produces error when trying to add baselines for ASAN/TSAN builders')
DEFAULT_MATCH_BUILDERS_PATTERN_LIST = ['.*']
DEFAULT_SKIP_BUILDERS_PATTERN_LIST = [
'.*-Trybot', '.*Valgrind.*', '.*TSAN.*', '.*ASAN.*']
class BaseComparisons(object):
"""Base class for generating summary of comparisons between two image sets.
"""
def get_results_of_type(self, results_type):
"""Return results of some/all tests (depending on 'results_type' parameter).
Args:
results_type: string describing which types of results to include; must
be one of the RESULTS_* constants
Results are returned in a dictionary as output by ImagePairSet.as_dict().
"""
return self._results[results_type]
def get_packaged_results_of_type(self, results_type, reload_seconds=None,
is_editable=False, is_exported=True):
"""Package the results of some/all tests as a complete response_dict.
Args:
results_type: string indicating which set of results to return;
must be one of the RESULTS_* constants
reload_seconds: if specified, note that new results may be available once
these results are reload_seconds old
is_editable: whether clients are allowed to submit new baselines
is_exported: whether these results are being made available to other
network hosts
"""
response_dict = self._results[results_type]
time_updated = self.get_timestamp()
response_dict[imagepairset.KEY__ROOT__HEADER] = {
KEY__HEADER__SCHEMA_VERSION: (
VALUE__HEADER__SCHEMA_VERSION),
# Timestamps:
# 1. when this data was last updated
# 2. when the caller should check back for new data (if ever)
KEY__HEADER__TIME_UPDATED: time_updated,
KEY__HEADER__TIME_NEXT_UPDATE_AVAILABLE: (
(time_updated+reload_seconds) if reload_seconds else None),
# The type we passed to get_results_of_type()
KEY__HEADER__TYPE: results_type,
# Hash of dataset, which the client must return with any edits--
# this ensures that the edits were made to a particular dataset.
KEY__HEADER__DATAHASH: str(hash(repr(
response_dict[imagepairset.KEY__ROOT__IMAGEPAIRS]))),
# Whether the server will accept edits back.
KEY__HEADER__IS_EDITABLE: is_editable,
# Whether the service is accessible from other hosts.
KEY__HEADER__IS_EXPORTED: is_exported,
}
return response_dict
def get_timestamp(self):
"""Return the time at which this object was created, in seconds past epoch
(UTC).
"""
return self._timestamp
_match_builders_pattern_list = [
re.compile(p) for p in DEFAULT_MATCH_BUILDERS_PATTERN_LIST]
_skip_builders_pattern_list = [
re.compile(p) for p in DEFAULT_SKIP_BUILDERS_PATTERN_LIST]
def set_match_builders_pattern_list(self, pattern_list):
"""Override the default set of builders we should process.
The default is DEFAULT_MATCH_BUILDERS_PATTERN_LIST .
Note that skip_builders_pattern_list overrides this; regardless of whether a
builder is in the "match" list, if it's in the "skip" list, we will skip it.
Args:
pattern_list: list of regex patterns; process builders that match any
entry within this list
"""
if pattern_list == None:
pattern_list = []
self._match_builders_pattern_list = [re.compile(p) for p in pattern_list]
def set_skip_builders_pattern_list(self, pattern_list):
"""Override the default set of builders we should skip while processing.
The default is DEFAULT_SKIP_BUILDERS_PATTERN_LIST .
This overrides match_builders_pattern_list; regardless of whether a
builder is in the "match" list, if it's in the "skip" list, we will skip it.
Args:
pattern_list: list of regex patterns; skip builders that match any
entry within this list
"""
if pattern_list == None:
pattern_list = []
self._skip_builders_pattern_list = [re.compile(p) for p in pattern_list]
def _ignore_builder(self, builder):
"""Returns True if we should skip processing this builder.
Args:
builder: name of this builder, as a string
Returns:
True if we should ignore expectations and actuals for this builder.
"""
for pattern in self._skip_builders_pattern_list:
if pattern.match(builder):
return True
for pattern in self._match_builders_pattern_list:
if pattern.match(builder):
return False
return True
def _read_builder_dicts_from_root(self, root, pattern='*.json'):
"""Read all JSON dictionaries within a directory tree.
Skips any dictionaries belonging to a builder we have chosen to ignore.
Args:
root: path to root of directory tree
pattern: which files to read within root (fnmatch-style pattern)
Returns:
A meta-dictionary containing all the JSON dictionaries found within
the directory tree, keyed by builder name (the basename of the directory
where each JSON dictionary was found).
Raises:
IOError if root does not refer to an existing directory
"""
# I considered making this call _read_dicts_from_root(), but I decided
# it was better to prune out the ignored builders within the os.walk().
if not os.path.isdir(root):
raise IOError('no directory found at path %s' % root)
meta_dict = {}
for dirpath, dirnames, filenames in os.walk(root):
for matching_filename in fnmatch.filter(filenames, pattern):
builder = os.path.basename(dirpath)
if self._ignore_builder(builder):
continue
full_path = os.path.join(dirpath, matching_filename)
meta_dict[builder] = gm_json.LoadFromFile(full_path)
return meta_dict
def _read_dicts_from_root(self, root, pattern='*.json'):
"""Read all JSON dictionaries within a directory tree.
Args:
root: path to root of directory tree
pattern: which files to read within root (fnmatch-style pattern)
Returns:
A meta-dictionary containing all the JSON dictionaries found within
the directory tree, keyed by the pathname (relative to root) of each JSON
dictionary.
Raises:
IOError if root does not refer to an existing directory
"""
if not os.path.isdir(root):
raise IOError('no directory found at path %s' % root)
meta_dict = {}
for abs_dirpath, dirnames, filenames in os.walk(root):
rel_dirpath = os.path.relpath(abs_dirpath, root)
for matching_filename in fnmatch.filter(filenames, pattern):
abs_path = os.path.join(abs_dirpath, matching_filename)
rel_path = os.path.join(rel_dirpath, matching_filename)
meta_dict[rel_path] = gm_json.LoadFromFile(abs_path)
return meta_dict
@staticmethod
def _read_noncomment_lines(path):
"""Return a list of all noncomment lines within a file.
(A "noncomment" line is one that does not start with a '#'.)
Args:
path: path to file
"""
lines = []
with open(path, 'r') as fh:
for line in fh:
if not line.startswith('#'):
lines.append(line.strip())
return lines
@staticmethod
def _create_relative_url(hashtype_and_digest, test_name):
"""Returns the URL for this image, relative to GM_ACTUALS_ROOT_HTTP_URL.
If we don't have a record of this image, returns None.
Args:
hashtype_and_digest: (hash_type, hash_digest) tuple, or None if we
don't have a record of this image
test_name: string; name of the GM test that created this image
"""
if not hashtype_and_digest:
return None
return gm_json.CreateGmRelativeUrl(
test_name=test_name,
hash_type=hashtype_and_digest[0],
hash_digest=hashtype_and_digest[1])
@staticmethod
def combine_subdicts(input_dict):
""" Flatten out a dictionary structure by one level.
Input:
{
KEY_A1 : {
KEY_B1 : VALUE_B1,
},
KEY_A2 : {
KEY_B2 : VALUE_B2,
}
}
Output:
{
KEY_B1 : VALUE_B1,
KEY_B2 : VALUE_B2,
}
If this would result in any repeated keys, it will raise an Exception.
"""
output_dict = {}
for key, subdict in input_dict.iteritems():
for subdict_key, subdict_value in subdict.iteritems():
if subdict_key in output_dict:
raise Exception('duplicate key %s in combine_subdicts' % subdict_key)
output_dict[subdict_key] = subdict_value
return output_dict
@staticmethod
def get_multilevel(input_dict, *keys):
""" Returns input_dict[key1][key2][...], or None if any key is not found.
"""
for key in keys:
if input_dict == None:
return None
input_dict = input_dict.get(key, None)
return input_dict
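# Illustrative calls to the two static helpers above:
#
#     BaseComparisons.combine_subdicts({'A': {'x': 1}, 'B': {'y': 2}})
#     # -> {'x': 1, 'y': 2}
#     BaseComparisons.get_multilevel({'a': {'b': {'c': 3}}}, 'a', 'b', 'c')
#     # -> 3
#     BaseComparisons.get_multilevel({'a': {}}, 'a', 'missing')
#     # -> None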
| bsd-3-clause |
bikashgupta11/javarobot | src/main/resources/jython/Lib/selenium/webdriver/support/abstract_event_listener.py | 61 | 2033 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class AbstractEventListener(object):
"""
Event listener must subclass and implement this fully or partially
"""
def before_navigate_to(self, url, driver):
pass
def after_navigate_to(self, url, driver):
pass
def before_navigate_back(self, driver):
pass
def after_navigate_back(self, driver):
pass
def before_navigate_forward(self, driver):
pass
def after_navigate_forward(self, driver):
pass
def before_find(self, by, value, driver):
pass
def after_find(self, by, value, driver):
pass
def before_click(self, element, driver):
pass
def after_click(self, element, driver):
pass
def before_change_value_of(self, element, driver):
pass
def after_change_value_of(self, element, driver):
pass
def before_execute_script(self, script, driver):
pass
def after_execute_script(self, script, driver):
pass
def before_close(self, driver):
pass
def after_close(self, driver):
pass
def before_quit(self, driver):
pass
def after_quit(self, driver):
pass
def on_exception(self, exception, driver):
pass
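# A minimal sketch of a concrete listener (illustrative; how it gets attached,
# e.g. via an event-firing driver wrapper, is outside the scope of this file):
#
#     class LoggingListener(AbstractEventListener):
#         def before_click(self, element, driver):
#             print("about to click %s" % element)
#         def on_exception(self, exception, driver):
#             print("webdriver error: %s" % exception)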
| gpl-3.0 |
fastflo/emma | emmalib/dialogs/ExecuteQueryFromDisk.py | 1 | 11103 | """
Execute Query From Disk Dialog
"""
import gc
import bz2
import time
import datetime
from stat import *
import gtk
import gtk.glade
from emmalib.Query import *
from emmalib.dialogs import *
class ExecuteQueryFromDisk:
"""
@param emma: Emma
"""
def __init__(self, emma):
self.emma = emma
#
# PATHS
#
self.path = os.path.dirname(os.path.abspath(__file__))
self.glade_file = os.path.join(self.path, "ExecuteQueryFromDisk.glade")
#
# UI
#
self.glade = None
self.window = None
#
# Commons
#
        self.created_once = {}  # widget cache, keyed by name (see assign_once)
self.using_compression = False
self.last_query_line = ""
self.query_from_disk = False
self.read_one_query_started = False
def show(self):
"""
Show dialog
"""
self.glade = gtk.glade.XML(self.glade_file)
self.glade.signal_autoconnect(self)
self.window = self.glade.get_widget('execute_query_from_disk1')
        self.window.connect('destroy', lambda *args: self.hide())
self.window.show()
def hide(self):
"""
Hide dialog
"""
self.window.hide()
def on_start_execute_from_disk_clicked(self, _):
"""
:param _: gtk.Button
:return:
"""
host = self.emma.connections_tv.current_host
fc = self.glade.get_widget("eqfd_file_chooser")
exclude = self.glade.get_widget("eqfd_exclude").get_active()
exclude_regex = self.glade.get_widget("eqfd_exclude_entry").get_text()
exclude = exclude and exclude_regex
if exclude:
try:
exclude_regex = re.compile(exclude_regex, re.DOTALL)
except:
show_message(
"execute query from disk",
"error compiling your regular expression: %s" % sys.exc_value
)
return
filename = fc.get_filename()
try:
sbuf = os.stat(filename)
except:
show_message("execute query from disk", "%s does not exists!" % filename)
return
if not S_ISREG(sbuf.st_mode):
show_message(
"execute query from disk",
"%s exists, but is not a regular file!" % filename
)
return
size = sbuf.st_size
try:
fp = bz2.BZ2File(filename, "r", 1024 * 8)
self.last_query_line = fp.readline()
self.using_compression = True
except:
self.using_compression = False
fp = None
if fp is None:
try:
fp = file(filename, "rb")
self.last_query_line = fp.readline()
except:
show_message(
"execute query from disk",
"error opening query from file %s: %s" % (filename, sys.exc_value))
return
self.window.hide()
start_line = self.glade.get_widget("eqfd_start_line").get_value()
if start_line < 1:
start_line = 1
ui = self.glade.get_widget("eqfd_update_interval")
update_interval = ui.get_value()
if update_interval == 0:
update_interval = 2
p = self.glade.get_widget("execute_from_disk_progress")
pb = self.glade.get_widget("exec_progress")
offset_entry = self.glade.get_widget("edfq_offset")
line_entry = self.glade.get_widget("eqfd_line")
query_entry = self.glade.get_widget("eqfd_query")
eta_label = self.glade.get_widget("eqfd_eta")
append_to_log = self.glade.get_widget("eqfd_append_to_log").get_active()
stop_on_error = self.glade.get_widget("eqfd_stop_on_error").get_active()
limit_dbname = self.glade.get_widget("eqfd_db_entry").get_text()
limit_db = self.glade.get_widget("eqfd_limit_db").get_active() and limit_dbname != ""
if limit_db:
limit_re = re.compile(
"(?is)^use[ \r\n\t]+`?" + re.escape(limit_dbname) + "`?|^create database[^`]+`?" +
re.escape(limit_dbname) + "`?")
limit_end_re = re.compile("(?is)^use[ \r\n\t]+`?.*`?|^create database")
# last = 0
_start = time.time()
def update_ui(force=False, offset=0):
"""
:param force: bool
:param offset: int
:return:
"""
global last_update
now = time.time()
if not force and now - last_update < update_interval:
return
last_update = now
pos = offset
f = float(pos) / float(size)
expired = now - _start
if not self.using_compression and expired > 10:
sr = float(expired) / float(pos) * float(size - pos)
remaining = " (%.0fs remaining)" % sr
eta_label.set_text("eta: %-19.19s" % datetime.datetime.fromtimestamp(now + sr))
else:
remaining = ""
query_entry.set_text(query[0:512])
offset_entry.set_text("%d" % pos)
line_entry.set_text("%d" % current_line)
if f > 1.0:
f = 1.0
pb.set_fraction(f)
pb_text = "%.2f%%%s" % (f * 100.0, remaining)
pb.set_text(pb_text)
self.emma.process_events()
new_line = 1
current_line = _start
query = ""
p.show()
while time.time() - _start < 0.10:
update_ui(True)
self.query_from_disk = True
line_offset = 0
found_db = False
while self.query_from_disk:
current_line = new_line
query, line_offset, new_line = self.read_one_query(
fp, line_offset, current_line, update_ui, limit_db and not found_db, start_line)
if current_line < start_line:
current_line = start_line
if query is None:
break
if limit_db:
if not found_db:
first = query.lstrip("\r\n\t ")[0:15].lower()
if (first[0:3] == "use" or first == "create database") and \
limit_re.search(query):
found_db = True
else:
if limit_end_re.search(query) and not limit_re.search(query):
found_db = False
update_ui(False, fp.tell())
if not limit_db or found_db:
if exclude and exclude_regex.match(query):
# print "skipping query %r" % query[0:80]
pass
elif not host.query(query, True, append_to_log) and stop_on_error:
show_message(
"execute query from disk",
"an error occoured. maybe remind the line number "
"and press cancel to close this dialog!"
)
self.query_from_disk = False
break
query = ""
update_ui(True, fp.tell())
fp.close()
if not self.query_from_disk:
show_message("execute query from disk",
"aborted by user whish - click cancel again to close window")
return
else:
show_message("execute query from disk", "done!")
p.hide()
def on_cancel_execute_from_disk_clicked(self, _):
"""
:param _: gtk.Button
:return:
"""
if not self.query_from_disk:
p = self.assign_once(
"execute_from_disk_progress",
self.glade.get_widget,
"execute_from_disk_progress"
)
p.hide()
return
self.read_one_query_started = False
self.query_from_disk = False
def on_eqfd_exclude_toggled(self, button):
"""
:param button: gtk.Button
"""
entry = self.glade.get_widget("eqfd_exclude_entry")
entry.set_sensitive(button.get_active())
def on_eqfd_limit_db_toggled(self, button):
"""
:param button: gtk.Button
"""
entry = self.glade.get_widget("eqfd_db_entry")
entry.set_sensitive(button.get_active())
def on_abort_execute_from_disk_clicked(self, _):
"""
:param _: gtk.Button
"""
self.window.hide()
def read_one_query(
self, fp,
_start=None, count_lines=0,
update_function=None, only_use_queries=False,
start_line=1):
"""
:param fp:
:param _start:
:param count_lines: int
:param update_function:
:param only_use_queries: bool
:param start_line: int
:return:
"""
current_query = []
self.read_one_query_started = True
while self.read_one_query_started:
gc.collect()
if _start is None:
while 1:
line = fp.readline()
if line == "":
if len(current_query) > 0:
return ' '.join(current_query), _start, count_lines
return None, _start, count_lines
if count_lines is not None:
count_lines += 1
if update_function is not None:
lb = fp.tell() - len(line)
update_function(False, lb)
if count_lines is not None and count_lines <= start_line:
# print count_lines
continue
first = line.lstrip("\r\n\t ")[0:15].lower()
if only_use_queries and first[0:3] != "use" and first != "create database":
continue
if line.lstrip(" \t")[0:2] != "--":
break
# print "skipping line", [line]
self.last_query_line = line
_start = 0
else:
line = self.last_query_line
_start, end = read_query(line, _start)
_next = line[end:end + 1]
# print "next: '%s'" % next
if _start is not None:
# print "append query", [line[start:end]]
current_query.append(line[_start:end])
if _next == ";":
return ''.join(current_query), end + 1, count_lines
_start = None
return None, None, None
def assign_once(self, name, creator, *args):
"""
:param name: str
:param creator: str
:param args: []
:return:
"""
try:
return self.created_once[name]
except:
obj = creator(*args)
self.created_once[name] = obj
return obj
if __name__ == '__main__':
instance = ExecuteQueryFromDisk(None)
gtk.main()
| gpl-2.0 |
163gal/Time-Line | specs/CategorySorter.py | 2 | 1614 | # Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import unittest
from timelinelib.db.objects.category import sort_categories
from timelinelib.db.objects import Category
class CategorySorter(unittest.TestCase):
def test_sorts_categories_by_name(self):
self.sort([
self.category_named("b"),
self.category_named("a")])
self.assert_sorted_in_order(["a", "b"])
def test_ignores_case(self):
self.sort([
self.category_named("Foo"),
self.category_named("bar")])
self.assert_sorted_in_order(["bar", "Foo"])
def sort(self, categories):
self.sorted_categories = sort_categories(categories)
def category_named(self, name):
return Category(name, (0, 0, 0), None, True)
def assert_sorted_in_order(self, names):
self.assertEquals(
names,
[category.name for category in self.sorted_categories])
| gpl-3.0 |
senior-zero/metanet | metanet/datasets/xor.py | 1 | 2138 | """
Copyright (C) 2015 Evtushenko Georgy
Authors: Evtushenko Georgy
This file is part of MetaNet.
MetaNet is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MetaNet is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with MetaNet. If not, see <http://www.gnu.org/licenses/>.
(This file is part of MetaNet.
MetaNet is free software: you may redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
MetaNet is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see
<http://www.gnu.org/licenses/>.)
"""
__author__ = 'Evtushenko Georgy'
def get_xor():
return [[0.0, 0.0], [1.0, 0.0], [0.0, 1.0], [1.0, 1.0]], [[0.0], [1.0], [1.0], [0.0]] | gpl-3.0 |
artas360/pythran | pythran/transformations/remove_nested_functions.py | 1 | 3162 | """ RemoveNestedFunctions turns nested function into top-level functions. """
from pythran.analyses import GlobalDeclarations, ImportedIds
from pythran.passmanager import Transformation
from pythran.tables import MODULES
import ast
class _NestedFunctionRemover(Transformation):
def __init__(self, pm, ctx):
Transformation.__init__(self)
self.ctx = ctx
self.passmanager = pm
self.global_declarations = pm.gather(GlobalDeclarations, ctx.module)
def visit_FunctionDef(self, node):
if MODULES['functools'] not in self.global_declarations.values():
import_ = ast.Import([ast.alias('functools', None)])
self.ctx.module.body.insert(0, import_)
self.global_declarations['functools'] = MODULES['functools']
self.ctx.module.body.append(node)
former_name = node.name
new_name = "pythran_{0}".format(former_name)
ii = self.passmanager.gather(ImportedIds, node, self.ctx)
binded_args = [ast.Name(iin, ast.Load()) for iin in sorted(ii)]
node.args.args = ([ast.Name(iin, ast.Param()) for iin in sorted(ii)] +
node.args.args)
class Renamer(ast.NodeTransformer):
def visit_Call(self, node):
self.generic_visit(node)
if (isinstance(node.func, ast.Name) and
node.func.id == former_name):
node.func.id = new_name
node.args = (
[ast.Name(iin, ast.Load()) for iin in sorted(ii)] +
node.args
)
return node
Renamer().visit(node)
node.name = new_name
proxy_call = ast.Name(new_name, ast.Load())
new_node = ast.Assign(
[ast.Name(former_name, ast.Store())],
ast.Call(
ast.Attribute(
ast.Name('functools', ast.Load()),
"partial",
ast.Load()
),
[proxy_call] + binded_args,
[],
None,
None
)
)
self.generic_visit(node)
return new_node
class RemoveNestedFunctions(Transformation):
"""
 Replace nested functions with top-level functions.
 Also add a call to a bind intrinsic that
 generates a local function with some arguments bound.
>>> import ast
>>> from pythran import passmanager, backend
>>> node = ast.parse("def foo(x):\\n def bar(y): return x+y\\n bar(12)")
>>> pm = passmanager.PassManager("test")
>>> _, node = pm.apply(RemoveNestedFunctions, node)
>>> print pm.dump(backend.Python, node)
import functools
def foo(x):
bar = functools.partial(pythran_bar, x)
bar(12)
def pythran_bar(x, y):
return (x + y)
"""
def visit_Module(self, node):
map(self.visit, node.body)
return node
def visit_FunctionDef(self, node):
nfr = _NestedFunctionRemover(self.passmanager, self.ctx)
node.body = map(nfr.visit, node.body)
return node
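# Hand-written sketch (not part of the pass itself) of the rewrite performed
# above: a nested closure over `x` becomes a top-level function plus a
# functools.partial that re-binds the captured variable. The names below are
# invented for the demo and do not appear in pythran.
def _pythran_bar_demo(x, y):
    return x + y
def _demo_foo(x):
    import functools
    bar = functools.partial(_pythran_bar_demo, x)
    return bar(12)
# _demo_foo(30) == 42, the same result the original nested definition gives.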
| bsd-3-clause |
nutsboard/linux-am335x | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 1891 | 3300 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm, common_callchain):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
self.callchain = common_callchain
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
redmi/android_kernel_HM2014811 | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, ret):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
if ret < 0:
try:
syscalls[common_comm][common_pid][id][ret] += 1
except TypeError:
syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
if for_comm is not None:
print "\nsyscall errors for %s:\n\n" % (for_comm),
else:
print "\nsyscall errors:\n\n",
print "%-30s %10s\n" % ("comm [pid]", "count"),
print "%-30s %10s\n" % ("------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id in id_keys:
print " syscall: %-16s\n" % syscall_name(id),
ret_keys = syscalls[comm][pid][id].keys()
for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k), reverse = True):
print " err = %-20s %10d\n" % (strerror(ret), val),
| gpl-2.0 |
googleapis/googleapis-gen | google/cloud/talent/v4beta1/talent-v4beta1-py/google/cloud/talent_v4beta1/services/tenant_service/pagers.py | 1 | 5775 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import Any, AsyncIterable, Awaitable, Callable, Iterable, Sequence, Tuple, Optional
from google.cloud.talent_v4beta1.types import tenant
from google.cloud.talent_v4beta1.types import tenant_service
class ListTenantsPager:
"""A pager for iterating through ``list_tenants`` requests.
This class thinly wraps an initial
:class:`google.cloud.talent_v4beta1.types.ListTenantsResponse` object, and
provides an ``__iter__`` method to iterate through its
``tenants`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListTenants`` requests and continue to iterate
through the ``tenants`` field on the
corresponding responses.
All the usual :class:`google.cloud.talent_v4beta1.types.ListTenantsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., tenant_service.ListTenantsResponse],
request: tenant_service.ListTenantsRequest,
response: tenant_service.ListTenantsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.talent_v4beta1.types.ListTenantsRequest):
The initial request object.
response (google.cloud.talent_v4beta1.types.ListTenantsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = tenant_service.ListTenantsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterable[tenant_service.ListTenantsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterable[tenant.Tenant]:
for page in self.pages:
yield from page.tenants
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
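# Illustrative sketch only (not generated code): how the synchronous pager is
# normally consumed. The `client` and `parent` values are assumptions for the
# example; TenantServiceClient.list_tenants() is expected to build this pager,
# and iterating it may issue further ListTenants requests behind the scenes.
def _example_collect_tenant_names(client, parent):
    pager = client.list_tenants(parent=parent)
    # Each iteration yields a Tenant message, transparently crossing pages.
    return [t.name for t in pager]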
class ListTenantsAsyncPager:
"""A pager for iterating through ``list_tenants`` requests.
This class thinly wraps an initial
:class:`google.cloud.talent_v4beta1.types.ListTenantsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``tenants`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListTenants`` requests and continue to iterate
through the ``tenants`` field on the
corresponding responses.
All the usual :class:`google.cloud.talent_v4beta1.types.ListTenantsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(self,
method: Callable[..., Awaitable[tenant_service.ListTenantsResponse]],
request: tenant_service.ListTenantsRequest,
response: tenant_service.ListTenantsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.talent_v4beta1.types.ListTenantsRequest):
The initial request object.
response (google.cloud.talent_v4beta1.types.ListTenantsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = tenant_service.ListTenantsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterable[tenant_service.ListTenantsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterable[tenant.Tenant]:
async def async_generator():
async for page in self.pages:
for response in page.tenants:
yield response
return async_generator()
def __repr__(self) -> str:
return '{0}<{1!r}>'.format(self.__class__.__name__, self._response)
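# Companion sketch for the async pager (again an assumption-laden example, not
# generated code): TenantServiceAsyncClient.list_tenants() is awaited to get
# the pager, and `async for` walks tenants across pages.
async def _example_collect_tenant_names_async(client, parent):
    pager = await client.list_tenants(parent=parent)
    return [t.name async for t in pager]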
| apache-2.0 |
bjolivot/ansible | lib/ansible/executor/playbook_executor.py | 57 | 12811 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible import constants as C
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.module_utils._text import to_native, to_text
from ansible.playbook import Playbook
from ansible.template import Templar
from ansible.utils.helpers import pct_to_int
from ansible.utils.path import makedirs_safe
from ansible.utils.ssh_functions import check_for_controlpersist
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class PlaybookExecutor:
'''
This is the primary class for executing playbooks, and thus the
basis for bin/ansible-playbook operation.
'''
def __init__(self, playbooks, inventory, variable_manager, loader, options, passwords):
self._playbooks = playbooks
self._inventory = inventory
self._variable_manager = variable_manager
self._loader = loader
self._options = options
self.passwords = passwords
self._unreachable_hosts = dict()
if options.listhosts or options.listtasks or options.listtags or options.syntax:
self._tqm = None
else:
self._tqm = TaskQueueManager(inventory=inventory, variable_manager=variable_manager, loader=loader, options=options, passwords=self.passwords)
# Note: We run this here to cache whether the default ansible ssh
# executable supports control persist. Sometime in the future we may
# need to enhance this to check that ansible_ssh_executable specified
# in inventory is also cached. We can't do this caching at the point
# where it is used (in task_executor) because that is post-fork and
# therefore would be discarded after every task.
check_for_controlpersist(C.ANSIBLE_SSH_EXECUTABLE)
def run(self):
'''
Run the given playbook, based on the settings in the play which
may limit the runs to serialized groups, etc.
'''
result = 0
entrylist = []
entry = {}
try:
for playbook_path in self._playbooks:
pb = Playbook.load(playbook_path, variable_manager=self._variable_manager, loader=self._loader)
self._inventory.set_playbook_basedir(os.path.realpath(os.path.dirname(playbook_path)))
if self._tqm is None: # we are doing a listing
entry = {'playbook': playbook_path}
entry['plays'] = []
else:
# make sure the tqm has callbacks loaded
self._tqm.load_callbacks()
self._tqm.send_callback('v2_playbook_on_start', pb)
i = 1
plays = pb.get_plays()
display.vv(u'%d plays in %s' % (len(plays), to_text(playbook_path)))
for play in plays:
if play._included_path is not None:
self._loader.set_basedir(play._included_path)
else:
self._loader.set_basedir(pb._basedir)
# clear any filters which may have been applied to the inventory
self._inventory.remove_restriction()
if play.vars_prompt:
for var in play.vars_prompt:
vname = var['name']
prompt = var.get("prompt", vname)
default = var.get("default", None)
private = var.get("private", True)
confirm = var.get("confirm", False)
encrypt = var.get("encrypt", None)
salt_size = var.get("salt_size", None)
salt = var.get("salt", None)
if vname not in self._variable_manager.extra_vars:
if self._tqm:
self._tqm.send_callback('v2_playbook_on_vars_prompt', vname, private, prompt, encrypt, confirm, salt_size, salt, default)
play.vars[vname] = display.do_var_prompt(vname, private, prompt, encrypt, confirm, salt_size, salt, default)
else: # we are either in --list-<option> or syntax check
play.vars[vname] = default
# Create a temporary copy of the play here, so we can run post_validate
# on it without the templating changes affecting the original object.
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
if self._options.syntax:
continue
if self._tqm is None:
# we are just doing a listing
entry['plays'].append(new_play)
else:
self._tqm._unreachable_hosts.update(self._unreachable_hosts)
previously_failed = len(self._tqm._failed_hosts)
previously_unreachable = len(self._tqm._unreachable_hosts)
break_play = False
# we are actually running plays
batches = self._get_serialized_batches(new_play)
if len(batches) == 0:
self._tqm.send_callback('v2_playbook_on_play_start', new_play)
self._tqm.send_callback('v2_playbook_on_no_hosts_matched')
for batch in batches:
# restrict the inventory to the hosts in the serialized batch
self._inventory.restrict_to_hosts(batch)
# and run it...
result = self._tqm.run(play=play)
# break the play if the result equals the special return code
if result & self._tqm.RUN_FAILED_BREAK_PLAY != 0:
result = self._tqm.RUN_FAILED_HOSTS
break_play = True
# check the number of failures here, to see if they're above the maximum
# failure percentage allowed, or if any errors are fatal. If either of those
# conditions are met, we break out, otherwise we only break out if the entire
# batch failed
failed_hosts_count = len(self._tqm._failed_hosts) + len(self._tqm._unreachable_hosts) - \
(previously_failed + previously_unreachable)
if len(batch) == failed_hosts_count:
break_play = True
break
# update the previous counts so they don't accumulate incorrectly
# over multiple serial batches
previously_failed += len(self._tqm._failed_hosts) - previously_failed
previously_unreachable += len(self._tqm._unreachable_hosts) - previously_unreachable
# save the unreachable hosts from this batch
self._unreachable_hosts.update(self._tqm._unreachable_hosts)
if break_play:
break
i = i + 1 # per play
if entry:
entrylist.append(entry) # per playbook
# send the stats callback for this playbook
if self._tqm is not None:
if C.RETRY_FILES_ENABLED:
retries = set(self._tqm._failed_hosts.keys())
retries.update(self._tqm._unreachable_hosts.keys())
retries = sorted(retries)
if len(retries) > 0:
if C.RETRY_FILES_SAVE_PATH:
basedir = C.shell_expand(C.RETRY_FILES_SAVE_PATH)
elif playbook_path:
basedir = os.path.dirname(os.path.abspath(playbook_path))
else:
basedir = '~/'
(retry_name, _) = os.path.splitext(os.path.basename(playbook_path))
filename = os.path.join(basedir, "%s.retry" % retry_name)
if self._generate_retry_inventory(filename, retries):
display.display("\tto retry, use: --limit @%s\n" % filename)
self._tqm.send_callback('v2_playbook_on_stats', self._tqm._stats)
# if the last result wasn't zero, break out of the playbook file name loop
if result != 0:
break
if entrylist:
return entrylist
finally:
if self._tqm is not None:
self._tqm.cleanup()
if self._loader:
self._loader.cleanup_all_tmp_files()
if self._options.syntax:
display.display("No issues encountered")
return result
return result
def _get_serialized_batches(self, play):
'''
Returns a list of hosts, subdivided into batches based on
the serial size specified in the play.
'''
# make sure we have a unique list of hosts
all_hosts = self._inventory.get_hosts(play.hosts)
all_hosts_len = len(all_hosts)
# the serial value can be listed as a scalar or a list of
# scalars, so we make sure it's a list here
serial_batch_list = play.serial
if len(serial_batch_list) == 0:
serial_batch_list = [-1]
cur_item = 0
serialized_batches = []
while len(all_hosts) > 0:
# get the serial value from current item in the list
serial = pct_to_int(serial_batch_list[cur_item], all_hosts_len)
# if the serial count was not specified or is invalid, default to
# a list of all hosts, otherwise grab a chunk of the hosts equal
# to the current serial item size
if serial <= 0:
serialized_batches.append(all_hosts)
break
else:
play_hosts = []
for x in range(serial):
if len(all_hosts) > 0:
play_hosts.append(all_hosts.pop(0))
serialized_batches.append(play_hosts)
# increment the current batch list item number, and if we've hit
# the end keep using the last element until we've consumed all of
# the hosts in the inventory
cur_item += 1
if cur_item > len(serial_batch_list) - 1:
cur_item = len(serial_batch_list) - 1
return serialized_batches
def _generate_retry_inventory(self, retry_path, replay_hosts):
'''
Called when a playbook run fails. It generates an inventory which allows
re-running on ONLY the failed hosts. This may duplicate some variable
information in group_vars/host_vars but that is ok, and expected.
'''
try:
makedirs_safe(os.path.dirname(retry_path))
with open(retry_path, 'w') as fd:
for x in replay_hosts:
fd.write("%s\n" % x)
except Exception as e:
display.warning("Could not create retry file '%s'.\n\t%s" % (retry_path, to_native(e)))
return False
return True
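# Standalone illustration (not used by PlaybookExecutor) of the batching rule
# implemented in _get_serialized_batches() above, with the serial sizes already
# resolved to plain ints (i.e. after pct_to_int): the last size is reused until
# every host has been placed in a batch.
def _example_serial_batches(hosts, serial_sizes):
    hosts = list(hosts)
    batches = []
    cur = 0
    while hosts:
        size = serial_sizes[min(cur, len(serial_sizes) - 1)]
        if size <= 0:
            batches.append(hosts)
            break
        batches.append(hosts[:size])
        hosts = hosts[size:]
        cur += 1
    return batches
# e.g. _example_serial_batches(['h%d' % n for n in range(10)], [1, 3, 5])
# yields batches of sizes [1, 3, 5, 1], mirroring the loop above.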
| gpl-3.0 |
blockstack/blockstack-server | integration_tests/blockstack_integration_tests/scenarios/name_pre_regup_subdomain_seq_xfer_pending.py | 1 | 12369 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of Blockstack
Blockstack is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Blockstack. If not, see <http://www.gnu.org/licenses/>.
"""
# activate F-day 2017
"""
TEST ENV BLOCKSTACK_EPOCH_1_END_BLOCK 682
TEST ENV BLOCKSTACK_EPOCH_2_END_BLOCK 683
TEST ENV BLOCKSTACK_EPOCH_2_NAMESPACE_LIFETIME_MULTIPLIER 1
"""
import testlib
import virtualchain
import json
import blockstack
import blockstack.lib.subdomains as subdomains
import blockstack.lib.storage as storage
import blockstack.lib.client as client
import blockstack_zones
import base64
wallets = [
testlib.Wallet( "5JesPiN68qt44Hc2nT8qmyZ1JDwHebfoh9KQ52Lazb1m1LaKNj9", 100000000000 ),
testlib.Wallet( "5KHqsiU9qa77frZb6hQy9ocV7Sus9RWJcQGYYBJJBb2Efj1o77e", 100000000000 ),
testlib.Wallet( "5Kg5kJbQHvk1B64rJniEmgbD83FpZpbw2RjdAZEzTefs9ihN3Bz", 100000000000 ),
testlib.Wallet( "5JuVsoS9NauksSkqEjbUZxWwgGDQbMwPsEfoRBSpLpgDX1RtLX7", 100000000000 ),
testlib.Wallet( "5KEpiSRr1BrT8vRD7LKGCEmudokTh1iMHbiThMQpLdwBwhDJB1T", 100000000000 )
]
consensus = "17ac43c1d8549c3181b200f1bf97eb7d"
def scenario( wallets, **kw ):
testlib.blockstack_namespace_preorder( "test", wallets[1].addr, wallets[0].privkey )
testlib.next_block( **kw )
testlib.blockstack_namespace_reveal( "test", wallets[1].addr, 52595, 250, 4, [6,5,4,3,2,1,0,0,0,0,0,0,0,0,0,0], 10, 10, wallets[0].privkey )
testlib.next_block( **kw )
testlib.blockstack_namespace_ready( "test", wallets[1].privkey )
testlib.next_block( **kw )
testlib.blockstack_name_preorder( "foo1.test", wallets[2].privkey, wallets[3].addr )
testlib.blockstack_name_preorder( "foo2.test", wallets[2].privkey, wallets[3].addr )
testlib.blockstack_name_preorder( "foo3.test", wallets[2].privkey, wallets[3].addr )
testlib.blockstack_name_preorder( "foo4.test", wallets[2].privkey, wallets[3].addr )
testlib.blockstack_name_preorder( "foo5.test", wallets[2].privkey, wallets[3].addr )
testlib.blockstack_name_preorder( "foo6.test", wallets[2].privkey, wallets[3].addr )
testlib.blockstack_name_preorder( "foo7.test", wallets[2].privkey, wallets[3].addr )
testlib.next_block( **kw )
zf_template = "$ORIGIN {}\n$TTL 3600\n{}"
zf_default_url = '_https._tcp URI 10 1 "https://raw.githubusercontent.com/nobody/content/profile.md"'
zonefiles = {
'foo1.test': zf_template.format('foo1.test', subdomains.make_subdomain_txt('bar.foo1.test', 'foo1.test', wallets[4].addr, 0, zf_template.format('bar.foo1.test', zf_default_url), wallets[4].privkey)),
'foo2.test': zf_template.format('foo2.test', subdomains.make_subdomain_txt('bar.foo2.test', 'foo2.test', wallets[4].addr, 0, zf_template.format('bar.foo2.test', zf_default_url), wallets[4].privkey)),
'foo3.test': zf_template.format('foo3.test', subdomains.make_subdomain_txt('bar.foo3.test', 'foo3.test', wallets[4].addr, 0, zf_template.format('bar.foo3.test', zf_default_url), wallets[4].privkey)),
}
testlib.blockstack_name_register( "foo1.test", wallets[2].privkey, wallets[3].addr, zonefile_hash=storage.get_zonefile_data_hash(zonefiles['foo1.test']))
testlib.blockstack_name_register( "foo2.test", wallets[2].privkey, wallets[3].addr, zonefile_hash=storage.get_zonefile_data_hash(zonefiles['foo2.test']))
testlib.blockstack_name_register( "foo3.test", wallets[2].privkey, wallets[3].addr, zonefile_hash=storage.get_zonefile_data_hash(zonefiles['foo3.test']))
testlib.blockstack_name_register( "foo4.test", wallets[2].privkey, wallets[3].addr)
testlib.blockstack_name_register( "foo5.test", wallets[2].privkey, wallets[3].addr)
testlib.blockstack_name_register( "foo6.test", wallets[2].privkey, wallets[3].addr)
testlib.blockstack_name_register( "foo7.test", wallets[2].privkey, wallets[3].addr)
testlib.next_block( **kw )
assert testlib.blockstack_put_zonefile(zonefiles['foo1.test'])
assert testlib.blockstack_put_zonefile(zonefiles['foo2.test'])
assert testlib.blockstack_put_zonefile(zonefiles['foo3.test'])
# kick off indexing and check
testlib.next_block(**kw)
def _query_subdomains(subdomain_names, expected_sequence, expected_owner, expect_pending):
# query each subdomain. Should get the latest
for fqn in subdomain_names:
res = client.get_name_record(fqn, hostport='http://localhost:16264')
if 'error' in res:
print res
print 'failed to query {}'.format(fqn)
return False
# should have right sequence
if res['sequence'] != expected_sequence:
print 'wrong sequence; expected {}'.format(expected_sequence)
print res
return False
# should have right owner
if res['address'] != expected_owner:
print 'wrong owner'
print 'expected {}'.format(res['address'])
print res
return False
# do we expect pending?
if res['pending'] != expect_pending:
print 'wrong pending (expected {})'.format(expect_pending)
print res
return False
return True
assert _query_subdomains(['bar.foo1.test', 'bar.foo2.test', 'bar.foo3.test'], 0, wallets[4].addr, False)
expected_owners = [wallets[4].addr]
# update and transfer, but if i % 2 == 0, transfer to a different address
# use a different domain name in each case.
# verify that only transfers on the creator domain are valid.
wallet_schedule = [
(4, 0),
(0, 1),
(1, 2),
(2, 3),
]
expected_zf_default_url = '_https._tcp URI 10 1 "https://test.com/?index={}"'.format(4)
expect_pending = False
expect_sequence = 0
unsent_zonefiles = []
# send updates too, and transfer subdomains
for i in range(0, 4):
zf_template = "$ORIGIN {}\n$TTL 3600\n{}"
zf_default_url = '_https._tcp URI 10 1 "https://test.com/?index={}"'.format(i+1)
names = [
'foo1.test',
'foo2.test',
'foo3.test',
]
k = wallet_schedule[i][0]
k2 = wallet_schedule[i][1]
zonefiles = {
'foo1.test': zf_template.format(names[0], subdomains.make_subdomain_txt('bar.foo1.test', names[0], wallets[k2].addr, i+1, zf_template.format('bar.foo1.test', zf_default_url), wallets[k].privkey)),
'foo2.test': zf_template.format(names[1], subdomains.make_subdomain_txt('bar.foo2.test', names[1], wallets[k2].addr, i+1, zf_template.format('bar.foo2.test', zf_default_url), wallets[k].privkey)),
'foo3.test': zf_template.format(names[2], subdomains.make_subdomain_txt('bar.foo3.test', names[2], wallets[k2].addr, i+1, zf_template.format('bar.foo3.test', zf_default_url), wallets[k].privkey)),
}
testlib.blockstack_name_update(names[0], storage.get_zonefile_data_hash(zonefiles['foo1.test']), wallets[3].privkey)
testlib.blockstack_name_update(names[1], storage.get_zonefile_data_hash(zonefiles['foo2.test']), wallets[3].privkey)
testlib.blockstack_name_update(names[2], storage.get_zonefile_data_hash(zonefiles['foo3.test']), wallets[3].privkey)
testlib.next_block(**kw)
if i % 2 == 1:
# only broadcast periodically
assert testlib.blockstack_put_zonefile(zonefiles['foo1.test'])
assert testlib.blockstack_put_zonefile(zonefiles['foo2.test'])
assert testlib.blockstack_put_zonefile(zonefiles['foo3.test'])
else:
expect_pending = True
unsent_zonefiles.append(zonefiles)
# kick off subdomain indexing
testlib.next_block(**kw)
# verify history
assert _query_subdomains(['bar.foo1.test', 'bar.foo2.test', 'bar.foo3.test'], expect_sequence, wallets[4].addr, expect_pending)
expected_owners.append(wallets[k2].addr)
# query subdomain history
for subd in ['bar.foo1.test', 'bar.foo2.test', 'bar.foo3.test']:
res = client.get_name_record(subd, include_history=True, hostport='http://localhost:16264')
if 'error' in res:
print res
return False
if not res['pending']:
print 'not pending, but it should be'
print res
return False
# nothing should have been accepted after 0, since we didn't send the zone files for sequence=1
if res['sequence'] != 0:
print 'wrong sequence'
print res
return False
for i, block_height in enumerate(sorted(res['history'])):
if res['history'][block_height][0]['address'] != expected_owners[i]:
print 'wrong owner at {}: expected {}'.format(block_height, expected_owners[i])
print json.dumps(res, indent=4, sort_keys=True)
return False
if res['history'][block_height][0]['sequence'] != i:
print 'wrong sequence at {}: expected {}'.format(block_height, i)
print json.dumps(res, indent=4, sort_keys=True)
return False
# send all missing subdomains
for zfbatch in unsent_zonefiles:
for k in zfbatch:
assert testlib.blockstack_put_zonefile(zfbatch[k])
testlib.next_block(**kw)
# query subdomain history again. Only pending should change
for subd in ['bar.foo1.test', 'bar.foo2.test', 'bar.foo3.test']:
res = client.get_name_record(subd, include_history=True, hostport='http://localhost:16264')
if 'error' in res:
print res
return False
if res['pending']:
print 'pending, but it should not be'
print res
return False
if res['sequence'] != 4:
print 'wrong sequence'
print res
return False
for i, block_height in enumerate(sorted(res['history'])):
if res['history'][block_height][0]['address'] != expected_owners[i]:
print 'wrong owner at {}: expected {}'.format(block_height, expected_owners[i])
print json.dumps(res, indent=4, sort_keys=True)
return False
if res['history'][block_height][0]['sequence'] != i:
print 'wrong sequence at {}: expected {}'.format(block_height, i)
print json.dumps(res, indent=4, sort_keys=True)
return False
# reindex
assert testlib.check_subdomain_db(**kw)
def check( state_engine ):
# not revealed, but ready
ns = state_engine.get_namespace_reveal( "test" )
if ns is not None:
return False
ns = state_engine.get_namespace( "test" )
if ns is None:
return False
if ns['namespace_id'] != 'test':
return False
for i in xrange(1, 4):
name = 'foo{}.test'.format(i)
# not preordered
preorder = state_engine.get_name_preorder( name, virtualchain.make_payment_script(wallets[2].addr), wallets[3].addr )
if preorder is not None:
print 'still have preorder: {}'.format(preorder)
return False
# registered
name_rec = state_engine.get_name(name)
if name_rec is None:
print 'did not get name {}'.format(name)
return False
# owned by
if name_rec['address'] != wallets[3].addr or name_rec['sender'] != virtualchain.make_payment_script(wallets[3].addr):
print 'wrong address for {}: {}'.format(name, name_rec)
return False
return True
| gpl-3.0 |
caphrim007/ansible | lib/ansible/plugins/action/ce_config.py | 27 | 4239 | #
# Copyright 2015 Peter Sprygada <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.ce import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=to_text(exc))
result = super(ActionModule, self).run(tmp, task_vars)
del tmp # tmp no longer has any effect
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
# NOTE: If there is a parameter error, _backup key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
for key in list(result):
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
  with open(filename, 'w') as f:
   f.write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
  if os.path.isabs(src) or urlsplit(src).scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
  if hasattr(self._task, "_block"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
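# Illustrative helper (not used by the plugin): the backup naming scheme that
# _write_backup() applies, shown in isolation. The real method also removes any
# previous backups for the host and builds the path relative to the playbook.
def _example_backup_filename(host, backup_dir='backup'):
    tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
    return '%s/%s_config.%s' % (backup_dir, host, tstamp)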
| gpl-3.0 |
Codefans-fan/odoo | openerp/addons/base/module/wizard/base_export_language.py | 269 | 3648 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2004-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import contextlib
import cStringIO
from openerp import tools
from openerp.osv import fields,osv
from openerp.tools.translate import _
from openerp.tools.misc import get_iso_codes
NEW_LANG_KEY = '__new__'
class base_language_export(osv.osv_memory):
_name = "base.language.export"
def _get_languages(self, cr, uid, context):
lang_obj = self.pool.get('res.lang')
ids = lang_obj.search(cr, uid, [('translatable', '=', True)])
langs = lang_obj.browse(cr, uid, ids)
return [(NEW_LANG_KEY, _('New Language (Empty translation template)'))] + [(lang.code, lang.name) for lang in langs]
_columns = {
'name': fields.char('File Name', readonly=True),
'lang': fields.selection(_get_languages, 'Language', required=True),
'format': fields.selection([('csv','CSV File'),
('po','PO File'),
('tgz', 'TGZ Archive')], 'File Format', required=True),
'modules': fields.many2many('ir.module.module', 'rel_modules_langexport', 'wiz_id', 'module_id', 'Modules To Export', domain=[('state','=','installed')]),
'data': fields.binary('File', readonly=True),
'state': fields.selection([('choose', 'choose'), # choose language
('get', 'get')]) # get the file
}
_defaults = {
'state': 'choose',
'lang': NEW_LANG_KEY,
'format': 'csv',
}
def act_getfile(self, cr, uid, ids, context=None):
this = self.browse(cr, uid, ids, context=context)[0]
lang = this.lang if this.lang != NEW_LANG_KEY else False
mods = sorted(map(lambda m: m.name, this.modules)) or ['all']
with contextlib.closing(cStringIO.StringIO()) as buf:
tools.trans_export(lang, mods, buf, this.format, cr)
out = base64.encodestring(buf.getvalue())
filename = 'new'
if lang:
filename = get_iso_codes(lang)
elif len(mods) == 1:
filename = mods[0]
extension = this.format
if not lang and extension == 'po':
extension = 'pot'
name = "%s.%s" % (filename, extension)
this.write({ 'state': 'get', 'data': out, 'name': name })
return {
'type': 'ir.actions.act_window',
'res_model': 'base.language.export',
'view_mode': 'form',
'view_type': 'form',
'res_id': this.id,
'views': [(False, 'form')],
'target': 'new',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mpdehaan/ansible | lib/ansible/module_utils/facts.py | 4 | 103392 | # (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import stat
import array
import errno
import fcntl
import fnmatch
import glob
import platform
import re
import signal
import socket
import struct
import datetime
import getpass
import ConfigParser
import StringIO
import shlex
import time
from string import maketrans
try:
import selinux
HAVE_SELINUX=True
except ImportError:
HAVE_SELINUX=False
try:
import json
except ImportError:
import simplejson as json
# --------------------------------------------------------------
# timeout function to make sure some fact gathering
# steps do not exceed a time limit
class TimeoutError(Exception):
pass
def timeout(seconds=10, error_message="Timer expired"):
def decorator(func):
def _handle_timeout(signum, frame):
raise TimeoutError(error_message)
def wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return wrapper
return decorator
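# Hedged sketch (not invoked by Ansible) of how the decorator above is meant to
# be used: a gathering step wrapped with @timeout() raises TimeoutError once
# SIGALRM fires, which callers such as LinuxHardware.populate() treat as
# "skip these facts". Main-thread / POSIX only, like the decorator itself.
def _timeout_example():
    @timeout(seconds=1, error_message="example step timed out")
    def _slow_step():
        signal.pause()   # block until the alarm interrupts us
    try:
        _slow_step()
    except TimeoutError, e:
        return str(e)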
# --------------------------------------------------------------
class Facts(object):
"""
This class should only attempt to populate those facts that
are mostly generic to all systems. This includes platform facts,
service facts (e.g. ssh keys or selinux), and distribution facts.
Anything that requires extensive code or may have more than one
possible implementation to establish facts for a given topic should
subclass Facts.
"""
_I386RE = re.compile(r'i[3456]86')
# For the most part, we assume that platform.dist() will tell the truth.
# This is the fallback to handle unknowns or exceptions
OSDIST_LIST = ( ('/etc/redhat-release', 'RedHat'),
('/etc/vmware-release', 'VMwareESX'),
('/etc/openwrt_release', 'OpenWrt'),
('/etc/system-release', 'OtherLinux'),
('/etc/alpine-release', 'Alpine'),
('/etc/release', 'Solaris'),
('/etc/arch-release', 'Archlinux'),
('/etc/SuSE-release', 'SuSE'),
('/etc/os-release', 'SuSE'),
('/etc/gentoo-release', 'Gentoo'),
('/etc/os-release', 'Debian'),
('/etc/lsb-release', 'Mandriva') )
SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
# A list of dicts. If there is a platform with more than one
# package manager, put the preferred one last. If there is an
# ansible module, use that as the value for the 'name' key.
PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
{ 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
{ 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
{ 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
{ 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
{ 'path' : '/bin/opkg', 'name' : 'opkg' },
{ 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
{ 'path' : '/opt/local/bin/port', 'name' : 'macports' },
{ 'path' : '/sbin/apk', 'name' : 'apk' },
{ 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
{ 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
{ 'path' : '/usr/bin/emerge', 'name' : 'portage' },
{ 'path' : '/usr/sbin/pkgadd', 'name' : 'svr4pkg' },
{ 'path' : '/usr/bin/pkg', 'name' : 'pkg' },
]
def __init__(self):
self.facts = {}
self.get_platform_facts()
self.get_distribution_facts()
self.get_cmdline()
self.get_public_ssh_host_keys()
self.get_selinux_facts()
self.get_fips_facts()
self.get_pkg_mgr_facts()
self.get_lsb_facts()
self.get_date_time_facts()
self.get_user_facts()
self.get_local_facts()
self.get_env_facts()
def populate(self):
return self.facts
# Platform
# platform.system() can be Linux, Darwin, Java, or Windows
def get_platform_facts(self):
self.facts['system'] = platform.system()
self.facts['kernel'] = platform.release()
self.facts['machine'] = platform.machine()
self.facts['python_version'] = platform.python_version()
self.facts['fqdn'] = socket.getfqdn()
self.facts['hostname'] = platform.node().split('.')[0]
self.facts['nodename'] = platform.node()
self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
arch_bits = platform.architecture()[0]
self.facts['userspace_bits'] = arch_bits.replace('bit', '')
if self.facts['machine'] == 'x86_64':
self.facts['architecture'] = self.facts['machine']
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
elif Facts._I386RE.search(self.facts['machine']):
self.facts['architecture'] = 'i386'
if self.facts['userspace_bits'] == '64':
self.facts['userspace_architecture'] = 'x86_64'
elif self.facts['userspace_bits'] == '32':
self.facts['userspace_architecture'] = 'i386'
else:
self.facts['architecture'] = self.facts['machine']
if self.facts['system'] == 'Linux':
self.get_distribution_facts()
elif self.facts['system'] == 'AIX':
rc, out, err = module.run_command("/usr/sbin/bootinfo -p")
data = out.split('\n')
self.facts['architecture'] = data[0]
def get_local_facts(self):
fact_path = module.params.get('fact_path', None)
if not fact_path or not os.path.exists(fact_path):
return
local = {}
for fn in sorted(glob.glob(fact_path + '/*.fact')):
# where it will sit under local facts
fact_base = os.path.basename(fn).replace('.fact','')
if stat.S_IXUSR & os.stat(fn)[stat.ST_MODE]:
# run it
# try to read it as json first
# if that fails read it with ConfigParser
# if that fails, skip it
rc, out, err = module.run_command(fn)
else:
out = open(fn).read()
# load raw json
fact = 'loading %s' % fact_base
try:
fact = json.loads(out)
except ValueError, e:
# load raw ini
cp = ConfigParser.ConfigParser()
try:
cp.readfp(StringIO.StringIO(out))
except ConfigParser.Error, e:
fact="error loading fact - please check content"
else:
fact = {}
#print cp.sections()
for sect in cp.sections():
if sect not in fact:
fact[sect] = {}
for opt in cp.options(sect):
val = cp.get(sect, opt)
fact[sect][opt]=val
local[fact_base] = fact
if not local:
return
self.facts['local'] = local
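 # Example of a custom fact file this loader understands (illustrative only).
 # An executable or plain file such as /etc/ansible/facts.d/example.fact may
 # contain JSON:
 #     {"role": "web", "tier": 2}
 # or INI, which is exposed as nested dicts under ansible_local.example:
 #     [general]
 #     role = web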
# platform.dist() is deprecated in 2.6
# in 2.6 and newer, you should use platform.linux_distribution()
def get_distribution_facts(self):
# A list with OS Family members
OS_FAMILY = dict(
RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', SLES = 'Suse',
SLED = 'Suse', OpenSuSE = 'Suse', SuSE = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
Archlinux = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake',
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
FreeBSD = 'FreeBSD', HPUX = 'HP-UX'
)
# TODO: Rewrite this to use the function references in a dict pattern
# as it's much cleaner than this massive if-else
if self.facts['system'] == 'AIX':
self.facts['distribution'] = 'AIX'
rc, out, err = module.run_command("/usr/bin/oslevel")
data = out.split('.')
self.facts['distribution_version'] = data[0]
self.facts['distribution_release'] = data[1]
elif self.facts['system'] == 'HP-UX':
self.facts['distribution'] = 'HP-UX'
rc, out, err = module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'", use_unsafe_shell=True)
data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
if data:
self.facts['distribution_version'] = data.groups()[0]
self.facts['distribution_release'] = data.groups()[1]
elif self.facts['system'] == 'Darwin':
self.facts['distribution'] = 'MacOSX'
rc, out, err = module.run_command("/usr/bin/sw_vers -productVersion")
data = out.split()[-1]
self.facts['distribution_version'] = data
elif self.facts['system'] == 'FreeBSD':
self.facts['distribution'] = 'FreeBSD'
self.facts['distribution_release'] = platform.release()
self.facts['distribution_version'] = platform.version()
elif self.facts['system'] == 'OpenBSD':
self.facts['distribution'] = 'OpenBSD'
self.facts['distribution_release'] = platform.release()
rc, out, err = module.run_command("/sbin/sysctl -n kern.version")
match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
if match:
self.facts['distribution_version'] = match.groups()[0]
else:
self.facts['distribution_version'] = 'release'
else:
dist = platform.dist()
self.facts['distribution'] = dist[0].capitalize() or 'NA'
self.facts['distribution_version'] = dist[1] or 'NA'
self.facts['distribution_major_version'] = dist[1].split('.')[0] or 'NA'
self.facts['distribution_release'] = dist[2] or 'NA'
# Try to handle the exceptions now ...
for (path, name) in Facts.OSDIST_LIST:
if os.path.exists(path):
if os.path.getsize(path) > 0:
if self.facts['distribution'] in ('Fedora', ):
# Once we determine the value is one of these distros
# we trust the values are always correct
break
elif name == 'RedHat':
data = get_file_content(path)
if 'Red Hat' in data:
self.facts['distribution'] = name
else:
self.facts['distribution'] = data.split()[0]
break
elif name == 'OtherLinux':
data = get_file_content(path)
if 'Amazon' in data:
self.facts['distribution'] = 'Amazon'
self.facts['distribution_version'] = data.split()[-1]
break
elif name == 'OpenWrt':
data = get_file_content(path)
if 'OpenWrt' in data:
self.facts['distribution'] = name
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
break
elif name == 'Alpine':
data = get_file_content(path)
self.facts['distribution'] = name
self.facts['distribution_version'] = data
break
elif name == 'Solaris':
data = get_file_content(path).split('\n')[0]
if 'Solaris' in data:
ora_prefix = ''
if 'Oracle Solaris' in data:
data = data.replace('Oracle ','')
ora_prefix = 'Oracle '
self.facts['distribution'] = data.split()[0]
self.facts['distribution_version'] = data.split()[1]
self.facts['distribution_release'] = ora_prefix + data
break
elif name == 'SuSE':
data = get_file_content(path)
if 'suse' in data.lower():
if path == '/etc/os-release':
release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
distdata = get_file_content(path).split('\n')[0]
self.facts['distribution'] = distdata.split('=')[1]
if release:
self.facts['distribution_release'] = release.groups()[0]
break
elif path == '/etc/SuSE-release':
data = data.splitlines()
distdata = get_file_content(path).split('\n')[0]
self.facts['distribution'] = distdata.split()[0]
for line in data:
release = re.search('CODENAME *= *([^\n]+)', line)
if release:
self.facts['distribution_release'] = release.groups()[0].strip()
break
elif name == 'Debian':
data = get_file_content(path)
if 'Debian' in data:
release = re.search("PRETTY_NAME=[^(]+ \(?([^)]+?)\)", data)
if release:
self.facts['distribution_release'] = release.groups()[0]
break
elif name == 'Mandriva':
data = get_file_content(path)
if 'Mandriva' in data:
version = re.search('DISTRIB_RELEASE="(.*)"', data)
if version:
self.facts['distribution_version'] = version.groups()[0]
release = re.search('DISTRIB_CODENAME="(.*)"', data)
if release:
self.facts['distribution_release'] = release.groups()[0]
self.facts['distribution'] = name
break
else:
self.facts['distribution'] = name
self.facts['os_family'] = self.facts['distribution']
if self.facts['distribution'] in OS_FAMILY:
self.facts['os_family'] = OS_FAMILY[self.facts['distribution']]
def get_cmdline(self):
data = get_file_content('/proc/cmdline')
if data:
self.facts['cmdline'] = {}
try:
for piece in shlex.split(data):
item = piece.split('=', 1)
if len(item) == 1:
self.facts['cmdline'][item[0]] = True
else:
self.facts['cmdline'][item[0]] = item[1]
except ValueError, e:
pass
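 # Example (illustrative): a /proc/cmdline of "ro root=/dev/sda1 quiet" ends up
 # as {'ro': True, 'root': '/dev/sda1', 'quiet': True} in ansible_cmdline.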
def get_public_ssh_host_keys(self):
dsa_filename = '/etc/ssh/ssh_host_dsa_key.pub'
rsa_filename = '/etc/ssh/ssh_host_rsa_key.pub'
ecdsa_filename = '/etc/ssh/ssh_host_ecdsa_key.pub'
if self.facts['system'] == 'Darwin':
dsa_filename = '/etc/ssh_host_dsa_key.pub'
rsa_filename = '/etc/ssh_host_rsa_key.pub'
ecdsa_filename = '/etc/ssh_host_ecdsa_key.pub'
dsa = get_file_content(dsa_filename)
rsa = get_file_content(rsa_filename)
ecdsa = get_file_content(ecdsa_filename)
if dsa is None:
dsa = 'NA'
else:
self.facts['ssh_host_key_dsa_public'] = dsa.split()[1]
if rsa is None:
rsa = 'NA'
else:
self.facts['ssh_host_key_rsa_public'] = rsa.split()[1]
if ecdsa is None:
ecdsa = 'NA'
else:
self.facts['ssh_host_key_ecdsa_public'] = ecdsa.split()[1]
def get_pkg_mgr_facts(self):
self.facts['pkg_mgr'] = 'unknown'
for pkg in Facts.PKG_MGRS:
if os.path.exists(pkg['path']):
self.facts['pkg_mgr'] = pkg['name']
if self.facts['system'] == 'OpenBSD':
self.facts['pkg_mgr'] = 'openbsd_pkg'
def get_lsb_facts(self):
lsb_path = module.get_bin_path('lsb_release')
if lsb_path:
rc, out, err = module.run_command([lsb_path, "-a"])
if rc == 0:
self.facts['lsb'] = {}
for line in out.split('\n'):
if len(line) < 1:
continue
value = line.split(':', 1)[1].strip()
if 'LSB Version:' in line:
self.facts['lsb']['release'] = value
elif 'Distributor ID:' in line:
self.facts['lsb']['id'] = value
elif 'Description:' in line:
self.facts['lsb']['description'] = value
elif 'Release:' in line:
self.facts['lsb']['release'] = value
elif 'Codename:' in line:
self.facts['lsb']['codename'] = value
if 'lsb' in self.facts and 'release' in self.facts['lsb']:
self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
elif lsb_path is None and os.path.exists('/etc/lsb-release'):
self.facts['lsb'] = {}
f = open('/etc/lsb-release', 'r')
try:
for line in f.readlines():
value = line.split('=',1)[1].strip()
if 'DISTRIB_ID' in line:
self.facts['lsb']['id'] = value
elif 'DISTRIB_RELEASE' in line:
self.facts['lsb']['release'] = value
elif 'DISTRIB_DESCRIPTION' in line:
self.facts['lsb']['description'] = value
elif 'DISTRIB_CODENAME' in line:
self.facts['lsb']['codename'] = value
finally:
f.close()
else:
return self.facts
if 'lsb' in self.facts and 'release' in self.facts['lsb']:
self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
def get_selinux_facts(self):
if not HAVE_SELINUX:
self.facts['selinux'] = False
return
self.facts['selinux'] = {}
if not selinux.is_selinux_enabled():
self.facts['selinux']['status'] = 'disabled'
else:
self.facts['selinux']['status'] = 'enabled'
try:
self.facts['selinux']['policyvers'] = selinux.security_policyvers()
except OSError, e:
self.facts['selinux']['policyvers'] = 'unknown'
try:
(rc, configmode) = selinux.selinux_getenforcemode()
if rc == 0:
self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown')
else:
self.facts['selinux']['config_mode'] = 'unknown'
except OSError, e:
self.facts['selinux']['config_mode'] = 'unknown'
try:
mode = selinux.security_getenforce()
self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown')
except OSError, e:
self.facts['selinux']['mode'] = 'unknown'
try:
(rc, policytype) = selinux.selinux_getpolicytype()
if rc == 0:
self.facts['selinux']['type'] = policytype
else:
self.facts['selinux']['type'] = 'unknown'
except OSError, e:
self.facts['selinux']['type'] = 'unknown'
def get_fips_facts(self):
self.facts['fips'] = False
data = get_file_content('/proc/sys/crypto/fips_enabled')
if data and data == '1':
self.facts['fips'] = True
def get_date_time_facts(self):
self.facts['date_time'] = {}
now = datetime.datetime.now()
self.facts['date_time']['year'] = now.strftime('%Y')
self.facts['date_time']['month'] = now.strftime('%m')
self.facts['date_time']['weekday'] = now.strftime('%A')
self.facts['date_time']['day'] = now.strftime('%d')
self.facts['date_time']['hour'] = now.strftime('%H')
self.facts['date_time']['minute'] = now.strftime('%M')
self.facts['date_time']['second'] = now.strftime('%S')
self.facts['date_time']['epoch'] = now.strftime('%s')
if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%':
self.facts['date_time']['epoch'] = str(int(time.time()))
self.facts['date_time']['date'] = now.strftime('%Y-%m-%d')
self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
self.facts['date_time']['tz'] = time.strftime("%Z")
self.facts['date_time']['tz_offset'] = time.strftime("%z")
# User
def get_user_facts(self):
self.facts['user_id'] = getpass.getuser()
def get_env_facts(self):
self.facts['env'] = {}
for k,v in os.environ.iteritems():
self.facts['env'][k] = v
class Hardware(Facts):
"""
This is a generic Hardware subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this, it
should define:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
All subclasses MUST define platform.
"""
platform = 'Generic'
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in Hardware.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self):
Facts.__init__(self)
def populate(self):
return self.facts
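# Note on the dispatch pattern above: __new__ scans Hardware.__subclasses__() for a class whose
# 'platform' attribute equals platform.system(), so e.g. on a Linux host
#   hw = Hardware()        # actually constructs a LinuxHardware instance
# and the generic base class is only used when no platform-specific subclass matches.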
class LinuxHardware(Hardware):
"""
Linux-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
In addition, it also defines number of DMI facts and device facts.
"""
platform = 'Linux'
MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
for line in open("/proc/meminfo").readlines():
data = line.split(":", 1)
key = data[0]
if key in LinuxHardware.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = long(val) / 1024
def get_cpu_facts(self):
i = 0
physid = 0
coreid = 0
sockets = {}
cores = {}
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
for line in open("/proc/cpuinfo").readlines():
data = line.split(":", 1)
key = data[0].strip()
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key == 'model name' or key == 'Processor' or key == 'vendor_id':
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'core id':
coreid = data[1].strip()
                if coreid not in cores:
cores[coreid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
elif key == 'siblings':
cores[coreid] = int(data[1].strip())
elif key == '# processors':
self.facts['processor_cores'] = int(data[1].strip())
if self.facts['architecture'] != 's390x':
self.facts['processor_count'] = sockets and len(sockets) or i
self.facts['processor_cores'] = sockets.values() and sockets.values()[0] or 1
self.facts['processor_threads_per_core'] = ((cores.values() and
cores.values()[0] or 1) / self.facts['processor_cores'])
self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] *
self.facts['processor_count'] * self.facts['processor_cores'])
def get_dmi_facts(self):
''' learn dmi facts from system
Try /sys first for dmi related facts.
If that is not available, fall back to dmidecode executable '''
if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
# Use kernel DMI info, if available
# DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
"Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
"Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
"All In One", "Sub Notebook", "Space-saving", "Lunch Box",
"Main Server Chassis", "Expansion Chassis", "Sub Chassis",
"Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
"Rack Mount Chassis", "Sealed-case PC", "Multi-system",
"CompactPCI", "AdvancedTCA", "Blade" ]
DMI_DICT = {
'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
'product_name': '/sys/devices/virtual/dmi/id/product_name',
'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
'product_version': '/sys/devices/virtual/dmi/id/product_version',
'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
}
for (key,path) in DMI_DICT.items():
data = get_file_content(path)
if data is not None:
if key == 'form_factor':
try:
self.facts['form_factor'] = FORM_FACTOR[int(data)]
except IndexError, e:
self.facts['form_factor'] = 'unknown (%s)' % data
else:
self.facts[key] = data
else:
self.facts[key] = 'NA'
else:
# Fall back to using dmidecode, if available
dmi_bin = module.get_bin_path('dmidecode')
DMI_DICT = {
'bios_date': 'bios-release-date',
'bios_version': 'bios-version',
'form_factor': 'chassis-type',
'product_name': 'system-product-name',
'product_serial': 'system-serial-number',
'product_uuid': 'system-uuid',
'product_version': 'system-version',
'system_vendor': 'system-manufacturer'
}
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
thisvalue = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
try:
json.dumps(thisvalue)
except UnicodeDecodeError:
thisvalue = "NA"
self.facts[k] = thisvalue
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
mtab = get_file_content('/etc/mtab', '')
for line in mtab.split('\n'):
if line.startswith('/'):
fields = line.rstrip('\n').split()
if(fields[2] != 'none'):
size_total = None
size_available = None
try:
statvfs_result = os.statvfs(fields[1])
size_total = statvfs_result.f_bsize * statvfs_result.f_blocks
size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail)
except OSError, e:
continue
self.facts['mounts'].append(
{'mount': fields[1],
'device':fields[0],
'fstype': fields[2],
'options': fields[3],
# statvfs data
'size_total': size_total,
'size_available': size_available,
})
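    # Note: the @timeout(10) decorator above is assumed (defined earlier in this module) to raise
    # TimeoutError when /etc/mtab parsing or statvfs() stalls (e.g. on a hung network mount);
    # populate() catches that exception so the remaining facts are still returned without 'mounts'.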
def get_device_facts(self):
self.facts['devices'] = {}
lspci = module.get_bin_path('lspci')
if lspci:
rc, pcidata, err = module.run_command([lspci, '-D'])
else:
pcidata = None
try:
block_devs = os.listdir("/sys/block")
except OSError:
return
for block in block_devs:
virtual = 1
sysfs_no_links = 0
try:
path = os.readlink(os.path.join("/sys/block/", block))
except OSError, e:
if e.errno == errno.EINVAL:
path = block
sysfs_no_links = 1
else:
continue
if "virtual" in path:
continue
sysdir = os.path.join("/sys/block", path)
if sysfs_no_links == 1:
for folder in os.listdir(sysdir):
if "device" in folder:
virtual = 0
break
if virtual:
continue
d = {}
diskname = os.path.basename(sysdir)
for key in ['vendor', 'model']:
d[key] = get_file_content(sysdir + "/device/" + key)
for key,test in [ ('removable','/removable'), \
('support_discard','/queue/discard_granularity'),
]:
d[key] = get_file_content(sysdir + test)
d['partitions'] = {}
for folder in os.listdir(sysdir):
m = re.search("(" + diskname + "\d+)", folder)
if m:
part = {}
partname = m.group(1)
part_sysdir = sysdir + "/" + partname
part['start'] = get_file_content(part_sysdir + "/start",0)
part['sectors'] = get_file_content(part_sysdir + "/size",0)
part['sectorsize'] = get_file_content(part_sysdir + "/queue/physical_block_size")
if not part['sectorsize']:
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
d['partitions'][partname] = part
d['rotational'] = get_file_content(sysdir + "/queue/rotational")
d['scheduler_mode'] = ""
scheduler = get_file_content(sysdir + "/queue/scheduler")
if scheduler is not None:
m = re.match(".*?(\[(.*)\])", scheduler)
if m:
d['scheduler_mode'] = m.group(2)
d['sectors'] = get_file_content(sysdir + "/size")
if not d['sectors']:
d['sectors'] = 0
d['sectorsize'] = get_file_content(sysdir + "/queue/physical_block_size")
if not d['sectorsize']:
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size",512)
d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
d['host'] = ""
# domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
if m and pcidata:
pciid = m.group(1)
did = re.escape(pciid)
m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
d['host'] = m.group(1)
d['holders'] = []
if os.path.isdir(sysdir + "/holders"):
for folder in os.listdir(sysdir + "/holders"):
if not folder.startswith("dm-"):
continue
name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
if name:
d['holders'].append(name)
else:
d['holders'].append(folder)
self.facts['devices'][diskname] = d
class SunOSHardware(Hardware):
"""
In addition to the generic memory and cpu facts, this also sets
swap_reserved_mb and swap_allocated_mb that is available from *swap -s*.
"""
platform = 'SunOS'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
return self.facts
def get_cpu_facts(self):
physid = 0
sockets = {}
rc, out, err = module.run_command("/usr/bin/kstat cpu_info")
self.facts['processor'] = []
for line in out.split('\n'):
if len(line) < 1:
continue
data = line.split(None, 1)
key = data[0].strip()
# "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
if key == 'module:':
brand = ''
elif key == 'brand':
brand = data[1].strip()
elif key == 'clock_MHz':
clock_mhz = data[1].strip()
elif key == 'implementation':
processor = brand or data[1].strip()
# Add clock speed to description for SPARC CPU
if self.facts['machine'] != 'i86pc':
processor += " @ " + clock_mhz + "MHz"
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(processor)
elif key == 'chip_id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
else:
sockets[physid] += 1
# Counting cores on Solaris can be complicated.
# https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
# Treat 'processor_count' as physical sockets and 'processor_cores' as
        # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC as
# these processors have: sockets -> cores -> threads/virtual CPU.
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_cores'] = 'NA'
self.facts['processor_count'] = len(self.facts['processor'])
def get_memory_facts(self):
rc, out, err = module.run_command(["/usr/sbin/prtconf"])
for line in out.split('\n'):
if 'Memory size' in line:
self.facts['memtotal_mb'] = line.split()[2]
rc, out, err = module.run_command("/usr/sbin/swap -s")
allocated = long(out.split()[1][:-1])
reserved = long(out.split()[5][:-1])
used = long(out.split()[8][:-1])
free = long(out.split()[10][:-1])
self.facts['swapfree_mb'] = free / 1024
self.facts['swaptotal_mb'] = (free + used) / 1024
self.facts['swap_allocated_mb'] = allocated / 1024
self.facts['swap_reserved_mb'] = reserved / 1024
class OpenBSDHardware(Hardware):
"""
OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- processor_speed
- devices
"""
platform = 'OpenBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.sysctl = self.get_sysctl()
self.get_memory_facts()
self.get_processor_facts()
self.get_device_facts()
return self.facts
def get_sysctl(self):
rc, out, err = module.run_command(["/sbin/sysctl", "hw"])
if rc != 0:
return dict()
sysctl = dict()
for line in out.splitlines():
(key, value) = line.split('=')
sysctl[key] = value.strip()
return sysctl
def get_memory_facts(self):
# Get free memory. vmstat output looks like:
# procs memory page disks traps cpu
# r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
# 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
rc, out, err = module.run_command("/usr/bin/vmstat")
if rc == 0:
self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[4]) / 1024
self.facts['memtotal_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024
# Get swapctl info. swapctl output looks like:
# total: 69268 1K-blocks allocated, 0 used, 69268 available
# And for older OpenBSD:
# total: 69268k bytes allocated = 0k used, 69268k available
rc, out, err = module.run_command("/sbin/swapctl -sk")
if rc == 0:
swaptrans = maketrans(' ', ' ')
data = out.split()
self.facts['swapfree_mb'] = long(data[-2].translate(swaptrans, "kmg")) / 1024
self.facts['swaptotal_mb'] = long(data[1].translate(swaptrans, "kmg")) / 1024
def get_processor_facts(self):
processor = []
dmesg_boot = get_file_content(OpenBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
i = 0
for line in dmesg_boot.splitlines():
if line.split(' ', 1)[0] == 'cpu%i:' % i:
processor.append(line.split(' ', 1)[1])
i = i + 1
processor_count = i
self.facts['processor'] = processor
self.facts['processor_count'] = processor_count
# I found no way to figure out the number of Cores per CPU in OpenBSD
self.facts['processor_cores'] = 'NA'
def get_device_facts(self):
devices = []
devices.extend(self.sysctl['hw.disknames'].split(','))
self.facts['devices'] = devices
class FreeBSDHardware(Hardware):
"""
FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
- devices
"""
platform = 'FreeBSD'
DMESG_BOOT = '/var/run/dmesg.boot'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
self.get_device_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = module.run_command("/sbin/sysctl -n hw.ncpu")
self.facts['processor_count'] = out.strip()
dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
if not dmesg_boot:
rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
for line in dmesg_boot.split('\n'):
if 'CPU:' in line:
cpu = re.sub(r'CPU:\s+', r"", line)
self.facts['processor'].append(cpu.strip())
if 'Logical CPUs per core' in line:
self.facts['processor_cores'] = line.split()[4]
def get_memory_facts(self):
rc, out, err = module.run_command("/sbin/sysctl vm.stats")
for line in out.split('\n'):
data = line.split()
if 'vm.stats.vm.v_page_size' in line:
pagesize = long(data[1])
if 'vm.stats.vm.v_page_count' in line:
pagecount = long(data[1])
if 'vm.stats.vm.v_free_count' in line:
freecount = long(data[1])
self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = module.run_command("/usr/sbin/swapinfo -m")
lines = out.split('\n')
if len(lines[-1]) == 0:
lines.pop()
data = lines[-1].split()
self.facts['swaptotal_mb'] = data[1]
self.facts['swapfree_mb'] = data[3]
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
def get_device_facts(self):
sysdir = '/dev'
self.facts['devices'] = {}
drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = module.run_command("/sbin/sysctl kern.disks")
slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)')
if os.path.isdir(sysdir):
dirlist = sorted(os.listdir(sysdir))
for device in dirlist:
d = drives.match(device)
                # do not wipe an already-collected slice list when the drive prefix re-matches
                if d and d.group(1) not in self.facts['devices']:
self.facts['devices'][d.group(1)] = []
s = slices.match(device)
if s:
self.facts['devices'][d.group(1)].append(s.group(1))
def get_dmi_facts(self):
''' learn dmi facts from system
Use dmidecode executable if available'''
# Fall back to using dmidecode, if available
dmi_bin = module.get_bin_path('dmidecode')
DMI_DICT = dict(
bios_date='bios-release-date',
bios_version='bios-version',
form_factor='chassis-type',
product_name='system-product-name',
product_serial='system-serial-number',
product_uuid='system-uuid',
product_version='system-version',
system_vendor='system-manufacturer'
)
for (k, v) in DMI_DICT.items():
if dmi_bin is not None:
(rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
if rc == 0:
# Strip out commented lines (specific dmidecode output)
self.facts[k] = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
try:
json.dumps(self.facts[k])
except UnicodeDecodeError:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
else:
self.facts[k] = 'NA'
class NetBSDHardware(Hardware):
"""
NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
"""
platform = 'NetBSD'
MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
try:
self.get_mount_facts()
except TimeoutError:
pass
return self.facts
def get_cpu_facts(self):
i = 0
physid = 0
sockets = {}
if not os.access("/proc/cpuinfo", os.R_OK):
return
self.facts['processor'] = []
for line in open("/proc/cpuinfo").readlines():
data = line.split(":", 1)
key = data[0].strip()
# model name is for Intel arch, Processor (mind the uppercase P)
# works for some ARM devices, like the Sheevaplug.
if key == 'model name' or key == 'Processor':
if 'processor' not in self.facts:
self.facts['processor'] = []
self.facts['processor'].append(data[1].strip())
i += 1
elif key == 'physical id':
physid = data[1].strip()
if physid not in sockets:
sockets[physid] = 1
elif key == 'cpu cores':
sockets[physid] = int(data[1].strip())
if len(sockets) > 0:
self.facts['processor_count'] = len(sockets)
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
else:
self.facts['processor_count'] = i
self.facts['processor_cores'] = 'NA'
def get_memory_facts(self):
if not os.access("/proc/meminfo", os.R_OK):
return
for line in open("/proc/meminfo").readlines():
data = line.split(":", 1)
key = data[0]
if key in NetBSDHardware.MEMORY_FACTS:
val = data[1].strip().split(' ')[0]
self.facts["%s_mb" % key.lower()] = long(val) / 1024
@timeout(10)
def get_mount_facts(self):
self.facts['mounts'] = []
fstab = get_file_content('/etc/fstab')
if fstab:
for line in fstab.split('\n'):
if line.startswith('#') or line.strip() == '':
continue
fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
class AIX(Hardware):
"""
AIX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor (a list)
- processor_cores
- processor_count
"""
platform = 'AIX'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_dmi_facts()
return self.facts
def get_cpu_facts(self):
self.facts['processor'] = []
rc, out, err = module.run_command("/usr/sbin/lsdev -Cc processor")
if out:
i = 0
for line in out.split('\n'):
if 'Available' in line:
if i == 0:
data = line.split(' ')
cpudev = data[0]
i += 1
self.facts['processor_count'] = int(i)
rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
data = out.split(' ')
self.facts['processor'] = data[1]
rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
data = out.split(' ')
self.facts['processor_cores'] = int(data[1])
def get_memory_facts(self):
pagesize = 4096
rc, out, err = module.run_command("/usr/bin/vmstat -v")
for line in out.split('\n'):
data = line.split()
if 'memory pages' in line:
pagecount = long(data[0])
if 'free pages' in line:
freecount = long(data[0])
self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
# Get swapinfo. swapinfo output looks like:
# Device 1M-blocks Used Avail Capacity
# /dev/ada0p3 314368 0 314368 0%
#
rc, out, err = module.run_command("/usr/sbin/lsps -s")
if out:
lines = out.split('\n')
data = lines[1].split()
swaptotal_mb = long(data[0].rstrip('MB'))
percused = int(data[1].rstrip('%'))
self.facts['swaptotal_mb'] = swaptotal_mb
self.facts['swapfree_mb'] = long(swaptotal_mb * ( 100 - percused ) / 100)
def get_dmi_facts(self):
rc, out, err = module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
data = out.split()
self.facts['firmware_version'] = data[1].strip('IBM,')
class HPUX(Hardware):
"""
    HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
- memfree_mb
- memtotal_mb
- swapfree_mb
- swaptotal_mb
- processor
- processor_cores
- processor_count
- model
- firmware
"""
platform = 'HP-UX'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.get_cpu_facts()
self.get_memory_facts()
self.get_hw_facts()
return self.facts
def get_cpu_facts(self):
if self.facts['architecture'] == '9000/800':
rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip())
#Working with machinfo mess
elif self.facts['architecture'] == 'ia64':
if self.facts['distribution_version'] == "B.11.23":
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'Number of CPUs'", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split('=')[1])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep 'processor family'", use_unsafe_shell=True)
self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
rc, out, err = module.run_command("ioscan -FkCprocessor | wc -l", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip())
if self.facts['distribution_version'] == "B.11.31":
                #if machinfo returns core strings (release B.11.31 > 1204)
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep core | wc -l", use_unsafe_shell=True)
if out.strip()== '0':
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
#If hyperthreading is active divide cores by 2
rc, out, err = module.run_command("/usr/sbin/psrset | grep LCPU", use_unsafe_shell=True)
data = re.sub(' +',' ',out).strip().split(' ')
if len(data) == 1:
hyperthreading = 'OFF'
else:
hyperthreading = data[1]
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep logical", use_unsafe_shell=True)
data = out.strip().split(" ")
if hyperthreading == 'ON':
self.facts['processor_cores'] = int(data[0])/2
else:
if len(data) == 1:
self.facts['processor_cores'] = self.facts['processor_count']
else:
self.facts['processor_cores'] = int(data[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel |cut -d' ' -f4-", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
else:
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | egrep 'socket[s]?$' | tail -1", use_unsafe_shell=True)
self.facts['processor_count'] = int(out.strip().split(" ")[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep -e '[0-9] core' | tail -1", use_unsafe_shell=True)
self.facts['processor_cores'] = int(out.strip().split(" ")[0])
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Intel", use_unsafe_shell=True)
self.facts['processor'] = out.strip()
def get_memory_facts(self):
pagesize = 4096
rc, out, err = module.run_command("/usr/bin/vmstat | tail -1", use_unsafe_shell=True)
data = int(re.sub(' +',' ',out).split(' ')[5].strip())
self.facts['memfree_mb'] = pagesize * data / 1024 / 1024
if self.facts['architecture'] == '9000/800':
try:
rc, out, err = module.run_command("grep Physical /var/adm/syslog/syslog.log")
data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data) / 1024
except AttributeError:
#For systems where memory details aren't sent to syslog or the log has rotated, use parsed
#adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
if os.access("/dev/kmem", os.R_OK):
rc, out, err = module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True)
if not err:
data = out
self.facts['memtotal_mb'] = int(data) / 256
else:
rc, out, err = module.run_command("/usr/contrib/bin/machinfo | grep Memory", use_unsafe_shell=True)
data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip()
self.facts['memtotal_mb'] = int(data)
rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f -q")
self.facts['swaptotal_mb'] = int(out.strip())
rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f | egrep '^dev|^fs'", use_unsafe_shell=True)
swap = 0
for line in out.strip().split('\n'):
swap += int(re.sub(' +',' ',line).split(' ')[3].strip())
self.facts['swapfree_mb'] = swap
def get_hw_facts(self):
rc, out, err = module.run_command("model")
self.facts['model'] = out.strip()
if self.facts['architecture'] == 'ia64':
separator = ':'
if self.facts['distribution_version'] == "B.11.23":
separator = '='
rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' | grep -v BMC", use_unsafe_shell=True)
self.facts['firmware_version'] = out.split(separator)[1].strip()
class Darwin(Hardware):
"""
Darwin-specific subclass of Hardware. Defines memory and CPU facts:
- processor
- processor_cores
- memtotal_mb
- memfree_mb
- model
- osversion
- osrevision
"""
platform = 'Darwin'
def __init__(self):
Hardware.__init__(self)
def populate(self):
self.sysctl = self.get_sysctl()
self.get_mac_facts()
self.get_cpu_facts()
self.get_memory_facts()
return self.facts
def get_sysctl(self):
rc, out, err = module.run_command(["/usr/sbin/sysctl", "hw", "machdep", "kern"])
if rc != 0:
return dict()
sysctl = dict()
for line in out.splitlines():
if line.rstrip("\n"):
(key, value) = re.split(' = |: ', line, maxsplit=1)
sysctl[key] = value.strip()
return sysctl
def get_system_profile(self):
rc, out, err = module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
if rc != 0:
return dict()
system_profile = dict()
for line in out.splitlines():
if ': ' in line:
(key, value) = line.split(': ', 1)
system_profile[key.strip()] = ' '.join(value.strip().split())
return system_profile
def get_mac_facts(self):
rc, out, err = module.run_command("sysctl hw.model")
if rc == 0:
self.facts['model'] = out.splitlines()[-1].split()[1]
self.facts['osversion'] = self.sysctl['kern.osversion']
self.facts['osrevision'] = self.sysctl['kern.osrevision']
def get_cpu_facts(self):
if 'machdep.cpu.brand_string' in self.sysctl: # Intel
self.facts['processor'] = self.sysctl['machdep.cpu.brand_string']
self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
else: # PowerPC
system_profile = self.get_system_profile()
self.facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
self.facts['processor_cores'] = self.sysctl['hw.physicalcpu']
def get_memory_facts(self):
self.facts['memtotal_mb'] = long(self.sysctl['hw.memsize']) / 1024 / 1024
rc, out, err = module.run_command("sysctl hw.usermem")
if rc == 0:
self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[1]) / 1024 / 1024
class Network(Facts):
"""
This is a generic Network subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you must define:
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
All subclasses MUST define platform.
"""
platform = 'Generic'
IPV6_SCOPE = { '0' : 'global',
'10' : 'host',
'20' : 'link',
'40' : 'admin',
'50' : 'site',
'80' : 'organization' }
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in Network.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self, module):
self.module = module
Facts.__init__(self)
def populate(self):
return self.facts
class LinuxNetwork(Network):
"""
This is a Linux-specific subclass of Network. It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- ipv4_address and ipv6_address: the first non-local address for each family.
"""
platform = 'Linux'
def __init__(self, module):
Network.__init__(self, module)
def populate(self):
ip_path = self.module.get_bin_path('ip')
if ip_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path)
interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def get_default_interfaces(self, ip_path):
# Use the commands:
# ip -4 route get 8.8.8.8 -> Google public DNS
# ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
command = dict(
v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and self.facts['os_family'] == 'RedHat' \
and self.facts['distribution_version'].startswith('4.'):
continue
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
words = out.split('\n')[0].split()
# A valid output starts with the queried address on the first line
if len(words) > 0 and words[0] == command[v][-1]:
for i in range(len(words) - 1):
if words[i] == 'dev':
interface[v]['interface'] = words[i+1]
elif words[i] == 'src':
interface[v]['address'] = words[i+1]
elif words[i] == 'via' and words[i+1] != command[v][-1]:
interface[v]['gateway'] = words[i+1]
return interface['v4'], interface['v6']
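    # Illustrative parse (made-up addresses): 'ip -4 route get 8.8.8.8' printing
    #   8.8.8.8 via 192.168.1.1 dev eth0  src 192.168.1.10
    # yields interface['v4'] == {'gateway': '192.168.1.1', 'interface': 'eth0', 'address': '192.168.1.10'}.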
def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
interfaces = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
for path in glob.glob('/sys/class/net/*'):
if not os.path.isdir(path):
continue
device = os.path.basename(path)
interfaces[device] = { 'device': device }
if os.path.exists(os.path.join(path, 'address')):
macaddress = open(os.path.join(path, 'address')).read().strip()
if macaddress and macaddress != '00:00:00:00:00:00':
interfaces[device]['macaddress'] = macaddress
if os.path.exists(os.path.join(path, 'mtu')):
interfaces[device]['mtu'] = int(open(os.path.join(path, 'mtu')).read().strip())
if os.path.exists(os.path.join(path, 'operstate')):
interfaces[device]['active'] = open(os.path.join(path, 'operstate')).read().strip() != 'down'
# if os.path.exists(os.path.join(path, 'carrier')):
# interfaces[device]['link'] = open(os.path.join(path, 'carrier')).read().strip() == '1'
if os.path.exists(os.path.join(path, 'device','driver', 'module')):
interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
if os.path.exists(os.path.join(path, 'type')):
type = open(os.path.join(path, 'type')).read().strip()
if type == '1':
interfaces[device]['type'] = 'ether'
elif type == '512':
interfaces[device]['type'] = 'ppp'
elif type == '772':
interfaces[device]['type'] = 'loopback'
if os.path.exists(os.path.join(path, 'bridge')):
interfaces[device]['type'] = 'bridge'
interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
interfaces[device]['id'] = open(os.path.join(path, 'bridge', 'bridge_id')).read().strip()
if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
interfaces[device]['stp'] = open(os.path.join(path, 'bridge', 'stp_state')).read().strip() == '1'
if os.path.exists(os.path.join(path, 'bonding')):
interfaces[device]['type'] = 'bonding'
interfaces[device]['slaves'] = open(os.path.join(path, 'bonding', 'slaves')).read().split()
interfaces[device]['mode'] = open(os.path.join(path, 'bonding', 'mode')).read().split()[0]
interfaces[device]['miimon'] = open(os.path.join(path, 'bonding', 'miimon')).read().split()[0]
interfaces[device]['lacp_rate'] = open(os.path.join(path, 'bonding', 'lacp_rate')).read().split()[0]
primary = open(os.path.join(path, 'bonding', 'primary')).read()
if primary:
interfaces[device]['primary'] = primary
path = os.path.join(path, 'bonding', 'all_slaves_active')
if os.path.exists(path):
interfaces[device]['all_slaves_active'] = open(path).read() == '1'
# Check whether an interface is in promiscuous mode
if os.path.exists(os.path.join(path,'flags')):
promisc_mode = False
# The second byte indicates whether the interface is in promiscuous mode.
# 1 = promisc
# 0 = no promisc
data = int(open(os.path.join(path, 'flags')).read().strip(),16)
promisc_mode = (data & 0x0100 > 0)
interfaces[device]['promisc'] = promisc_mode
def parse_ip_output(output, secondary=False):
for line in output.split('\n'):
if not line:
continue
words = line.split()
if words[0] == 'inet':
if '/' in words[1]:
address, netmask_length = words[1].split('/')
else:
# pointopoint interfaces do not have a prefix
address = words[1]
netmask_length = "32"
address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
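                    # e.g. (illustrative) a /24 prefix: (1<<32) - (1<<32 >> 24) == 0xffffff00,
                    # i.e. netmask '255.255.255.0'; ANDing with the address gives the network base.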
iface = words[-1]
if iface != device:
interfaces[iface] = {}
if not secondary and "ipv4" not in interfaces[iface]:
interfaces[iface]['ipv4'] = {'address': address,
'netmask': netmask,
'network': network}
else:
if "ipv4_secondaries" not in interfaces[iface]:
interfaces[iface]["ipv4_secondaries"] = []
interfaces[iface]["ipv4_secondaries"].append({
'address': address,
'netmask': netmask,
'network': network,
})
# add this secondary IP to the main device
if secondary:
if "ipv4_secondaries" not in interfaces[device]:
interfaces[device]["ipv4_secondaries"] = []
interfaces[device]["ipv4_secondaries"].append({
'address': address,
'netmask': netmask,
'network': network,
})
# If this is the default address, update default_ipv4
if 'address' in default_ipv4 and default_ipv4['address'] == address:
default_ipv4['netmask'] = netmask
default_ipv4['network'] = network
default_ipv4['macaddress'] = macaddress
default_ipv4['mtu'] = interfaces[device]['mtu']
default_ipv4['type'] = interfaces[device].get("type", "unknown")
default_ipv4['alias'] = words[-1]
if not address.startswith('127.'):
ips['all_ipv4_addresses'].append(address)
elif words[0] == 'inet6':
address, prefix = words[1].split('/')
scope = words[3]
if 'ipv6' not in interfaces[device]:
interfaces[device]['ipv6'] = []
interfaces[device]['ipv6'].append({
'address' : address,
'prefix' : prefix,
'scope' : scope
})
# If this is the default address, update default_ipv6
if 'address' in default_ipv6 and default_ipv6['address'] == address:
default_ipv6['prefix'] = prefix
default_ipv6['scope'] = scope
default_ipv6['macaddress'] = macaddress
default_ipv6['mtu'] = interfaces[device]['mtu']
default_ipv6['type'] = interfaces[device].get("type", "unknown")
if not address == '::1':
ips['all_ipv6_addresses'].append(address)
ip_path = module.get_bin_path("ip")
args = [ip_path, 'addr', 'show', 'primary', device]
rc, stdout, stderr = self.module.run_command(args)
primary_data = stdout
args = [ip_path, 'addr', 'show', 'secondary', device]
rc, stdout, stderr = self.module.run_command(args)
secondary_data = stdout
parse_ip_output(primary_data)
parse_ip_output(secondary_data, secondary=True)
# replace : by _ in interface name since they are hard to use in template
new_interfaces = {}
for i in interfaces:
if ':' in i:
new_interfaces[i.replace(':','_')] = interfaces[i]
else:
new_interfaces[i] = interfaces[i]
return new_interfaces, ips
class GenericBsdIfconfigNetwork(Network):
"""
This is a generic BSD subclass of Network using the ifconfig command.
It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
It currently does not define
- default_ipv4 and default_ipv6
- type, mtu and network on interfaces
"""
platform = 'Generic_BSD_Ifconfig'
def __init__(self, module):
Network.__init__(self, module)
def populate(self):
ifconfig_path = module.get_bin_path('ifconfig')
if ifconfig_path is None:
return self.facts
route_path = module.get_bin_path('route')
if route_path is None:
return self.facts
default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
interfaces, ips = self.get_interfaces_info(ifconfig_path)
self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
self.facts['interfaces'] = interfaces.keys()
for iface in interfaces:
self.facts[iface] = interfaces[iface]
self.facts['default_ipv4'] = default_ipv4
self.facts['default_ipv6'] = default_ipv6
self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return self.facts
def get_default_interfaces(self, route_path):
# Use the commands:
# route -n get 8.8.8.8 -> Google public DNS
# route -n get -inet6 2404:6800:400a:800::1012 -> ipv6.google.com
# to find out the default outgoing interface, address, and gateway
command = dict(
v4 = [route_path, '-n', 'get', '8.8.8.8'],
v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012']
)
interface = dict(v4 = {}, v6 = {})
for v in 'v4', 'v6':
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
lines = out.split('\n')
for line in lines:
words = line.split()
# Collect output from route command
if len(words) > 1:
if words[0] == 'interface:':
interface[v]['interface'] = words[1]
if words[0] == 'gateway:':
interface[v]['gateway'] = words[1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
# FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a'
# when running the command 'ifconfig'.
# Solaris must explicitly run the command 'ifconfig -a'.
rc, out, err = module.run_command([ifconfig_path, '-a'])
for line in out.split('\n'):
if line:
words = line.split()
if words[0] == 'pass':
continue
elif re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
return interfaces, ips
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
if len(words) >= 5 : # Newer FreeBSD versions
current_if['metric'] = words[3]
current_if['mtu'] = words[5]
else:
current_if['mtu'] = words[3]
return current_if
def parse_options_line(self, words, current_if, ips):
# Mac has options like this...
current_if['options'] = self.get_options(words[0])
def parse_nd6_line(self, words, current_if, ips):
        # FreeBSD has options like this...
current_if['options'] = self.get_options(words[1])
def parse_ether_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = words[1]
if len(words) > 2:
current_if['media_select'] = words[2]
if len(words) > 3:
current_if['media_type'] = words[3][1:]
if len(words) > 4:
current_if['media_options'] = self.get_options(words[4])
def parse_status_line(self, words, current_if, ips):
current_if['status'] = words[1]
def parse_lladdr_line(self, words, current_if, ips):
current_if['lladdr'] = words[1]
def parse_inet_line(self, words, current_if, ips):
address = {'address': words[1]}
# deal with hex netmask
if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8:
words[3] = '0x' + words[3]
if words[3].startswith('0x'):
address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
else:
# otherwise assume this is a dotted quad
address['netmask'] = words[3]
# calculate the network
address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
# broadcast may be given or we need to calculate
if len(words) > 5:
address['broadcast'] = words[5]
else:
address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
# add to our list of addresses
if not words[1].startswith('127.'):
ips['all_ipv4_addresses'].append(address['address'])
current_if['ipv4'].append(address)
def parse_inet6_line(self, words, current_if, ips):
address = {'address': words[1]}
if (len(words) >= 4) and (words[2] == 'prefixlen'):
address['prefix'] = words[3]
if (len(words) >= 6) and (words[4] == 'scopeid'):
address['scope'] = words[5]
localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
if address['address'] not in localhost6:
ips['all_ipv6_addresses'].append(address['address'])
current_if['ipv6'].append(address)
def parse_unknown_line(self, words, current_if, ips):
# we are going to ignore unknown lines here - this may be
# a bad idea - but you can override it in your subclass
pass
def get_options(self, option_string):
start = option_string.find('<') + 1
end = option_string.rfind('>')
if (start > 0) and (end > 0) and (end > start + 1):
option_csv = option_string[start:end]
return option_csv.split(',')
else:
return []
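    # Example: an ifconfig flags field such as '<UP,BROADCAST,RUNNING,MULTICAST>' is returned as
    # ['UP', 'BROADCAST', 'RUNNING', 'MULTICAST']; a word without a <...> section yields [].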
def merge_default_interface(self, defaults, interfaces, ip_type):
if not 'interface' in defaults.keys():
return
if not defaults['interface'] in interfaces:
return
ifinfo = interfaces[defaults['interface']]
# copy all the interface values across except addresses
for item in ifinfo.keys():
if item != 'ipv4' and item != 'ipv6':
defaults[item] = ifinfo[item]
if len(ifinfo[ip_type]) > 0:
for item in ifinfo[ip_type][0].keys():
defaults[item] = ifinfo[ip_type][0][item]
class DarwinNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the Mac OS X/Darwin Network Class.
It uses the GenericBsdIfconfigNetwork unchanged
"""
platform = 'Darwin'
# media line is different to the default FreeBSD one
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = 'Unknown' # Mac does not give us this
current_if['media_select'] = words[1]
if len(words) > 2:
current_if['media_type'] = words[2][1:]
if len(words) > 3:
current_if['media_options'] = self.get_options(words[3])
class FreeBSDNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the FreeBSD Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'FreeBSD'
class AIXNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the AIX Network Class.
It uses the GenericBsdIfconfigNetwork unchanged.
"""
platform = 'AIX'
# AIX 'ifconfig -a' does not have three words in the interface line
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = module.run_command([ifconfig_path, '-a'])
for line in out.split('\n'):
if line:
words = line.split()
# only this condition differs from GenericBsdIfconfigNetwork
if re.match('^\w*\d*:', line):
current_if = self.parse_interface_line(words)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
return interfaces, ips
# AIX 'ifconfig -a' does not inform about MTU, so remove current_if['mtu'] here
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the OpenBSD Network Class.
It uses the GenericBsdIfconfigNetwork.
"""
platform = 'OpenBSD'
# Return macaddress instead of lladdr
def parse_lladdr_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
class SunOSNetwork(GenericBsdIfconfigNetwork, Network):
"""
This is the SunOS Network Class.
It uses the GenericBsdIfconfigNetwork.
Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
"""
platform = 'SunOS'
# Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
# MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
# 'parse_interface_line()' checks for previously seen interfaces before defining
# 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
def get_interfaces_info(self, ifconfig_path):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses = [],
all_ipv6_addresses = [],
)
rc, out, err = module.run_command([ifconfig_path, '-a'])
for line in out.split('\n'):
if line:
words = line.split()
if re.match('^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words, current_if, interfaces)
interfaces[ current_if['device'] ] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
# 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
# ipv4/ipv6 lists which is ugly and hard to read.
# This quick hack merges the dictionaries. Purely cosmetic.
for iface in interfaces:
for v in 'ipv4', 'ipv6':
combined_facts = {}
for facts in interfaces[iface][v]:
combined_facts.update(facts)
if len(combined_facts.keys()) > 0:
interfaces[iface][v] = [combined_facts]
return interfaces, ips
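    # Illustrative merge performed just above: [{'flags': [...], 'mtu': '1500'}, {'address': '10.0.0.5'}]
    # collapses into a single per-family dict, so consumers see one entry per address family.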
def parse_interface_line(self, words, current_if, interfaces):
device = words[0][0:-1]
if device not in interfaces.keys():
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
else:
current_if = interfaces[device]
flags = self.get_options(words[1])
v = 'ipv4'
if 'IPv6' in flags:
v = 'ipv6'
current_if[v].append({'flags': flags, 'mtu': words[3]})
current_if['macaddress'] = 'unknown' # will be overwritten later
return current_if
# Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
# Add leading zero to each octet where needed.
def parse_ether_line(self, words, current_if, ips):
macaddress = ''
for octet in words[1].split(':'):
octet = ('0' + octet)[-2:None]
macaddress += (octet + ':')
current_if['macaddress'] = macaddress[0:-1]
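        # Example: Solaris 'ether 0:3:ba:2f:c:a6' becomes '00:03:ba:2f:0c:a6'
        # (each octet left-padded to two hex digits, trailing ':' trimmed).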
class Virtual(Facts):
"""
This is a generic Virtual subclass of Facts. This should be further
subclassed to implement per platform. If you subclass this,
you should define:
- virtualization_type
- virtualization_role
- container (e.g. solaris zones, freebsd jails, linux containers)
All subclasses MUST define platform.
"""
def __new__(cls, *arguments, **keyword):
subclass = cls
for sc in Virtual.__subclasses__():
if sc.platform == platform.system():
subclass = sc
return super(cls, subclass).__new__(subclass, *arguments, **keyword)
def __init__(self):
Facts.__init__(self)
def populate(self):
return self.facts
class LinuxVirtual(Virtual):
"""
This is a Linux-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'Linux'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
# For more information, check: http://people.redhat.com/~rjones/virt-what/
def get_virtual_facts(self):
if os.path.exists("/proc/xen"):
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
try:
for line in open('/proc/xen/capabilities'):
if "control_d" in line:
self.facts['virtualization_role'] = 'host'
except IOError:
pass
return
if os.path.exists('/proc/vz'):
self.facts['virtualization_type'] = 'openvz'
if os.path.exists('/proc/bc'):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/1/cgroup'):
for line in open('/proc/1/cgroup').readlines():
if re.search('/docker/', line):
self.facts['virtualization_type'] = 'docker'
self.facts['virtualization_role'] = 'guest'
return
if re.search('/lxc/', line):
self.facts['virtualization_type'] = 'lxc'
self.facts['virtualization_role'] = 'guest'
return
product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
if product_name in ['KVM', 'Bochs']:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'RHEV Hypervisor':
self.facts['virtualization_type'] = 'RHEV'
self.facts['virtualization_role'] = 'guest'
return
if product_name == 'VMware Virtual Platform':
self.facts['virtualization_type'] = 'VMware'
self.facts['virtualization_role'] = 'guest'
return
bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
if bios_vendor == 'Xen':
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
return
if bios_vendor == 'innotek GmbH':
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
return
sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
# FIXME: This does also match hyperv
if sys_vendor == 'Microsoft Corporation':
self.facts['virtualization_type'] = 'VirtualPC'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'Parallels Software International Inc.':
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
return
if sys_vendor == 'QEMU':
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/self/status'):
for line in open('/proc/self/status').readlines():
if re.match('^VxID: \d+', line):
self.facts['virtualization_type'] = 'linux_vserver'
if re.match('^VxID: 0', line):
self.facts['virtualization_role'] = 'host'
else:
self.facts['virtualization_role'] = 'guest'
return
if os.path.exists('/proc/cpuinfo'):
for line in open('/proc/cpuinfo').readlines():
if re.match('^model name.*QEMU Virtual CPU', line):
self.facts['virtualization_type'] = 'kvm'
elif re.match('^vendor_id.*User Mode Linux', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^model name.*UML', line):
self.facts['virtualization_type'] = 'uml'
elif re.match('^vendor_id.*PowerVM Lx86', line):
self.facts['virtualization_type'] = 'powervm_lx86'
elif re.match('^vendor_id.*IBM/S390', line):
self.facts['virtualization_type'] = 'PR/SM'
lscpu = module.get_bin_path('lscpu')
if lscpu:
rc, out, err = module.run_command(["lscpu"])
if rc == 0:
for line in out.split("\n"):
data = line.split(":", 1)
key = data[0].strip()
if key == 'Hypervisor':
self.facts['virtualization_type'] = data[1].strip()
else:
self.facts['virtualization_type'] = 'ibm_systemz'
else:
continue
if self.facts['virtualization_type'] == 'PR/SM':
self.facts['virtualization_role'] = 'LPAR'
else:
self.facts['virtualization_role'] = 'guest'
return
# Beware that we can have both kvm and virtualbox running on a single system
if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
modules = []
for line in open("/proc/modules").readlines():
data = line.split(" ", 1)
modules.append(data[0])
if 'kvm' in modules:
self.facts['virtualization_type'] = 'kvm'
self.facts['virtualization_role'] = 'host'
return
if 'vboxdrv' in modules:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'host'
return
# If none of the above matches, return 'NA' for virtualization_type
# and virtualization_role. This allows for proper grouping.
self.facts['virtualization_type'] = 'NA'
self.facts['virtualization_role'] = 'NA'
return
class HPUXVirtual(Virtual):
"""
This is a HP-UX specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
"""
platform = 'HP-UX'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
if os.path.exists('/usr/sbin/vecheck'):
rc, out, err = module.run_command("/usr/sbin/vecheck")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP vPar'
if os.path.exists('/opt/hpvm/bin/hpvminfo'):
rc, out, err = module.run_command("/opt/hpvm/bin/hpvminfo")
if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM vPar'
elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HPVM IVM'
elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
self.facts['virtualization_type'] = 'host'
self.facts['virtualization_role'] = 'HPVM'
if os.path.exists('/usr/sbin/parstatus'):
rc, out, err = module.run_command("/usr/sbin/parstatus")
if rc == 0:
self.facts['virtualization_type'] = 'guest'
self.facts['virtualization_role'] = 'HP nPar'
class SunOSVirtual(Virtual):
"""
This is a SunOS-specific subclass of Virtual. It defines
- virtualization_type
- virtualization_role
- container
"""
platform = 'SunOS'
def __init__(self):
Virtual.__init__(self)
def populate(self):
self.get_virtual_facts()
return self.facts
def get_virtual_facts(self):
rc, out, err = module.run_command("/usr/sbin/prtdiag")
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'Parallels' in line:
self.facts['virtualization_type'] = 'parallels'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
if 'HVM domU' in line:
self.facts['virtualization_type'] = 'xen'
self.facts['virtualization_role'] = 'guest'
# Check if it's a zone
if os.path.exists("/usr/bin/zonename"):
rc, out, err = module.run_command("/usr/bin/zonename")
if out.rstrip() != "global":
self.facts['container'] = 'zone'
# Check if it's a branded zone (i.e. Solaris 8/9 zone)
if os.path.isdir('/.SUNWnative'):
self.facts['container'] = 'zone'
# If it's a zone check if we can detect if our global zone is itself virtualized.
# Relies on the "guest tools" (e.g. vmware tools) to be installed
if 'container' in self.facts and self.facts['container'] == 'zone':
rc, out, err = module.run_command("/usr/sbin/modinfo")
for line in out.split('\n'):
if 'VMware' in line:
self.facts['virtualization_type'] = 'vmware'
self.facts['virtualization_role'] = 'guest'
if 'VirtualBox' in line:
self.facts['virtualization_type'] = 'virtualbox'
self.facts['virtualization_role'] = 'guest'
def get_file_content(path, default=None):
data = default
if os.path.exists(path) and os.access(path, os.R_OK):
data = open(path).read().strip()
if len(data) == 0:
data = default
return data
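# Illustrative usage: get_file_content('/proc/sys/kernel/hostname', default='unknown')
# returns the stripped file contents, or the default when the file is missing, unreadable or empty.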
def ansible_facts(module):
facts = {}
facts.update(Facts().populate())
facts.update(Hardware().populate())
facts.update(Network(module).populate())
facts.update(Virtual().populate())
return facts
# ===========================================
def get_all_facts(module):
setup_options = dict(module_setup=True)
facts = ansible_facts(module)
for (k, v) in facts.items():
setup_options["ansible_%s" % k.replace('-', '_')] = v
# Look for the path to the facter and ohai binary and set
# the variable to that path.
facter_path = module.get_bin_path('facter')
ohai_path = module.get_bin_path('ohai')
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
if facter_path is not None:
rc, out, err = module.run_command(facter_path + " --json")
facter = True
try:
facter_ds = json.loads(out)
except:
facter = False
if facter:
for (k,v) in facter_ds.items():
setup_options["facter_%s" % k] = v
# ditto for ohai
if ohai_path is not None:
rc, out, err = module.run_command(ohai_path)
ohai = True
try:
ohai_ds = json.loads(out)
except:
ohai = False
if ohai:
for (k,v) in ohai_ds.items():
k2 = "ohai_%s" % k.replace('-', '_')
setup_options[k2] = v
setup_result = { 'ansible_facts': {} }
for (k,v) in setup_options.items():
if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
setup_result['ansible_facts'][k] = v
# hack to keep --verbose from showing all the setup module results
setup_result['verbose_override'] = True
return setup_result
| gpl-3.0 |
minxuancao/shogun | examples/undocumented/python_modular/features_string_char_compressed_modular.py | 24 | 3676 | #!/usr/bin/env python
parameter_list = [['features_string_char_compressed_modular.py']]
def features_string_char_compressed_modular (fname):
from modshogun import StringCharFeatures, StringFileCharFeatures, RAWBYTE
from modshogun import UNCOMPRESSED,SNAPPY,LZO,GZIP,BZIP2,LZMA, MSG_DEBUG
from modshogun import DecompressCharString
f=StringFileCharFeatures(fname, RAWBYTE)
#print("original strings", f.get_features())
#uncompressed
f.save_compressed("tmp/foo_uncompressed.str", UNCOMPRESSED, 1)
f2=StringCharFeatures(RAWBYTE);
f2.load_compressed("tmp/foo_uncompressed.str", True)
#print("uncompressed strings", f2.get_features())
#print
# load compressed data and uncompress on load
#snappy - not stable yet?!
#f.save_compressed("tmp/foo_snappy.str", SNAPPY, 9)
#f2=StringCharFeatures(RAWBYTE);
#f2.load_compressed("tmp/foo_snappy.str", True)
#print("snappy strings", f2.get_features())
#print
#lzo
f.save_compressed("tmp/foo_lzo.str", LZO, 9)
f2=StringCharFeatures(RAWBYTE);
f2.load_compressed("tmp/foo_lzo.str", True)
#print("lzo strings", f2.get_features())
#print
##gzip
f.save_compressed("tmp/foo_gzip.str", GZIP, 9)
f2=StringCharFeatures(RAWBYTE);
f2.load_compressed("tmp/foo_gzip.str", True)
#print("gzip strings", f2.get_features())
#print
#bzip2
f.save_compressed("tmp/foo_bzip2.str", BZIP2, 9)
f2=StringCharFeatures(RAWBYTE);
f2.load_compressed("tmp/foo_bzip2.str", True)
#print("bzip2 strings", f2.get_features())
#print
#lzma
f.save_compressed("tmp/foo_lzma.str", LZMA, 9)
f2=StringCharFeatures(RAWBYTE);
f2.load_compressed("tmp/foo_lzma.str", True)
#print("lzma strings", f2.get_features())
#print
# load compressed data and uncompress via preprocessor
f2=StringCharFeatures(RAWBYTE);
f2.load_compressed("tmp/foo_lzo.str", False)
f2.add_preprocessor(DecompressCharString(LZO))
f2.apply_preprocessor()
#print("lzo strings", f2.get_features())
#print
# load compressed data and uncompress on-the-fly via preprocessor
f2=StringCharFeatures(RAWBYTE);
f2.load_compressed("tmp/foo_lzo.str", False)
#f2.io.set_loglevel(MSG_DEBUG)
f2.add_preprocessor(DecompressCharString(LZO))
f2.enable_on_the_fly_preprocessing()
#print("lzo strings", f2.get_features())
#print
#clean up
import os
for f in ['tmp/foo_uncompressed.str', 'tmp/foo_snappy.str', 'tmp/foo_lzo.str', 'tmp/foo_gzip.str',
'tmp/foo_bzip2.str', 'tmp/foo_lzma.str', 'tmp/foo_lzo.str', 'tmp/foo_lzo.str']:
if os.path.exists(f):
os.unlink(f)
##########################################################################################
# some perfectly compressible stuff follows
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
##########################################################################################
if __name__=='__main__':
print('Compressing StringCharFileFeatures')
features_string_char_compressed_modular(*parameter_list[0])
| gpl-3.0 |
jmahler/linux-next | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data has a big number of samples, then the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to RAM based FS to speedup
# the handling, which will cut the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
print "In trace_begin:\n"
#
# Will create several tables at the start, pebs_ll is for PEBS data with
# load latency info, while gen_events is for general event.
#
con.execute("""
create table if not exists gen_events (
name text,
symbol text,
comm text,
dso text
);""")
con.execute("""
create table if not exists pebs_ll (
name text,
symbol text,
comm text,
dso text,
flags integer,
ip integer,
status integer,
dse integer,
dla integer,
lat integer
);""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
if (param_dict.has_key("dso")):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
if (param_dict.has_key("symbol")):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
# Create the event object and insert it to the right table in database
event = create_event(name, comm, dso, symbol, raw_buf)
insert_db(event)
def insert_db(event):
if event.ev_type == EVTYPE_GENERIC:
con.execute("insert into gen_events values(?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso))
elif event.ev_type == EVTYPE_PEBS_LL:
event.ip &= 0x7fffffffffffffff
event.dla &= 0x7fffffffffffffff
con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso, event.flags,
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
print "In trace_end:\n"
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
con.close()
#
# As the event number may be very big, so we can't use linear way
# to show the histogram in real number, but use a log2 algorithm.
#
def num2sym(num):
# Each number will have at least one '#'
snum = '#' * (int)(math.log(num, 2) + 1)
return snum
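# Worked example: num2sym(1) yields '#', while num2sym(10) yields '####' since int(log2(10)) + 1 == 4.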
def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
print "There is %d records in gen_events table" % t[0]
if t[0] == 0:
return
print "Statistics about the general events grouped by thread/symbol/dso: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dso
print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
print "There is %d records in pebs_ll table" % t[0]
if t[0] == 0:
return
print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
for row in dseq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
for row in latq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 |
NullNoname/dolphin | Externals/scons-local/scons-local-2.0.1/SCons/Tool/f90.py | 61 | 2041 | """engine.SCons.Tool.f90
Tool-specific initialization for the generic Posix f90 Fortran compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/f90.py 5134 2010/08/16 23:02:40 bdeegan"
import SCons.Defaults
import SCons.Scanner.Fortran
import SCons.Tool
import SCons.Util
from SCons.Tool.FortranCommon import add_all_to_env, add_f90_to_env
compilers = ['f90']
def generate(env):
add_all_to_env(env)
add_f90_to_env(env)
fc = env.Detect(compilers) or 'f90'
env['F90'] = fc
env['SHF90'] = fc
env['FORTRAN'] = fc
env['SHFORTRAN'] = fc
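# Illustrative use from an SConstruct: Environment(tools=['f90']) runs generate() and picks up
# the first f90 compiler found on the PATH, falling back to plain 'f90'.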
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
tst2005googlecode/shcov | setup.py | 2 | 1262 | ######################################################################
##
## Copyright (C) 2008, Simon Kagstrom
##
## Filename: setup.py
## Author: Simon Kagstrom <[email protected]>
## Description: Installation script (from Dissy)
##
## $Id:$
##
######################################################################
import sys
sys.path.append(".")
from shcov import config
from distutils.core import setup
setup(name='%s' % (config.PROGRAM_NAME).lower(),
version='%s' % (config.PROGRAM_VERSION),
description="A gcov and lcov coverage test tool for bourne shell / bash scripts",
author="Simon Kagstrom",
url="%s" % (config.PROGRAM_URL),
author_email="[email protected]",
packages = ['shcov'],
scripts = ['scripts/shcov', 'scripts/shlcov'],
data_files = [('share/%s/data' % (config.PROGRAM_NAME.lower()),
['data/amber.png', 'data/gcov.css', 'data/ruby.png',
'data/emerald.png', 'data/glass.png', 'data/snow.png', ]),
('share/doc/%s/' % (config.PROGRAM_NAME.lower()), ['README']),
('share/doc/%s/' % (config.PROGRAM_NAME.lower()), ['COPYING']),
('share/man/man1/', ['shcov.1', 'shlcov.1']),
],
)
| gpl-2.0 |
poffuomo/spark | examples/src/main/python/sql/basic.py | 56 | 6270 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on:init_session$
from pyspark.sql import SparkSession
# $example off:init_session$
# $example on:schema_inferring$
from pyspark.sql import Row
# $example off:schema_inferring$
# $example on:programmatic_schema$
# Import data types
from pyspark.sql.types import *
# $example off:programmatic_schema$
"""
A simple example demonstrating basic Spark SQL features.
Run with:
./bin/spark-submit examples/src/main/python/sql/basic.py
"""
def basic_df_example(spark):
# $example on:create_df$
# spark is an existing SparkSession
df = spark.read.json("examples/src/main/resources/people.json")
# Displays the content of the DataFrame to stdout
df.show()
# +----+-------+
# | age| name|
# +----+-------+
# |null|Michael|
# | 30| Andy|
# | 19| Justin|
# +----+-------+
# $example off:create_df$
# $example on:untyped_ops$
# spark, df are from the previous example
# Print the schema in a tree format
df.printSchema()
# root
# |-- age: long (nullable = true)
# |-- name: string (nullable = true)
# Select only the "name" column
df.select("name").show()
# +-------+
# | name|
# +-------+
# |Michael|
# | Andy|
# | Justin|
# +-------+
# Select everybody, but increment the age by 1
df.select(df['name'], df['age'] + 1).show()
# +-------+---------+
# | name|(age + 1)|
# +-------+---------+
# |Michael| null|
# | Andy| 31|
# | Justin| 20|
# +-------+---------+
# Select people older than 21
df.filter(df['age'] > 21).show()
# +---+----+
# |age|name|
# +---+----+
# | 30|Andy|
# +---+----+
# Count people by age
df.groupBy("age").count().show()
# +----+-----+
# | age|count|
# +----+-----+
# | 19| 1|
# |null| 1|
# | 30| 1|
# +----+-----+
# $example off:untyped_ops$
# $example on:run_sql$
# Register the DataFrame as a SQL temporary view
df.createOrReplaceTempView("people")
sqlDF = spark.sql("SELECT * FROM people")
sqlDF.show()
# +----+-------+
# | age| name|
# +----+-------+
# |null|Michael|
# | 30| Andy|
# | 19| Justin|
# +----+-------+
# $example off:run_sql$
# $example on:global_temp_view$
# Register the DataFrame as a global temporary view
df.createGlobalTempView("people")
# Global temporary view is tied to a system preserved database `global_temp`
spark.sql("SELECT * FROM global_temp.people").show()
# +----+-------+
# | age| name|
# +----+-------+
# |null|Michael|
# | 30| Andy|
# | 19| Justin|
# +----+-------+
# Global temporary view is cross-session
spark.newSession().sql("SELECT * FROM global_temp.people").show()
# +----+-------+
# | age| name|
# +----+-------+
# |null|Michael|
# | 30| Andy|
# | 19| Justin|
# +----+-------+
# $example off:global_temp_view$
def schema_inference_example(spark):
# $example on:schema_inferring$
sc = spark.sparkContext
# Load a text file and convert each line to a Row.
lines = sc.textFile("examples/src/main/resources/people.txt")
parts = lines.map(lambda l: l.split(","))
people = parts.map(lambda p: Row(name=p[0], age=int(p[1])))
# Infer the schema, and register the DataFrame as a table.
schemaPeople = spark.createDataFrame(people)
schemaPeople.createOrReplaceTempView("people")
# SQL can be run over DataFrames that have been registered as a table.
teenagers = spark.sql("SELECT name FROM people WHERE age >= 13 AND age <= 19")
# The results of SQL queries are Dataframe objects.
# rdd returns the content as an :class:`pyspark.RDD` of :class:`Row`.
teenNames = teenagers.rdd.map(lambda p: "Name: " + p.name).collect()
for name in teenNames:
print(name)
# Name: Justin
# $example off:schema_inferring$
def programmatic_schema_example(spark):
# $example on:programmatic_schema$
sc = spark.sparkContext
# Load a text file and convert each line to a Row.
lines = sc.textFile("examples/src/main/resources/people.txt")
parts = lines.map(lambda l: l.split(","))
# Each line is converted to a tuple.
people = parts.map(lambda p: (p[0], p[1].strip()))
# The schema is encoded in a string.
schemaString = "name age"
fields = [StructField(field_name, StringType(), True) for field_name in schemaString.split()]
schema = StructType(fields)
# Apply the schema to the RDD.
schemaPeople = spark.createDataFrame(people, schema)
# Creates a temporary view using the DataFrame
schemaPeople.createOrReplaceTempView("people")
# SQL can be run over DataFrames that have been registered as a table.
results = spark.sql("SELECT name FROM people")
results.show()
# +-------+
# | name|
# +-------+
# |Michael|
# | Andy|
# | Justin|
# +-------+
# $example off:programmatic_schema$
if __name__ == "__main__":
# $example on:init_session$
spark = SparkSession \
.builder \
.appName("Python Spark SQL basic example") \
.config("spark.some.config.option", "some-value") \
.getOrCreate()
# $example off:init_session$
basic_df_example(spark)
schema_inference_example(spark)
programmatic_schema_example(spark)
spark.stop()
| apache-2.0 |
nwchandler/ansible | lib/ansible/modules/monitoring/zabbix_maintenance.py | 35 | 12164 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Alexander Bulimov <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: zabbix_maintenance
short_description: Create Zabbix maintenance windows
description:
- This module will let you create Zabbix maintenance windows.
version_added: "1.8"
author: "Alexander Bulimov (@abulimov)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
state:
description:
- Create or remove a maintenance window.
required: false
default: present
choices: [ "present", "absent" ]
server_url:
description:
- Url of Zabbix server, with protocol (http or https).
C(url) is an alias for C(server_url).
required: true
default: null
aliases: [ "url" ]
login_user:
description:
- Zabbix user name.
required: true
login_password:
description:
- Zabbix user password.
required: true
http_login_user:
description:
- Basic Auth login
required: false
default: None
version_added: "2.1"
http_login_password:
description:
- Basic Auth password
required: false
default: None
version_added: "2.1"
host_names:
description:
- Hosts to manage maintenance window for.
Separate multiple hosts with commas.
C(host_name) is an alias for C(host_names).
B(Required) option when C(state) is I(present)
and no C(host_groups) specified.
required: false
default: null
aliases: [ "host_name" ]
host_groups:
description:
- Host groups to manage maintenance window for.
Separate multiple groups with commas.
C(host_group) is an alias for C(host_groups).
B(Required) option when C(state) is I(present)
and no C(host_names) specified.
required: false
default: null
aliases: [ "host_group" ]
minutes:
description:
- Length of maintenance window in minutes.
required: false
default: 10
name:
description:
- Unique name of maintenance window.
required: true
desc:
description:
- Short description of maintenance window.
required: false
default: Created by Ansible
collect_data:
description:
- Type of maintenance. With data collection, or without.
required: false
default: "true"
timeout:
description:
- The timeout of API request (seconds).
default: 10
version_added: "2.1"
required: false
notes:
- Useful for setting hosts in maintenance mode before big update,
and removing maintenance window after update.
- Module creates maintenance window from now() to now() + minutes,
so if Zabbix server's time and host's time are not synchronized,
you will get strange results.
- Install required module with 'pip install zabbix-api' command.
- Checks existence only by maintenance name.
'''
EXAMPLES = '''
- name: Create a named maintenance window for host www1 for 90 minutes
zabbix_maintenance:
name: Update of www1
host_name: www1.example.com
state: present
minutes: 90
server_url: https://monitoring.example.com
login_user: ansible
login_password: pAsSwOrD
- name: Create a named maintenance window for host www1 and host groups Office and Dev
zabbix_maintenance:
name: Update of www1
host_name: www1.example.com
host_groups:
- Office
- Dev
state: present
server_url: https://monitoring.example.com
login_user: ansible
login_password: pAsSwOrD
- name: Create a named maintenance window for hosts www1 and db1, without data collection.
zabbix_maintenance:
name: update
host_names:
- www1.example.com
- db1.example.com
state: present
collect_data: False
server_url: https://monitoring.example.com
login_user: ansible
login_password: pAsSwOrD
- name: Remove maintenance window by name
zabbix_maintenance:
name: Test1
state: absent
server_url: https://monitoring.example.com
login_user: ansible
login_password: pAsSwOrD
'''
import datetime
import time
try:
from zabbix_api import ZabbixAPI
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
def create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc):
end_time = start_time + period
try:
zbx.maintenance.create(
{
"groupids": group_ids,
"hostids": host_ids,
"name": name,
"maintenance_type": maintenance_type,
"active_since": str(start_time),
"active_till": str(end_time),
"description": desc,
"timeperiods": [{
"timeperiod_type": "0",
"start_date": str(start_time),
"period": str(period),
}]
}
)
except BaseException as e:
return 1, None, str(e)
return 0, None, None
def get_maintenance_id(zbx, name):
try:
result = zbx.maintenance.get(
{
"filter":
{
"name": name,
}
}
)
except BaseException as e:
return 1, None, str(e)
maintenance_ids = []
for res in result:
maintenance_ids.append(res["maintenanceid"])
return 0, maintenance_ids, None
def delete_maintenance(zbx, maintenance_id):
try:
zbx.maintenance.delete(maintenance_id)
except BaseException as e:
return 1, None, str(e)
return 0, None, None
def get_group_ids(zbx, host_groups):
group_ids = []
for group in host_groups:
try:
result = zbx.hostgroup.get(
{
"output": "extend",
"filter":
{
"name": group
}
}
)
except BaseException as e:
return 1, None, str(e)
if not result:
return 1, None, "Group id for group %s not found" % group
group_ids.append(result[0]["groupid"])
return 0, group_ids, None
def get_host_ids(zbx, host_names):
host_ids = []
for host in host_names:
try:
result = zbx.host.get(
{
"output": "extend",
"filter":
{
"name": host
}
}
)
except BaseException as e:
return 1, None, str(e)
if not result:
return 1, None, "Host id for host %s not found" % host
host_ids.append(result[0]["hostid"])
return 0, host_ids, None
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(required=False, default='present', choices=['present', 'absent']),
server_url=dict(type='str', required=True, default=None, aliases=['url']),
host_names=dict(type='list', required=False, default=None, aliases=['host_name']),
minutes=dict(type='int', required=False, default=10),
host_groups=dict(type='list', required=False, default=None, aliases=['host_group']),
login_user=dict(type='str', required=True),
login_password=dict(type='str', required=True, no_log=True),
http_login_user=dict(type='str', required=False, default=None),
http_login_password=dict(type='str', required=False, default=None, no_log=True),
name=dict(type='str', required=True),
desc=dict(type='str', required=False, default="Created by Ansible"),
collect_data=dict(type='bool', required=False, default=True),
timeout=dict(type='int', default=10),
),
supports_check_mode=True,
)
if not HAS_ZABBIX_API:
module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
host_names = module.params['host_names']
host_groups = module.params['host_groups']
state = module.params['state']
login_user = module.params['login_user']
login_password = module.params['login_password']
http_login_user = module.params['http_login_user']
http_login_password = module.params['http_login_password']
minutes = module.params['minutes']
name = module.params['name']
desc = module.params['desc']
server_url = module.params['server_url']
collect_data = module.params['collect_data']
timeout = module.params['timeout']
if collect_data:
maintenance_type = 0
else:
maintenance_type = 1
try:
zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
zbx.login(login_user, login_password)
except BaseException as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
changed = False
if state == "present":
now = datetime.datetime.now()
start_time = time.mktime(now.timetuple())
period = 60 * int(minutes) # N * 60 seconds
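# e.g. minutes=90 gives period=5400, so the window spans start_time to start_time + 5400 seconds.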
if host_groups:
(rc, group_ids, error) = get_group_ids(zbx, host_groups)
if rc != 0:
module.fail_json(msg="Failed to get group_ids: %s" % error)
else:
group_ids = []
if host_names:
(rc, host_ids, error) = get_host_ids(zbx, host_names)
if rc != 0:
module.fail_json(msg="Failed to get host_ids: %s" % error)
else:
host_ids = []
(rc, maintenance, error) = get_maintenance_id(zbx, name)
if rc != 0:
module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error))
if not maintenance:
if not host_names and not host_groups:
module.fail_json(msg="At least one host_name or host_group must be defined for each created maintenance.")
if module.check_mode:
changed = True
else:
(rc, _, error) = create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc)
if rc == 0:
changed = True
else:
module.fail_json(msg="Failed to create maintenance: %s" % error)
if state == "absent":
(rc, maintenance, error) = get_maintenance_id(zbx, name)
if rc != 0:
module.fail_json(msg="Failed to check maintenance %s existence: %s" % (name, error))
if maintenance:
if module.check_mode:
changed = True
else:
(rc, _, error) = delete_maintenance(zbx, maintenance)
if rc == 0:
changed = True
else:
module.fail_json(msg="Failed to remove maintenance: %s" % error)
module.exit_json(changed=changed)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
newmanne/kaggle_clickthrough_rate | hash_to_int.py | 1 | 3078 | import os
from collections import Counter
import numpy as np
import pandas as pd
COLS_TO_TRANLSATE = "site_id,site_domain,site_category,app_id,app_domain,app_category,device_id,device_ip,device_model,C1,C14,C15,C16,C17,C18,C19,C20,C21".split(",")
COLS = "id,click,day,hour,C1,banner_pos,site_id,site_domain,site_category,app_id,app_domain,app_category,device_id,device_ip,device_model,device_type,device_conn_type,C14,C15,C16,C17,C18,C19,C20,C21".split(",")
TRAIN_DATA = "../train.csv"
TEST_DATA = "../test.csv"
TRAIN_MERGED_DATA = '../merged_train.csv'
TEST_MERGED_DATA = "../merged_test.csv"
COUNTERS_FILE = '../counters.txt'
CHUNK_SIZE = 1000000
THRESH = 100
def create_counters():
counters = {}
for col in COLS_TO_TRANLSATE:
counters[col] = Counter()
return counters
def update_counter(path,counters):
df = pd.read_csv(path,chunksize=CHUNK_SIZE,iterator=True)
for chunk in df:
for col in COLS_TO_TRANLSATE:
counters[col].update(chunk.ix[:,col])
print chunk.id.max()
def convert_counts_to_id(counters):
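# Values seen more than THRESH times keep their own id, ranked by frequency; every value at or
# below the threshold in a column collapses onto one shared id (the rank of the first rare value).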
ids = {}
for col in COLS_TO_TRANLSATE:
ids[col] = {}
imax = 0
highest_seen = 0
for i,(val,count) in enumerate(counters[col].most_common()):
if imax == 0 and count <= THRESH:
imax = i
index = i if count > THRESH else imax
highest_seen = max(index, highest_seen)
ids[col][val] = index
print "Col " + col + " highest seen is " + str(highest_seen)
return ids
def write_translated(input_path,output_path,ids,mode="w",start_id=0):
df = pd.read_csv(input_path,chunksize=CHUNK_SIZE,iterator=True)
for i, chunk in enumerate(df):
for col in COLS_TO_TRANLSATE:
chunk.ix[:,col] = chunk.ix[:,col].map(ids[col])
chunk["id"] = np.arange(chunk.shape[0]) + i*CHUNK_SIZE + 1 + start_id
chunk["day"] = chunk["hour"].map(lambda v: int(str(v)[-4:-2]))
chunk["hour"] = chunk["hour"].map(lambda v: int(str(v)[-2:]))
if "click" not in chunk.columns:
chunk["click"] = 0
chunk = chunk.ix[:,COLS]
if i == 0 and mode == "w":
chunk.to_csv(output_path,index=False)
else:
chunk.to_csv(output_path,index=False,mode="a",header=False)
print chunk.id.max()
return chunk.id.max()
def write_counters(file_path, counters):
with open(file_path, 'w') as outfile:
for counter in counters.values():
outfile.write('%s\n' % (counter))
if __name__ == "__main__":
counters = create_counters()
update_counter(TRAIN_DATA,counters)
update_counter(TEST_DATA,counters)
write_counters(COUNTERS_FILE, counters)
ids = convert_counts_to_id(counters)
max_id = write_translated(TRAIN_DATA,TRAIN_MERGED_DATA,ids)
_ = write_translated(TRAIN_DATA,TEST_MERGED_DATA,ids, start_id=max_id)
max_id = write_translated(TEST_DATA,TEST_MERGED_DATA,ids)
_ = write_translated(TEST_DATA,TEST_MERGED_DATA,ids, start_id=max_id)
| apache-2.0 |
sharifulgeo/google-appengine-wx-launcher | launcher/addnew_controller.py | 28 | 6957 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Controller (MVC) for the add new (project) dialog.
A Google App Engine Application is called a 'project' internally to
the launcher to prevent confusion. Class App is for the launcher
application itself, and class Project is for an App Engine
Application (a 'project')."""
import fileinput
import os
import shutil
import wx
import addexisting_controller
import launcher
from wxgladegen import project_dialogs
class AddNewController(addexisting_controller.AddExistingController):
"""Controller for an Add New Project dialog.
The controller is responsible for displaying the dialog, filling it
in, and (if not cancelled) reading data back. It will then create a
new project (on disk), and can return a new launcher.Project for it.
This is much like AddExisting, except:
- we can specify the name explicitly
- we actually create the project on disk
"""
# Error string if runtime not happy
_NO_SDK_STRING = """
Cannot create a new project. App Engine SDK not found.
Please install the App Engine SDK, or set its location in the Preferences.
Preferences can be edited from the Edit -> Preferences menu.
"""
def __init__(self):
"""Init the base class, but specify our extended dialog."""
add_new_project_dialog = project_dialogs.AddNewProjectDialog
super(AddNewController, self).__init__(add_new_project_dialog(None))
self._SetDefaults()
def _SetDefaults(self):
"""Set some default values for a new project.
The idea is that a simple YES will work for these default values.
"""
wxsp = wx.StandardPaths.Get()
docdir = wxsp.GetDocumentsDir()
self.SetPath(docdir)
newname = self._NewProjectNameInDirectory(docdir)
self.SetName(newname)
def _NewProjectNameInDirectory(self, dirname):
"""Return a unique name for a project in the directory.
Args:
dirname: parent directory to inspect
Returns:
A unique name for a project (App) in the directory.
The name is NOT fully-qualified!
"""
existing_files = os.listdir(dirname)
newname = 'engineapp' # what's a good default name?
x = 1
while newname in existing_files:
newname = 'engineapp-%d' % x
x += 1
return newname
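# e.g. returns 'engineapp' when that name is unused in dirname, otherwise 'engineapp-1', 'engineapp-2', ...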
def _BrowseForDirectory(self, evt):
"""Browse for a parent directory for the app, then set this in the dialog.
Called directly from UI. Override of AddExisting behavior (new message,
don't require it to already exist).
"""
message = 'Pick a parent directory for the new App Engine App'
path = self.GetPath() # parent directory of project
if not os.path.exists(path):
path = ''
dirname = wx.DirSelector(message=message,
defaultPath=path)
if dirname:
self.SetPath(dirname)
def SetName(self, name):
"""Set the project name in the dialog."""
self.dialog.app_name_text_ctrl.SetValue(name)
def GetName(self):
"""Get the project name from the dialog."""
return self.dialog.app_name_text_ctrl.GetValue()
def _SanityCheckName(self, name):
"""Sanity check the name (presumably taken from the dialog).
Args:
name: a project name to check.
Returns:
True if we should make a project from these values.
"""
if not name:
self.FailureMessage('Name missing or empty; cannot make project.',
'Add New Application')
return False
return True
def _SanityCheckPathDoesNotExist(self, path):
"""Make sure path does not exist.
Args:
path: path to check
Returns:
True if path does NOT exist.
"""
if os.path.exists(path):
self.FailureMessage('Name invalid (already exists)',
'Add New Application')
return False
return True
def _NewProjectTemplate(self, preferences=None):
"""Return the new project template directory.
Args:
preferences: the preference object to use to find our App Engine SDK.
If None, a default is chosen.
Returns:
A directory name for the new project template.
(Its correctness isn't verified.)
"""
preferences = preferences or launcher.Preferences()
basedir = preferences[launcher.Preferences.PREF_APPENGINE]
if not basedir:
self.FailureMessage(self._NO_SDK_STRING,
'Create new Project');
return
templatedir = os.path.join(basedir, 'new_project_template')
return templatedir
def _CreateProjectOnDisk(self, newpath, name):
"""Using template files, actually create a project on disk.
Assumes the path (a directory) already exists.
Args:
newpath: directory for the project (to be created)
name: name to put in the project's app.yaml
Returns:
True if successful.
"""
new_project_template = self._NewProjectTemplate()
try:
shutil.copytree(new_project_template, newpath)
except OSError:
self.FailureMessage(('Cannot copy template files from %s to %s' %
(new_project_template, newpath)),
'Create New Project')
return False
# Set the project name in the app.yaml.
# module fileinput magically sets stdout when inplace=1 (see the
# docs), which makes the logic of these two statements hard to
# figure out just by looking. Also note trailing comma in print
# command so we don't add a 2nd newline. Quite perlrific!
for line in fileinput.input(os.path.join(newpath, 'app.yaml'), inplace=1):
print line.replace('new-project-template', name),
return True
def Project(self):
"""Return a project created from interaction with this dialog.
Returns:
a launcher.Project, or None.
Side effect:
Actually creates the project on disk from the template files.
"""
if self._dialog_return_value != wx.ID_OK:
return None
path = self.GetPath() # parent directory of project
port = self.GetPort()
name = self.GetName()
if not (self._SanityCheckPort(port) and
self._SanityCheckPath(path, check_contents=False)):
return None
newpath = os.path.join(path, name)
if not self._SanityCheckPathDoesNotExist(newpath):
return None
if not self._CreateProjectOnDisk(newpath, name):
return None
return launcher.Project(newpath, port)
| apache-2.0 |
weboo/kernel-nexus-s | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 802 | 2710 | # Core.py - Python extension for perf trace, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
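# Illustrative example: trace_flag_str(0x01 | 0x04) returns a string containing both "IRQS_OFF" and "NEED_RESCHED".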
| gpl-2.0 |
UO-CAES/paparazzi | sw/tools/parrot/bebop.py | 26 | 8380 | #!/usr/bin/env python
#
# Copyright (C) 2012-2014 The Paparazzi Team
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with paparazzi; see the file COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import re
import argparse
import os
from time import sleep
import parrot_utils
# Read from config.ini TODO
def read_from_config(name, config=''):
if config == '':
config = parrot_utils.execute_command('cat /data/config.ini')
search = re.search(name + '[^=]+=[\r\n\t ]([^\r\n\t ]+)', config)
if search is None:
return ''
else:
return search.group(1)
# Write to config TODO
def write_to_config(name, value):
if read_from_config(name) == '':
parrot_utils.execute_command('echo "' + name + ' = ' + value + '\" >> /data/config.ini')
else:
parrot_utils.execute_command('sed -i "s/\(' + name + ' *= *\).*/\\1' + value + '/g" /data/config.ini')
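# Note: unlike the rest of this script, these TODO helpers call parrot_utils.execute_command
# without the telnet handle, so treat them as sketches of the intended config.ini handling.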
def bebop_status():
#config_ini = parrot_utils.execute_command(tn, 'cat /data/config.ini')
print('======================== Bebop Status ========================')
print('Version:\t\t' + parrot_utils.check_version(tn, ''))
# Request the filesystem status
print('\n=================== Filesystem Status =======================')
print(parrot_utils.check_filesystem(tn))
# Parse the arguments
parser = argparse.ArgumentParser(description='Bebop python helper. Use bebop.py -h for help')
parser.add_argument('--host', metavar='HOST', default='192.168.42.1',
help='the ip address of bebop')
subparsers = parser.add_subparsers(title='Command to execute', metavar='command', dest='command')
# All the subcommands and arguments
subparsers.add_parser('status', help='Request the status of the Bebop')
subparsers.add_parser('reboot', help='Reboot the Bebop')
subparser_upload_and_run = subparsers.add_parser('upload_file_and_run', help='Upload and run software (for instance the Paparazzi autopilot)')
subparser_upload_and_run.add_argument('file', help='Filename of an executable')
subparser_upload_and_run.add_argument('folder', help='Destination subfolder (raw for Paparazzi autopilot)')
subparser_upload = subparsers.add_parser('upload_file', help='Upload a file to the Bebop')
subparser_upload.add_argument('file', help='Filename')
subparser_upload.add_argument('folder', help='Destination subfolder (base destination folder is /data/ftp)')
subparser_download = subparsers.add_parser('download_file', help='Download a file from the Bebop')
subparser_download.add_argument('file', help='Filename (with the path on the local machine)')
subparser_download.add_argument('folder', help='Remote subfolder (base folder is /data/ftp)')
subparser_download_dir = subparsers.add_parser('download_dir', help='Download all files from a folder from the Bebop')
subparser_download_dir.add_argument('dest', help='destination folder (on the local machine)')
subparser_download_dir.add_argument('folder', help='Remote subfolder (base folder is /data/ftp)')
subparser_rm_dir = subparsers.add_parser('rm_dir', help='Remove a directory and all its files from the Bebop')
subparser_rm_dir.add_argument('folder', help='Remote subfolder (base folder is /data/ftp)')
subparser_insmod = subparsers.add_parser('insmod', help='Upload and insert kernel module')
subparser_insmod.add_argument('file', help='Filename of *.ko kernel module')
subparser_start = subparsers.add_parser('start', help='Start a program on the Bebop')
subparser_start.add_argument('program', help='the program to start')
subparser_kill = subparsers.add_parser('kill', help='Kill a program on the Bebop')
subparser_kill.add_argument('program', help='the program to kill')
args = parser.parse_args()
# Connect with telnet and ftp
tn, ftp = parrot_utils.connect(args.host)
# Check the Bebop status
if args.command == 'status':
print("Connected to Bebop at " + args.host)
bebop_status()
# Reboot the drone
elif args.command == 'reboot':
parrot_utils.reboot(tn)
print('The Bebop is rebooting...')
# Kill a program
elif args.command == 'kill':
parrot_utils.execute_command(tn, 'killall -9 ' + args.program)
print('Program "' + args.program + '" is now killed')
# Start a program
elif args.command == 'start':
parrot_utils.execute_command(tn, args.program + ' &')
print('Program "' + args.program + '" is now started')
elif args.command == 'insmod':
modfile = parrot_utils.split_into_path_and_file(args.file)
print('Uploading \'' + modfile[1])
parrot_utils.uploadfile(ftp, modfile[1], file(args.file, "rb"))
print(parrot_utils.execute_command(tn, "insmod /data/ftp/" + modfile[1]))
elif args.command == 'upload_file_and_run':
# Split filename and path
f = parrot_utils.split_into_path_and_file(args.file)
print("Kill running " + f[1] + " and make folder " + args.folder)
parrot_utils.execute_command(tn,"killall -9 " + f[1])
sleep(1)
parrot_utils.execute_command(tn, "mkdir -p /data/ftp/" + args.folder)
print('Uploading \'' + f[1] + "\' from " + f[0] + " to " + args.folder)
parrot_utils.uploadfile(ftp, args.folder + "/" + f[1], file(args.file, "rb"))
sleep(0.5)
parrot_utils.execute_command(tn, "chmod 777 /data/ftp/" + args.folder + "/" + f[1])
parrot_utils.execute_command(tn, "/data/ftp/" + args.folder + "/" + f[1] + " > /dev/null 2>&1 &")
print("#pragma message: Upload and Start of ap.elf to Bebop Succes!")
elif args.command == 'upload_file':
# Split filename and path
f = parrot_utils.split_into_path_and_file(args.file)
parrot_utils.execute_command(tn, "mkdir -p /data/ftp/" + args.folder)
print('Uploading \'' + f[1] + "\' from " + f[0] + " to /data/ftp/" + args.folder)
parrot_utils.uploadfile(ftp, args.folder + "/" + f[1], file(args.file, "rb"))
print("#pragma message: Upload of " + f[1] + " to Bebop Succes!")
elif args.command == 'download_file':
# Split filename and path
f = parrot_utils.split_into_path_and_file(args.file)
# Open file and download
try:
fd = open(args.file, 'wb')
print('Downloading \'' + f[1] + "\' from " + args.folder + " to " + f[0])
ftp.retrbinary("RETR " + args.folder + "/" + f[1], fd.write)
print("#pragma message: Download of " + f[1] + " from Bebop Succes!")
except IOError:
print("#pragma message: Fail to open file " + args.file)
except:
os.remove(args.file)
print("#pragma message: Download of " + f[1] + " from Bebop Failed!")
else:
fd.close()
elif args.command == 'download_dir':
# Split filename and path
files = parrot_utils.execute_command(tn, 'find /data/ftp/' + args.folder + ' -name \'*.*\'')
# Create dest dir if needed
if not os.path.exists(args.dest):
os.mkdir(args.dest)
# Open file and download
for f in files.split():
file_name = parrot_utils.split_into_path_and_file(f)
file_source = args.folder + '/' + file_name[1]
file_dest = args.dest + '/' + file_name[1]
try:
fd = open(file_dest, 'wb')
print('Downloading \'' + f + "\' to " + file_dest)
ftp.retrbinary("RETR " + file_source, fd.write)
except IOError:
print("#pragma message: Fail to open file " + file_dest)
except:
os.remove(file_dest)
print("#pragma message: Download of " + f + " from Bebop Failed!")
else:
fd.close()
print("#pragma message: End download of folder " + args.folder + " from Bebop")
elif args.command == 'rm_dir':
# Split filename and path
print("Deleting folder /data/ftp/" + args.folder + " from Bebop")
print(parrot_utils.execute_command(tn, 'rm -r /data/ftp/' + args.folder))
# Close the telnet and python script
parrot_utils.disconnect(tn, ftp)
exit(0)
| gpl-2.0 |
cesargtz/YecoraOdoo | addons/l10n_fr/report/compute_resultant_report.py | 374 | 4004 | # -*- coding: utf-8 -*-
#
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import base_report
from openerp.osv import osv
class cdr(base_report.base_report):
def __init__(self, cr, uid, name, context):
super(cdr, self).__init__(cr, uid, name, context)
def set_context(self, objects, data, ids):
super(cdr, self).set_context(objects, data, ids)
self._load('cdr', self.localcontext['data']['form'])
self._set_variable(
'ct1',
self.localcontext['cdrc1']+self.localcontext['cdrc2']+self.localcontext['cdrc3']+
self.localcontext['cdrc4']+self.localcontext['cdrc5']+self.localcontext['cdrc6']+
self.localcontext['cdrc7']+self.localcontext['cdrc8']+self.localcontext['cdrc9']+
self.localcontext['cdrc10']+self.localcontext['cdrc11']+self.localcontext['cdrc12']+
self.localcontext['cdrc13']+self.localcontext['cdrc14']+self.localcontext['cdrc15']
)
self._set_variable(
'ct3',
self.localcontext['cdrc17']+self.localcontext['cdrc18']+self.localcontext['cdrc19']+
self.localcontext['cdrc20']
)
self._set_variable(
'ct4',
self.localcontext['cdrc21']+self.localcontext['cdrc22']+self.localcontext['cdrc23']
)
self._set_variable(
'charges',
self.localcontext['ct1']+self.localcontext['cdrc16']+self.localcontext['ct3']+
self.localcontext['ct4']+self.localcontext['cdrc24']+self.localcontext['cdrc25']
)
self._set_variable(
'pta',
self.localcontext['cdrp1']+self.localcontext['cdrp2']
)
self._set_variable(
'ptb',
self.localcontext['cdrp3']+self.localcontext['cdrp4']+self.localcontext['cdrp5']+
self.localcontext['cdrp6']+self.localcontext['cdrp7']
)
self._set_variable(
'pt1',
self.localcontext['pta']+self.localcontext['ptb']
)
self._set_variable(
'pt3',
self.localcontext['cdrp9']+self.localcontext['cdrp10']+self.localcontext['cdrp11']+
self.localcontext['cdrp12']+self.localcontext['cdrp13']+self.localcontext['cdrp14']
)
self._set_variable(
'pt4',
self.localcontext['cdrp15']+self.localcontext['cdrp16']+self.localcontext['cdrp17']
)
self._set_variable(
'produits',
self.localcontext['pt1']+self.localcontext['cdrp8']+self.localcontext['pt3']+
self.localcontext['pt4']
)
class wrapped_report_resultat(osv.AbstractModel):
_name = 'report.l10n_fr.report_l10nfrresultat'
_inherit = 'report.abstract_report'
_template = 'l10n_fr.report_l10nfrresultat'
_wrapped_report_class = cdr
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
eenchev/idea-note-taking-app | env/lib/python2.7/site-packages/gunicorn/_compat.py | 35 | 8719 | import sys
from gunicorn import six
PY26 = (sys.version_info[:2] == (2, 6))
PY33 = (sys.version_info >= (3, 3))
def _check_if_pyc(fname):
"""Return True if the extension is .pyc, False if .py
and None if otherwise"""
from imp import find_module
from os.path import realpath, dirname, basename, splitext
# Normalize the file-path for the find_module()
filepath = realpath(fname)
dirpath = dirname(filepath)
module_name = splitext(basename(filepath))[0]
# Validate and fetch
try:
fileobj, fullpath, (_, _, pytype) = find_module(module_name, [dirpath])
except ImportError:
raise IOError("Cannot find config file. "
"Path maybe incorrect! : {0}".format(filepath))
return pytype, fileobj, fullpath
def _get_codeobj(pyfile):
""" Returns the code object, given a python file """
from imp import PY_COMPILED, PY_SOURCE
result, fileobj, fullpath = _check_if_pyc(pyfile)
# WARNING:
# fp.read() can blowup if the module is extremely large file.
# Lookout for overflow errors.
try:
data = fileobj.read()
finally:
fileobj.close()
# This is a .pyc file. Treat accordingly.
if result is PY_COMPILED:
# .pyc format is as follows:
# 0 - 4 bytes: Magic number, which changes with each create of .pyc file.
# First 2 bytes change with each marshal of .pyc file. Last 2 bytes is "\r\n".
# 4 - 8 bytes: Datetime value, when the .py was last changed.
# 8 - EOF: Marshalled code object data.
# So to get code object, just read the 8th byte onwards till EOF, and
# UN-marshal it.
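# For reference, the 4-byte magic should match imp.get_magic() for the interpreter that
# produced the .pyc; this helper assumes a valid header and simply skips past it.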
import marshal
code_obj = marshal.loads(data[8:])
elif result is PY_SOURCE:
# This is a .py file.
code_obj = compile(data, fullpath, 'exec')
else:
# Unsupported extension
raise Exception("Input file is unknown format: {0}".format(fullpath))
# Return code object
return code_obj
if six.PY3:
def execfile_(fname, *args):
if fname.endswith(".pyc"):
code = _get_codeobj(fname)
else:
code = compile(open(fname, 'rb').read(), fname, 'exec')
return six.exec_(code, *args)
def bytes_to_str(b):
if isinstance(b, six.text_type):
return b
return str(b, 'latin1')
import urllib.parse
def unquote_to_wsgi_str(string):
return _unquote_to_bytes(string).decode('latin-1')
_unquote_to_bytes = urllib.parse.unquote_to_bytes
else:
def execfile_(fname, *args):
""" Overriding PY2 execfile() implementation to support .pyc files """
if fname.endswith(".pyc"):
return six.exec_(_get_codeobj(fname), *args)
return execfile(fname, *args)
def bytes_to_str(s):
if isinstance(s, unicode):
return s.encode('utf-8')
return s
import urllib
unquote_to_wsgi_str = urllib.unquote
# The following code adapted from trollius.py33_exceptions
def _wrap_error(exc, mapping, key):
if key not in mapping:
return
new_err_cls = mapping[key]
new_err = new_err_cls(*exc.args)
# raise a new exception with the original traceback
six.reraise(new_err_cls, new_err,
exc.__traceback__ if hasattr(exc, '__traceback__') else sys.exc_info()[2])
if PY33:
import builtins
BlockingIOError = builtins.BlockingIOError
BrokenPipeError = builtins.BrokenPipeError
ChildProcessError = builtins.ChildProcessError
ConnectionRefusedError = builtins.ConnectionRefusedError
ConnectionResetError = builtins.ConnectionResetError
InterruptedError = builtins.InterruptedError
ConnectionAbortedError = builtins.ConnectionAbortedError
PermissionError = builtins.PermissionError
FileNotFoundError = builtins.FileNotFoundError
ProcessLookupError = builtins.ProcessLookupError
def wrap_error(func, *args, **kw):
return func(*args, **kw)
else:
import errno
import select
import socket
class BlockingIOError(OSError):
pass
class BrokenPipeError(OSError):
pass
class ChildProcessError(OSError):
pass
class ConnectionRefusedError(OSError):
pass
class InterruptedError(OSError):
pass
class ConnectionResetError(OSError):
pass
class ConnectionAbortedError(OSError):
pass
class PermissionError(OSError):
pass
class FileNotFoundError(OSError):
pass
class ProcessLookupError(OSError):
pass
_MAP_ERRNO = {
errno.EACCES: PermissionError,
errno.EAGAIN: BlockingIOError,
errno.EALREADY: BlockingIOError,
errno.ECHILD: ChildProcessError,
errno.ECONNABORTED: ConnectionAbortedError,
errno.ECONNREFUSED: ConnectionRefusedError,
errno.ECONNRESET: ConnectionResetError,
errno.EINPROGRESS: BlockingIOError,
errno.EINTR: InterruptedError,
errno.ENOENT: FileNotFoundError,
errno.EPERM: PermissionError,
errno.EPIPE: BrokenPipeError,
errno.ESHUTDOWN: BrokenPipeError,
errno.EWOULDBLOCK: BlockingIOError,
errno.ESRCH: ProcessLookupError,
}
def wrap_error(func, *args, **kw):
"""
Wrap socket.error, IOError, OSError, select.error to raise new specialized
exceptions of Python 3.3 like InterruptedError (PEP 3151).
"""
try:
return func(*args, **kw)
except (socket.error, IOError, OSError) as exc:
if hasattr(exc, 'winerror'):
_wrap_error(exc, _MAP_ERRNO, exc.winerror)
# _MAP_ERRNO does not contain all Windows errors.
# For some errors like "file not found", exc.errno should
# be used (ex: ENOENT).
_wrap_error(exc, _MAP_ERRNO, exc.errno)
raise
except select.error as exc:
if exc.args:
_wrap_error(exc, _MAP_ERRNO, exc.args[0])
raise
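# A minimal usage sketch for wrap_error (the socket object `sock` below is
# hypothetical; on PY3 the wrapper above is a plain passthrough, so callers can
# catch the PEP 3151 exception classes uniformly on both versions):
#
#   try:
#       data = wrap_error(sock.recv, 1024)
#   except ConnectionResetError:
#       pass  # raised instead of a bare socket.error with errno.ECONNRESET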
if PY26:
from urlparse import (
_parse_cache, MAX_CACHE_SIZE, clear_cache, _splitnetloc, SplitResult,
scheme_chars,
)
def urlsplit(url, scheme='', allow_fragments=True):
"""Parse a URL into 5 components:
<scheme>://<netloc>/<path>?<query>#<fragment>
Return a 5-tuple: (scheme, netloc, path, query, fragment).
Note that we don't break the components up in smaller bits
(e.g. netloc is a single string) and we don't expand % escapes."""
allow_fragments = bool(allow_fragments)
key = url, scheme, allow_fragments, type(url), type(scheme)
cached = _parse_cache.get(key, None)
if cached:
return cached
if len(_parse_cache) >= MAX_CACHE_SIZE: # avoid runaway growth
clear_cache()
netloc = query = fragment = ''
i = url.find(':')
if i > 0:
if url[:i] == 'http': # optimize the common case
scheme = url[:i].lower()
url = url[i+1:]
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
for c in url[:i]:
if c not in scheme_chars:
break
else:
# make sure "url" is not actually a port number (in which case
# "scheme" is really part of the path)
rest = url[i+1:]
if not rest or any(c not in '0123456789' for c in rest):
# not a port number
scheme, url = url[:i].lower(), rest
if url[:2] == '//':
netloc, url = _splitnetloc(url, 2)
if (('[' in netloc and ']' not in netloc) or
(']' in netloc and '[' not in netloc)):
raise ValueError("Invalid IPv6 URL")
if allow_fragments and '#' in url:
url, fragment = url.split('#', 1)
if '?' in url:
url, query = url.split('?', 1)
v = SplitResult(scheme, netloc, url, query, fragment)
_parse_cache[key] = v
return v
else:
from gunicorn.six.moves.urllib.parse import urlsplit
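# A minimal usage sketch (whichever branch supplies urlsplit, it returns the
# 5-tuple described in the PY26 docstring above; the URL is hypothetical):
#
#   >>> urlsplit('http://www.example.com/a/b?q=1#frag')
#   SplitResult(scheme='http', netloc='www.example.com', path='/a/b', query='q=1', fragment='frag')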
| mit |
ClustyROM/Galaxy_Note | tools/perf/scripts/python/sctop.py | 895 | 1936 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
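#
# Example invocations (the command name "gcc" is hypothetical; the form follows
# the usage string below and the argument parsing further down):
#   perf trace -s sctop.py          # all comms, refresh every 3 seconds
#   perf trace -s sctop.py 5        # all comms, refresh every 5 seconds
#   perf trace -s sctop.py gcc 5    # only syscalls made by "gcc", every 5 seconds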
import thread
import time
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf trace -s syscall-counts.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40d %10d\n" % (id, val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
| gpl-2.0 |
lafayette/JBTT | framework/python/Lib/encodings/iso8859_7.py | 593 | 13100 | """ Python Character Mapping Codec iso8859_7 generated from 'MAPPINGS/ISO8859/8859-7.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='iso8859-7',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u2018' # 0xA1 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xA2 -> RIGHT SINGLE QUOTATION MARK
u'\xa3' # 0xA3 -> POUND SIGN
u'\u20ac' # 0xA4 -> EURO SIGN
u'\u20af' # 0xA5 -> DRACHMA SIGN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\xa8' # 0xA8 -> DIAERESIS
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u037a' # 0xAA -> GREEK YPOGEGRAMMENI
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\ufffe'
u'\u2015' # 0xAF -> HORIZONTAL BAR
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\xb2' # 0xB2 -> SUPERSCRIPT TWO
u'\xb3' # 0xB3 -> SUPERSCRIPT THREE
u'\u0384' # 0xB4 -> GREEK TONOS
u'\u0385' # 0xB5 -> GREEK DIALYTIKA TONOS
u'\u0386' # 0xB6 -> GREEK CAPITAL LETTER ALPHA WITH TONOS
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u0388' # 0xB8 -> GREEK CAPITAL LETTER EPSILON WITH TONOS
u'\u0389' # 0xB9 -> GREEK CAPITAL LETTER ETA WITH TONOS
u'\u038a' # 0xBA -> GREEK CAPITAL LETTER IOTA WITH TONOS
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u038c' # 0xBC -> GREEK CAPITAL LETTER OMICRON WITH TONOS
u'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
u'\u038e' # 0xBE -> GREEK CAPITAL LETTER UPSILON WITH TONOS
u'\u038f' # 0xBF -> GREEK CAPITAL LETTER OMEGA WITH TONOS
u'\u0390' # 0xC0 -> GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
u'\u0391' # 0xC1 -> GREEK CAPITAL LETTER ALPHA
u'\u0392' # 0xC2 -> GREEK CAPITAL LETTER BETA
u'\u0393' # 0xC3 -> GREEK CAPITAL LETTER GAMMA
u'\u0394' # 0xC4 -> GREEK CAPITAL LETTER DELTA
u'\u0395' # 0xC5 -> GREEK CAPITAL LETTER EPSILON
u'\u0396' # 0xC6 -> GREEK CAPITAL LETTER ZETA
u'\u0397' # 0xC7 -> GREEK CAPITAL LETTER ETA
u'\u0398' # 0xC8 -> GREEK CAPITAL LETTER THETA
u'\u0399' # 0xC9 -> GREEK CAPITAL LETTER IOTA
u'\u039a' # 0xCA -> GREEK CAPITAL LETTER KAPPA
u'\u039b' # 0xCB -> GREEK CAPITAL LETTER LAMDA
u'\u039c' # 0xCC -> GREEK CAPITAL LETTER MU
u'\u039d' # 0xCD -> GREEK CAPITAL LETTER NU
u'\u039e' # 0xCE -> GREEK CAPITAL LETTER XI
u'\u039f' # 0xCF -> GREEK CAPITAL LETTER OMICRON
u'\u03a0' # 0xD0 -> GREEK CAPITAL LETTER PI
u'\u03a1' # 0xD1 -> GREEK CAPITAL LETTER RHO
u'\ufffe'
u'\u03a3' # 0xD3 -> GREEK CAPITAL LETTER SIGMA
u'\u03a4' # 0xD4 -> GREEK CAPITAL LETTER TAU
u'\u03a5' # 0xD5 -> GREEK CAPITAL LETTER UPSILON
u'\u03a6' # 0xD6 -> GREEK CAPITAL LETTER PHI
u'\u03a7' # 0xD7 -> GREEK CAPITAL LETTER CHI
u'\u03a8' # 0xD8 -> GREEK CAPITAL LETTER PSI
u'\u03a9' # 0xD9 -> GREEK CAPITAL LETTER OMEGA
u'\u03aa' # 0xDA -> GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
u'\u03ab' # 0xDB -> GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
u'\u03ac' # 0xDC -> GREEK SMALL LETTER ALPHA WITH TONOS
u'\u03ad' # 0xDD -> GREEK SMALL LETTER EPSILON WITH TONOS
u'\u03ae' # 0xDE -> GREEK SMALL LETTER ETA WITH TONOS
u'\u03af' # 0xDF -> GREEK SMALL LETTER IOTA WITH TONOS
u'\u03b0' # 0xE0 -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
u'\u03b1' # 0xE1 -> GREEK SMALL LETTER ALPHA
u'\u03b2' # 0xE2 -> GREEK SMALL LETTER BETA
u'\u03b3' # 0xE3 -> GREEK SMALL LETTER GAMMA
u'\u03b4' # 0xE4 -> GREEK SMALL LETTER DELTA
u'\u03b5' # 0xE5 -> GREEK SMALL LETTER EPSILON
u'\u03b6' # 0xE6 -> GREEK SMALL LETTER ZETA
u'\u03b7' # 0xE7 -> GREEK SMALL LETTER ETA
u'\u03b8' # 0xE8 -> GREEK SMALL LETTER THETA
u'\u03b9' # 0xE9 -> GREEK SMALL LETTER IOTA
u'\u03ba' # 0xEA -> GREEK SMALL LETTER KAPPA
u'\u03bb' # 0xEB -> GREEK SMALL LETTER LAMDA
u'\u03bc' # 0xEC -> GREEK SMALL LETTER MU
u'\u03bd' # 0xED -> GREEK SMALL LETTER NU
u'\u03be' # 0xEE -> GREEK SMALL LETTER XI
u'\u03bf' # 0xEF -> GREEK SMALL LETTER OMICRON
u'\u03c0' # 0xF0 -> GREEK SMALL LETTER PI
u'\u03c1' # 0xF1 -> GREEK SMALL LETTER RHO
u'\u03c2' # 0xF2 -> GREEK SMALL LETTER FINAL SIGMA
u'\u03c3' # 0xF3 -> GREEK SMALL LETTER SIGMA
u'\u03c4' # 0xF4 -> GREEK SMALL LETTER TAU
u'\u03c5' # 0xF5 -> GREEK SMALL LETTER UPSILON
u'\u03c6' # 0xF6 -> GREEK SMALL LETTER PHI
u'\u03c7' # 0xF7 -> GREEK SMALL LETTER CHI
u'\u03c8' # 0xF8 -> GREEK SMALL LETTER PSI
u'\u03c9' # 0xF9 -> GREEK SMALL LETTER OMEGA
u'\u03ca' # 0xFA -> GREEK SMALL LETTER IOTA WITH DIALYTIKA
u'\u03cb' # 0xFB -> GREEK SMALL LETTER UPSILON WITH DIALYTIKA
u'\u03cc' # 0xFC -> GREEK SMALL LETTER OMICRON WITH TONOS
u'\u03cd' # 0xFD -> GREEK SMALL LETTER UPSILON WITH TONOS
u'\u03ce' # 0xFE -> GREEK SMALL LETTER OMEGA WITH TONOS
u'\ufffe'
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
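# A minimal usage sketch (assuming this module is importable as part of the
# standard "encodings" package so the codec can be looked up by name; the byte
# values below follow the decoding table above - 0xC1/0xE2/0xE3 map to Greek
# Alpha, beta, gamma):
#
#   >>> u'\u0391\u03b2\u03b3'.encode('iso8859-7')
#   '\xc1\xe2\xe3'
#   >>> '\xc1\xe2\xe3'.decode('iso8859-7')
#   u'\u0391\u03b2\u03b3'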
| mit |
xin3liang/platform_external_chromium_org | chrome/common/extensions/docs/server2/path_canonicalizer.py | 16 | 4879 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import posixpath
from future import Future
from path_util import SplitParent
from special_paths import SITE_VERIFICATION_FILE
def _Normalize(file_name, splittext=False):
normalized = file_name
if splittext:
normalized = posixpath.splitext(file_name)[0]
normalized = normalized.replace('.', '').replace('-', '').replace('_', '')
return normalized.lower()
def _CommonNormalizedPrefix(first_file, second_file):
return posixpath.commonprefix((_Normalize(first_file),
_Normalize(second_file)))
class PathCanonicalizer(object):
'''Transforms paths into their canonical forms. Since the docserver has had
many incarnations - e.g. there didn't use to be apps/ - there may be old
paths lying around the webs. We try to redirect those to where they are now.
'''
def __init__(self,
file_system,
object_store_creator,
strip_extensions):
# |strip_extensions| is a list of file extensions (e.g. .html) that should
# be stripped for a path's canonical form.
self._cache = object_store_creator.Create(
PathCanonicalizer, category=file_system.GetIdentity())
self._file_system = file_system
self._strip_extensions = strip_extensions
def _LoadCache(self):
cached_future = self._cache.GetMulti(('canonical_paths',
'simplified_paths_map'))
def resolve():
# |canonical_paths| is the pre-calculated set of canonical paths.
# |simplified_paths_map| is a lazily populated mapping of simplified file
# names to a list of full paths that contain them. For example,
# - browseraction: [extensions/browserAction.html]
# - storage: [apps/storage.html, extensions/storage.html]
cached = cached_future.Get()
canonical_paths, simplified_paths_map = (
cached.get('canonical_paths'), cached.get('simplified_paths_map'))
if canonical_paths is None:
assert simplified_paths_map is None
canonical_paths = set()
simplified_paths_map = defaultdict(list)
for base, dirs, files in self._file_system.Walk(''):
for path in dirs + files:
path_without_ext, ext = posixpath.splitext(path)
canonical_path = posixpath.join(base, path_without_ext)
if (ext not in self._strip_extensions or
path == SITE_VERIFICATION_FILE):
canonical_path += ext
canonical_paths.add(canonical_path)
simplified_paths_map[_Normalize(path, splittext=True)].append(
canonical_path)
        # Store |simplified_paths_map| sorted: shortest paths first, with ties
        # in length broken by lexicographic order.
for path_list in simplified_paths_map.itervalues():
path_list.sort(key=lambda p: (len(p), p))
self._cache.SetMulti({
'canonical_paths': canonical_paths,
'simplified_paths_map': simplified_paths_map,
})
else:
assert simplified_paths_map is not None
return canonical_paths, simplified_paths_map
return Future(callback=resolve)
def Canonicalize(self, path):
'''Returns the canonical path for |path|.
'''
canonical_paths, simplified_paths_map = self._LoadCache().Get()
# Path may already be the canonical path.
if path in canonical_paths:
return path
# Path not found. Our single heuristic: find |base| in the directory
# structure with the longest common prefix of |path|.
_, base = SplitParent(path)
    # Paths with a non-extension dot separator lose information in
    # _Normalize, so we try paths both with and without the dot to
    # maximize the possibility of finding the right path.
potential_paths = (
simplified_paths_map.get(_Normalize(base), []) +
simplified_paths_map.get(_Normalize(base, splittext=True), []))
if potential_paths == []:
# There is no file with anything close to that name.
return path
# The most likely canonical file is the one with the longest common prefix
# with |path|. This is slightly weaker than it could be; |path| is
# compared without symbols, not the simplified form of |path|,
# which may matter.
max_prefix = potential_paths[0]
max_prefix_length = len(_CommonNormalizedPrefix(max_prefix, path))
for path_for_file in potential_paths[1:]:
prefix_length = len(_CommonNormalizedPrefix(path_for_file, path))
if prefix_length > max_prefix_length:
max_prefix, max_prefix_length = path_for_file, prefix_length
return max_prefix
def Cron(self):
return self._LoadCache()
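# A minimal usage sketch (the file_system and object_store_creator objects are
# hypothetical; the strip_extensions value and the browserAction path follow
# the example given in _LoadCache's comments above):
#
#   canonicalizer = PathCanonicalizer(file_system,
#                                     object_store_creator,
#                                     strip_extensions=('.html',))
#   canonicalizer.Canonicalize('browserAction.html')
#   # -> the pre-calculated canonical path, e.g. 'extensions/browserAction'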
| bsd-3-clause |
briceburg/airflow | airflow/executors/base_executor.py | 30 | 3817 | from builtins import range
from builtins import object
import logging
from airflow.utils import State
from airflow.configuration import conf
PARALLELISM = conf.getint('core', 'PARALLELISM')
class BaseExecutor(object):
def __init__(self, parallelism=PARALLELISM):
"""
Class to derive in order to interface with executor-type systems
        like Celery, Mesos, Yarn and the like.
:param parallelism: how many jobs should run at one time. Set to
``0`` for infinity
:type parallelism: int
"""
self.parallelism = parallelism
self.queued_tasks = {}
self.running = {}
self.event_buffer = {}
def start(self): # pragma: no cover
"""
Executors may need to get things started. For example LocalExecutor
starts N workers.
"""
pass
def queue_command(self, key, command, priority=1, queue=None):
if key not in self.queued_tasks and key not in self.running:
logging.info("Adding to queue: " + command)
self.queued_tasks[key] = (command, priority, queue)
def queue_task_instance(
self, task_instance, mark_success=False, pickle_id=None,
force=False, ignore_dependencies=False, task_start_date=None):
command = task_instance.command(
local=True,
mark_success=mark_success,
force=force,
ignore_dependencies=ignore_dependencies,
task_start_date=task_start_date,
pickle_id=pickle_id)
self.queue_command(
task_instance.key,
command,
priority=task_instance.task.priority_weight_total,
queue=task_instance.task.queue)
def sync(self):
"""
Sync will get called periodically by the heartbeat method.
        Executors should override this to gather statuses.
"""
pass
def heartbeat(self):
# Calling child class sync method
logging.debug("Calling the {} sync method".format(self.__class__))
self.sync()
# Triggering new jobs
if not self.parallelism:
open_slots = len(self.queued_tasks)
else:
open_slots = self.parallelism - len(self.running)
logging.debug("{} running task instances".format(len(self.running)))
logging.debug("{} in queue".format(len(self.queued_tasks)))
logging.debug("{} open slots".format(open_slots))
sorted_queue = sorted(
[(k, v) for k, v in self.queued_tasks.items()],
key=lambda x: x[1][1],
reverse=True)
for i in range(min((open_slots, len(self.queued_tasks)))):
key, (command, priority, queue) = sorted_queue.pop(0)
self.running[key] = command
del self.queued_tasks[key]
self.execute_async(key, command=command, queue=queue)
def change_state(self, key, state):
del self.running[key]
self.event_buffer[key] = state
def fail(self, key):
self.change_state(key, State.FAILED)
def success(self, key):
self.change_state(key, State.SUCCESS)
def get_event_buffer(self):
"""
Returns and flush the event buffer
"""
d = self.event_buffer
self.event_buffer = {}
return d
def execute_async(self, key, command, queue=None): # pragma: no cover
"""
This method will execute the command asynchronously.
"""
raise NotImplementedError()
def end(self): # pragma: no cover
"""
        This method is called when the caller is done submitting jobs and
        wants to wait synchronously for the previously submitted jobs to
        finish.
"""
raise NotImplementedError()
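# A minimal subclass sketch (illustrative only, not one of Airflow's shipped
# executors; the InlineExecutor name and the use of subprocess are assumptions):
# run every queued command synchronously in-process.
#
#   import subprocess
#
#   class InlineExecutor(BaseExecutor):
#       def execute_async(self, key, command, queue=None):
#           # heartbeat() has already recorded the key in self.running
#           retcode = subprocess.call(command, shell=True)
#           self.change_state(key, State.SUCCESS if retcode == 0 else State.FAILED)
#
#       def end(self):
#           # nothing runs in the background, so a final sync is enough
#           self.heartbeat()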
| apache-2.0 |
alxgu/ansible | lib/ansible/modules/cloud/hcloud/hcloud_floating_ip_facts.py | 3 | 4929 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Hetzner Cloud GmbH <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: hcloud_floating_ip_facts
short_description: Gather facts about the Hetzner Cloud Floating IPs.
version_added: "2.8"
description:
- Gather facts about your Hetzner Cloud Floating IPs.
author:
- Lukas Kaemmerling (@lkaemmerling)
options:
id:
description:
- The ID of the Floating IP you want to get.
type: int
label_selector:
description:
- The label selector for the Floating IP you want to get.
type: str
extends_documentation_fragment: hcloud
"""
EXAMPLES = """
- name: Gather hcloud Floating ip facts
local_action:
module: hcloud_floating_ip_facts
- name: Print the gathered facts
debug:
var: ansible_facts.hcloud_floating_ip_facts
"""
RETURN = """
hcloud_floating_ip_facts:
description: The Floating ip facts as list
returned: always
type: complex
contains:
id:
description: Numeric identifier of the Floating IP
returned: always
type: int
sample: 1937415
description:
description: Description of the Floating IP
returned: always
type: str
sample: Falkenstein DC 8
ip:
description: IP address of the Floating IP
returned: always
type: str
sample: 131.232.99.1
type:
description: Type of the Floating IP
returned: always
type: str
sample: ipv4
server:
description: Name of the server where the Floating IP is assigned to.
returned: always
type: str
sample: my-server
home_location:
description: Location the Floating IP was created in
returned: always
type: str
sample: fsn1
labels:
description: User-defined labels (key-value pairs)
returned: always
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.hcloud import Hcloud
try:
from hcloud import APIException
except ImportError:
pass
class AnsibleHcloudFloatingIPFacts(Hcloud):
def __init__(self, module):
Hcloud.__init__(self, module, "hcloud_floating_ip_facts")
self.hcloud_floating_ip_facts = None
def _prepare_result(self):
tmp = []
for floating_ip in self.hcloud_floating_ip_facts:
if floating_ip is not None:
server_name = None
if floating_ip.server is not None:
server_name = floating_ip.server.name
tmp.append({
"id": to_native(floating_ip.id),
"description": to_native(floating_ip.description),
"ip": to_native(floating_ip.ip),
"type": to_native(floating_ip.type),
"server": to_native(server_name),
"home_location": to_native(floating_ip.home_location.name),
"labels": floating_ip.labels,
})
return tmp
def get_floating_ips(self):
try:
if self.module.params.get("id") is not None:
self.hcloud_floating_ip_facts = [self.client.floating_ips.get_by_id(
self.module.params.get("id")
)]
elif self.module.params.get("label_selector") is not None:
self.hcloud_floating_ip_facts = self.client.floating_ips.get_all(
label_selector=self.module.params.get("label_selector"))
else:
self.hcloud_floating_ip_facts = self.client.floating_ips.get_all()
except APIException as e:
self.module.fail_json(msg=e.message)
@staticmethod
def define_module():
return AnsibleModule(
argument_spec=dict(
id={"type": "int"},
label_selector={"type": "str"},
**Hcloud.base_module_arguments()
),
supports_check_mode=True,
)
def main():
module = AnsibleHcloudFloatingIPFacts.define_module()
hcloud = AnsibleHcloudFloatingIPFacts(module)
hcloud.get_floating_ips()
result = hcloud.get_result()
ansible_facts = {
'hcloud_floating_ip_facts': result['hcloud_floating_ip_facts']
}
module.exit_json(ansible_facts=ansible_facts)
if __name__ == "__main__":
main()
| gpl-3.0 |
CollinsIchigo/hdx_2 | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/gb2312freq.py | 3132 | 36011 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table , from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ratio = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio is about 25% of the Ideal one, still much higher than RDR
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
GB2312_TABLE_SIZE = 3760
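# Rough sketch of how these constants could feed a char-distribution confidence
# score (a simplification, not the actual chardet analyser): characters whose
# frequency order is below 512 count as "frequent", and the observed ratio of
# frequent to remaining characters is compared against the typical ratio above.
#
#   def rough_confidence(freq_chars, total_chars):
#       if total_chars == freq_chars:
#           return 0.99
#       r = float(freq_chars) / ((total_chars - freq_chars) *
#                                GB2312_TYPICAL_DISTRIBUTION_RATIO)
#       return min(r, 0.99)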
GB2312CharToFreqOrder = (
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512
#Everything below is of no interest for detection purposes
5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636,
5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874,
5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278,
3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806,
4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827,
5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512,
5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578,
4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828,
4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105,
4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189,
4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561,
3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226,
6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778,
4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039,
6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404,
4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213,
4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739,
4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328,
5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592,
3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424,
4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270,
3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232,
4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456,
4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121,
6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971,
6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409,
5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519,
4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367,
6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834,
4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460,
5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464,
5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709,
5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906,
6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530,
3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262,
6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920,
4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190,
5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318,
6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538,
6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697,
4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544,
5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016,
4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638,
5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006,
5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071,
4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552,
4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556,
5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432,
4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632,
4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885,
5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336,
4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729,
4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854,
4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332,
5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004,
5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419,
4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293,
3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580,
4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339,
6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341,
5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493,
5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046,
4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904,
6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728,
5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350,
6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233,
4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944,
5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413,
5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700,
3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999,
5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694,
6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571,
4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359,
6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178,
4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421,
4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330,
6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855,
3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587,
6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803,
4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791,
3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304,
3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445,
3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506,
4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856,
2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057,
5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777,
4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369,
5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028,
5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914,
5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175,
4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681,
5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534,
4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912,
5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054,
1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336,
3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666,
4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375,
4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113,
6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614,
4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173,
5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197,
3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271,
5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423,
5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529,
5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921,
3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837,
5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922,
5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187,
3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382,
5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628,
5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683,
5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053,
6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928,
4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662,
6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663,
4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554,
3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191,
4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013,
5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932,
5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055,
5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829,
3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096,
3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660,
6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199,
6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748,
5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402,
6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957,
6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668,
6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763,
6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407,
6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051,
5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429,
6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791,
6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028,
3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305,
3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159,
4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683,
4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372,
3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514,
5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544,
5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472,
5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716,
5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905,
5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327,
4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030,
5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281,
6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224,
5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327,
4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062,
4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354,
6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065,
3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953,
4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681,
4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708,
5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442,
6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387,
6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237,
4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713,
6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547,
5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957,
5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337,
5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074,
5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685,
5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455,
4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722,
5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615,
5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093,
5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989,
5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094,
6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212,
4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967,
5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733,
4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260,
4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864,
6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353,
4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095,
6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287,
3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504,
5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539,
6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750,
6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864,
6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213,
5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573,
6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252,
6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970,
3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703,
5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978,
4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767)
# flake8: noqa
| mit |
mottosso/deplish | deplish/util.py | 1 | 10218 | #
# Depends
# Copyright (C) 2014 by Andrew Gardner & Jonas Unger. All rights reserved.
# BSD license (LICENSE.txt for details).
#
import os
import re
import imp
import sys
import glob
import inspect
import node
"""
A utility module for the Depends software. Much of the functionality relates
to plugin loading, but string manipulation, software restarting, and file
sequence manipulation are all present as well.
"""
###############################################################################
## Utility
###############################################################################
def allClassChildren(inputClass):
"""
    Returns a list of all of a class's children and its children's children,
    using a while loop as its traversal method.
"""
subclasses = set()
work = [inputClass]
while work:
parent = work.pop()
for child in parent.__subclasses__():
if child not in subclasses:
subclasses.add(child)
work.append(child)
return list(subclasses)
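# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Shows the breadth-first subclass walk on a small, hypothetical hierarchy; it
# is never called by Depends itself.
def _exampleAllClassChildren():
    class Base(object):
        pass
    class Child(Base):
        pass
    class GrandChild(Child):
        pass
    # Expected to contain both Child and GrandChild (order is not guaranteed).
    return allClassChildren(Base)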
def classTypeNamedFromModule(typeString, moduleName):
"""
Creates a node of a given type (string) from a loaded module specified by name.
"""
moduleMembers = inspect.getmembers(globals()[moduleName])
classIndex = [i for i, t in enumerate(moduleMembers) if t[0] == typeString][0]
defaultConstructedNode = (moduleMembers[classIndex][1])()
return defaultConstructedNode
def allClassesOfInheritedTypeFromDir(fromDir, classType):
"""
Given a directory on-disk, dig through each .py module, looking for classes
that inherit from the given classType. Return a dictionary with class
names as keys and the class objects as values.
"""
returnDict = dict()
fileList = glob.glob(os.path.join(fromDir, "*.py"))
for filename in fileList:
basename = os.path.basename(filename)
basenameWithoutExtension = basename[:-3]
try:
foo = imp.load_source(basenameWithoutExtension, filename)
except Exception, err:
print "Module '%s' raised the following exception when trying to load." % (basenameWithoutExtension)
print ' "%s"' % (str(err))
print "Skipping..."
continue
for x in inspect.getmembers(foo):
name = x[0]
data = x[1]
if type(data) is type and data in allClassChildren(classType):
returnDict[name] = data
return returnDict
def namedFunctionFromPluginFile(filename, functionName):
"""
Load a Python module specified by its filename and return a function
matching the given function name.
"""
if not os.path.exists(filename):
raise RuntimeError("Plugin file %s does not exist." % filename)
try:
basename = os.path.basename(filename)
basenameWithoutExtension = basename[:-3]
module = imp.load_source(basenameWithoutExtension, filename)
except Exception, err:
print "Module '%s' raised the following exception when trying to load." % (basenameWithoutExtension)
print ' "%s"' % (str(err))
print "Plugin is not loaded."
return None
for x in inspect.getmembers(module):
name = x[0]
function = x[1]
if name == functionName:
return function
return None
def dagSnapshotDiff(snapshotLeft, snapshotRight):
"""
A function that detects differences in the "important" parts of a
DAG snapshot. Returns a tuple containing a list of modified nodes
    and a list of modified edges.
"""
modifiedNodes = list()
if snapshotLeft['NODES'] != snapshotRight['NODES']:
for node in snapshotRight['NODES']:
if node not in snapshotLeft['NODES']:
modifiedNodes.append(node['NAME'])
for node in snapshotLeft['NODES']:
if node not in snapshotRight['NODES']:
modifiedNodes.append(node['NAME'])
modifiedEdges = list()
if snapshotLeft['EDGES'] != snapshotRight['EDGES']:
for edge in snapshotRight['EDGES']:
if edge not in snapshotLeft['EDGES']:
modifiedEdges.append((edge['FROM'], edge['TO']))
for edge in snapshotLeft['EDGES']:
if edge not in snapshotRight['EDGES']:
modifiedEdges.append((edge['FROM'], edge['TO']))
return (modifiedNodes, modifiedEdges)
def restartProgram(newArgs):
"""
Restarts the current program.
"""
python = sys.executable
os.execl(python, python, *newArgs)
def nextFilenameVersion(filename):
"""
Given an existing filename, return a string representing the next 'version' of that
filename. All versions are presumed to be 3-zero padded, and if the version doesn't
yet exist, one will be added.
"""
(path, baseName) = os.path.split(filename)
splitBase = baseName.split('.')
    # versionIndex identifies which segment of the split string should contain a version number
versionIndex = -1
if len(splitBase) > 1:
versionIndex = -2
# Ignore all trailing ".[#+]."s
for i in range(len(splitBase)):
minusIndex = -(i+1)
if all(p == '#' for p in splitBase[minusIndex]):
versionIndex = minusIndex-1
# If there are still #s or _s hanging onto the end of the versionIndex string, remove them for now
poundCount = 0
if splitBase[versionIndex].find('#') != -1:
poundCount = len(splitBase[versionIndex]) - splitBase[versionIndex].find('#')
splitBase[versionIndex] = splitBase[versionIndex][:-poundCount]
trailingUnderscores = len(splitBase[versionIndex]) - len(splitBase[versionIndex].rstrip('_'))
if trailingUnderscores:
splitBase[versionIndex] = splitBase[versionIndex][:-trailingUnderscores]
# Match all ending digits and strip them off
versionNumber = 0
numberString = '000'
matchObj = re.match(r'.*?(\d+)$', splitBase[versionIndex])
if matchObj:
numberString = matchObj.group(1)
versionNumber = int(numberString)
splitBase[versionIndex] = splitBase[versionIndex][:-len(numberString)]
# Increment and attach the number string to the end again
versionNumber += 1
splitBase[versionIndex] += "%s" % (str(versionNumber).zfill(len(numberString)))
if poundCount:
splitBase[versionIndex] += '_'
splitBase[versionIndex] += "_"*(trailingUnderscores-1)
splitBase[versionIndex] += "#"*poundCount
newBase = '.'.join(splitBase)
return os.path.join(path, newBase)
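# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Demonstrates the version bump described above; the path and filename are
# hypothetical.
def _exampleNextFilenameVersion():
    # Expected: '/tmp/shot.v002.exr'
    return nextFilenameVersion('/tmp/shot.v001.exr')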
def generateUniqueNameSimiarToExisting(prefix, existingNames):
"""
Given a name prefix known to exist in a list of existing names, figure out
a new name that doesn't exist in the existing list yet is similar to the
rest. This is primarily accomplished by appending a 2-zero-padded number
to the end of the prefix.
"""
nameIndices = list()
for en in existingNames:
numberLop = en[len(prefix):]
nameIndices.append(int(numberLop))
nameIndices.sort()
if not nameIndices or min(nameIndices) != 1:
return "%s%02d" % (prefix, 1)
for i in range(len(nameIndices)):
if len(nameIndices) == i+1:
return "%s%02d" % (prefix, nameIndices[i]+1)
if nameIndices[i+1] != nameIndices[i]+1:
return "%s%02d" % (prefix, nameIndices[i]+1)
class framespec(object):
"""
This class defines a sequence of files on disk as a filename containing
one or more "#" characters, a start frame integer, and an end frame integer.
It is valid to set a start frame or end frame to "None". Escaping #s allows
them to pass through as # characters.
This is done in the "Nuke" style, meaning the following substitutions will
occur:
FILENAME FRAME NUM RESULT
foo.#.txt 1 foo.1.txt
foo.##.txt 1 foo.01.txt
foo.#.txt 100 foo.100.txt
foo\#.#.txt 5 foo#.5.txt
"""
def __init__(self, fileString, fileRange):
"""
"""
self.filename = fileString
self.startFrame = None
self.endFrame = None
if fileRange:
self.setFramerange(*fileRange)
def setFramerange(self, startFrame, endFrame):
"""
Set the start and end frames given two strings or ints.
"""
self.startFrame = int(startFrame) if startFrame else None
self.endFrame = int(endFrame) if endFrame else None
def frames(self):
"""
Return a complete list of filenames this framespec object represents.
"""
frameList = list()
if self.startFrame is None or self.endFrame is None:
return [self.filename]
for i in range(self.startFrame, self.endFrame+1):
frameList.append(self.replaceFrameSymbols(self.filename, i))
return frameList
@staticmethod
def hasFrameSymbols(checkString):
"""
Return a boolean stating whether or not the given string contains
known frame symbols ('#').
"""
matchObj = re.finditer(r'((?<!\\)\#+)', checkString)
i = next(matchObj, None)
if not i:
return False
return True
@staticmethod
def replaceFrameSymbols(replaceString, frameNumber):
"""
This function replaces Nuke-style frame sequence markers in a string
with a given frame number. Meaning, any string of # characters gets
padded to the number of #s. Escaped #s with a backslash (\#) will be
replaced with a single # character in this function.
"""
matchObj = re.finditer(r'((?<!\\)\#+)', replaceString)
i = next(matchObj, None)
while i:
padString = "%s" % str(frameNumber).zfill(len(i.group(0)))
replaceString = replaceString[:i.start()] + padString + replaceString[i.end():]
matchObj = re.finditer(r'((?<!\\)\#+)', replaceString)
i = next(matchObj, None)
replaceString = replaceString.replace('\#', '#')
return replaceString
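# --- Illustrative sketch (editor's addition, not part of the original module) ---
# Demonstrates the Nuke-style '#' padding documented in the framespec class;
# the filename and range are hypothetical.
def _exampleFramespecUsage():
    spec = framespec("foo.###.txt", (1, 3))
    # Expected: ['foo.001.txt', 'foo.002.txt', 'foo.003.txt']
    return spec.frames()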
| bsd-3-clause |
joariasl/odoo | addons/l10n_fr_hr_payroll/report/__init__.py | 424 | 1091 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import fiche_paye
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
etherkit/OpenBeacon2 | client/macos/venv/lib/python3.8/site-packages/pip/_internal/req/req_file.py | 11 | 19448 | """
Requirements file parsing
"""
from __future__ import absolute_import
import optparse
import os
import re
import shlex
import sys
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._internal.cli import cmdoptions
from pip._internal.exceptions import (
InstallationError,
RequirementsFileParseError,
)
from pip._internal.models.search_scope import SearchScope
from pip._internal.network.utils import raise_for_status
from pip._internal.utils.encoding import auto_decode
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import get_url_scheme
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import (
Any, Callable, Dict, Iterator, List, NoReturn, Optional, Text, Tuple,
)
from pip._internal.index.package_finder import PackageFinder
from pip._internal.network.session import PipSession
ReqFileLines = Iterator[Tuple[int, Text]]
LineParser = Callable[[Text], Tuple[str, Values]]
__all__ = ['parse_requirements']
SCHEME_RE = re.compile(r'^(http|https|file):', re.I)
COMMENT_RE = re.compile(r'(^|\s+)#.*$')
# Matches environment variable-style values in '${MY_VARIABLE_1}' with the
# variable name consisting of only uppercase letters, digits or the '_'
# (underscore). This follows the POSIX standard defined in IEEE Std 1003.1,
# 2013 Edition.
ENV_VAR_RE = re.compile(r'(?P<var>\$\{(?P<name>[A-Z0-9_]+)\})')
SUPPORTED_OPTIONS = [
cmdoptions.index_url,
cmdoptions.extra_index_url,
cmdoptions.no_index,
cmdoptions.constraints,
cmdoptions.requirements,
cmdoptions.editable,
cmdoptions.find_links,
cmdoptions.no_binary,
cmdoptions.only_binary,
cmdoptions.prefer_binary,
cmdoptions.require_hashes,
cmdoptions.pre,
cmdoptions.trusted_host,
cmdoptions.use_new_feature,
] # type: List[Callable[..., optparse.Option]]
# options to be passed to requirements
SUPPORTED_OPTIONS_REQ = [
cmdoptions.install_options,
cmdoptions.global_options,
cmdoptions.hash,
] # type: List[Callable[..., optparse.Option]]
# the 'dest' string values
SUPPORTED_OPTIONS_REQ_DEST = [str(o().dest) for o in SUPPORTED_OPTIONS_REQ]
class ParsedRequirement(object):
def __init__(
self,
requirement, # type:str
is_editable, # type: bool
comes_from, # type: str
constraint, # type: bool
options=None, # type: Optional[Dict[str, Any]]
line_source=None, # type: Optional[str]
):
# type: (...) -> None
self.requirement = requirement
self.is_editable = is_editable
self.comes_from = comes_from
self.options = options
self.constraint = constraint
self.line_source = line_source
class ParsedLine(object):
def __init__(
self,
filename, # type: str
lineno, # type: int
comes_from, # type: Optional[str]
args, # type: str
opts, # type: Values
constraint, # type: bool
):
# type: (...) -> None
self.filename = filename
self.lineno = lineno
self.comes_from = comes_from
self.opts = opts
self.constraint = constraint
if args:
self.is_requirement = True
self.is_editable = False
self.requirement = args
elif opts.editables:
self.is_requirement = True
self.is_editable = True
# We don't support multiple -e on one line
self.requirement = opts.editables[0]
else:
self.is_requirement = False
def parse_requirements(
filename, # type: str
session, # type: PipSession
finder=None, # type: Optional[PackageFinder]
comes_from=None, # type: Optional[str]
options=None, # type: Optional[optparse.Values]
constraint=False, # type: bool
):
# type: (...) -> Iterator[ParsedRequirement]
"""Parse a requirements file and yield ParsedRequirement instances.
:param filename: Path or url of requirements file.
:param session: PipSession instance.
:param finder: Instance of pip.index.PackageFinder.
:param comes_from: Origin description of requirements.
:param options: cli options.
:param constraint: If true, parsing a constraint file rather than
requirements file.
"""
line_parser = get_line_parser(finder)
parser = RequirementsFileParser(session, line_parser, comes_from)
for parsed_line in parser.parse(filename, constraint):
parsed_req = handle_line(
parsed_line,
options=options,
finder=finder,
session=session
)
if parsed_req is not None:
yield parsed_req
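# --- Illustrative sketch (editor's addition, not part of pip) ---
# A minimal sketch of driving parse_requirements(); the file path is
# hypothetical and a default-constructed PipSession is assumed to be enough here.
def _example_parse_requirements():
    from pip._internal.network.session import PipSession
    session = PipSession()
    reqs = parse_requirements('requirements.txt', session)
    return [parsed.requirement for parsed in reqs]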
def preprocess(content):
# type: (Text) -> ReqFileLines
"""Split, filter, and join lines, and return a line iterator
:param content: the content of the requirements file
"""
lines_enum = enumerate(content.splitlines(), start=1) # type: ReqFileLines
lines_enum = join_lines(lines_enum)
lines_enum = ignore_comments(lines_enum)
lines_enum = expand_env_variables(lines_enum)
return lines_enum
def handle_requirement_line(
line, # type: ParsedLine
options=None, # type: Optional[optparse.Values]
):
# type: (...) -> ParsedRequirement
# preserve for the nested code path
line_comes_from = '{} {} (line {})'.format(
'-c' if line.constraint else '-r', line.filename, line.lineno,
)
assert line.is_requirement
if line.is_editable:
# For editable requirements, we don't support per-requirement
# options, so just return the parsed requirement.
return ParsedRequirement(
requirement=line.requirement,
is_editable=line.is_editable,
comes_from=line_comes_from,
constraint=line.constraint,
)
else:
if options:
# Disable wheels if the user has specified build options
cmdoptions.check_install_build_global(options, line.opts)
# get the options that apply to requirements
req_options = {}
for dest in SUPPORTED_OPTIONS_REQ_DEST:
if dest in line.opts.__dict__ and line.opts.__dict__[dest]:
req_options[dest] = line.opts.__dict__[dest]
line_source = 'line {} of {}'.format(line.lineno, line.filename)
return ParsedRequirement(
requirement=line.requirement,
is_editable=line.is_editable,
comes_from=line_comes_from,
constraint=line.constraint,
options=req_options,
line_source=line_source,
)
def handle_option_line(
opts, # type: Values
filename, # type: str
lineno, # type: int
finder=None, # type: Optional[PackageFinder]
options=None, # type: Optional[optparse.Values]
session=None, # type: Optional[PipSession]
):
# type: (...) -> None
if options:
# percolate options upward
if opts.require_hashes:
options.require_hashes = opts.require_hashes
if opts.features_enabled:
options.features_enabled.extend(
f for f in opts.features_enabled
if f not in options.features_enabled
)
# set finder options
if finder:
find_links = finder.find_links
index_urls = finder.index_urls
if opts.index_url:
index_urls = [opts.index_url]
if opts.no_index is True:
index_urls = []
if opts.extra_index_urls:
index_urls.extend(opts.extra_index_urls)
if opts.find_links:
# FIXME: it would be nice to keep track of the source
# of the find_links: support a find-links local path
# relative to a requirements file.
value = opts.find_links[0]
req_dir = os.path.dirname(os.path.abspath(filename))
relative_to_reqs_file = os.path.join(req_dir, value)
if os.path.exists(relative_to_reqs_file):
value = relative_to_reqs_file
find_links.append(value)
search_scope = SearchScope(
find_links=find_links,
index_urls=index_urls,
)
finder.search_scope = search_scope
if opts.pre:
finder.set_allow_all_prereleases()
if opts.prefer_binary:
finder.set_prefer_binary()
if session:
for host in opts.trusted_hosts or []:
source = 'line {} of {}'.format(lineno, filename)
session.add_trusted_host(host, source=source)
def handle_line(
line, # type: ParsedLine
options=None, # type: Optional[optparse.Values]
finder=None, # type: Optional[PackageFinder]
session=None, # type: Optional[PipSession]
):
# type: (...) -> Optional[ParsedRequirement]
"""Handle a single parsed requirements line; This can result in
creating/yielding requirements, or updating the finder.
:param line: The parsed line to be processed.
:param options: CLI options.
:param finder: The finder - updated by non-requirement lines.
:param session: The session - updated by non-requirement lines.
Returns a ParsedRequirement object if the line is a requirement line,
otherwise returns None.
For lines that contain requirements, the only options that have an effect
are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
requirement. Other options from SUPPORTED_OPTIONS may be present, but are
ignored.
For lines that do not contain requirements, the only options that have an
effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
be present, but are ignored. These lines may contain multiple options
    (although our docs imply only one is supported), and all are parsed and
affect the finder.
"""
if line.is_requirement:
parsed_req = handle_requirement_line(line, options)
return parsed_req
else:
handle_option_line(
line.opts,
line.filename,
line.lineno,
finder,
options,
session,
)
return None
class RequirementsFileParser(object):
def __init__(
self,
session, # type: PipSession
line_parser, # type: LineParser
comes_from, # type: Optional[str]
):
# type: (...) -> None
self._session = session
self._line_parser = line_parser
self._comes_from = comes_from
def parse(self, filename, constraint):
# type: (str, bool) -> Iterator[ParsedLine]
"""Parse a given file, yielding parsed lines.
"""
for line in self._parse_and_recurse(filename, constraint):
yield line
def _parse_and_recurse(self, filename, constraint):
# type: (str, bool) -> Iterator[ParsedLine]
for line in self._parse_file(filename, constraint):
if (
not line.is_requirement and
(line.opts.requirements or line.opts.constraints)
):
# parse a nested requirements file
if line.opts.requirements:
req_path = line.opts.requirements[0]
nested_constraint = False
else:
req_path = line.opts.constraints[0]
nested_constraint = True
# original file is over http
if SCHEME_RE.search(filename):
# do a url join so relative paths work
req_path = urllib_parse.urljoin(filename, req_path)
# original file and nested file are paths
elif not SCHEME_RE.search(req_path):
# do a join so relative paths work
req_path = os.path.join(
os.path.dirname(filename), req_path,
)
for inner_line in self._parse_and_recurse(
req_path, nested_constraint,
):
yield inner_line
else:
yield line
def _parse_file(self, filename, constraint):
# type: (str, bool) -> Iterator[ParsedLine]
_, content = get_file_content(
filename, self._session, comes_from=self._comes_from
)
lines_enum = preprocess(content)
for line_number, line in lines_enum:
try:
args_str, opts = self._line_parser(line)
except OptionParsingError as e:
# add offending line
msg = 'Invalid requirement: {}\n{}'.format(line, e.msg)
raise RequirementsFileParseError(msg)
yield ParsedLine(
filename,
line_number,
self._comes_from,
args_str,
opts,
constraint,
)
def get_line_parser(finder):
# type: (Optional[PackageFinder]) -> LineParser
def parse_line(line):
# type: (Text) -> Tuple[str, Values]
# Build new parser for each line since it accumulates appendable
# options.
parser = build_parser()
defaults = parser.get_default_values()
defaults.index_url = None
if finder:
defaults.format_control = finder.format_control
args_str, options_str = break_args_options(line)
# Prior to 2.7.3, shlex cannot deal with unicode entries
if sys.version_info < (2, 7, 3):
# https://github.com/python/mypy/issues/1174
options_str = options_str.encode('utf8') # type: ignore
# https://github.com/python/mypy/issues/1174
opts, _ = parser.parse_args(
shlex.split(options_str), defaults) # type: ignore
return args_str, opts
return parse_line
def break_args_options(line):
# type: (Text) -> Tuple[str, Text]
"""Break up the line into an args and options string. We only want to shlex
(and then optparse) the options, not the args. args can contain markers
which are corrupted by shlex.
"""
tokens = line.split(' ')
args = []
options = tokens[:]
for token in tokens:
if token.startswith('-') or token.startswith('--'):
break
else:
args.append(token)
options.pop(0)
return ' '.join(args), ' '.join(options) # type: ignore
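# --- Illustrative sketch (editor's addition, not part of pip) ---
# Shows the args/options split described in the docstring above; the
# requirement line is hypothetical.
def _example_break_args_options():
    # Expected: ('SomeProject==1.0', '--hash=sha256:deadbeef')
    return break_args_options('SomeProject==1.0 --hash=sha256:deadbeef')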
class OptionParsingError(Exception):
def __init__(self, msg):
# type: (str) -> None
self.msg = msg
def build_parser():
# type: () -> optparse.OptionParser
"""
Return a parser for parsing requirement lines
"""
parser = optparse.OptionParser(add_help_option=False)
option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
for option_factory in option_factories:
option = option_factory()
parser.add_option(option)
# By default optparse sys.exits on parsing errors. We want to wrap
# that in our own exception.
def parser_exit(self, msg):
# type: (Any, str) -> NoReturn
raise OptionParsingError(msg)
# NOTE: mypy disallows assigning to a method
# https://github.com/python/mypy/issues/2427
parser.exit = parser_exit # type: ignore
return parser
def join_lines(lines_enum):
# type: (ReqFileLines) -> ReqFileLines
"""Joins a line ending in '\' with the previous line (except when following
comments). The joined line takes on the index of the first line.
"""
primary_line_number = None
new_line = [] # type: List[Text]
for line_number, line in lines_enum:
if not line.endswith('\\') or COMMENT_RE.match(line):
if COMMENT_RE.match(line):
# this ensures comments are always matched later
line = ' ' + line
if new_line:
new_line.append(line)
assert primary_line_number is not None
yield primary_line_number, ''.join(new_line)
new_line = []
else:
yield line_number, line
else:
if not new_line:
primary_line_number = line_number
new_line.append(line.strip('\\'))
# last line contains \
if new_line:
assert primary_line_number is not None
yield primary_line_number, ''.join(new_line)
# TODO: handle space after '\'.
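# --- Illustrative sketch (editor's addition, not part of pip) ---
# Shows a backslash continuation being merged onto the first line's number;
# the requirement text is hypothetical.
def _example_join_lines():
    lines = [(1, 'SomeProject \\'), (2, '    --hash=sha256:deadbeef')]
    # Expected: a single yielded item numbered 1 containing both fragments.
    return list(join_lines(iter(lines)))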
def ignore_comments(lines_enum):
# type: (ReqFileLines) -> ReqFileLines
"""
Strips comments and filter empty lines.
"""
for line_number, line in lines_enum:
line = COMMENT_RE.sub('', line)
line = line.strip()
if line:
yield line_number, line
def expand_env_variables(lines_enum):
# type: (ReqFileLines) -> ReqFileLines
"""Replace all environment variables that can be retrieved via `os.getenv`.
The only allowed format for environment variables defined in the
requirement file is `${MY_VARIABLE_1}` to ensure two things:
1. Strings that contain a `$` aren't accidentally (partially) expanded.
2. Ensure consistency across platforms for requirement files.
These points are the result of a discussion on the `github pull
request #3514 <https://github.com/pypa/pip/pull/3514>`_.
Valid characters in variable names follow the `POSIX standard
<http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
to uppercase letter, digits and the `_` (underscore).
"""
for line_number, line in lines_enum:
for env_var, var_name in ENV_VAR_RE.findall(line):
value = os.getenv(var_name)
if not value:
continue
line = line.replace(env_var, value)
yield line_number, line
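# --- Illustrative sketch (editor's addition, not part of pip) ---
# Demonstrates the ${VARIABLE} expansion described above; the variable name
# and index URL are hypothetical.
def _example_expand_env_variables():
    os.environ['PIP_EXAMPLE_INDEX'] = 'https://example.com/simple'
    lines = [(1, '--index-url ${PIP_EXAMPLE_INDEX}')]
    # Expected: [(1, '--index-url https://example.com/simple')]
    return list(expand_env_variables(iter(lines)))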
def get_file_content(url, session, comes_from=None):
# type: (str, PipSession, Optional[str]) -> Tuple[str, Text]
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode.
Respects # -*- coding: declarations on the retrieved files.
:param url: File path or url.
:param session: PipSession instance.
:param comes_from: Origin description of requirements.
"""
scheme = get_url_scheme(url)
if scheme in ['http', 'https']:
# FIXME: catch some errors
resp = session.get(url)
raise_for_status(resp)
return resp.url, resp.text
elif scheme == 'file':
if comes_from and comes_from.startswith('http'):
raise InstallationError(
'Requirements file {} references URL {}, '
'which is local'.format(comes_from, url)
)
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib_parse.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
try:
with open(url, 'rb') as f:
content = auto_decode(f.read())
except IOError as exc:
raise InstallationError(
'Could not open requirements file: {}'.format(exc)
)
return url, content
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
| gpl-3.0 |
kressi/erpnext | erpnext/selling/report/customer_acquisition_and_loyalty/customer_acquisition_and_loyalty.py | 96 | 2099 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import getdate, cint
import calendar
def execute(filters=None):
# key yyyy-mm
new_customers_in = {}
repeat_customers_in = {}
customers = []
company_condition = ""
if filters.get("company"):
company_condition = ' and company=%(company)s'
for si in frappe.db.sql("""select posting_date, customer, base_grand_total from `tabSales Invoice`
where docstatus=1 and posting_date <= %(to_date)s
{company_condition} order by posting_date""".format(company_condition=company_condition),
filters, as_dict=1):
key = si.posting_date.strftime("%Y-%m")
if not si.customer in customers:
new_customers_in.setdefault(key, [0, 0.0])
new_customers_in[key][0] += 1
new_customers_in[key][1] += si.base_grand_total
customers.append(si.customer)
else:
repeat_customers_in.setdefault(key, [0, 0.0])
repeat_customers_in[key][0] += 1
repeat_customers_in[key][1] += si.base_grand_total
# time series
from_year, from_month, temp = filters.get("from_date").split("-")
to_year, to_month, temp = filters.get("to_date").split("-")
from_year, from_month, to_year, to_month = \
cint(from_year), cint(from_month), cint(to_year), cint(to_month)
out = []
for year in xrange(from_year, to_year+1):
for month in xrange(from_month if year==from_year else 1, (to_month+1) if year==to_year else 13):
key = "{year}-{month:02d}".format(year=year, month=month)
new = new_customers_in.get(key, [0,0.0])
repeat = repeat_customers_in.get(key, [0,0.0])
out.append([year, calendar.month_name[month],
new[0], repeat[0], new[0] + repeat[0],
new[1], repeat[1], new[1] + repeat[1]])
return [
_("Year"), _("Month"),
_("New Customers") + ":Int",
_("Repeat Customers") + ":Int",
_("Total") + ":Int",
_("New Customer Revenue") + ":Currency:150",
_("Repeat Customer Revenue") + ":Currency:150",
_("Total Revenue") + ":Currency:150"
], out
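# --- Illustrative sketch (editor's addition, not part of the report) ---
# Shows how the yyyy-mm bucket keys used by execute() are generated for a
# hypothetical date span.
def _example_month_keys():
	keys = []
	for year in xrange(2014, 2016):
		for month in xrange(11 if year == 2014 else 1, 3 if year == 2015 else 13):
			keys.append("{year}-{month:02d}".format(year=year, month=month))
	# Expected: ['2014-11', '2014-12', '2015-01', '2015-02']
	return keys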
| gpl-3.0 |
dlazz/ansible | lib/ansible/modules/cloud/amazon/s3_website.py | 39 | 10634 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: s3_website
short_description: Configure an s3 bucket as a website
description:
- Configure an s3 bucket as a website
version_added: "2.2"
requirements: [ boto3 ]
author: Rob White (@wimnat)
options:
name:
description:
- "Name of the s3 bucket"
required: true
error_key:
description:
- "The object key name to use when a 4XX class error occurs. To remove an error key, set to None."
redirect_all_requests:
description:
- "Describes the redirect behavior for every request to this s3 bucket website endpoint"
region:
description:
- >
AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked,
followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the
S3 Location: US Standard.
state:
description:
- "Add or remove s3 website configuration"
default: present
choices: [ 'present', 'absent' ]
suffix:
description:
- >
Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to
samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash
character.
default: index.html
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Configure an s3 bucket to redirect all requests to example.com
- s3_website:
name: mybucket.com
redirect_all_requests: example.com
state: present
# Remove website configuration from an s3 bucket
- s3_website:
name: mybucket.com
state: absent
# Configure an s3 bucket as a website with index and error pages
- s3_website:
name: mybucket.com
suffix: home.htm
error_key: errors/404.htm
state: present
'''
RETURN = '''
index_document:
description: index document
type: complex
returned: always
contains:
suffix:
description: suffix that is appended to a request that is for a directory on the website endpoint
returned: success
type: str
sample: index.html
error_document:
description: error document
type: complex
returned: always
contains:
key:
description: object key name to use when a 4XX class error occurs
returned: when error_document parameter set
type: str
sample: error.html
redirect_all_requests_to:
description: where to redirect requests
type: complex
returned: always
contains:
host_name:
description: name of the host where requests will be redirected.
returned: when redirect all requests parameter set
type: str
sample: ansible.com
routing_rules:
description: routing rules
type: complex
returned: always
contains:
routing_rule:
host_name:
description: name of the host where requests will be redirected.
returned: when host name set as part of redirect rule
type: str
sample: ansible.com
condition:
key_prefix_equals:
description: object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be
ExamplePage.html
returned: when routing rule present
type: str
sample: docs/
redirect:
replace_key_prefix_with:
description: object key prefix to use in the redirect request
returned: when routing rule present
type: str
sample: documents/
'''
import time
try:
import boto3
from botocore.exceptions import ClientError, ParamValidationError
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (HAS_BOTO3, boto3_conn, camel_dict_to_snake_dict, ec2_argument_spec,
get_aws_connection_info)
def _create_redirect_dict(url):
redirect_dict = {}
url_split = url.split(':')
# Did we split anything?
if len(url_split) == 2:
redirect_dict[u'Protocol'] = url_split[0]
redirect_dict[u'HostName'] = url_split[1].replace('//', '')
elif len(url_split) == 1:
redirect_dict[u'HostName'] = url_split[0]
else:
raise ValueError('Redirect URL appears invalid')
return redirect_dict
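# --- Illustrative sketch (editor's addition, not part of the module) ---
# Shows how a redirect target is split into protocol and host name; the URL is
# hypothetical.
def _example_create_redirect_dict():
    # Expected: {'Protocol': 'https', 'HostName': 'example.com'}
    return _create_redirect_dict('https://example.com')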
def _create_website_configuration(suffix, error_key, redirect_all_requests):
website_configuration = {}
if error_key is not None:
website_configuration['ErrorDocument'] = {'Key': error_key}
if suffix is not None:
website_configuration['IndexDocument'] = {'Suffix': suffix}
if redirect_all_requests is not None:
website_configuration['RedirectAllRequestsTo'] = _create_redirect_dict(redirect_all_requests)
return website_configuration
def enable_or_update_bucket_as_website(client_connection, resource_connection, module):
bucket_name = module.params.get("name")
redirect_all_requests = module.params.get("redirect_all_requests")
# If redirect_all_requests is set then don't use the default suffix that has been set
if redirect_all_requests is not None:
suffix = None
else:
suffix = module.params.get("suffix")
error_key = module.params.get("error_key")
changed = False
try:
bucket_website = resource_connection.BucketWebsite(bucket_name)
except ClientError as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
try:
website_config = client_connection.get_bucket_website(Bucket=bucket_name)
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
website_config = None
else:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
if website_config is None:
try:
bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
changed = True
except (ClientError, ParamValidationError) as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
except ValueError as e:
module.fail_json(msg=str(e))
else:
try:
if (suffix is not None and website_config['IndexDocument']['Suffix'] != suffix) or \
(error_key is not None and website_config['ErrorDocument']['Key'] != error_key) or \
(redirect_all_requests is not None and website_config['RedirectAllRequestsTo'] != _create_redirect_dict(redirect_all_requests)):
try:
bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
changed = True
except (ClientError, ParamValidationError) as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
except KeyError as e:
try:
bucket_website.put(WebsiteConfiguration=_create_website_configuration(suffix, error_key, redirect_all_requests))
changed = True
except (ClientError, ParamValidationError) as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
except ValueError as e:
module.fail_json(msg=str(e))
# Wait 5 secs before getting the website_config again to give it time to update
time.sleep(5)
website_config = client_connection.get_bucket_website(Bucket=bucket_name)
module.exit_json(changed=changed, **camel_dict_to_snake_dict(website_config))
def disable_bucket_as_website(client_connection, module):
changed = False
bucket_name = module.params.get("name")
try:
client_connection.get_bucket_website(Bucket=bucket_name)
except ClientError as e:
if e.response['Error']['Code'] == 'NoSuchWebsiteConfiguration':
module.exit_json(changed=changed)
else:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
try:
client_connection.delete_bucket_website(Bucket=bucket_name)
changed = True
except ClientError as e:
module.fail_json(msg=e.message, **camel_dict_to_snake_dict(e.response))
module.exit_json(changed=changed)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(type='str', required=True),
state=dict(type='str', required=True, choices=['present', 'absent']),
suffix=dict(type='str', required=False, default='index.html'),
error_key=dict(type='str', required=False),
redirect_all_requests=dict(type='str', required=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
['redirect_all_requests', 'suffix'],
['redirect_all_requests', 'error_key']
])
if not HAS_BOTO3:
module.fail_json(msg='boto3 required for this module')
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
if region:
client_connection = boto3_conn(module, conn_type='client', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params)
resource_connection = boto3_conn(module, conn_type='resource', resource='s3', region=region, endpoint=ec2_url, **aws_connect_params)
else:
module.fail_json(msg="region must be specified")
state = module.params.get("state")
if state == 'present':
enable_or_update_bucket_as_website(client_connection, resource_connection, module)
elif state == 'absent':
disable_bucket_as_website(client_connection, module)
if __name__ == '__main__':
main()
| gpl-3.0 |
johnraz/faker | faker/providers/python/__init__.py | 8 | 4613 | # coding=utf-8
from __future__ import unicode_literals
from decimal import Decimal
import sys
from faker.providers.lorem.la import Provider as Lorem
from .. import BaseProvider
if sys.version_info[0] == 2:
string_types = (basestring,)
elif sys.version_info[0] == 3:
string_types = (str, bytes)
else:
raise SystemError("Unrecognized python version: {}".format(sys.version_info[0]))
class Provider(BaseProvider):
@classmethod
def pybool(cls):
return cls.random_int(0, 1) == 1
@classmethod
def pystr(cls, max_chars=20):
return Lorem.text(max_chars)
@classmethod
def pyfloat(cls, left_digits=None, right_digits=None, positive=False):
left_digits = left_digits or cls.random_int(1, sys.float_info.dig)
right_digits = right_digits or cls.random_int(0, sys.float_info.dig - left_digits)
sign = 1 if positive or cls.random_int(0, 1) else -1
return float("{0}.{1}".format(
sign * cls.random_number(left_digits), cls.random_number(right_digits)
))
@classmethod
def pyint(cls):
return cls.random_int()
@classmethod
def pydecimal(cls, left_digits=None, right_digits=None, positive=False):
return Decimal(str(cls.pyfloat(left_digits, right_digits, positive)))
def pytuple(self, nb_elements=10, variable_nb_elements=True, *value_types):
return tuple(self.pyset(nb_elements, variable_nb_elements, *value_types))
def pyset(self, nb_elements=10, variable_nb_elements=True, *value_types):
return set(self._pyiterable(nb_elements, variable_nb_elements, *value_types))
def pylist(self, nb_elements=10, variable_nb_elements=True, *value_types):
return list(self._pyiterable(nb_elements, variable_nb_elements, *value_types))
def pyiterable(self, nb_elements=10, variable_nb_elements=True, *value_types):
return self.random_element([self.pylist, self.pytuple, self.pyset])(nb_elements, variable_nb_elements, *value_types)
def _random_type(self, type_list):
value_type = self.random_element(type_list)
method_name = "py{0}".format(value_type)
if hasattr(self, method_name):
value_type = method_name
return self.generator.format(value_type)
def _pyiterable(self, nb_elements=10, variable_nb_elements=True, *value_types):
value_types = [t if isinstance(t, string_types) else getattr(t, '__name__', type(t).__name__).lower()
for t in value_types
# avoid recursion
if t not in ['iterable', 'list', 'tuple', 'dict', 'set']]
if not value_types:
value_types = ['str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal', 'date_time', 'uri', 'email']
if variable_nb_elements:
nb_elements = self.randomize_nb_elements(nb_elements)
for f in range(nb_elements):
yield self._random_type(value_types)
def pydict(self, nb_elements=10, variable_nb_elements=True, *value_types):
"""
        Return a dictionary of nb_elements key/value pairs, with lorem words
        as keys and randomly typed values.
"""
if variable_nb_elements:
nb_elements = self.randomize_nb_elements(nb_elements)
return dict(zip(
Lorem.words(nb_elements),
self._pyiterable(nb_elements, False, *value_types)
))
def pystruct(self, count=10, *value_types):
value_types = [t if isinstance(t, string_types) else getattr(t, '__name__', type(t).__name__).lower()
for t in value_types
# avoid recursion
if t != 'struct']
if not value_types:
value_types = ['str', 'str', 'str', 'str', 'float', 'int', 'int', 'decimal', 'date_time', 'uri', 'email']
l = []
d = {}
nd = {}
for i in range(count):
d[Lorem.word()] = self._random_type(value_types)
l.append(self._random_type(value_types))
nd[Lorem.word()] = {
i: self._random_type(value_types),
i + 1: [self._random_type(value_types), self._random_type(value_types), self._random_type(value_types)],
i + 2: {
i: self._random_type(value_types),
i + 1: self._random_type(value_types),
i + 2: [
self._random_type(value_types),
self._random_type(value_types)
]
}
}
return l, d, nd
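# --- Illustrative sketch (editor's addition, not part of faker) ---
# Shows how this provider is normally reached through a Faker generator; the
# element count and value types are arbitrary.
def _example_pydict_usage():
    from faker import Faker
    fake = Faker()
    # A dict with 3 lorem-word keys and values of the requested types.
    return fake.pydict(3, False, 'str', 'int')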
| mit |
mgedmin/ansible | lib/ansible/plugins/terminal/iosxr.py | 5 | 2002 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_prompts_re = [
re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
terminal_errors_re = [
re.compile(r"% ?Error"),
re.compile(r"% ?Bad secret"),
re.compile(r"invalid input", re.I),
re.compile(r"(?:incomplete|ambiguous) command", re.I),
re.compile(r"connection timed out", re.I),
re.compile(r"[^\r\n]+ not found", re.I),
re.compile(r"'[^']' +returned error code: ?\d+"),
]
supports_multiplexing = False
def on_open_shell(self):
try:
for cmd in ['terminal length 0', 'terminal exec prompt no-timestamp']:
self._connection.exec_command(cmd)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
@staticmethod
def guess_network_os(conn):
stdin, stdout, stderr = conn.exec_command('show version')
if 'Cisco IOS XR' in stdout.read():
return 'iosxr'
| gpl-3.0 |
ysekky/chainer | chainer/functions/util/forget.py | 4 | 4081 | from chainer import cuda
from chainer import function
from chainer import variable
class _DummyFunction(function.Function):
def __init__(self, grads):
self.grads = grads
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
return xp.array(0),
def backward(self, inputs, outputs):
return self.grads
class Forget(function.Function):
def __init__(self, func):
if not callable(func):
raise TypeError('func must be callable')
self.func = func
def _call_func(self, xs):
outs = self.func(*xs)
if isinstance(outs, tuple):
for i, out in enumerate(outs):
if isinstance(out, variable.Variable):
continue
n = i + 1
suffix = {1: 'st', 2: 'nd', 3: 'rd'}.get(
n if n < 20 else n % 10, 'th')
msg = ('{}{} element of a returned tuple is not Variable, '
'but is {}').format(n, suffix, type(out))
raise RuntimeError(msg)
elif isinstance(outs, variable.Variable):
outs = (outs,)
else:
msg = ('A tuple of Variables or a Variable are expected, but {} '
'is returned.'.format(type(outs)))
raise RuntimeError(msg)
return outs
def forward(self, inputs):
with function.no_backprop_mode():
xs = [variable.Variable(x) for x in inputs]
outs = self._call_func(xs)
return tuple(out.data for out in outs)
def backward(self, inputs, grads):
with function.force_backprop_mode():
xs = [variable.Variable(x) for x in inputs]
outs = self._call_func(xs)
_DummyFunction(grads)(*outs).backward()
return tuple(x.grad for x in xs)
def forget(func, *xs):
"""Call a function without storing internal results.
On a forward propagation Chainer stores all internal results of
:class:`Function` on a computational graph as they are required on
backward-propagation. These results consume too much memory when the
internal results are too large. This method **forgets** such internal
results on forward propagation, and still supports back-propagation with
recalculation.
In a forward propagation, this method calls a given function with given
variables without creating a computational graph. That means, no internal
results are stored. In a backward propagation this method calls the given
function again to create a computational graph to execute back-propagation.
This method reduces internal memory usage. Instead it requires more
calculation time as it calls the function twice.
.. admonition:: Example
Let ``f`` be a function defined as:
>>> def f(a, b):
... return a + b + a
and, ``x`` and ``y`` be :class:`~chainer.Variable`:
>>> x = chainer.Variable(np.random.uniform(-1, 1, 5).astype('f'))
>>> y = chainer.Variable(np.random.uniform(-1, 1, 5).astype('f'))
When ``z`` is calculated as ``z = f(x, y)``, its internal result
``x + y`` is stored in memory. Instead if you call ``f`` with
:meth:`forget`:
>>> z = F.forget(f, x, y)
internal ``x + y`` is forgotten.
.. note::
The method does not support functions behaving randomly, such as
:meth:`~chainer.functions.dropout` and
        :meth:`~chainer.functions.negative_sampling`. This is because their
        first results would differ from the ones recomputed on backward.
Args:
func (callable): A function to call. It needs to be called with
:class:`~chainer.Variable` object(s) and to return a
:class:`~chainer.Variable` object or a tuple of
:class:`~chainer.Variable` objects.
xs (~chainer.Variable): Argument variables of the function.
Returns:
~chainer.Variable: A variable ``func`` returns. If it returns a tuple,
the method returns a tuple too.
"""
return Forget(func)(*xs)
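# --- Illustrative sketch (editor's addition, mirroring the docstring example) ---
# The arrays are hypothetical; the point is that f's intermediate result is
# recomputed during backward instead of being kept in memory.
def _example_forget_usage():
    import numpy as np
    from chainer import Variable
    def f(a, b):
        return a + b + a
    x = Variable(np.random.uniform(-1, 1, 5).astype('f'))
    y = Variable(np.random.uniform(-1, 1, 5).astype('f'))
    z = forget(f, x, y)
    z.grad = np.ones(5, dtype='f')
    z.backward()
    return x.grad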
| mit |
Henning-Klatt/Robot | Application/PiVideoStream.py | 1 | 3226 | #!/usr/bin/python3
import socket
import struct
import pickle
import time
from sys import stdout
from PIL import Image, ImageTk
try:
import tkinter as tk
from tkinter import messagebox
except ImportError:
import Tkinter as tk
from Tkinter import messagebox
DEBUG = False
def printD(*args):
    # Debug helper: printD is called throughout this file but was never defined
    # or imported, so this minimal stand-in is assumed.
    if DEBUG:
        print(*args)
class PiVideoStream(object):
def __init__(self, gui, host='127.0.0.1', port=8000):
self.gui = gui
self.host = host
self.port = port
self.running = False
def start(self, host='127.0.0.1'):
self.client_socket = socket.socket()
self.client_socket.settimeout(2)
try:
self.client_socket.connect((host, self.port))
except (socket.timeout, ConnectionRefusedError) as Error:
print(Error)
self.gui.streamDummy.place(x=360, y=180)
tk.messagebox.showerror("Connection Error", str(host) + ": " + str(Error))
self.stop()
return
self.client_socket.settimeout(None)
        # file-like object wrapping the connection (makefile)
self.connection = self.client_socket.makefile('rb')
self.running = True
#self.t = Thread(target=self.update, args=())
#self.t.setDaemon(1)
#self.t.start()
self.update()
time.sleep(0.2)
def update(self):
data_len = struct.unpack('<L', self.connection.read(struct.calcsize('<L')))[0]
if data_len:
printD('Updating...')
printD('data_len: %s' % data_len)
data = self.connection.read(data_len)
deserialized_data = pickle.loads(data)
printD('Frame received')
#print(deserialized_data)
img = Image.fromarray(deserialized_data)
img = img.resize((640,480), Image.ANTIALIAS)
newImage = ImageTk.PhotoImage(img)
self.gui.stream_label.configure(image=newImage)
self.gui.stream_label.image = newImage
printD("image updated")
else:
time.sleep(0.1)
if(self.running):
self.gui.stream_label.after(66, self.update)
def update_2(self):
if self.running == False:
return
# LΓ€nge des Bildes als ein 32-bit Int
data_len = struct.unpack('<L', self.connection.read(struct.calcsize('<L')))[0]
if data_len:
printD('Updating...')
printD('data_len: %s' % data_len)
data = self.connection.read(data_len)
deserialized_data = pickle.loads(data)
printD('Frame received')
#print(deserialized_data)
stdout.flush()
img = Image.fromarray(deserialized_data)
newImage = ImageTk.PhotoImage(img)
            self.gui.stream_label.configure(image=newImage)
            self.gui.stream_label.image = newImage
self.gui.master.after(70, self.update_2)
def quit(self):
try: self.stop()
except: pass
def stop(self):
self.gui.startstop_button.config(bg="green", text="Start")
self.gui.ip.config(state="normal")
self.running = False
try: self.connection.close()
except: pass
try: self.client_socket.close()
except: pass
self.client_socket = None
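# --- Illustrative counterpart sketch (editor's addition, not part of this file) ---
# Shows the wire format update() expects from the sending side: a little-endian
# 32-bit length followed by the pickled frame. The arguments are hypothetical.
def _example_send_one_frame(connection_file, frame):
    # connection_file: file-like object from socket.makefile('wb')
    # frame: numpy image array, e.g. shape (480, 640, 3)
    data = pickle.dumps(frame)
    connection_file.write(struct.pack('<L', len(data)))
    connection_file.write(data)
    connection_file.flush()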
| gpl-3.0 |
fabianp/scikit-learn | examples/applications/wikipedia_principal_eigenvector.py | 233 | 7819 | """
===============================
Wikipedia principal eigenvector
===============================
A classical way to assert the relative importance of vertices in a
graph is to compute the principal eigenvector of the adjacency matrix
so as to assign to each vertex the values of the components of the first
eigenvector as a centrality score:
http://en.wikipedia.org/wiki/Eigenvector_centrality
On the graph of webpages and links those values are called the PageRank
scores by Google.
The goal of this example is to analyze the graph of links inside
wikipedia articles to rank articles by relative importance according to
this eigenvector centrality.
The traditional way to compute the principal eigenvector is to use the
power iteration method:
http://en.wikipedia.org/wiki/Power_iteration
Here the computation is achieved thanks to Martinsson's Randomized SVD
algorithm implemented in the scikit.
The graph data is fetched from the DBpedia dumps. DBpedia is an extraction
of the latent structured data of the Wikipedia content.
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from bz2 import BZ2File
import os
from datetime import datetime
from pprint import pprint
from time import time
import numpy as np
from scipy import sparse
from sklearn.decomposition import randomized_svd
from sklearn.externals.joblib import Memory
from sklearn.externals.six.moves.urllib.request import urlopen
from sklearn.externals.six import iteritems
print(__doc__)
###############################################################################
# Where to download the data, if not already on disk
redirects_url = "http://downloads.dbpedia.org/3.5.1/en/redirects_en.nt.bz2"
redirects_filename = redirects_url.rsplit("/", 1)[1]
page_links_url = "http://downloads.dbpedia.org/3.5.1/en/page_links_en.nt.bz2"
page_links_filename = page_links_url.rsplit("/", 1)[1]
resources = [
(redirects_url, redirects_filename),
(page_links_url, page_links_filename),
]
for url, filename in resources:
if not os.path.exists(filename):
print("Downloading data from '%s', please wait..." % url)
opener = urlopen(url)
open(filename, 'wb').write(opener.read())
print()
###############################################################################
# Loading the redirect files
memory = Memory(cachedir=".")
def index(redirects, index_map, k):
"""Find the index of an article name after redirect resolution"""
k = redirects.get(k, k)
return index_map.setdefault(k, len(index_map))
DBPEDIA_RESOURCE_PREFIX_LEN = len("http://dbpedia.org/resource/")
SHORTNAME_SLICE = slice(DBPEDIA_RESOURCE_PREFIX_LEN + 1, -1)
def short_name(nt_uri):
"""Remove the < and > URI markers and the common URI prefix"""
return nt_uri[SHORTNAME_SLICE]
def get_redirects(redirects_filename):
"""Parse the redirections and build a transitively closed map out of it"""
redirects = {}
print("Parsing the NT redirect file")
for l, line in enumerate(BZ2File(redirects_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
redirects[short_name(split[0])] = short_name(split[2])
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
# compute the transitive closure
print("Computing the transitive closure of the redirect relation")
for l, source in enumerate(redirects.keys()):
transitive_target = None
target = redirects[source]
seen = set([source])
while True:
transitive_target = target
target = redirects.get(target)
if target is None or target in seen:
break
seen.add(target)
redirects[source] = transitive_target
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
return redirects
# disabling joblib as the pickling of large dicts seems much too slow
#@memory.cache
def get_adjacency_matrix(redirects_filename, page_links_filename, limit=None):
"""Extract the adjacency graph as a scipy sparse matrix
Redirects are resolved first.
Returns X, the scipy sparse adjacency matrix, redirects as python
dict from article names to article names and index_map a python dict
from article names to python int (article indexes).
"""
print("Computing the redirect map")
redirects = get_redirects(redirects_filename)
print("Computing the integer index map")
index_map = dict()
links = list()
for l, line in enumerate(BZ2File(page_links_filename)):
split = line.split()
if len(split) != 4:
print("ignoring malformed line: " + line)
continue
i = index(redirects, index_map, short_name(split[0]))
j = index(redirects, index_map, short_name(split[2]))
links.append((i, j))
if l % 1000000 == 0:
print("[%s] line: %08d" % (datetime.now().isoformat(), l))
if limit is not None and l >= limit - 1:
break
print("Computing the adjacency matrix")
X = sparse.lil_matrix((len(index_map), len(index_map)), dtype=np.float32)
for i, j in links:
X[i, j] = 1.0
del links
print("Converting to CSR representation")
X = X.tocsr()
print("CSR conversion done")
return X, redirects, index_map
# stop after 5M links to make it possible to work in RAM
X, redirects, index_map = get_adjacency_matrix(
redirects_filename, page_links_filename, limit=5000000)
names = dict((i, name) for name, i in iteritems(index_map))
print("Computing the principal singular vectors using randomized_svd")
t0 = time()
U, s, V = randomized_svd(X, 5, n_iter=3)
print("done in %0.3fs" % (time() - t0))
# print the names of the wikipedia related strongest components of the
# principal singular vector which should be similar to the highest eigenvector
print("Top wikipedia pages according to principal singular vectors")
pprint([names[i] for i in np.abs(U.T[0]).argsort()[-10:]])
pprint([names[i] for i in np.abs(V[0]).argsort()[-10:]])
def centrality_scores(X, alpha=0.85, max_iter=100, tol=1e-10):
"""Power iteration computation of the principal eigenvector
This method is also known as Google PageRank and the implementation
is based on the one from the NetworkX project (BSD licensed too)
with copyrights by:
Aric Hagberg <[email protected]>
Dan Schult <[email protected]>
Pieter Swart <[email protected]>
"""
n = X.shape[0]
X = X.copy()
incoming_counts = np.asarray(X.sum(axis=1)).ravel()
print("Normalizing the graph")
for i in incoming_counts.nonzero()[0]:
X.data[X.indptr[i]:X.indptr[i + 1]] *= 1.0 / incoming_counts[i]
dangle = np.asarray(np.where(X.sum(axis=1) == 0, 1.0 / n, 0)).ravel()
scores = np.ones(n, dtype=np.float32) / n # initial guess
for i in range(max_iter):
print("power iteration #%d" % i)
prev_scores = scores
scores = (alpha * (scores * X + np.dot(dangle, prev_scores))
+ (1 - alpha) * prev_scores.sum() / n)
# check convergence: normalized l_inf norm
scores_max = np.abs(scores).max()
if scores_max == 0.0:
scores_max = 1.0
err = np.abs(scores - prev_scores).max() / scores_max
print("error: %0.6f" % err)
if err < n * tol:
return scores
return scores
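# A minimal, self-contained sketch of the damped update rule used above, on a
# hypothetical dense 3-node graph (nodes 0 and 1 both link to node 2); it
# ignores the dangling-node correction. Node 2 should end up with the largest
# score.
def _toy_power_iteration(alpha=0.85, n_iter=50):
    A = np.array([[0., 0., 1.],
                  [0., 0., 1.],
                  [1., 0., 0.]], dtype=np.float32)
    A = A / A.sum(axis=1)[:, np.newaxis]       # row-normalize out-links
    scores = np.ones(3, dtype=np.float32) / 3  # uniform initial guess
    for _ in range(n_iter):
        scores = alpha * scores.dot(A) + (1 - alpha) * scores.sum() / 3
    return scores
# print(_toy_power_iteration())  # node 2 gets the largest score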
print("Computing principal eigenvector score using a power iteration method")
t0 = time()
scores = centrality_scores(X, max_iter=100, tol=1e-10)
print("done in %0.3fs" % (time() - t0))
pprint([names[i] for i in np.abs(scores).argsort()[-10:]])
| bsd-3-clause |
liorvh/raspberry_pwn | src/pentest/metagoofil/hachoir_core/field/basic_field_set.py | 21 | 4746 | from hachoir_core.field import Field, FieldError
from hachoir_core.stream import InputStream
from hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN
from hachoir_core.event_handler import EventHandler
class ParserError(FieldError):
"""
Error raised by a field set.
@see: L{FieldError}
"""
pass
class MatchError(FieldError):
"""
Error raised by a field set when the stream content doesn't
match the file format.
@see: L{FieldError}
"""
pass
class BasicFieldSet(Field):
_event_handler = None
is_field_set = True
endian = None
def __init__(self, parent, name, stream, description, size):
# Sanity checks (preconditions)
assert not parent or issubclass(parent.__class__, BasicFieldSet)
assert issubclass(stream.__class__, InputStream)
# Set field set size
if size is None and self.static_size:
assert isinstance(self.static_size, (int, long))
size = self.static_size
# Set Field attributes
self._parent = parent
self._name = name
self._size = size
self._description = description
self.stream = stream
self._field_array_count = {}
# Set endian
if not self.endian:
assert parent and parent.endian
self.endian = parent.endian
if parent:
# This field set is one of the root leafs
self._address = parent.nextFieldAddress()
self.root = parent.root
assert id(self.stream) == id(parent.stream)
else:
# This field set is the root
self._address = 0
self.root = self
self._global_event_handler = None
# Sanity checks (post-conditions)
assert self.endian in (BIG_ENDIAN, LITTLE_ENDIAN)
if (self._size is not None) and (self._size <= 0):
raise ParserError("Invalid parser '%s' size: %s" % (self.path, self._size))
def reset(self):
self._field_array_count = {}
def createValue(self):
return None
def connectEvent(self, event_name, handler, local=True):
assert event_name in (
# Callback prototype: def f(field)
# Called when new value is already set
"field-value-changed",
# Callback prototype: def f(field)
# Called when field size is already set
"field-resized",
# A new field has been inserted in the field set
# Callback prototype: def f(index, new_field)
"field-inserted",
# Callback prototype: def f(old_field, new_field)
# Called when new field is already in field set
"field-replaced",
# Callback prototype: def f(field, new_value)
# Called to ask to set new value
"set-field-value"
), "Event name %r is invalid" % event_name
if local:
if self._event_handler is None:
self._event_handler = EventHandler()
self._event_handler.connect(event_name, handler)
else:
if self.root._global_event_handler is None:
self.root._global_event_handler = EventHandler()
self.root._global_event_handler.connect(event_name, handler)
def raiseEvent(self, event_name, *args):
# Transfer event to local listeners
if self._event_handler is not None:
self._event_handler.raiseEvent(event_name, *args)
# Transfer event to global listeners
if self.root._global_event_handler is not None:
self.root._global_event_handler.raiseEvent(event_name, *args)
def setUniqueFieldName(self, field):
key = field._name[:-2]
try:
self._field_array_count[key] += 1
except KeyError:
self._field_array_count[key] = 0
field._name = key + "[%u]" % self._field_array_count[key]
def readFirstFields(self, number):
"""
Read first number fields if they are not read yet.
Returns number of new added fields.
"""
number = number - self.current_length
if 0 < number:
return self.readMoreFields(number)
else:
return 0
def createFields(self):
raise NotImplementedError()
def __iter__(self):
raise NotImplementedError()
def __len__(self):
raise NotImplementedError()
def getField(self, key, const=True):
raise NotImplementedError()
def nextFieldAddress(self):
raise NotImplementedError()
def getFieldIndex(self, field):
raise NotImplementedError()
def readMoreFields(self, number):
raise NotImplementedError()
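# A hypothetical listener illustrating the "field-value-changed" callback
# prototype documented in connectEvent() above; attaching it needs a concrete
# parser/field set instance, hence the commented-out call.
def log_value_change(field):
    print "field %s changed to %r" % (field.path, field.value)
# some_field_set.connectEvent("field-value-changed", log_value_change, local=False)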
| gpl-3.0 |
BT-jmichaud/account-invoicing | account_invoice_rounding/account.py | 10 | 10110 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Vaucher
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from openerp.tools.float_utils import float_round, float_compare
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class AccountInvoice(models.Model):
_inherit = "account.invoice"
def _swedish_add_invoice_line(self, cr, uid, invoice,
rounded_total, delta, context=None):
""" Create a invoice_line with the diff of rounding """
invoice_line_obj = self.pool.get('account.invoice.line')
obj_precision = self.pool.get('decimal.precision')
prec = obj_precision.precision_get(cr, uid, 'Account')
company = invoice.company_id
if not invoice.global_round_line_id.id:
new_invoice_line = {
'name': _('Rounding'),
'price_unit': -delta,
'account_id': company.tax_calculation_rounding_account_id.id,
'invoice_id': invoice.id,
'is_rounding': True,
}
invoice_line_obj.create(cr, uid, new_invoice_line, context=context)
elif float_compare(invoice.global_round_line_id.price_unit, -delta,
precision_digits=prec) != 0:
invoice_line_obj.write(
cr, uid, invoice.global_round_line_id.id,
{'price_unit': -delta}, context=context)
amount_untaxed = float_round(invoice.amount_untaxed - delta,
precision_digits=prec)
return {'amount_total': rounded_total,
'amount_untaxed': amount_untaxed}
@staticmethod
def _all_invoice_tax_line_computed(invoice):
""" Check if all taxes have been computed on invoice lines
:return boolean True if all tax were computed
"""
tax_ids = set()
for line in invoice.invoice_line:
# invoice_line_tax_id is a many2many if you wonder about it
for tax in line.invoice_line_tax_id:
if not tax.price_include:
tax_ids.add(tax.id)
computed_tax_ids = [tax.id for tax in invoice.tax_line]
return len(tax_ids) == len(computed_tax_ids)
def _swedish_round_globally(self, cr, uid, invoice,
rounded_total, delta, context=None):
""" Add the diff to the biggest tax line
This adjustment must be done only after all taxes are computed
"""
# Here we check that all tax lines have been computed
if not self._all_invoice_tax_line_computed(invoice):
return {}
obj_precision = self.pool.get('decimal.precision')
prec = obj_precision.precision_get(cr, uid, 'Account')
inv_tax_obj = self.pool.get('account.invoice.tax')
ajust_line = None
for tax_line in invoice.tax_line:
if not ajust_line or tax_line.amount > ajust_line.amount:
ajust_line = tax_line
if ajust_line:
amount = ajust_line.amount - delta
vals = inv_tax_obj.amount_change(
cr, uid, [ajust_line.id],
amount,
currency_id=invoice.currency_id.id,
company_id=invoice.company_id.id,
date_invoice=invoice.date_invoice)['value']
ajust_line.write({'amount': amount,
'tax_amount': vals['tax_amount']})
amount_tax = float_round(invoice.amount_tax - delta,
precision_digits=prec)
return {'amount_total': rounded_total,
'amount_tax': amount_tax}
return {}
def _compute_swedish_rounding(self, cr, uid, invoice, context=None):
"""
Depending on the method defined, we add an invoice line or adapt the
tax lines to have a rounded total amount on the invoice
:param invoice: invoice browse record
:return dict: updated values for _amount_all
"""
obj_precision = self.pool.get('decimal.precision')
# avoid recursivity
if 'swedish_write' in context:
return {}
company = invoice.company_id
round_method = company.tax_calculation_rounding_method
if round_method[:7] != 'swedish':
return {}
prec = obj_precision.precision_get(cr, uid, 'Account')
rounding_prec = company.tax_calculation_rounding
rounded_total = float_round(invoice.amount_total,
precision_rounding=rounding_prec)
if float_compare(rounded_total, invoice.amount_total,
precision_digits=prec) == 0:
return {}
# To avoid recursivity as we need to write on invoice or
# on objects triggering computation of _amount_all
ctx = context.copy()
ctx['swedish_write'] = True
delta = float_round(invoice.amount_total - rounded_total,
precision_digits=prec)
if round_method == 'swedish_add_invoice_line':
return self._swedish_add_invoice_line(cr, uid, invoice,
rounded_total, delta,
context=ctx)
elif round_method == 'swedish_round_globally':
return self._swedish_round_globally(cr, uid, invoice,
rounded_total, delta,
context=ctx)
return {}
@api.one
@api.depends('invoice_line.price_subtotal', 'tax_line.amount')
def _compute_amount(self):
""" Add swedish rounding computing
Makes sure invoice line for rounding is not computed in totals
"""
super(AccountInvoice, self)._compute_amount()
if self.type in ('out_invoice', 'out_refund'):
if self.global_round_line_id.id:
line = self.global_round_line_id
if line:
self.amount_untaxed -= line.price_subtotal
self.amount_total = self.amount_tax + self.amount_untaxed
swedish_rounding = self._compute_swedish_rounding(self)
if swedish_rounding:
self.amount_total = swedish_rounding['amount_total']
if 'amount_tax' in swedish_rounding:
self.amount_tax = swedish_rounding['amount_tax']
elif 'amount_untaxed' in swedish_rounding:
self.amount_untaxed = (
swedish_rounding['amount_untaxed'])
@api.one
def _get_rounding_invoice_line_id(self):
lines = self.env['account.invoice.line'].search(
[('invoice_id', '=', self.id),
('is_rounding', '=', True)])
self.global_round_line_id = lines
global_round_line_id = fields.Many2one(
'account.invoice.line',
string='Invoice Line for total rounding',
compute=_get_rounding_invoice_line_id,
readonly=True)
amount_untaxed = fields.Float(
digits_compute=dp.get_precision('Account'),
string='Subtotal',
track_visibility='always',
compute=_compute_amount,
store=True)
amount_tax = fields.Float(
compute=_compute_amount,
digits_compute=dp.get_precision('Account'),
string='Tax',
store=True)
amount_total = fields.Float(
compute=_compute_amount,
digits_compute=dp.get_precision('Account'),
string='Total',
store=True)
class AccountInvoiceLine(models.Model):
_inherit = 'account.invoice.line'
is_rounding = fields.Boolean('Rounding Line')
class AccountTax(models.Model):
_inherit = 'account.tax'
def compute_inv(self, cr, uid, taxes, price_unit, quantity,
product=None, partner=None, precision=None):
"""
Using swedish rounding we want to keep standard global precision
so we add precision to do global computation
"""
if taxes and taxes[0].company_id.tax_calculation_rounding_method[:7] \
== 'swedish':
if not precision:
precision = self.pool['decimal.precision'].precision_get(
cr, uid, 'Account')
precision += 5
return super(AccountTax, self).compute_inv(
cr, uid, taxes, price_unit, quantity, product=product,
partner=partner, precision=precision)
def _compute(self, cr, uid, taxes, price_unit, quantity,
product=None, partner=None, precision=None):
"""Using swedish rounding we want to keep standard global precision
so we add precision to do global computation
"""
if taxes and taxes[0].company_id.tax_calculation_rounding_method[:7] \
== 'swedish':
if not precision:
precision = self.pool['decimal.precision'].precision_get(
cr, uid, 'Account')
precision += 5
return super(AccountTax, self)._compute(
cr, uid, taxes, price_unit, quantity, product=product,
partner=partner, precision=precision)
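# A minimal sketch (hypothetical amounts, not tied to any record) of the
# arithmetic _compute_swedish_rounding relies on: with a cash rounding
# precision of 0.05 and 2 accounting decimals, 100.02 rounds to 100.00 and the
# 0.02 delta is what gets pushed into a rounding line or the biggest tax line.
def _swedish_rounding_sketch(amount_total=100.02, rounding_prec=0.05, prec=2):
    rounded_total = float_round(amount_total, precision_rounding=rounding_prec)
    if float_compare(rounded_total, amount_total, precision_digits=prec) == 0:
        return rounded_total, 0.0
    delta = float_round(amount_total - rounded_total, precision_digits=prec)
    return rounded_total, delta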
| agpl-3.0 |
UQ-UQx/edx-platform_lti | lms/djangoapps/courseware/features/common.py | 14 | 7888 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from __future__ import absolute_import
import time
from lettuce import world, step
from lettuce.django import django_url
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from student.models import CourseEnrollment
from xmodule.modulestore.django import modulestore
from xmodule.course_module import CourseDescriptor
from courseware.courses import get_course_by_id
from xmodule import seq_module, vertical_module
from logging import getLogger
logger = getLogger(__name__)
@step('I (.*) capturing of screenshots before and after each step$')
def configure_screenshots_for_all_steps(_step, action):
"""
A step to be used in *.feature files. Enables/disables
automatic saving of screenshots before and after each step in a
scenario.
"""
action = action.strip()
if action == 'enable':
world.auto_capture_screenshots = True
elif action == 'disable':
world.auto_capture_screenshots = False
else:
raise ValueError('Parameter `action` should be one of "enable" or "disable".')
@world.absorb
def capture_screenshot_before_after(func):
"""
A decorator that will take a screenshot before and after the applied
function is run. Use this if you do not want to capture screenshots
for each step in a scenario, but rather want to debug a single function.
"""
def inner(*args, **kwargs):
prefix = round(time.time() * 1000)
world.capture_screenshot("{}_{}_{}".format(
prefix, func.func_name, 'before'
))
ret_val = func(*args, **kwargs)
world.capture_screenshot("{}_{}_{}".format(
prefix, func.func_name, 'after'
))
return ret_val
return inner
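# A hypothetical helper showing how the decorator above is meant to be used:
# screenshots are captured only around this single call rather than around
# every step in the scenario.
@capture_screenshot_before_after
def open_component_editor():
    world.css_click('a.edit-button')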
@step(u'The course "([^"]*)" exists$')
def create_course(_step, course):
# First clear the modulestore so we don't try to recreate
# the same course twice
# This also ensures that the necessary templates are loaded
world.clear_courses()
# Create the course
# We always use the same org and display name,
# but vary the course identifier (e.g. 600x or 191x)
world.scenario_dict['COURSE'] = world.CourseFactory.create(
org='edx',
number=course,
display_name='Test Course'
)
# Add a chapter to the course to contain problems
world.scenario_dict['CHAPTER'] = world.ItemFactory.create(
parent_location=world.scenario_dict['COURSE'].location,
category='chapter',
display_name='Test Chapter',
publish_item=True, # Not needed for direct-only but I'd rather the test didn't know that
)
world.scenario_dict['SECTION'] = world.ItemFactory.create(
parent_location=world.scenario_dict['CHAPTER'].location,
category='sequential',
display_name='Test Section',
publish_item=True,
)
@step(u'I am registered for the course "([^"]*)"$')
def i_am_registered_for_the_course(step, course):
# Create the course
create_course(step, course)
# Create the user
world.create_user('robot', 'test')
user = User.objects.get(username='robot')
# If the user is not already enrolled, enroll the user.
# TODO: change to factory
CourseEnrollment.enroll(user, course_id(course))
world.log_in(username='robot', password='test')
@step(u'The course "([^"]*)" has extra tab "([^"]*)"$')
def add_tab_to_course(_step, course, extra_tab_name):
world.ItemFactory.create(
parent_location=course_location(course),
category="static_tab",
display_name=str(extra_tab_name))
@step(u'I am in a course$')
def go_into_course(step):
step.given('I am registered for the course "6.002x"')
step.given('And I am logged in')
step.given('And I click on View Courseware')
# Do we really use these 3 w/ a different course than is in the scenario_dict? if so, why? If not,
# then get rid of the override arg
def course_id(course_num):
return world.scenario_dict['COURSE'].id.replace(course=course_num)
def course_location(course_num):
return world.scenario_dict['COURSE'].location.replace(course=course_num)
def section_location(course_num):
return world.scenario_dict['SECTION'].location.replace(course=course_num)
def visit_scenario_item(item_key):
"""
Go to the courseware page containing the item stored in `world.scenario_dict`
under the key `item_key`
"""
url = django_url(reverse(
'jump_to',
kwargs={
'course_id': unicode(world.scenario_dict['COURSE'].id),
'location': unicode(world.scenario_dict[item_key].location),
}
))
world.browser.visit(url)
def get_courses():
'''
Returns dict of lists of courses available, keyed by course.org (ie university).
Courses are sorted by course.number.
'''
courses = [c for c in modulestore().get_courses()
if isinstance(c, CourseDescriptor)] # skip error descriptors
courses = sorted(courses, key=lambda course: course.location.course)
return courses
def get_courseware_with_tabs(course_id):
"""
Given a course_id (string), return a courseware array of dictionaries for the
top three levels of navigation. Same as get_courseware(), except it includes
the tabs on the right hand main navigation page.
This hides the appropriate courseware as defined by the hide_from_toc field:
chapter.hide_from_toc
Example:
[{
'chapter_name': 'Overview',
'sections': [{
'clickable_tab_count': 0,
'section_name': 'Welcome',
'tab_classes': []
}, {
'clickable_tab_count': 1,
'section_name': 'System Usage Sequence',
'tab_classes': ['VerticalDescriptor']
}, {
'clickable_tab_count': 0,
'section_name': 'Lab0: Using the tools',
'tab_classes': ['HtmlDescriptor', 'HtmlDescriptor', 'CapaDescriptor']
}, {
'clickable_tab_count': 0,
'section_name': 'Circuit Sandbox',
'tab_classes': []
}]
}, {
'chapter_name': 'Week 1',
'sections': [{
'clickable_tab_count': 4,
'section_name': 'Administrivia and Circuit Elements',
'tab_classes': ['VerticalDescriptor', 'VerticalDescriptor', 'VerticalDescriptor', 'VerticalDescriptor']
}, {
'clickable_tab_count': 0,
'section_name': 'Basic Circuit Analysis',
'tab_classes': ['CapaDescriptor', 'CapaDescriptor', 'CapaDescriptor']
}, {
'clickable_tab_count': 0,
'section_name': 'Resistor Divider',
'tab_classes': []
}, {
'clickable_tab_count': 0,
'section_name': 'Week 1 Tutorials',
'tab_classes': []
}]
}, {
'chapter_name': 'Midterm Exam',
'sections': [{
'clickable_tab_count': 2,
'section_name': 'Midterm Exam',
'tab_classes': ['VerticalDescriptor', 'VerticalDescriptor']
}]
}]
"""
course = get_course_by_id(course_id)
chapters = [chapter for chapter in course.get_children() if not chapter.hide_from_toc]
courseware = [{
'chapter_name': c.display_name_with_default,
'sections': [{
'section_name': s.display_name_with_default,
'clickable_tab_count': len(s.get_children()) if (type(s) == seq_module.SequenceDescriptor) else 0,
'tabs': [{
'children_count': len(t.get_children()) if (type(t) == vertical_module.VerticalDescriptor) else 0,
'class': t.__class__.__name__} for t in s.get_children()
]
} for s in c.get_children() if not s.hide_from_toc]
} for c in chapters]
return courseware
| agpl-3.0 |
rickerc/ceilometer_audit | ceilometer/publisher/test.py | 8 | 1227 | # -*- encoding: utf-8 -*-
#
# Copyright © 2013 eNovance
#
# Author: Julien Danjou <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Publish a sample in memory, useful for testing
"""
from ceilometer import publisher
class TestPublisher(publisher.PublisherBase):
"""Publisher used in unit testing."""
def __init__(self, parsed_url):
self.samples = []
self.calls = 0
def publish_samples(self, context, samples):
"""Send a metering message for publishing
:param context: Execution context from the service or RPC call
:param samples: Samples from pipeline after transformation
"""
self.samples.extend(samples)
self.calls += 1
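# A minimal usage sketch with hypothetical samples: the publisher only records
# what it is handed, which is exactly what unit tests assert on afterwards.
if __name__ == '__main__':
    publisher_under_test = TestPublisher(parsed_url=None)
    publisher_under_test.publish_samples(None, ['sample-a', 'sample-b'])
    assert publisher_under_test.samples == ['sample-a', 'sample-b']
    assert publisher_under_test.calls == 1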
| apache-2.0 |
apaleyes/mxnet | example/speech_recognition/stt_layer_fc.py | 4 | 5311 | import mxnet as mx
from stt_layer_batchnorm import batchnorm
def fc(net,
num_hidden,
act_type,
weight=None,
bias=None,
no_bias=False,
name=None
):
# when neither weight nor bias has a specific name
if weight is None and bias is None:
net = mx.sym.FullyConnected(data=net, num_hidden=num_hidden, no_bias=no_bias, name=name)
# when weight doesn't have a specific name but bias does
elif weight is None and bias is not None:
if no_bias:
net = mx.sym.FullyConnected(data=net, num_hidden=num_hidden, no_bias=no_bias, name=name)
else:
net = mx.sym.FullyConnected(data=net, num_hidden=num_hidden, bias=bias, no_bias=no_bias, name=name)
# when bias doesn't have a specific name but weight does
elif weight is not None and bias is None:
net = mx.sym.FullyConnected(data=net, num_hidden=num_hidden, weight=weight, no_bias=no_bias, name=name)
# when both weight and bias have specific names
else:
if no_bias:
net = mx.sym.FullyConnected(data=net, num_hidden=num_hidden, weight=weight, no_bias=no_bias, name=name)
else:
net = mx.sym.FullyConnected(data=net, num_hidden=num_hidden, weight=weight, bias=bias, no_bias=no_bias, name=name)
# activation
if act_type is not None:
net = mx.sym.Activation(data=net, act_type=act_type, name="%s_activation" % name)
return net
def sequence_fc(net,
seq_len,
num_layer,
prefix,
num_hidden_list=[],
act_type_list=[],
is_batchnorm=False,
dropout_rate=0,
):
if num_layer == len(num_hidden_list) == len(act_type_list):
if num_layer > 0:
weight_list = []
bias_list = []
for layer_index in range(num_layer):
weight_list.append(mx.sym.Variable(name='%s_sequence_fc%d_weight' % (prefix, layer_index)))
# if you use batchnorm, bias does not have any effect
if not is_batchnorm:
bias_list.append(mx.sym.Variable(name='%s_sequence_fc%d_bias' % (prefix, layer_index)))
# batch normalization parameters
gamma_list = []
beta_list = []
if is_batchnorm:
for layer_index in range(num_layer):
gamma_list.append(mx.sym.Variable(name='%s_sequence_fc%d_gamma' % (prefix, layer_index)))
beta_list.append(mx.sym.Variable(name='%s_sequence_fc%d_beta' % (prefix, layer_index)))
# batch normalization parameters ends
if type(net) is mx.symbol.Symbol:
net = mx.sym.SliceChannel(data=net, num_outputs=seq_len, axis=1, squeeze_axis=1)
elif type(net) is list:
for net_index, one_net in enumerate(net):
if type(one_net) is not mx.symbol.Symbol:
raise Exception('%d th elements of the net should be mx.symbol.Symbol' % net_index)
else:
raise Exception('type of net should be either mx.symbol.Symbol or a list of mx.symbol.Symbol')
hidden_all = []
for seq_index in range(seq_len):
hidden = net[seq_index]
for layer_index in range(num_layer):
if dropout_rate > 0:
hidden = mx.sym.Dropout(data=hidden, p=dropout_rate)
if is_batchnorm:
hidden = fc(net=hidden,
num_hidden=num_hidden_list[layer_index],
act_type=None,
weight=weight_list[layer_index],
no_bias=is_batchnorm,
name="%s_t%d_l%d_fc" % (prefix, seq_index, layer_index)
)
# last layer doesn't have batchnorm
hidden = batchnorm(net=hidden,
gamma=gamma_list[layer_index],
beta=beta_list[layer_index],
name="%s_t%d_l%d_batchnorm" % (prefix, seq_index, layer_index))
hidden = mx.sym.Activation(data=hidden, act_type=act_type_list[layer_index],
name="%s_t%d_l%d_activation" % (prefix, seq_index, layer_index))
else:
hidden = fc(net=hidden,
num_hidden=num_hidden_list[layer_index],
act_type=act_type_list[layer_index],
weight=weight_list[layer_index],
bias=bias_list[layer_index]
)
hidden_all.append(hidden)
net = hidden_all
return net
else:
raise Exception("length doesn't met - num_layer:",
num_layer, ",len(num_hidden_list):",
len(num_hidden_list),
",len(act_type_list):",
len(act_type_list)
)
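# A minimal usage sketch with hypothetical names and sizes: one fully
# connected layer plus a ReLU activation built on top of a plain data symbol.
if __name__ == '__main__':
    data = mx.sym.Variable('data')
    example = fc(net=data, num_hidden=256, act_type='relu', name='fc_example')
    print(example.list_arguments())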
| apache-2.0 |
zofuthan/edx-platform | cms/djangoapps/contentstore/features/component_settings_editor_helpers.py | 38 | 9441 | # disable missing docstring
# pylint: disable=missing-docstring
from lettuce import world
from nose.tools import assert_equal, assert_in # pylint: disable=no-name-in-module
from terrain.steps import reload_the_page
from common import type_in_codemirror
from selenium.webdriver.common.keys import Keys
@world.absorb
def create_component_instance(step, category, component_type=None, is_advanced=False, advanced_component=None):
"""
Create a new component in a Unit.
Parameters
----------
category: component type (discussion, html, problem, video, advanced)
component_type: for components with multiple templates, the link text in the menu
is_advanced: for problems, is the desired component under the advanced menu?
advanced_component: for advanced components, the related value of policy key 'advanced_modules'
"""
assert_in(category, ['advanced', 'problem', 'html', 'video', 'discussion'])
component_button_css = 'span.large-{}-icon'.format(category.lower())
if category == 'problem':
module_css = 'div.xmodule_CapaModule'
elif category == 'advanced':
module_css = 'div.xmodule_{}Module'.format(advanced_component.title())
else:
module_css = 'div.xmodule_{}Module'.format(category.title())
# Count how many of that module is on the page. Later we will
# assert that one more was added.
# We need to use world.browser.find_by_css instead of world.css_find
# because it's ok if there are currently zero of them.
module_count_before = len(world.browser.find_by_css(module_css))
# Disable the jquery animation for the transition to the menus.
world.disable_jquery_animations()
world.css_click(component_button_css)
if category in ('problem', 'html', 'advanced'):
world.wait_for_invisible(component_button_css)
click_component_from_menu(category, component_type, is_advanced)
expected_count = module_count_before + 1
world.wait_for(
lambda _: len(world.css_find(module_css)) == expected_count,
timeout=20
)
@world.absorb
def click_new_component_button(step, component_button_css):
step.given('I have clicked the new unit button')
world.css_click(component_button_css)
def _click_advanced():
css = 'ul.problem-type-tabs a[href="#tab2"]'
world.css_click(css)
# Wait for the advanced tab items to be displayed
tab2_css = 'div.ui-tabs-panel#tab2'
world.wait_for_visible(tab2_css)
def _find_matching_button(category, component_type):
"""
Find the button with the specified text. There should be one and only one.
"""
# The tab shows buttons for the given category
buttons = world.css_find('div.new-component-{} button'.format(category))
# Find the button whose text matches what you're looking for
matched_buttons = [btn for btn in buttons if btn.text == component_type]
# There should be one and only one
assert_equal(len(matched_buttons), 1)
return matched_buttons[0]
def click_component_from_menu(category, component_type, is_advanced):
"""
Creates a component for a category with more
than one template, i.e. HTML and Problem.
For some problem types, it is necessary to click to
the Advanced tab.
The component_type is the link text, e.g. "Blank Common Problem"
"""
if is_advanced:
# Sometimes this click does not work if you go too fast.
world.retry_on_exception(
_click_advanced,
ignored_exceptions=AssertionError,
)
# Retry this in case the list is empty because you tried too fast.
link = world.retry_on_exception(
lambda: _find_matching_button(category, component_type),
ignored_exceptions=AssertionError
)
# Wait for the link to be clickable. If you go too fast it is not.
world.retry_on_exception(lambda: link.click())
@world.absorb
def edit_component_and_select_settings():
world.edit_component()
world.ensure_settings_visible()
@world.absorb
def ensure_settings_visible():
# Select the 'settings' tab if there is one (it isn't displayed if it is the only option)
settings_button = world.browser.find_by_css('.settings-button')
if len(settings_button) > 0:
world.css_click('.settings-button')
@world.absorb
def edit_component(index=0):
# Verify that the "loading" indication has been hidden.
world.wait_for_loading()
# Verify that the "edit" button is present.
world.wait_for(lambda _driver: world.css_visible('a.edit-button'))
world.css_click('a.edit-button', index)
world.wait_for_ajax_complete()
@world.absorb
def select_editor_tab(tab_name):
editor_tabs = world.browser.find_by_css('.editor-tabs a')
expected_tab_text = tab_name.strip().upper()
matching_tabs = [tab for tab in editor_tabs if tab.text.upper() == expected_tab_text]
assert len(matching_tabs) == 1
tab = matching_tabs[0]
tab.click()
world.wait_for_ajax_complete()
def enter_xml_in_advanced_problem(step, text):
"""
Edits an advanced problem (assumes there is only one on the page),
types the provided XML, and saves the component.
"""
world.edit_component()
type_in_codemirror(0, text)
world.save_component()
@world.absorb
def verify_setting_entry(setting, display_name, value, explicitly_set):
"""
Verify the capa module fields are set as expected in the
Advanced Settings editor.
Parameters
----------
setting: the WebDriverElement object found in the browser
display_name: the string expected as the label
value: the expected field value
explicitly_set: True if the value is expected to have been explicitly set
for the problem, rather than derived from the defaults. This is verified
by the existence of a "Clear" button next to the field value.
"""
assert_equal(display_name, setting.find_by_css('.setting-label')[0].html.strip())
# Check if the web object is a list type
# If so, we use a slightly different mechanism for determining its value
if setting.has_class('metadata-list-enum') or setting.has_class('metadata-dict') or setting.has_class('metadata-video-translations'):
list_value = ', '.join(ele.value for ele in setting.find_by_css('.list-settings-item'))
assert_equal(value, list_value)
elif setting.has_class('metadata-videolist-enum'):
list_value = ', '.join(ele.find_by_css('input')[0].value for ele in setting.find_by_css('.videolist-settings-item'))
assert_equal(value, list_value)
else:
assert_equal(value, setting.find_by_css('.setting-input')[0].value)
# VideoList doesn't have clear button
if not setting.has_class('metadata-videolist-enum'):
settingClearButton = setting.find_by_css('.setting-clear')[0]
assert_equal(explicitly_set, settingClearButton.has_class('active'))
assert_equal(not explicitly_set, settingClearButton.has_class('inactive'))
@world.absorb
def verify_all_setting_entries(expected_entries):
settings = world.browser.find_by_css('.wrapper-comp-setting')
assert_equal(len(expected_entries), len(settings))
for (counter, setting) in enumerate(settings):
world.verify_setting_entry(
setting, expected_entries[counter][0],
expected_entries[counter][1], expected_entries[counter][2]
)
@world.absorb
def save_component():
world.css_click("a.action-save")
world.wait_for_ajax_complete()
@world.absorb
def save_component_and_reopen(step):
save_component()
# We have a known issue that modifications are still shown within the edit window after cancel (though
# they are not persisted). Refresh the browser to make sure the changes WERE persisted after Save.
reload_the_page(step)
edit_component_and_select_settings()
@world.absorb
def cancel_component(step):
world.css_click("a.action-cancel")
# We have a known issue that modifications are still shown within the edit window after cancel (though
# they are not persisted). Refresh the browser to make sure the changes were not persisted.
reload_the_page(step)
@world.absorb
def revert_setting_entry(label):
get_setting_entry(label).find_by_css('.setting-clear')[0].click()
@world.absorb
def get_setting_entry(label):
def get_setting():
settings = world.css_find('.wrapper-comp-setting')
for setting in settings:
if setting.find_by_css('.setting-label')[0].value == label:
return setting
return None
return world.retry_on_exception(get_setting)
@world.absorb
def get_setting_entry_index(label):
def get_index():
settings = world.css_find('.metadata_edit .wrapper-comp-setting')
for index, setting in enumerate(settings):
if setting.find_by_css('.setting-label')[0].value == label:
return index
return None
return world.retry_on_exception(get_index)
@world.absorb
def set_field_value(index, value):
"""
Set the field to the specified value.
Note: we cannot use css_fill here because the value is not set
until after you move away from that field.
Instead we will find the element, set its value, then hit the Tab key
to get to the next field.
"""
elem = world.css_find('.metadata_edit div.wrapper-comp-setting input.setting-input')[index]
elem.value = value
elem.type(Keys.TAB)
| agpl-3.0 |
danieljabailey/FreeCAD | src/Mod/PartDesign/InitGui.py | 17 | 3817 | # PartDesign gui init module
# (c) 2003 Juergen Riegel
#
# Gathering all the information to start FreeCAD
# This is the second one of three init scripts, the third one
# runs when the gui is up
#***************************************************************************
#* (c) Juergen Riegel ([email protected]) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
class PartDesignWorkbench ( Workbench ):
"PartDesign workbench object"
Icon = """
/* XPM */
static char * partdesign_xpm[] = {
"16 16 9 1",
" c None",
". c #040006",
"+ c #070F38",
"@ c #002196",
"# c #0030F3",
"$ c #5A4D20",
"% c #858EB2",
"& c #DEB715",
"* c #BFB99D",
" & ........ ",
"&&&$..@@@@@@+...",
"&&&&$@#####@..@.",
"&&&&&$......@#@.",
"&&&&&&@@@+.###@.",
"$&&&&&&@#@.###@.",
".$&&&&&%#@.###@.",
".@*&&&*%#@.###@.",
".@#*&**%#@.###@.",
".@#@%%%.@@.###@.",
".@@@@@@@#@.###@.",
".@#######@.###@.",
".@#######@.##+. ",
".+@@@####@.@.. ",
" ......+++.. ",
" ... "};
"""
MenuText = "Part Design"
ToolTip = "Part Design workbench"
def Initialize(self):
# load the module
try:
from WizardShaft import WizardShaft
except ImportError:
print "Wizard shaft module cannot be loaded"
import PartDesignGui
import PartDesign
try:
import InvoluteGearFeature
except ImportError:
print "Involute gear module cannot be loaded"
def GetClassName(self):
return "PartDesignGui::Workbench"
Gui.addWorkbench(PartDesignWorkbench())
| lgpl-2.1 |
deKupini/erp | openerp/addons/test_impex/tests/test_export.py | 158 | 19124 | # -*- coding: utf-8 -*-
import itertools
import openerp.modules.registry
import openerp
from openerp.tests import common
class CreatorCase(common.TransactionCase):
model_name = False
def __init__(self, *args, **kwargs):
super(CreatorCase, self).__init__(*args, **kwargs)
self.model = None
def setUp(self):
super(CreatorCase, self).setUp()
self.model = self.registry(self.model_name)
def make(self, value):
id = self.model.create(self.cr, openerp.SUPERUSER_ID, {'value': value})
return self.model.browse(self.cr, openerp.SUPERUSER_ID, [id])[0]
def export(self, value, fields=('value',), context=None):
record = self.make(value)
return record._BaseModel__export_rows([f.split('/') for f in fields])
class test_boolean_field(CreatorCase):
model_name = 'export.boolean'
def test_true(self):
self.assertEqual(
self.export(True),
[[u'True']])
def test_false(self):
""" ``False`` value to boolean fields is unique in being exported as a
(unicode) string, not a boolean
"""
self.assertEqual(
self.export(False),
[[u'False']])
class test_integer_field(CreatorCase):
model_name = 'export.integer'
def test_empty(self):
self.assertEqual(self.model.search(self.cr, openerp.SUPERUSER_ID, []), [],
"Test model should have no records")
def test_0(self):
self.assertEqual(
self.export(0),
[[False]])
def test_basic_value(self):
self.assertEqual(
self.export(42),
[[u'42']])
def test_negative(self):
self.assertEqual(
self.export(-32),
[[u'-32']])
def test_huge(self):
self.assertEqual(
self.export(2**31-1),
[[unicode(2**31-1)]])
class test_float_field(CreatorCase):
model_name = 'export.float'
def test_0(self):
self.assertEqual(
self.export(0.0),
[[False]])
def test_epsilon(self):
self.assertEqual(
self.export(0.000000000027),
[[u'2.7e-11']])
def test_negative(self):
self.assertEqual(
self.export(-2.42),
[[u'-2.42']])
def test_positive(self):
self.assertEqual(
self.export(47.36),
[[u'47.36']])
def test_big(self):
self.assertEqual(
self.export(87654321.4678),
[[u'87654321.4678']])
class test_decimal_field(CreatorCase):
model_name = 'export.decimal'
def test_0(self):
self.assertEqual(
self.export(0.0),
[[False]])
def test_epsilon(self):
""" epsilon gets sliced to 0 due to precision
"""
self.assertEqual(
self.export(0.000000000027),
[[False]])
def test_negative(self):
self.assertEqual(
self.export(-2.42),
[[u'-2.42']])
def test_positive(self):
self.assertEqual(
self.export(47.36),
[[u'47.36']])
def test_big(self):
self.assertEqual(
self.export(87654321.4678), [[u'87654321.468']])
class test_string_field(CreatorCase):
model_name = 'export.string.bounded'
def test_empty(self):
self.assertEqual(
self.export(""),
[[False]])
def test_within_bounds(self):
self.assertEqual(
self.export("foobar"),
[[u"foobar"]])
def test_out_of_bounds(self):
self.assertEqual(
self.export("C for Sinking, "
"Java for Drinking, "
"Smalltalk for Thinking. "
"...and Power to the Penguin!"),
[[u"C for Sinking, J"]])
class test_unbound_string_field(CreatorCase):
model_name = 'export.string'
def test_empty(self):
self.assertEqual(
self.export(""),
[[False]])
def test_small(self):
self.assertEqual(
self.export("foobar"),
[[u"foobar"]])
def test_big(self):
self.assertEqual(
self.export("We flew down weekly to meet with IBM, but they "
"thought the way to measure software was the amount "
"of code we wrote, when really the better the "
"software, the fewer lines of code."),
[[u"We flew down weekly to meet with IBM, but they thought the "
u"way to measure software was the amount of code we wrote, "
u"when really the better the software, the fewer lines of "
u"code."]])
class test_text(CreatorCase):
model_name = 'export.text'
def test_empty(self):
self.assertEqual(
self.export(""),
[[False]])
def test_small(self):
self.assertEqual(
self.export("foobar"),
[[u"foobar"]])
def test_big(self):
self.assertEqual(
self.export("So, `bind' is `let' and monadic programming is"
" equivalent to programming in the A-normal form. That"
" is indeed all there is to monads"),
[[u"So, `bind' is `let' and monadic programming is equivalent to"
u" programming in the A-normal form. That is indeed all there"
u" is to monads"]])
class test_date(CreatorCase):
model_name = 'export.date'
def test_empty(self):
self.assertEqual(
self.export(False),
[[False]])
def test_basic(self):
self.assertEqual(
self.export('2011-11-07'),
[[u'2011-11-07']])
class test_datetime(CreatorCase):
model_name = 'export.datetime'
def test_empty(self):
self.assertEqual(
self.export(False),
[[False]])
def test_basic(self):
self.assertEqual(
self.export('2011-11-07 21:05:48'),
[[u'2011-11-07 21:05:48']])
def test_tz(self):
""" Export ignores the timezone and always exports to UTC
.. note:: on the other hand, export uses user lang for name_get
"""
# NOTE: ignores user timezone, always exports to UTC
self.assertEqual(
self.export('2011-11-07 21:05:48', context={'tz': 'Pacific/Norfolk'}),
[[u'2011-11-07 21:05:48']])
class test_selection(CreatorCase):
model_name = 'export.selection'
translations_fr = [
("Qux", "toto"),
("Bar", "titi"),
("Foo", "tete"),
]
def test_empty(self):
self.assertEqual(
self.export(False),
[[False]])
def test_value(self):
""" selections export the *label* for their value
"""
self.assertEqual(
self.export(2),
[[u"Bar"]])
def test_localized_export(self):
self.registry('res.lang').create(self.cr, openerp.SUPERUSER_ID, {
'name': u'Français',
'code': 'fr_FR',
'translatable': True,
'date_format': '%d.%m.%Y',
'decimal_point': ',',
'thousands_sep': ' ',
})
Translations = self.registry('ir.translation')
for source, value in self.translations_fr:
Translations.create(self.cr, openerp.SUPERUSER_ID, {
'name': 'export.selection,value',
'lang': 'fr_FR',
'type': 'selection',
'src': source,
'value': value
})
self.assertEqual(
self.export(2, context={'lang': 'fr_FR'}),
[[u'Bar']])
class test_selection_function(CreatorCase):
model_name = 'export.selection.function'
def test_empty(self):
self.assertEqual(
self.export(False),
[[False]])
def test_value(self):
# FIXME: selection functions export the *value* itself
self.assertEqual(
self.export(1),
[[1]])
self.assertEqual(
self.export(3),
[[3]])
# fucking hell
self.assertEqual(
self.export(0),
[[False]])
class test_m2o(CreatorCase):
model_name = 'export.many2one'
def test_empty(self):
self.assertEqual(
self.export(False),
[[False]])
def test_basic(self):
""" Exported value is the name_get of the related object
"""
integer_id = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
name = dict(self.registry('export.integer').name_get(
self.cr, openerp.SUPERUSER_ID,[integer_id]))[integer_id]
self.assertEqual(
self.export(integer_id),
[[name]])
def test_path(self):
""" Can recursively export fields of m2o via path
"""
integer_id = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
self.assertEqual(
self.export(integer_id, fields=['value/.id', 'value/value']),
[[unicode(integer_id), u'42']])
def test_external_id(self):
integer_id = self.registry('export.integer').create(
self.cr, openerp.SUPERUSER_ID, {'value': 42})
# Expecting the m2o target model name in the external id,
# not this model's name
external_id = u'__export__.export_integer_%d' % integer_id
self.assertEqual(
self.export(integer_id, fields=['value/id']),
[[external_id]])
class test_o2m(CreatorCase):
model_name = 'export.one2many'
commands = [
(0, False, {'value': 4, 'str': 'record1'}),
(0, False, {'value': 42, 'str': 'record2'}),
(0, False, {'value': 36, 'str': 'record3'}),
(0, False, {'value': 4, 'str': 'record4'}),
(0, False, {'value': 13, 'str': 'record5'}),
]
names = [
u'export.one2many.child:%d' % d['value']
for c, _, d in commands
]
def test_empty(self):
self.assertEqual(
self.export(False),
[[False]])
def test_single(self):
self.assertEqual(
self.export([(0, False, {'value': 42})]),
# name_get result
[[u'export.one2many.child:42']])
def test_single_subfield(self):
self.assertEqual(
self.export([(0, False, {'value': 42})],
fields=['value', 'value/value']),
[[u'export.one2many.child:42', u'42']])
def test_integrate_one_in_parent(self):
self.assertEqual(
self.export([(0, False, {'value': 42})],
fields=['const', 'value/value']),
[[u'4', u'42']])
def test_multiple_records(self):
self.assertEqual(
self.export(self.commands, fields=['const', 'value/value']),
[
[u'4', u'4'],
[u'', u'42'],
[u'', u'36'],
[u'', u'4'],
[u'', u'13'],
])
def test_multiple_records_name(self):
self.assertEqual(
self.export(self.commands, fields=['const', 'value']),
[[
u'4', u','.join(self.names)
]])
def test_multiple_records_id(self):
export = self.export(self.commands, fields=['const', 'value/.id'])
O2M_c = self.registry('export.one2many.child')
ids = O2M_c.browse(self.cr, openerp.SUPERUSER_ID,
O2M_c.search(self.cr, openerp.SUPERUSER_ID, []))
self.assertEqual(
export,
[
['4', str(ids[0].id)],
['', str(ids[1].id)],
['', str(ids[2].id)],
['', str(ids[3].id)],
['', str(ids[4].id)],
])
def test_multiple_records_with_name_before(self):
self.assertEqual(
self.export(self.commands, fields=['const', 'value', 'value/value']),
[[ # exports sub-fields of very first o2m
u'4', u','.join(self.names), u'4'
]])
def test_multiple_records_with_name_after(self):
self.assertEqual(
self.export(self.commands, fields=['const', 'value/value', 'value']),
[ # completely ignores name_get request
[u'4', u'4', ''],
['', u'42', ''],
['', u'36', ''],
['', u'4', ''],
['', u'13', ''],
])
def test_multiple_subfields_neighbour(self):
self.assertEqual(
self.export(self.commands, fields=['const', 'value/str','value/value']),
[
[u'4', u'record1', u'4'],
['', u'record2', u'42'],
['', u'record3', u'36'],
['', u'record4', u'4'],
['', u'record5', u'13'],
])
def test_multiple_subfields_separated(self):
self.assertEqual(
self.export(self.commands, fields=['value/str', 'const', 'value/value']),
[
[u'record1', u'4', u'4'],
[u'record2', '', u'42'],
[u'record3', '', u'36'],
[u'record4', '', u'4'],
[u'record5', '', u'13'],
])
class test_o2m_multiple(CreatorCase):
model_name = 'export.one2many.multiple'
def make(self, value=None, **values):
if value is not None: values['value'] = value
id = self.model.create(self.cr, openerp.SUPERUSER_ID, values)
return self.model.browse(self.cr, openerp.SUPERUSER_ID, [id])[0]
def export(self, value=None, fields=('child1', 'child2',), context=None, **values):
record = self.make(value, **values)
return record._BaseModel__export_rows([f.split('/') for f in fields])
def test_empty(self):
self.assertEqual(
self.export(child1=False, child2=False),
[[False, False]])
def test_single_per_side(self):
self.assertEqual(
self.export(child1=False, child2=[(0, False, {'value': 42})]),
[[False, u'export.one2many.child.2:42']])
self.assertEqual(
self.export(child1=[(0, False, {'value': 43})], child2=False),
[[u'export.one2many.child.1:43', False]])
self.assertEqual(
self.export(child1=[(0, False, {'value': 43})],
child2=[(0, False, {'value': 42})]),
[[u'export.one2many.child.1:43', u'export.one2many.child.2:42']])
def test_single_integrate_subfield(self):
fields = ['const', 'child1/value', 'child2/value']
self.assertEqual(
self.export(child1=False, child2=[(0, False, {'value': 42})],
fields=fields),
[[u'36', False, u'42']])
self.assertEqual(
self.export(child1=[(0, False, {'value': 43})], child2=False,
fields=fields),
[[u'36', u'43', False]])
self.assertEqual(
self.export(child1=[(0, False, {'value': 43})],
child2=[(0, False, {'value': 42})],
fields=fields),
[[u'36', u'43', u'42']])
def test_multiple(self):
""" With two "concurrent" o2ms, exports the first line combined, then
exports the rows for the first o2m, then the rows for the second o2m.
"""
fields = ['const', 'child1/value', 'child2/value']
child1 = [(0, False, {'value': v, 'str': 'record%.02d' % index})
for index, v in zip(itertools.count(), [4, 42, 36, 4, 13])]
child2 = [(0, False, {'value': v, 'str': 'record%.02d' % index})
for index, v in zip(itertools.count(10), [8, 12, 8, 55, 33, 13])]
self.assertEqual(
self.export(child1=child1, child2=False, fields=fields),
[
[u'36', u'4', False],
['', u'42', ''],
['', u'36', ''],
['', u'4', ''],
['', u'13', ''],
])
self.assertEqual(
self.export(child1=False, child2=child2, fields=fields),
[
[u'36', False, u'8'],
['', '', u'12'],
['', '', u'8'],
['', '', u'55'],
['', '', u'33'],
['', '', u'13'],
])
self.assertEqual(
self.export(child1=child1, child2=child2, fields=fields),
[
[u'36', u'4', u'8'],
['', u'42', ''],
['', u'36', ''],
['', u'4', ''],
['', u'13', ''],
['', '', u'12'],
['', '', u'8'],
['', '', u'55'],
['', '', u'33'],
['', '', u'13'],
])
class test_m2m(CreatorCase):
model_name = 'export.many2many'
commands = [
(0, False, {'value': 4, 'str': 'record000'}),
(0, False, {'value': 42, 'str': 'record001'}),
(0, False, {'value': 36, 'str': 'record010'}),
(0, False, {'value': 4, 'str': 'record011'}),
(0, False, {'value': 13, 'str': 'record100'}),
]
names = [
u'export.many2many.other:%d' % d['value']
for c, _, d in commands
]
def test_empty(self):
self.assertEqual(
self.export(False),
[[False]])
def test_single(self):
self.assertEqual(
self.export([(0, False, {'value': 42})]),
# name_get result
[[u'export.many2many.other:42']])
def test_single_subfield(self):
self.assertEqual(
self.export([(0, False, {'value': 42})],
fields=['value', 'value/value']),
[[u'export.many2many.other:42', u'42']])
def test_integrate_one_in_parent(self):
self.assertEqual(
self.export([(0, False, {'value': 42})],
fields=['const', 'value/value']),
[[u'4', u'42']])
def test_multiple_records(self):
self.assertEqual(
self.export(self.commands, fields=['const', 'value/value']),
[
[u'4', u'4'],
[u'', u'42'],
[u'', u'36'],
[u'', u'4'],
[u'', u'13'],
])
def test_multiple_records_name(self):
self.assertEqual(
self.export(self.commands, fields=['const', 'value']),
[[ # FIXME: hardcoded comma, import uses config.csv_internal_sep
# resolution: remove configurable csv_internal_sep
u'4', u','.join(self.names)
]])
# essentially same as o2m, so boring
class test_function(CreatorCase):
model_name = 'export.function'
def test_value(self):
""" Exports value normally returned by accessing the function field
"""
self.assertEqual(
self.export(42),
[[u'3']])
| agpl-3.0 |
austinvernsonger/metagoofil | hachoir_parser/misc/bplist.py | 84 | 11670 | """
Apple/NeXT Binary Property List (BPLIST) parser.
Also includes a .createXML() function which produces an XML representation of the object.
Note that it will discard unknown objects, nulls and fill values, but should work for most files.
Documents:
- CFBinaryPList.c
http://src.gnu-darwin.org/DarwinSourceArchive/expanded/CF/CF-299/Parsing.subproj/CFBinaryPList.c
- ForFoundationOnly.h (for structure formats)
http://src.gnu-darwin.org/DarwinSourceArchive/expanded/CF/CF-299/Base.subproj/ForFoundationOnly.h
- XML <-> BPList converter
http://scw.us/iPhone/plutil/plutil.pl
Author: Robert Xiao
Created: 2008-09-21
"""
from hachoir_parser import HachoirParser
from hachoir_core.field import (RootSeekableFieldSet, FieldSet, Enum,
Bits, GenericInteger, Float32, Float64, UInt8, UInt64, Bytes, NullBytes, RawBytes, String)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_core.text_handler import displayHandler
from hachoir_core.tools import humanDatetime
from datetime import datetime, timedelta
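# A hypothetical helper mirroring how the parser below splits each object
# marker byte: the high nibble is the marker type, the low nibble the
# size/extra value, e.g. 0x5A -> (5, 10), an ASCII string of length 10.
def split_marker(byte):
    return (byte >> 4) & 0xF, byte & 0xF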
class BPListTrailer(FieldSet):
def createFields(self):
yield NullBytes(self, "unused", 6)
yield UInt8(self, "offsetIntSize", "Size (in bytes) of offsets in the offset table")
yield UInt8(self, "objectRefSize", "Size (in bytes) of object numbers in object references")
yield UInt64(self, "numObjects", "Number of objects in this file")
yield UInt64(self, "topObject", "Top-level object reference")
yield UInt64(self, "offsetTableOffset", "File offset to the offset table")
def createDescription(self):
return "Binary PList trailer"
class BPListOffsetTable(FieldSet):
def createFields(self):
size = self["../trailer/offsetIntSize"].value*8
for i in range(self["../trailer/numObjects"].value):
yield Bits(self, "offset[]", size)
class BPListSize(FieldSet):
def createFields(self):
yield Bits(self, "size", 4)
if self['size'].value == 0xF:
yield BPListObject(self, "fullsize")
def createValue(self):
if 'fullsize' in self:
return self['fullsize'].value
else:
return self['size'].value
class BPListObjectRef(GenericInteger):
def __init__(self, parent, name, description=None):
size = parent['/trailer/objectRefSize'].value*8
GenericInteger.__init__(self, parent, name, False, size, description)
def getRef(self):
return self.parent['/object[' + str(self.value) + ']']
def createDisplay(self):
return self.getRef().display
def createXML(self, prefix=''):
return self.getRef().createXML(prefix)
class BPListArray(FieldSet):
def __init__(self, parent, name, size, description=None):
FieldSet.__init__(self, parent, name, description=description)
self.numels = size
def createFields(self):
for i in range(self.numels):
yield BPListObjectRef(self, "ref[]")
def createValue(self):
return self.array('ref')
def createDisplay(self):
return '[' + ', '.join([x.display for x in self.value]) + ']'
def createXML(self,prefix=''):
return prefix + '<array>\n' + ''.join([x.createXML(prefix + '\t' ) + '\n' for x in self.value]) + prefix + '</array>'
class BPListDict(FieldSet):
def __init__(self, parent, name, size, description=None):
FieldSet.__init__(self, parent, name, description=description)
self.numels = size
def createFields(self):
for i in range(self.numels):
yield BPListObjectRef(self, "keyref[]")
for i in range(self.numels):
yield BPListObjectRef(self, "valref[]")
def createValue(self):
return zip(self.array('keyref'),self.array('valref'))
def createDisplay(self):
return '{' + ', '.join(['%s: %s'%(k.display,v.display) for k,v in self.value]) + '}'
def createXML(self, prefix=''):
return prefix + '<dict>\n' + ''.join(['%s\t<key>%s</key>\n%s\n'%(prefix,k.getRef().value.encode('utf-8'),v.createXML(prefix + '\t')) for k,v in self.value]) + prefix + '</dict>'
class BPListObject(FieldSet):
def createFields(self):
yield Enum(Bits(self, "marker_type", 4),
{0: "Simple",
1: "Int",
2: "Real",
3: "Date",
4: "Data",
5: "ASCII String",
6: "UTF-16-BE String",
8: "UID",
10: "Array",
13: "Dict",})
markertype = self['marker_type'].value
if markertype == 0:
# Simple (Null)
yield Enum(Bits(self, "value", 4),
{0: "Null",
8: "False",
9: "True",
15: "Fill Byte",})
if self['value'].display == "False":
self.xml=lambda prefix:prefix + "<false/>"
elif self['value'].display == "True":
self.xml=lambda prefix:prefix + "<true/>"
else:
self.xml=lambda prefix:prefix + ""
elif markertype == 1:
# Int
yield Bits(self, "size", 4, "log2 of number of bytes")
size=self['size'].value
# 8-bit (size=0), 16-bit (size=1) and 32-bit (size=2) numbers are unsigned
# 64-bit (size=3) numbers are signed
yield GenericInteger(self, "value", (size>=3), (2**size)*8)
self.xml=lambda prefix:prefix + "<integer>%s</integer>"%self['value'].value
elif markertype == 2:
# Real
yield Bits(self, "size", 4, "log2 of number of bytes")
if self['size'].value == 2: # 2**2 = 4 byte float
yield Float32(self, "value")
elif self['size'].value == 3: # 2**3 = 8 byte float
yield Float64(self, "value")
else:
# FIXME: What is the format of the real?
yield Bits(self, "value", (2**self['size'].value)*8)
self.xml=lambda prefix:prefix + "<real>%s</real>"%self['value'].value
elif markertype == 3:
# Date
yield Bits(self, "extra", 4, "Extra value, should be 3")
# Use a heuristic to determine which epoch to use
def cvt_time(v):
v=timedelta(seconds=v)
epoch2001 = datetime(2001,1,1)
epoch1970 = datetime(1970,1,1)
if (epoch2001 + v - datetime.today()).days > 5*365:
return epoch1970 + v
return epoch2001 + v
yield displayHandler(Float64(self, "value"),lambda x:humanDatetime(cvt_time(x)))
self.xml=lambda prefix:prefix + "<date>%sZ</date>"%(cvt_time(self['value'].value).isoformat())
elif markertype == 4:
# Data
yield BPListSize(self, "size")
if self['size'].value:
yield Bytes(self, "value", self['size'].value)
self.xml=lambda prefix:prefix + "<data>\n%s\n%s</data>"%(self['value'].value.encode('base64').strip(),prefix)
else:
self.xml=lambda prefix:prefix + '<data></data>'
elif markertype == 5:
# ASCII String
yield BPListSize(self, "size")
if self['size'].value:
yield String(self, "value", self['size'].value, charset="ASCII")
self.xml=lambda prefix:prefix + "<string>%s</string>"%(self['value'].value.replace('&','&').encode('iso-8859-1'))
else:
self.xml=lambda prefix:prefix + '<string></string>'
elif markertype == 6:
# UTF-16-BE String
yield BPListSize(self, "size")
if self['size'].value:
yield String(self, "value", self['size'].value*2, charset="UTF-16-BE")
self.xml=lambda prefix:prefix + "<string>%s</string>"%(self['value'].value.replace('&','&').encode('utf-8'))
else:
self.xml=lambda prefix:prefix + '<string></string>'
elif markertype == 8:
# UID
yield Bits(self, "size", 4, "Number of bytes minus 1")
yield GenericInteger(self, "value", False, (self['size'].value + 1)*8)
self.xml=lambda prefix:prefix + "" # no equivalent?
elif markertype == 10:
# Array
yield BPListSize(self, "size")
size = self['size'].value
if size:
yield BPListArray(self, "value", size)
self.xml=lambda prefix:self['value'].createXML(prefix)
elif markertype == 13:
# Dict
yield BPListSize(self, "size")
yield BPListDict(self, "value", self['size'].value)
self.xml=lambda prefix:self['value'].createXML(prefix)
else:
yield Bits(self, "value", 4)
self.xml=lambda prefix:''
def createValue(self):
if 'value' in self:
return self['value'].value
elif self['marker_type'].value in [4,5,6]:
return u''
else:
return None
def createDisplay(self):
if 'value' in self:
return unicode(self['value'].display)
elif self['marker_type'].value in [4,5,6]:
return u''
else:
return None
def createXML(self, prefix=''):
if 'value' in self:
try:
return self.xml(prefix)
except AttributeError:
return ''
return ''
def getFieldType(self):
return '%s<%s>'%(FieldSet.getFieldType(self), self['marker_type'].display)
class BPList(HachoirParser, RootSeekableFieldSet):
endian = BIG_ENDIAN
MAGIC = "bplist00"
PARSER_TAGS = {
"id": "bplist",
"category": "misc",
"file_ext": ("plist",),
"magic": ((MAGIC, 0),),
"min_size": 8 + 32, # bplist00 + 32-byte trailer
"description": "Apple/NeXT Binary Property List",
}
def __init__(self, stream, **args):
RootSeekableFieldSet.__init__(self, None, "root", stream, None, stream.askSize(self))
HachoirParser.__init__(self, stream, **args)
def validate(self):
if self.stream.readBytes(0, len(self.MAGIC)) != self.MAGIC:
return "Invalid magic"
return True
def createFields(self):
yield Bytes(self, "magic", 8, "File magic (bplist00)")
if self.size:
self.seekByte(self.size//8-32, True)
else:
# FIXME: UNTESTED
while True:
try:
self.seekByte(1024)
except:
break
self.seekByte(self.size//8-32)
yield BPListTrailer(self, "trailer")
self.seekByte(self['trailer/offsetTableOffset'].value)
yield BPListOffsetTable(self, "offset_table")
for i in self.array("offset_table/offset"):
if self.current_size > i.value*8:
self.seekByte(i.value)
elif self.current_size < i.value*8:
# try to detect files with gaps or unparsed content
yield RawBytes(self, "padding[]", i.value-self.current_size//8)
yield BPListObject(self, "object[]")
def createXML(self, prefix=''):
return '''<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
''' + self['/object[' + str(self['/trailer/topObject'].value) + ']'].createXML(prefix) + '''
</plist>'''
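# A minimal usage sketch, assuming hachoir's generic createParser() helper is
# used to open the file; the function name and the ValueError handling below
# are illustrative only.
def _bplist_to_xml_sketch(filename):
    from hachoir_parser import createParser
    parser = createParser(filename)
    if not isinstance(parser, BPList):
        raise ValueError("not a binary property list: %r" % filename)
    # createXML() starts from the trailer's top-level object reference
    return parser.createXML()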
| gpl-2.0 |
salfab/CouchPotatoServer | libs/sqlalchemy/orm/__init__.py | 18 | 66714 | # orm/__init__.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Functional constructs for ORM configuration.
See the SQLAlchemy object relational tutorial and mapper configuration
documentation for an overview of how this module is used.
"""
from sqlalchemy.orm import exc
from sqlalchemy.orm.mapper import (
Mapper,
_mapper_registry,
class_mapper,
configure_mappers
)
from sqlalchemy.orm.interfaces import (
EXT_CONTINUE,
EXT_STOP,
InstrumentationManager,
MapperExtension,
PropComparator,
SessionExtension,
AttributeExtension,
)
from sqlalchemy.orm.util import (
aliased,
join,
object_mapper,
outerjoin,
polymorphic_union,
with_parent,
)
from sqlalchemy.orm.properties import (
ColumnProperty,
ComparableProperty,
CompositeProperty,
RelationshipProperty,
PropertyLoader,
SynonymProperty,
)
from sqlalchemy.orm import mapper as mapperlib
from sqlalchemy.orm.mapper import reconstructor, validates
from sqlalchemy.orm import strategies
from sqlalchemy.orm.query import AliasOption, Query
from sqlalchemy.sql import util as sql_util
from sqlalchemy.orm.session import Session
from sqlalchemy.orm.session import object_session, sessionmaker, \
make_transient
from sqlalchemy.orm.scoping import ScopedSession
from sqlalchemy import util as sa_util
from sqlalchemy import exc as sa_exc
__all__ = (
'EXT_CONTINUE',
'EXT_STOP',
'InstrumentationManager',
'MapperExtension',
'AttributeExtension',
'PropComparator',
'Query',
'Session',
'aliased',
'backref',
'class_mapper',
'clear_mappers',
'column_property',
'comparable_property',
'compile_mappers',
'configure_mappers',
'composite',
'contains_alias',
'contains_eager',
'create_session',
'defer',
'deferred',
'dynamic_loader',
'eagerload',
'eagerload_all',
'immediateload',
'join',
'joinedload',
'joinedload_all',
'lazyload',
'mapper',
'make_transient',
'noload',
'object_mapper',
'object_session',
'outerjoin',
'polymorphic_union',
'reconstructor',
'relationship',
'relation',
'scoped_session',
'sessionmaker',
'subqueryload',
'subqueryload_all',
'synonym',
'undefer',
'undefer_group',
'validates'
)
def scoped_session(session_factory, scopefunc=None):
"""Provides thread-local or scoped management of :class:`.Session` objects.
This is a front-end function to
:class:`.ScopedSession`::
Session = scoped_session(sessionmaker(autoflush=True))
To instantiate a Session object which is part of the scoped context,
instantiate normally::
session = Session()
Most session methods are available as classmethods from the scoped
session::
Session.commit()
Session.close()
See also: :ref:`unitofwork_contextual`.
:param session_factory: a callable function that produces
:class:`.Session` instances, such as :func:`sessionmaker`.
:param scopefunc: Optional "scope" function which would be
passed to the :class:`.ScopedRegistry`. If None, the
:class:`.ThreadLocalRegistry` is used by default.
:returns: a :class:`.ScopedSession` instance
"""
return ScopedSession(session_factory, scopefunc=scopefunc)
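# A minimal usage sketch of the scoped-session pattern described above,
# assuming an ``engine`` and a mapped ``User`` class defined elsewhere; the
# names are illustrative only.
def _scoped_session_usage_sketch(engine, User):
    Session = scoped_session(sessionmaker(bind=engine))
    session = Session()        # the thread-local Session for this scope
    user = User()
    user.name = "ed"
    session.add(user)
    Session.commit()           # method calls on the registry proxy to the same Session
    Session.remove()           # discard the current scoped Session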
def create_session(bind=None, **kwargs):
"""Create a new :class:`.Session`
with no automation enabled by default.
This function is used primarily for testing. The usual
route to :class:`.Session` creation is via its constructor
or the :func:`.sessionmaker` function.
:param bind: optional, a single Connectable to use for all
database access in the created
:class:`~sqlalchemy.orm.session.Session`.
:param \*\*kwargs: optional, passed through to the
:class:`.Session` constructor.
:returns: an :class:`~sqlalchemy.orm.session.Session` instance
    The defaults of create_session() are the opposite of those of
:func:`sessionmaker`; ``autoflush`` and ``expire_on_commit`` are
False, ``autocommit`` is True. In this sense the session acts
more like the "classic" SQLAlchemy 0.3 session with these.
Usage::
>>> from sqlalchemy.orm import create_session
>>> session = create_session()
It is recommended to use :func:`sessionmaker` instead of
create_session().
"""
kwargs.setdefault('autoflush', False)
kwargs.setdefault('autocommit', True)
kwargs.setdefault('expire_on_commit', False)
return Session(bind=bind, **kwargs)
def relationship(argument, secondary=None, **kwargs):
"""Provide a relationship of a primary Mapper to a secondary Mapper.
.. versionchanged:: 0.6
:func:`relationship` is historically known as :func:`relation`.
This corresponds to a parent-child or associative table relationship. The
constructed class is an instance of :class:`.RelationshipProperty`.
A typical :func:`.relationship`, used in a classical mapping::
mapper(Parent, properties={
'children': relationship(Child)
})
Some arguments accepted by :func:`.relationship` optionally accept a
callable function, which when called produces the desired value.
The callable is invoked by the parent :class:`.Mapper` at "mapper initialization"
time, which happens only when mappers are first used, and is assumed
to be after all mappings have been constructed. This can be used
to resolve order-of-declaration and other dependency issues, such as
if ``Child`` is declared below ``Parent`` in the same file::
mapper(Parent, properties={
"children":relationship(lambda: Child,
order_by=lambda: Child.id)
})
When using the :ref:`declarative_toplevel` extension, the Declarative
initializer allows string arguments to be passed to :func:`.relationship`.
These string arguments are converted into callables that evaluate
the string as Python code, using the Declarative
class-registry as a namespace. This allows the lookup of related
classes to be automatic via their string name, and removes the need to import
related classes at all into the local module space::
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Parent(Base):
__tablename__ = 'parent'
id = Column(Integer, primary_key=True)
children = relationship("Child", order_by="Child.id")
A full array of examples and reference documentation regarding
:func:`.relationship` is at :ref:`relationship_config_toplevel`.
:param argument:
a mapped class, or actual :class:`.Mapper` instance, representing the target of
the relationship.
``argument`` may also be passed as a callable function
which is evaluated at mapper initialization time, and may be passed as a
Python-evaluable string when using Declarative.
:param secondary:
for a many-to-many relationship, specifies the intermediary
table, and is an instance of :class:`.Table`. The ``secondary`` keyword
argument should generally only
be used for a table that is not otherwise expressed in any class
mapping, unless this relationship is declared as view only, otherwise
conflicting persistence operations can occur.
``secondary`` may
also be passed as a callable function which is evaluated at
mapper initialization time.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
many-to-one reference should be loaded when replaced, if
not already loaded. Normally, history tracking logic for
simple many-to-ones only needs to be aware of the "new"
value in order to perform a flush. This flag is available
for applications that make use of
:func:`.attributes.get_history` which also need to know
the "previous" value of the attribute.
:param backref:
indicates the string name of a property to be placed on the related
mapper's class that will handle this relationship in the other
direction. The other property will be created automatically
when the mappers are configured. Can also be passed as a
:func:`backref` object to control the configuration of the
new relationship.
:param back_populates:
Takes a string name and has the same meaning as ``backref``,
except the complementing property is **not** created automatically,
and instead must be configured explicitly on the other mapper. The
complementing property should also indicate ``back_populates``
to this relationship to ensure proper functioning.
:param cascade:
a comma-separated list of cascade rules which determines how
Session operations should be "cascaded" from parent to child.
This defaults to ``False``, which means the default cascade
should be used. The default value is ``"save-update, merge"``.
Available cascades are:
* ``save-update`` - cascade the :meth:`.Session.add`
operation. This cascade applies both to future and
past calls to :meth:`~sqlalchemy.orm.session.Session.add`,
meaning new items added to a collection or scalar relationship
get placed into the same session as that of the parent, and
also applies to items which have been removed from this
relationship but are still part of unflushed history.
* ``merge`` - cascade the :meth:`~sqlalchemy.orm.session.Session.merge`
operation
* ``expunge`` - cascade the :meth:`.Session.expunge`
operation
* ``delete`` - cascade the :meth:`.Session.delete`
operation
* ``delete-orphan`` - if an item of the child's type is
detached from its parent, mark it for deletion.
.. versionchanged:: 0.7
This option does not prevent
a new instance of the child object from being persisted
without a parent to start with; to constrain against
that case, ensure the child's foreign key column(s)
is configured as NOT NULL
* ``refresh-expire`` - cascade the :meth:`.Session.expire`
and :meth:`~sqlalchemy.orm.session.Session.refresh` operations
      * ``all`` - shorthand for "save-update, merge, refresh-expire,
expunge, delete"
See the section :ref:`unitofwork_cascades` for more background
on configuring cascades.
:param cascade_backrefs=True:
a boolean value indicating if the ``save-update`` cascade should
operate along an assignment event intercepted by a backref.
When set to ``False``,
the attribute managed by this relationship will not cascade
an incoming transient object into the session of a
persistent parent, if the event is received via backref.
That is::
mapper(A, a_table, properties={
'bs':relationship(B, backref="a", cascade_backrefs=False)
})
If an ``A()`` is present in the session, assigning it to
the "a" attribute on a transient ``B()`` will not place
the ``B()`` into the session. To set the flag in the other
direction, i.e. so that ``A().bs.append(B())`` won't add
a transient ``A()`` into the session for a persistent ``B()``::
mapper(A, a_table, properties={
'bs':relationship(B,
backref=backref("a", cascade_backrefs=False)
)
})
See the section :ref:`unitofwork_cascades` for more background
on configuring cascades.
:param collection_class:
a class or callable that returns a new list-holding object. will
be used in place of a plain list for storing elements.
Behavior of this attribute is described in detail at
:ref:`custom_collections`.
:param comparator_factory:
a class which extends :class:`.RelationshipProperty.Comparator` which
provides custom SQL clause generation for comparison operations.
:param doc:
docstring which will be applied to the resulting descriptor.
:param extension:
an :class:`.AttributeExtension` instance, or list of extensions,
which will be prepended to the list of attribute listeners for
the resulting descriptor placed on the class.
**Deprecated.** Please see :class:`.AttributeEvents`.
:param foreign_keys:
a list of columns which are to be used as "foreign key" columns.
Normally, :func:`relationship` uses the :class:`.ForeignKey`
and :class:`.ForeignKeyConstraint` objects present within the
mapped or secondary :class:`.Table` to determine the "foreign" side of
the join condition. This is used to construct SQL clauses in order
to load objects, as well as to "synchronize" values from
primary key columns to referencing foreign key columns.
The ``foreign_keys`` parameter overrides the notion of what's
"foreign" in the table metadata, allowing the specification
of a list of :class:`.Column` objects that should be considered
part of the foreign key.
There are only two use cases for ``foreign_keys`` - one, when it is not
convenient for :class:`.Table` metadata to contain its own foreign key
      metadata (which should be almost never, unless reflecting a large number of
tables from a MySQL MyISAM schema, or a schema that doesn't actually
have foreign keys on it). The other is for extremely
rare and exotic composite foreign key setups where some columns
should artificially not be considered as foreign.
``foreign_keys`` may also be passed as a callable function
which is evaluated at mapper initialization time, and may be passed as a
Python-evaluable string when using Declarative.
:param innerjoin=False:
when ``True``, joined eager loads will use an inner join to join
against related tables instead of an outer join. The purpose
of this option is generally one of performance, as inner joins
generally perform better than outer joins. Another reason can be
the use of ``with_lockmode``, which does not support outer joins.
This flag can be set to ``True`` when the relationship references an
object via many-to-one using local foreign keys that are not nullable,
or when the reference is one-to-one or a collection that is guaranteed
      to have at least one entry.
:param join_depth:
when non-``None``, an integer value indicating how many levels
deep "eager" loaders should join on a self-referring or cyclical
relationship. The number counts how many times the same Mapper
shall be present in the loading condition along a particular join
branch. When left at its default of ``None``, eager loaders
      will stop chaining when they encounter the same target mapper
which is already higher up in the chain. This option applies
both to joined- and subquery- eager loaders.
:param lazy='select': specifies
how the related items should be loaded. Default value is
``select``. Values include:
* ``select`` - items should be loaded lazily when the property is first
accessed, using a separate SELECT statement, or identity map
fetch for simple many-to-one references.
* ``immediate`` - items should be loaded as the parents are loaded,
using a separate SELECT statement, or identity map fetch for
simple many-to-one references.
.. versionadded:: 0.6.5
* ``joined`` - items should be loaded "eagerly" in the same query as
that of the parent, using a JOIN or LEFT OUTER JOIN. Whether
the join is "outer" or not is determined by the ``innerjoin``
parameter.
* ``subquery`` - items should be loaded "eagerly" within the same
query as that of the parent, using a second SQL statement
which issues a JOIN to a subquery of the original
statement.
* ``noload`` - no loading should occur at any time. This is to
support "write-only" attributes, or attributes which are
populated in some manner specific to the application.
* ``dynamic`` - the attribute will return a pre-configured
:class:`~sqlalchemy.orm.query.Query` object for all read
operations, onto which further filtering operations can be
applied before iterating the results. See
the section :ref:`dynamic_relationship` for more details.
* True - a synonym for 'select'
* False - a synonym for 'joined'
* None - a synonym for 'noload'
Detailed discussion of loader strategies is at :ref:`loading_toplevel`.
:param load_on_pending=False:
Indicates loading behavior for transient or pending parent objects.
When set to ``True``, causes the lazy-loader to
issue a query for a parent object that is not persistent, meaning it has
never been flushed. This may take effect for a pending object when
autoflush is disabled, or for a transient object that has been
"attached" to a :class:`.Session` but is not part of its pending
collection. Attachment of transient objects to the session without
moving to the "pending" state is not a supported behavior at this time.
Note that the load of related objects on a pending or transient object
also does not trigger any attribute change events - no user-defined
events will be emitted for these attributes, and if and when the
object is ultimately flushed, only the user-specific foreign key
attributes will be part of the modified state.
The load_on_pending flag does not improve behavior
when the ORM is used normally - object references should be constructed
at the object level, not at the foreign key level, so that they
are present in an ordinary way before flush() proceeds. This flag
      is not intended for general use.
      .. versionadded:: 0.6.5
:param order_by:
indicates the ordering that should be applied when loading these
items. ``order_by`` is expected to refer to one of the :class:`.Column`
objects to which the target class is mapped, or
the attribute itself bound to the target class which refers
to the column.
``order_by`` may also be passed as a callable function
which is evaluated at mapper initialization time, and may be passed as a
Python-evaluable string when using Declarative.
:param passive_deletes=False:
Indicates loading behavior during delete operations.
A value of True indicates that unloaded child items should not
be loaded during a delete operation on the parent. Normally,
when a parent item is deleted, all child items are loaded so
that they can either be marked as deleted, or have their
foreign key to the parent set to NULL. Marking this flag as
True usually implies an ON DELETE <CASCADE|SET NULL> rule is in
place which will handle updating/deleting child rows on the
database side.
Additionally, setting the flag to the string value 'all' will
disable the "nulling out" of the child foreign keys, when there
is no delete or delete-orphan cascade enabled. This is
typically used when a triggering or error raise scenario is in
place on the database side. Note that the foreign key
attributes on in-session child objects will not be changed
after a flush occurs so this is a very special use-case
setting.
:param passive_updates=True:
Indicates loading and INSERT/UPDATE/DELETE behavior when the
source of a foreign key value changes (i.e. an "on update"
cascade), which are typically the primary key columns of the
source row.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will
handle propagation of an UPDATE from a source column to
dependent rows. Note that with databases which enforce
referential integrity (i.e. PostgreSQL, MySQL with InnoDB tables),
ON UPDATE CASCADE is required for this operation. The
relationship() will update the value of the attribute on related
items which are locally present in the session during a flush.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The relationship() will issue the
appropriate UPDATE statements to the database in response to the
change of a referenced key, and items locally present in the
session during a flush will also be refreshed.
This flag should probably be set to False if primary key changes
are expected and the database in use doesn't support CASCADE
(i.e. SQLite, MySQL MyISAM tables).
Also see the passive_updates flag on ``mapper()``.
A future SQLAlchemy release will provide a "detect" feature for
this flag.
:param post_update:
this indicates that the relationship should be handled by a
second UPDATE statement after an INSERT or before a
DELETE. Currently, it also will issue an UPDATE after the
instance was UPDATEd as well, although this technically should
be improved. This flag is used to handle saving bi-directional
dependencies between two individual rows (i.e. each row
references the other), where it would otherwise be impossible to
INSERT or DELETE both rows fully since one row exists before the
other. Use this flag when a particular mapping arrangement will
incur two rows that are dependent on each other, such as a table
that has a one-to-many relationship to a set of child rows, and
also has a column that references a single child row within that
list (i.e. both tables contain a foreign key to each other). If
a ``flush()`` operation returns an error that a "cyclical
dependency" was detected, this is a cue that you might want to
use ``post_update`` to "break" the cycle.
:param primaryjoin:
a SQL expression that will be used as the primary
join of this child object against the parent object, or in a
many-to-many relationship the join of the primary object to the
association table. By default, this value is computed based on the
foreign key relationships of the parent and child tables (or association
table).
``primaryjoin`` may also be passed as a callable function
which is evaluated at mapper initialization time, and may be passed as a
Python-evaluable string when using Declarative.
:param remote_side:
used for self-referential relationships, indicates the column or
list of columns that form the "remote side" of the relationship.
``remote_side`` may also be passed as a callable function
which is evaluated at mapper initialization time, and may be passed as a
Python-evaluable string when using Declarative.
:param query_class:
a :class:`.Query` subclass that will be used as the base of the
"appender query" returned by a "dynamic" relationship, that
is, a relationship that specifies ``lazy="dynamic"`` or was
otherwise constructed using the :func:`.orm.dynamic_loader`
function.
:param secondaryjoin:
a SQL expression that will be used as the join of
an association table to the child object. By default, this value is
computed based on the foreign key relationships of the association and
child tables.
``secondaryjoin`` may also be passed as a callable function
which is evaluated at mapper initialization time, and may be passed as a
Python-evaluable string when using Declarative.
:param single_parent=(True|False):
when True, installs a validator which will prevent objects
from being associated with more than one parent at a time.
This is used for many-to-one or many-to-many relationships that
should be treated either as one-to-one or one-to-many. Its
usage is optional unless delete-orphan cascade is also
      set on this relationship(), in which case it is required.
:param uselist=(True|False):
a boolean that indicates if this property should be loaded as a
list or a scalar. In most cases, this value is determined
automatically by ``relationship()``, based on the type and direction
of the relationship - one to many forms a list, many to one
forms a scalar, many to many is a list. If a scalar is desired
where normally a list would be present, such as a bi-directional
one-to-one relationship, set uselist to False.
:param viewonly=False:
when set to True, the relationship is used only for loading objects
within the relationship, and has no effect on the unit-of-work
flush process. Relationships with viewonly can specify any kind of
join conditions to provide additional views of related objects
onto a parent object. Note that the functionality of a viewonly
relationship has its limits - complicated join conditions may
not compile into eager or lazy loaders properly. If this is the
case, use an alternative method.
"""
return RelationshipProperty(argument, secondary=secondary, **kwargs)
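# A sketch of a classical many-to-many mapping using the ``secondary``
# argument described above; the table and class names are illustrative only,
# and ``metadata`` is assumed to be a :class:`.MetaData` supplied by the caller.
def _relationship_secondary_sketch(metadata):
    from sqlalchemy import Table, Column, Integer, String, ForeignKey
    user_table = Table('user', metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String(50)))
    keyword_table = Table('keyword', metadata,
        Column('id', Integer, primary_key=True),
        Column('word', String(50)))
    user_keyword = Table('user_keyword', metadata,
        Column('user_id', Integer, ForeignKey('user.id'), primary_key=True),
        Column('keyword_id', Integer, ForeignKey('keyword.id'), primary_key=True))
    class User(object):
        pass
    class Keyword(object):
        pass
    mapper(Keyword, keyword_table)
    # the association table goes in via ``secondary``; ``backref`` creates the
    # reverse ``Keyword.users`` collection automatically.
    mapper(User, user_table, properties={
        'keywords': relationship(Keyword, secondary=user_keyword,
                                 backref='users')
    })
    return User, Keyword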
def relation(*arg, **kw):
"""A synonym for :func:`relationship`."""
return relationship(*arg, **kw)
def dynamic_loader(argument, **kw):
"""Construct a dynamically-loading mapper property.
This is essentially the same as
using the ``lazy='dynamic'`` argument with :func:`relationship`::
dynamic_loader(SomeClass)
# is the same as
relationship(SomeClass, lazy="dynamic")
See the section :ref:`dynamic_relationship` for more details
on dynamic loading.
"""
kw['lazy'] = 'dynamic'
return relationship(argument, **kw)
def column_property(*cols, **kw):
"""Provide a column-level property for use with a Mapper.
Column-based properties can normally be applied to the mapper's
``properties`` dictionary using the :class:`.Column` element directly.
Use this function when the given column is not directly present within the
mapper's selectable; examples include SQL expressions, functions, and
scalar SELECT queries.
Columns that aren't present in the mapper's selectable won't be persisted
by the mapper and are effectively "read-only" attributes.
:param \*cols:
list of Column objects to be mapped.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
scalar attribute should be loaded when replaced, if not
already loaded. Normally, history tracking logic for
simple non-primary-key scalar values only needs to be
aware of the "new" value in order to perform a flush. This
flag is available for applications that make use of
:func:`.attributes.get_history` or :meth:`.Session.is_modified`
which also need to know
the "previous" value of the attribute.
.. versionadded:: 0.6.6
:param comparator_factory: a class which extends
:class:`.ColumnProperty.Comparator` which provides custom SQL clause
generation for comparison operations.
:param group:
a group name for this property when marked as deferred.
:param deferred:
when True, the column property is "deferred", meaning that
it does not load immediately, and is instead loaded when the
attribute is first accessed on an instance. See also
:func:`~sqlalchemy.orm.deferred`.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param expire_on_flush=True:
Disable expiry on flush. A column_property() which refers
to a SQL expression (and not a single table-bound column)
is considered to be a "read only" property; populating it
has no effect on the state of data, and it can only return
database state. For this reason a column_property()'s value
is expired whenever the parent object is involved in a
flush, that is, has any kind of "dirty" state within a flush.
Setting this parameter to ``False`` will have the effect of
leaving any existing value present after the flush proceeds.
Note however that the :class:`.Session` with default expiration
settings still expires
      all attributes after a :meth:`.Session.commit` call.
.. versionadded:: 0.7.3
:param extension:
an
:class:`.AttributeExtension`
instance, or list of extensions, which will be prepended
to the list of attribute listeners for the resulting
descriptor placed on the class.
**Deprecated.** Please see :class:`.AttributeEvents`.
"""
return ColumnProperty(*cols, **kw)
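# A sketch of the main use case above - mapping a SQL expression that is not a
# plain table column; the table and class names are illustrative only.
def _column_property_sketch(metadata):
    from sqlalchemy import Table, Column, Integer, String
    user_table = Table('user', metadata,
        Column('id', Integer, primary_key=True),
        Column('firstname', String(50)),
        Column('lastname', String(50)))
    class User(object):
        pass
    # ``fullname`` is computed in SQL at load time and is effectively read-only
    mapper(User, user_table, properties={
        'fullname': column_property(
            user_table.c.firstname + " " + user_table.c.lastname)
    })
    return User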
def composite(class_, *cols, **kwargs):
"""Return a composite column-based property for use with a Mapper.
See the mapping documentation section :ref:`mapper_composite` for a full
usage example.
:param class\_:
The "composite type" class.
:param \*cols:
List of Column objects to be mapped.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
scalar attribute should be loaded when replaced, if not
already loaded. See the same flag on :func:`.column_property`.
.. versionchanged:: 0.7
This flag specifically becomes meaningful
- previously it was a placeholder.
:param group:
A group name for this property when marked as deferred.
:param deferred:
When True, the column property is "deferred", meaning that it does not
load immediately, and is instead loaded when the attribute is first
accessed on an instance. See also :func:`~sqlalchemy.orm.deferred`.
:param comparator_factory: a class which extends
:class:`.CompositeProperty.Comparator` which provides custom SQL clause
generation for comparison operations.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param extension:
an :class:`.AttributeExtension` instance,
or list of extensions, which will be prepended to the list of
attribute listeners for the resulting descriptor placed on the class.
**Deprecated.** Please see :class:`.AttributeEvents`.
"""
return CompositeProperty(class_, *cols, **kwargs)
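# A sketch of a composite column mapping along the lines of the ``Point``
# example in the mapping documentation; all names are illustrative only.
def _composite_sketch(metadata):
    from sqlalchemy import Table, Column, Integer
    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y
        def __composite_values__(self):
            return self.x, self.y
        def __eq__(self, other):
            return isinstance(other, Point) and \
                other.x == self.x and other.y == self.y
        def __ne__(self, other):
            return not self.__eq__(other)
    vertex_table = Table('vertex', metadata,
        Column('id', Integer, primary_key=True),
        Column('x1', Integer),
        Column('y1', Integer))
    class Vertex(object):
        pass
    # the two integer columns are presented as a single ``Point`` attribute
    mapper(Vertex, vertex_table, properties={
        'start': composite(Point, vertex_table.c.x1, vertex_table.c.y1)
    })
    return Vertex, Point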
def backref(name, **kwargs):
"""Create a back reference with explicit keyword arguments, which are the same
arguments one can send to :func:`relationship`.
Used with the ``backref`` keyword argument to :func:`relationship` in
place of a string argument, e.g.::
'items':relationship(SomeItem, backref=backref('parent', lazy='subquery'))
"""
return (name, kwargs)
def deferred(*columns, **kwargs):
"""Return a :class:`.DeferredColumnProperty`, which indicates this
object attributes should only be loaded from its corresponding
table column when first accessed.
Used with the "properties" dictionary sent to :func:`mapper`.
See also:
:ref:`deferred`
"""
return ColumnProperty(deferred=True, *columns, **kwargs)
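# A sketch of deferring a large column so it is only loaded on first attribute
# access; the table, column and class names are illustrative only.
def _deferred_sketch(metadata):
    from sqlalchemy import Table, Column, Integer, String, LargeBinary
    book_table = Table('book', metadata,
        Column('id', Integer, primary_key=True),
        Column('title', String(200)),
        Column('photo', LargeBinary))
    class Book(object):
        pass
    mapper(Book, book_table, properties={
        # a separate SELECT is emitted only when ``Book.photo`` is accessed
        'photo': deferred(book_table.c.photo)
    })
    return Book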
def mapper(class_, local_table=None, *args, **params):
"""Return a new :class:`~.Mapper` object.
This function is typically used behind the scenes
via the Declarative extension. When using Declarative,
many of the usual :func:`.mapper` arguments are handled
by the Declarative extension itself, including ``class_``,
``local_table``, ``properties``, and ``inherits``.
Other options are passed to :func:`.mapper` using
the ``__mapper_args__`` class variable::
class MyClass(Base):
__tablename__ = 'my_table'
id = Column(Integer, primary_key=True)
type = Column(String(50))
alt = Column("some_alt", Integer)
__mapper_args__ = {
'polymorphic_on' : type
}
Explicit use of :func:`.mapper`
is often referred to as *classical mapping*. The above
declarative example is equivalent in classical form to::
my_table = Table("my_table", metadata,
Column('id', Integer, primary_key=True),
Column('type', String(50)),
Column("some_alt", Integer)
)
class MyClass(object):
pass
mapper(MyClass, my_table,
polymorphic_on=my_table.c.type,
properties={
'alt':my_table.c.some_alt
})
See also:
:ref:`classical_mapping` - discussion of direct usage of
:func:`.mapper`
:param class\_: The class to be mapped. When using Declarative,
this argument is automatically passed as the declared class
itself.
:param local_table: The :class:`.Table` or other selectable
to which the class is mapped. May be ``None`` if
this mapper inherits from another mapper using single-table
inheritance. When using Declarative, this argument is
automatically passed by the extension, based on what
is configured via the ``__table__`` argument or via the :class:`.Table`
produced as a result of the ``__tablename__`` and :class:`.Column`
arguments present.
:param always_refresh: If True, all query operations for this mapped
class will overwrite all data within object instances that already
exist within the session, erasing any in-memory changes with
whatever information was loaded from the database. Usage of this
flag is highly discouraged; as an alternative, see the method
:meth:`.Query.populate_existing`.
    :param allow_null_pks: This flag is deprecated - use
      ``allow_partial_pks``, which defaults to True.
:param allow_partial_pks: Defaults to True. Indicates that a
composite primary key with some NULL values should be considered as
possibly existing within the database. This affects whether a
mapper will assign an incoming row to an existing identity, as well
as if :meth:`.Session.merge` will check the database first for a
particular primary key value. A "partial primary key" can occur if
one has mapped to an OUTER JOIN, for example.
:param batch: Defaults to ``True``, indicating that save operations
of multiple entities can be batched together for efficiency.
Setting to False indicates
that an instance will be fully saved before saving the next
instance. This is used in the extremely rare case that a
:class:`.MapperEvents` listener requires being called
in between individual row persistence operations.
:param column_prefix: A string which will be prepended
to the mapped attribute name when :class:`.Column`
objects are automatically assigned as attributes to the
mapped class. Does not affect explicitly specified
column-based properties.
See the section :ref:`column_prefix` for an example.
:param concrete: If True, indicates this mapper should use concrete
table inheritance with its parent mapper.
See the section :ref:`concrete_inheritance` for an example.
:param exclude_properties: A list or set of string column names to
be excluded from mapping.
See :ref:`include_exclude_cols` for an example.
:param extension: A :class:`.MapperExtension` instance or
list of :class:`.MapperExtension`
instances which will be applied to all operations by this
:class:`.Mapper`. **Deprecated.** Please see :class:`.MapperEvents`.
:param include_properties: An inclusive list or set of string column
names to map.
See :ref:`include_exclude_cols` for an example.
:param inherits: A mapped class or the corresponding :class:`.Mapper`
      of one, indicating a superclass from which this :class:`.Mapper`
      should *inherit*. The mapped class here must be a subclass of the
other mapper's class. When using Declarative, this argument
is passed automatically as a result of the natural class
hierarchy of the declared classes.
See also:
:ref:`inheritance_toplevel`
:param inherit_condition: For joined table inheritance, a SQL
expression which will
define how the two tables are joined; defaults to a natural join
between the two tables.
:param inherit_foreign_keys: When ``inherit_condition`` is used and the
columns present are missing a :class:`.ForeignKey` configuration,
this parameter can be used to specify which columns are "foreign".
In most cases can be left as ``None``.
:param non_primary: Specify that this :class:`.Mapper` is in addition
to the "primary" mapper, that is, the one used for persistence.
The :class:`.Mapper` created here may be used for ad-hoc
mapping of the class to an alternate selectable, for loading
only.
The ``non_primary`` feature is rarely needed with modern
usage.
:param order_by: A single :class:`.Column` or list of :class:`.Column`
objects for which selection operations should use as the default
ordering for entities. By default mappers have no pre-defined
ordering.
:param passive_updates: Indicates UPDATE behavior of foreign key
columns when a primary key column changes on a joined-table inheritance
mapping. Defaults to ``True``.
When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will handle
propagation of an UPDATE from a source column to dependent columns
on joined-table rows.
When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The :class:`.Mapper` here will
emit an UPDATE statement for the dependent columns during a
primary key change.
See also:
:ref:`passive_updates` - description of a similar feature as
used with :func:`.relationship`
:param polymorphic_on: Specifies the column, attribute, or
SQL expression used to determine the target class for an
incoming row, when inheriting classes are present.
This value is commonly a :class:`.Column` object that's
present in the mapped :class:`.Table`::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":discriminator,
"polymorphic_identity":"employee"
}
It may also be specified
as a SQL expression, as in this example where we
use the :func:`.case` construct to provide a conditional
approach::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
__mapper_args__ = {
"polymorphic_on":case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee"),
"polymorphic_identity":"employee"
}
It may also refer to any attribute
configured with :func:`.column_property`, or to the
string name of one::
class Employee(Base):
__tablename__ = 'employee'
id = Column(Integer, primary_key=True)
discriminator = Column(String(50))
employee_type = column_property(
case([
(discriminator == "EN", "engineer"),
(discriminator == "MA", "manager"),
], else_="employee")
)
__mapper_args__ = {
"polymorphic_on":employee_type,
"polymorphic_identity":"employee"
}
.. versionchanged:: 0.7.4
``polymorphic_on`` may be specified as a SQL expression,
or refer to any attribute configured with
:func:`.column_property`, or to the string name of one.
When setting ``polymorphic_on`` to reference an
attribute or expression that's not present in the
locally mapped :class:`.Table`, yet the value
of the discriminator should be persisted to the database,
the value of the
discriminator is not automatically set on new
instances; this must be handled by the user,
either through manual means or via event listeners.
A typical approach to establishing such a listener
looks like::
from sqlalchemy import event
from sqlalchemy.orm import object_mapper
@event.listens_for(Employee, "init", propagate=True)
def set_identity(instance, *arg, **kw):
mapper = object_mapper(instance)
instance.discriminator = mapper.polymorphic_identity
Where above, we assign the value of ``polymorphic_identity``
for the mapped class to the ``discriminator`` attribute,
thus persisting the value to the ``discriminator`` column
in the database.
See also:
:ref:`inheritance_toplevel`
:param polymorphic_identity: Specifies the value which
identifies this particular class as returned by the
column expression referred to by the ``polymorphic_on``
setting. As rows are received, the value corresponding
to the ``polymorphic_on`` column expression is compared
to this value, indicating which subclass should
be used for the newly reconstructed object.
:param properties: A dictionary mapping the string names of object
attributes to :class:`.MapperProperty` instances, which define the
persistence behavior of that attribute. Note that :class:`.Column`
objects present in
the mapped :class:`.Table` are automatically placed into
``ColumnProperty`` instances upon mapping, unless overridden.
When using Declarative, this argument is passed automatically,
based on all those :class:`.MapperProperty` instances declared
in the declared class body.
:param primary_key: A list of :class:`.Column` objects which define the
primary key to be used against this mapper's selectable unit.
This is normally simply the primary key of the ``local_table``, but
can be overridden here.
:param version_id_col: A :class:`.Column`
that will be used to keep a running version id of mapped entities
in the database. This is used during save operations to ensure that
no other thread or process has updated the instance during the
lifetime of the entity, else a :class:`~sqlalchemy.orm.exc.StaleDataError`
exception is
thrown. By default the column must be of :class:`.Integer` type,
unless ``version_id_generator`` specifies a new generation
algorithm.
:param version_id_generator: A callable which defines the algorithm
used to generate new version ids. Defaults to an integer
generator. Can be replaced with one that generates timestamps,
uuids, etc. e.g.::
import uuid
class MyClass(Base):
__tablename__ = 'mytable'
id = Column(Integer, primary_key=True)
version_uuid = Column(String(32))
__mapper_args__ = {
'version_id_col':version_uuid,
'version_id_generator':lambda version:uuid.uuid4().hex
}
The callable receives the current version identifier as its
single argument.
:param with_polymorphic: A tuple in the form ``(<classes>,
<selectable>)`` indicating the default style of "polymorphic"
loading, that is, which tables are queried at once. <classes> is
any single or list of mappers and/or classes indicating the
inherited classes that should be loaded at once. The special value
``'*'`` may be used to indicate all descending classes should be
loaded immediately. The second tuple argument <selectable>
indicates a selectable that will be used to query for multiple
classes.
See also:
:ref:`concrete_inheritance` - typically uses ``with_polymorphic``
to specify a UNION statement to select from.
:ref:`with_polymorphic` - usage example of the related
:meth:`.Query.with_polymorphic` method
"""
return Mapper(class_, local_table, *args, **params)
def synonym(name, map_column=False, descriptor=None,
comparator_factory=None, doc=None):
"""Denote an attribute name as a synonym to a mapped property.
.. versionchanged:: 0.7
:func:`.synonym` is superseded by the :mod:`~sqlalchemy.ext.hybrid`
extension. See the documentation for hybrids
at :ref:`hybrids_toplevel`.
Used with the ``properties`` dictionary sent to
:func:`~sqlalchemy.orm.mapper`::
class MyClass(object):
def _get_status(self):
return self._status
def _set_status(self, value):
self._status = value
status = property(_get_status, _set_status)
mapper(MyClass, sometable, properties={
"status":synonym("_status", map_column=True)
})
Above, the ``status`` attribute of MyClass will produce
expression behavior against the table column named ``status``,
using the Python attribute ``_status`` on the mapped class
to represent the underlying value.
:param name: the name of the existing mapped property, which can be
any other ``MapperProperty`` including column-based properties and
relationships.
:param map_column: if ``True``, an additional ``ColumnProperty`` is created
on the mapper automatically, using the synonym's name as the keyname of
the property, and the keyname of this ``synonym()`` as the name of the
column to map.
"""
return SynonymProperty(name, map_column=map_column,
descriptor=descriptor,
comparator_factory=comparator_factory,
doc=doc)
def comparable_property(comparator_factory, descriptor=None):
"""Provides a method of applying a :class:`.PropComparator`
to any Python descriptor attribute.
.. versionchanged:: 0.7
:func:`.comparable_property` is superseded by
the :mod:`~sqlalchemy.ext.hybrid` extension. See the example
at :ref:`hybrid_custom_comparators`.
Allows any Python descriptor to behave like a SQL-enabled
attribute when used at the class level in queries, allowing
redefinition of expression operator behavior.
In the example below we redefine :meth:`.PropComparator.operate`
to wrap both sides of an expression in ``func.lower()`` to produce
case-insensitive comparison::
from sqlalchemy.orm import comparable_property
from sqlalchemy.orm.interfaces import PropComparator
from sqlalchemy.sql import func
from sqlalchemy import Integer, String, Column
from sqlalchemy.ext.declarative import declarative_base
class CaseInsensitiveComparator(PropComparator):
def __clause_element__(self):
return self.prop
def operate(self, op, other):
return op(
func.lower(self.__clause_element__()),
func.lower(other)
)
Base = declarative_base()
class SearchWord(Base):
__tablename__ = 'search_word'
id = Column(Integer, primary_key=True)
word = Column(String)
word_insensitive = comparable_property(lambda prop, mapper:
CaseInsensitiveComparator(mapper.c.word, mapper)
)
A mapping like the above allows the ``word_insensitive`` attribute
to render an expression like::
>>> print SearchWord.word_insensitive == "Trucks"
lower(search_word.word) = lower(:lower_1)
:param comparator_factory:
A PropComparator subclass or factory that defines operator behavior
for this property.
:param descriptor:
Optional when used in a ``properties={}`` declaration. The Python
descriptor or property to layer comparison behavior on top of.
The like-named descriptor will be automatically retrieved from the
mapped class if left blank in a ``properties`` declaration.
"""
return ComparableProperty(comparator_factory, descriptor)
@sa_util.deprecated("0.7", message=":func:`.compile_mappers` "
"is renamed to :func:`.configure_mappers`")
def compile_mappers():
"""Initialize the inter-mapper relationships of all mappers that have been defined."""
configure_mappers()
def clear_mappers():
"""Remove all mappers from all classes.
This function removes all instrumentation from classes and disposes
of their associated mappers. Once called, the classes are unmapped
and can be later re-mapped with new mappers.
:func:`.clear_mappers` is *not* for normal use, as there is literally no
valid usage for it outside of very specific testing scenarios. Normally,
mappers are permanent structural components of user-defined classes, and
are never discarded independently of their class. If a mapped class itself
is garbage collected, its mapper is automatically disposed of as well. As
such, :func:`.clear_mappers` is only for usage in test suites that re-use
the same classes with different mappings, which is itself an extremely rare
use case - the only such use case is in fact SQLAlchemy's own test suite,
and possibly the test suites of other ORM extension libraries which
intend to test various combinations of mapper construction upon a fixed
set of classes.
"""
mapperlib._COMPILE_MUTEX.acquire()
try:
while _mapper_registry:
try:
# can't even reliably call list(weakdict) in jython
mapper, b = _mapper_registry.popitem()
mapper.dispose()
except KeyError:
pass
finally:
mapperlib._COMPILE_MUTEX.release()
def joinedload(*keys, **kw):
"""Return a ``MapperOption`` that will convert the property of the given
    name or series of mapped attributes into a joined eager load.
.. versionchanged:: 0.6beta3
This function is known as :func:`eagerload` in all versions
of SQLAlchemy prior to version 0.6beta3, including the 0.5 and 0.4
series. :func:`eagerload` will remain available for the foreseeable
future in order to enable cross-compatibility.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
examples::
# joined-load the "orders" collection on "User"
query(User).options(joinedload(User.orders))
# joined-load the "keywords" collection on each "Item",
# but not the "items" collection on "Order" - those
# remain lazily loaded.
query(Order).options(joinedload(Order.items, Item.keywords))
# to joined-load across both, use joinedload_all()
query(Order).options(joinedload_all(Order.items, Item.keywords))
# set the default strategy to be 'joined'
query(Order).options(joinedload('*'))
:func:`joinedload` also accepts a keyword argument `innerjoin=True` which
indicates using an inner join instead of an outer::
query(Order).options(joinedload(Order.user, innerjoin=True))
.. note::
The join created by :func:`joinedload` is anonymously aliased such that
it **does not affect the query results**. An :meth:`.Query.order_by`
or :meth:`.Query.filter` call **cannot** reference these aliased
tables - so-called "user space" joins are constructed using
:meth:`.Query.join`. The rationale for this is that :func:`joinedload` is only
applied in order to affect how related objects or collections are loaded
as an optimizing detail - it can be added or removed with no impact
on actual results. See the section :ref:`zen_of_eager_loading` for
a detailed description of how this is used, including how to use a single
explicit JOIN for filtering/ordering and eager loading simultaneously.
See also: :func:`subqueryload`, :func:`lazyload`
"""
innerjoin = kw.pop('innerjoin', None)
if innerjoin is not None:
return (
strategies.EagerLazyOption(keys, lazy='joined'),
strategies.EagerJoinOption(keys, innerjoin)
)
else:
return strategies.EagerLazyOption(keys, lazy='joined')
def joinedload_all(*keys, **kw):
"""Return a ``MapperOption`` that will convert all properties along the
given dot-separated path or series of mapped attributes
    into a joined eager load.
.. versionchanged:: 0.6beta3
This function is known as :func:`eagerload_all` in all versions
of SQLAlchemy prior to version 0.6beta3, including the 0.5 and 0.4
series. :func:`eagerload_all` will remain available for the
foreseeable future in order to enable cross-compatibility.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
For example::
query.options(joinedload_all('orders.items.keywords'))...
will set all of ``orders``, ``orders.items``, and ``orders.items.keywords`` to
load in one joined eager load.
Individual descriptors are accepted as arguments as well::
query.options(joinedload_all(User.orders, Order.items, Item.keywords))
The keyword arguments accept a flag `innerjoin=True|False` which will
override the value of the `innerjoin` flag specified on the
relationship().
See also: :func:`subqueryload_all`, :func:`lazyload`
"""
innerjoin = kw.pop('innerjoin', None)
if innerjoin is not None:
return (
strategies.EagerLazyOption(keys, lazy='joined', chained=True),
strategies.EagerJoinOption(keys, innerjoin, chained=True)
)
else:
return strategies.EagerLazyOption(keys, lazy='joined', chained=True)
def eagerload(*args, **kwargs):
"""A synonym for :func:`joinedload()`."""
return joinedload(*args, **kwargs)
def eagerload_all(*args, **kwargs):
"""A synonym for :func:`joinedload_all()`"""
return joinedload_all(*args, **kwargs)
def subqueryload(*keys):
"""Return a ``MapperOption`` that will convert the property
of the given name or series of mapped attributes
    into a subquery eager load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
examples::
# subquery-load the "orders" collection on "User"
query(User).options(subqueryload(User.orders))
# subquery-load the "keywords" collection on each "Item",
# but not the "items" collection on "Order" - those
# remain lazily loaded.
query(Order).options(subqueryload(Order.items, Item.keywords))
# to subquery-load across both, use subqueryload_all()
query(Order).options(subqueryload_all(Order.items, Item.keywords))
# set the default strategy to be 'subquery'
query(Order).options(subqueryload('*'))
See also: :func:`joinedload`, :func:`lazyload`
"""
return strategies.EagerLazyOption(keys, lazy="subquery")
def subqueryload_all(*keys):
"""Return a ``MapperOption`` that will convert all properties along the
given dot-separated path or series of mapped attributes
into a subquery eager load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
For example::
query.options(subqueryload_all('orders.items.keywords'))...
will set all of ``orders``, ``orders.items``, and ``orders.items.keywords`` to
load in one subquery eager load.
Individual descriptors are accepted as arguments as well::
query.options(subqueryload_all(User.orders, Order.items,
Item.keywords))
See also: :func:`joinedload_all`, :func:`lazyload`, :func:`immediateload`
"""
return strategies.EagerLazyOption(keys, lazy="subquery", chained=True)
def lazyload(*keys):
"""Return a ``MapperOption`` that will convert the property of the given
name or series of mapped attributes into a lazy load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
See also: :func:`eagerload`, :func:`subqueryload`, :func:`immediateload`
"""
return strategies.EagerLazyOption(keys, lazy=True)
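# A sketch of forcing an otherwise eager relationship back to lazy loading for
# a single query; ``User`` and its ``addresses`` relationship are assumed to be
# mapped elsewhere and are illustrative only.
def _lazyload_usage_sketch(session, User):
    # addresses will now be loaded on first attribute access, not up front
    return session.query(User).options(lazyload(User.addresses)).all()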
def lazyload_all(*keys):
"""Return a ``MapperOption`` that will convert all the properties
along the given dot-separated path or series of mapped attributes
into a lazy load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
See also: :func:`eagerload`, :func:`subqueryload`, :func:`immediateload`
"""
return strategies.EagerLazyOption(keys, lazy=True, chained=True)
def noload(*keys):
"""Return a ``MapperOption`` that will convert the property of the
given name or series of mapped attributes into a non-load.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
See also: :func:`lazyload`, :func:`eagerload`,
:func:`subqueryload`, :func:`immediateload`
"""
return strategies.EagerLazyOption(keys, lazy=None)
def immediateload(*keys):
"""Return a ``MapperOption`` that will convert the property of the given
name or series of mapped attributes into an immediate load.
The "immediate" load means the attribute will be fetched
with a separate SELECT statement per parent in the
same way as lazy loading - except the loader is guaranteed
to be called at load time before the parent object
is returned in the result.
The normal behavior of lazy loading applies - if
the relationship is a simple many-to-one, and the child
object is already present in the :class:`.Session`,
no SELECT statement will be emitted.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
See also: :func:`lazyload`, :func:`eagerload`, :func:`subqueryload`
.. versionadded:: 0.6.5
"""
return strategies.EagerLazyOption(keys, lazy='immediate')
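# Illustrative usage sketch (not part of the original module), assuming a
# mapped ``User`` class with an ``addresses`` relationship:
#
#     session.query(User).options(immediateload(User.addresses))
#
# emits one extra SELECT per loaded ``User`` at query time, rather than
# waiting for the first attribute access as plain lazy loading would.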
def contains_alias(alias):
"""Return a :class:`.MapperOption` that will indicate to the query that
the main table has been aliased.
This is used in the very rare case that :func:`.contains_eager`
is being used in conjunction with a user-defined SELECT
statement that aliases the parent table. E.g.::
# define an aliased UNION called 'ulist'
statement = users.select(users.c.user_id==7).\\
union(users.select(users.c.user_id>7)).\\
alias('ulist')
# add on an eager load of "addresses"
statement = statement.outerjoin(addresses).\\
select().apply_labels()
# create query, indicating "ulist" will be an
# alias for the main table, "addresses"
# property should be eager loaded
query = session.query(User).options(
contains_alias('ulist'),
contains_eager('addresses'))
# then get results via the statement
results = query.from_statement(statement).all()
:param alias: is the string name of an alias, or a
:class:`~.sql.expression.Alias` object representing
the alias.
"""
return AliasOption(alias)
def contains_eager(*keys, **kwargs):
"""Return a ``MapperOption`` that will indicate to the query that
the given attribute should be eagerly loaded from columns currently
in the query.
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
The option is used in conjunction with an explicit join that loads
the desired rows, i.e.::
sess.query(Order).\\
join(Order.user).\\
options(contains_eager(Order.user))
The above query would join from the ``Order`` entity to its related
``User`` entity, and the returned ``Order`` objects would have the
``Order.user`` attribute pre-populated.
:func:`contains_eager` also accepts an `alias` argument, which is the
string name of an alias, an :func:`~sqlalchemy.sql.expression.alias`
construct, or an :func:`~sqlalchemy.orm.aliased` construct. Use this when
the eagerly-loaded rows are to come from an aliased table::
user_alias = aliased(User)
sess.query(Order).\\
join((user_alias, Order.user)).\\
options(contains_eager(Order.user, alias=user_alias))
See also :func:`eagerload` for the "automatic" version of this
functionality.
For additional examples of :func:`contains_eager` see
:ref:`contains_eager`.
"""
alias = kwargs.pop('alias', None)
if kwargs:
raise exceptions.ArgumentError('Invalid kwargs for contains_eager: %r' % kwargs.keys())
return strategies.EagerLazyOption(keys, lazy='joined',
propagate_to_loaders=False, chained=True), \
strategies.LoadEagerFromAliasOption(keys, alias=alias, chained=True)
def defer(*key):
"""Return a :class:`.MapperOption` that will convert the column property
of the given name into a deferred load.
Used with :meth:`.Query.options`.
e.g.::
from sqlalchemy.orm import defer
query(MyClass).options(defer("attribute_one"),
defer("attribute_two"))
A class bound descriptor is also accepted::
query(MyClass).options(
defer(MyClass.attribute_one),
defer(MyClass.attribute_two))
A "path" can be specified onto a related or collection object using a
dotted name. The :func:`.orm.defer` option will be applied to that object
when loaded::
query(MyClass).options(
defer("related.attribute_one"),
defer("related.attribute_two"))
To specify a path via class, send multiple arguments::
query(MyClass).options(
defer(MyClass.related, MyOtherClass.attribute_one),
defer(MyClass.related, MyOtherClass.attribute_two))
See also:
:ref:`deferred`
:param \*key: A key representing an individual path. Multiple entries
are accepted to allow a multiple-token path for a single target, not
multiple targets.
"""
return strategies.DeferredOption(key, defer=True)
def undefer(*key):
"""Return a :class:`.MapperOption` that will convert the column property
of the given name into a non-deferred (regular column) load.
Used with :meth:`.Query.options`.
e.g.::
from sqlalchemy.orm import undefer
query(MyClass).options(undefer("attribute_one"),
undefer("attribute_two"))
A class bound descriptor is also accepted::
query(MyClass).options(
undefer(MyClass.attribute_one),
undefer(MyClass.attribute_two))
A "path" can be specified onto a related or collection object using a
dotted name. The :func:`.orm.undefer` option will be applied to that
object when loaded::
query(MyClass).options(
undefer("related.attribute_one"),
undefer("related.attribute_two"))
To specify a path via class, send multiple arguments::
query(MyClass).options(
undefer(MyClass.related, MyOtherClass.attribute_one),
undefer(MyClass.related, MyOtherClass.attribute_two))
See also:
:func:`.orm.undefer_group` as a means to "undefer" a group
of attributes at once.
:ref:`deferred`
:param \*key: A key representing an individual path. Multiple entries
are accepted to allow a multiple-token path for a single target, not
multiple targets.
"""
return strategies.DeferredOption(key, defer=False)
def undefer_group(name):
"""Return a :class:`.MapperOption` that will convert the given group of deferred
column properties into a non-deferred (regular column) load.
Used with :meth:`.Query.options`.
e.g.::
query(MyClass).options(undefer("group_one"))
See also:
:ref:`deferred`
:param name: String name of the deferred group. This name is
established using the "group" name to the :func:`.orm.deferred`
configurational function.
"""
return strategies.UndeferGroupOption(name)
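# Illustrative usage sketch (not part of the original module); the class and
# column names are assumed. The group name matches the ``group`` keyword given
# to :func:`.orm.deferred` when the mapping is defined:
#
#     mapper(Book, book_table, properties={
#         'summary': deferred(book_table.c.summary, group='excerpts'),
#         'excerpt': deferred(book_table.c.excerpt, group='excerpts'),
#     })
#     session.query(Book).options(undefer_group('excerpts'))
#
# undeferring the group loads both columns in the same SELECT as the row.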
from sqlalchemy import util as _sa_util
_sa_util.importlater.resolve_all()
| gpl-3.0 |
petosegan/scikit-learn | sklearn/metrics/__init__.py | 214 | 3440 | """
The :mod:`sklearn.metrics` module includes score functions, performance
metrics, pairwise metrics, and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
]
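# Illustrative usage sketch (not part of the original module), showing one of
# the re-exported helpers; the arrays are made-up toy data:
#
#     from sklearn.metrics import accuracy_score
#     accuracy_score([0, 1, 1], [0, 0, 1])   # -> 0.666..., 2 of 3 correct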
| bsd-3-clause |
NiklasRosenstein/pydoc-markdown | pydoc-markdown/src/test/test_processors/test_sphinx.py | 1 | 1153 |
from . import assert_processor_result
from pydoc_markdown.contrib.processors.sphinx import SphinxProcessor
def test_sphinx_processor(processor=None):
assert_processor_result(processor or SphinxProcessor(),
'''
:param s: A string.
:param b: An int.
:return: Something funny.
''',
'''
**Arguments**:
- `s`: A string.
- `b`: An int.
**Returns**:
Something funny.
''')
# check code blocks indentation
assert_processor_result(processor or SphinxProcessor(),
'''
Code example:
```
with a() as b:
b()
```
Implicit block:
c()
A longer one:
d()
with e() as f:
f()
''',
'''
Code example:
```
with a() as b:
b()
```
Implicit block:
c()
A longer one:
d()
with e() as f:
f()
''')
assert_processor_result(processor or SphinxProcessor(),
'''
:param foo: A foo value
:type foo: str
:type bar: int
:param bar: A bar value
:returns: Some eggs from foo and bar
:rtype: str
''',
'''
**Arguments**:
- `foo` (`str`): A foo value
- `bar` (`int`): A bar value
**Returns**:
`str`: Some eggs from foo and bar
''')
| mit |
openprocurement/reports | reports/tests/invoices_tests.py | 1 | 5864 | import unittest
from copy import copy
from reports.tests.base import BaseInvoicesUtilityTest
from reports.utilities.invoices import NEW_ALG_DATE
from reports.helpers import prepare_result_file_name
test_bids_valid = [
[{
"id": "bid_id",
"status": "active",
"date": "2017-12-01T00:00:00Z",
"owner": "test"
}],
[{
"owner": "test",
"date": "2017-10-05T13:32:25.774673+02:00",
"id": "44931d9653034837baff087cfc2fb5ac",
}],
[{
"owner": "test",
"date": "2017-10-10T13:32:25.774673+02:00",
"id": "f55962b1374b43ddb886821c0582bc7f"
}]]
test_award_period = '2016-04-17T13:32:25.774673+02:00'
class ReportInvoicesUtilityTestCase(BaseInvoicesUtilityTest):
def test_invoices_utility_output(self):
data = {
"date": "2017-12-15T00:01:50+02:00",
"procurementMethodType": "belowThreshold",
"status": "cancelled",
"bids": [{
"id": "bid_id",
"status": "active",
"date": "2017-12-01T00:00:00Z",
"owner": "test"
}],
"awards": [{
"bid_id": "bid_id",
"status": "active",
"date": "2017-12-01T00:00:00Z",
}]
}
doc = copy(self.test_data)
doc.update(data)
self.utility.db.save(doc)
self.utility.counters = {
index: [0 for _ in range(0, 5)]
for index in range(0, 5)
}
self.utility.run()
self.assertEqual(
self.utility.counters[3], [1, 0, 0, 0, 0]
)
def expected_output():
return '{}\r\n'.format(','.join(self.utility.headers)) +\
'after_2017-01-01\r\n' +\
'{}\r\n'.format(','.join((str(i) for i in self.utility.counters[0]))) +\
'{}\r\n'.format(','.join((str(i) for i in self.utility.config.payments(grid=2017)))) +\
'{}\r\n'.format(','.join(
(str(c * v) for c, v in zip(self.utility.counters[0], self.utility.config.payments())))) +\
'after_2017-08-16\r\n' +\
'{}\r\n'.format(','.join(
(str(i) for i in self.utility.counters[1]))) +\
'{}\r\n'.format(','.join(
(str(i) for i in self.utility.counters[2]))) +\
'{}\r\n'.format(','.join(
(str(i) for i in self.utility.counters[3]))) +\
'{}\r\n'.format(','.join(
(str(a - b - c) for a, b, c in zip(
self.utility.counters[1], self.utility.counters[2], self.utility.counters[3]
))), '\r\n') +\
'{}\r\n'.format(','.join(
(str(i) for i in self.utility.config.payments()))) +\
'{}\r\n'.format(','.join(
(str(c * v) for c, v in
zip((a - b - c for a, b, c in zip(
self.utility.counters[1], self.utility.counters[2], self.utility.counters[3]
)), self.utility.config.payments())))
)
with open(prepare_result_file_name(self.utility), 'rb') as file:
self.assertEqual(file.read(), expected_output())
doc = self.utility.db[doc['_id']]
doc.update({'value': {'amount': 25000, 'currency': 'UAH'}})
self.utility.db.save(doc)
self.utility.counters = {
index: [0 for _ in range(0, 5)]
for index in range(0, 5)
}
self.utility.run()
self.utility.counter = self.utility.counters[3]
self.assertEqual(
self.utility.counters[3], [0, 1, 0, 0, 0]
)
with open(prepare_result_file_name(self.utility), 'rb') as file:
self.assertEqual(file.read(), expected_output())
doc = self.utility.db[doc['_id']]
doc.update({'value': {'amount': 55000, 'currency': 'UAH'}})
self.utility.db.save(doc)
self.utility.counters = {
index: [0 for _ in range(0, 5)]
for index in range(0, 5)
}
self.utility.run()
self.assertEqual(
self.utility.counters[3], [0, 0, 1, 0, 0]
)
with open(prepare_result_file_name(self.utility), 'rb') as file:
self.assertEqual(file.read(), expected_output())
self.utility.counter = [0 for _ in self.utility.config.payments()]
doc = self.utility.db[doc['_id']]
doc.update({'value': {'amount': 255000, 'currency': 'UAH'}})
self.utility.db.save(doc)
self.utility.counters = {
index: [0 for _ in range(0, 5)]
for index in range(0, 5)
}
self.utility.run()
self.assertEqual(
self.utility.counters[3], [0, 0, 0, 1, 0]
)
with open(prepare_result_file_name(self.utility), 'rb') as file:
self.assertEqual(file.read(), expected_output())
self.utility.counter = [0 for _ in self.utility.config.payments()]
doc = self.utility.db[doc['_id']]
doc.update({'value': {'amount': 1255000, 'currency': 'UAH'}})
self.utility.db.save(doc)
self.utility.counters = {
index: [0 for _ in range(0, 5)]
for index in range(0, 5)
}
self.utility.run()
self.assertEqual(
self.utility.counters[3], [0, 0, 0, 0, 1]
)
with open(prepare_result_file_name(self.utility), 'rb') as file:
self.assertEqual(file.read(), expected_output())
del self.utility.db[doc['_id']]
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ReportInvoicesUtilityTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| apache-2.0 |
mottosso/mindbender-setup | bin/windows/python36/Lib/reprlib.py | 46 | 5336 | """Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr", "repr", "recursive_repr"]
import builtins
from itertools import islice
try:
from _thread import get_ident
except ImportError:
from _dummy_thread import get_ident
def recursive_repr(fillvalue='...'):
'Decorator to make a repr function return fillvalue for a recursive call'
def decorating_function(user_function):
repr_running = set()
def wrapper(self):
key = id(self), get_ident()
if key in repr_running:
return fillvalue
repr_running.add(key)
try:
result = user_function(self)
finally:
repr_running.discard(key)
return result
# Can't use functools.wraps() here because of bootstrap issues
wrapper.__module__ = getattr(user_function, '__module__')
wrapper.__doc__ = getattr(user_function, '__doc__')
wrapper.__name__ = getattr(user_function, '__name__')
wrapper.__qualname__ = getattr(user_function, '__qualname__')
wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
return wrapper
return decorating_function
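# Illustrative usage sketch (not part of the original module), mirroring the
# intended use of recursive_repr on a container type's __repr__:
#
#     class Node(list):
#         @recursive_repr()
#         def __repr__(self):
#             return '<Node %s>' % list.__repr__(self)
#
#     n = Node()
#     n.append(n)
#     repr(n)   # -> '<Node [...]>' instead of infinite recursion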
class Repr:
def __init__(self):
self.maxlevel = 6
self.maxtuple = 6
self.maxlist = 6
self.maxarray = 5
self.maxdict = 4
self.maxset = 6
self.maxfrozenset = 6
self.maxdeque = 6
self.maxstring = 30
self.maxlong = 40
self.maxother = 30
def repr(self, x):
return self.repr1(x, self.maxlevel)
def repr1(self, x, level):
typename = type(x).__name__
if ' ' in typename:
parts = typename.split()
typename = '_'.join(parts)
if hasattr(self, 'repr_' + typename):
return getattr(self, 'repr_' + typename)(x, level)
else:
return self.repr_instance(x, level)
def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
n = len(x)
if level <= 0 and n:
s = '...'
else:
newlevel = level - 1
repr1 = self.repr1
pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
if n > maxiter: pieces.append('...')
s = ', '.join(pieces)
if n == 1 and trail: right = trail + right
return '%s%s%s' % (left, s, right)
def repr_tuple(self, x, level):
return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
def repr_list(self, x, level):
return self._repr_iterable(x, level, '[', ']', self.maxlist)
def repr_array(self, x, level):
if not x:
return "array('%s')" % x.typecode
header = "array('%s', [" % x.typecode
return self._repr_iterable(x, level, header, '])', self.maxarray)
def repr_set(self, x, level):
if not x:
return 'set()'
x = _possibly_sorted(x)
return self._repr_iterable(x, level, '{', '}', self.maxset)
def repr_frozenset(self, x, level):
if not x:
return 'frozenset()'
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'frozenset({', '})',
self.maxfrozenset)
def repr_deque(self, x, level):
return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
def repr_dict(self, x, level):
n = len(x)
if n == 0: return '{}'
if level <= 0: return '{...}'
newlevel = level - 1
repr1 = self.repr1
pieces = []
for key in islice(_possibly_sorted(x), self.maxdict):
keyrepr = repr1(key, newlevel)
valrepr = repr1(x[key], newlevel)
pieces.append('%s: %s' % (keyrepr, valrepr))
if n > self.maxdict: pieces.append('...')
s = ', '.join(pieces)
return '{%s}' % (s,)
def repr_str(self, x, level):
s = builtins.repr(x[:self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = builtins.repr(x[:i] + x[len(x)-j:])
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_int(self, x, level):
s = builtins.repr(x) # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
try:
s = builtins.repr(x)
# Bugs in x.__repr__() can cause arbitrary
# exceptions -- then make up something
except Exception:
return '<%s instance at %#x>' % (x.__class__.__name__, id(x))
if len(s) > self.maxother:
i = max(0, (self.maxother-3)//2)
j = max(0, self.maxother-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
aRepr = Repr()
repr = aRepr.repr
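# Illustrative usage sketch (not part of the original module): the limits on a
# Repr instance can be tightened before calling its repr() method:
#
#     short = Repr()
#     short.maxstring = 12
#     short.repr('0123456789abcdef')   # -> "'012...cdef'"
#
# Subclassing Repr and adding repr_<typename> methods extends it to new types.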
| mit |
alirizakeles/tendenci | tendenci/apps/versions/models.py | 1 | 2293 | # models.py
from dateutil.parser import parse
from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.fields import GenericForeignKey
import json
from tendenci.apps.versions.managers import VersionManager
class Version(models.Model):
"""
Creates a historical version of an object.
Stores the creator, create_dt, and the object serialized in json
"""
create_dt = models.DateTimeField(_('create time'))
user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
content_type = models.ForeignKey(ContentType)
object_id = models.IntegerField(_('object id'))
object_repr = models.CharField(_('object repr'), max_length=200)
object_changes = models.TextField(_('change message'), blank=True)
object_value = models.TextField(_('changed object'), blank=True)
hash = models.CharField(max_length=40, null=True, default='')
_object = GenericForeignKey('content_type', 'object_id')
objects = VersionManager()
class Meta:
app_label = 'versions'
def get_object(self):
_object = None
try:
_object = self._object
except:
pass
return _object
def get_version_object(self):
data = json.loads(self.object_value)['fields']
obj_data = {'pk': self.object_id}
for f in self.get_object()._meta.fields:
field_name = unicode(f.name)
if field_name in data:
#print unicode(f.get_internal_type())
if unicode(f.get_internal_type()) == 'ForeignKey' or unicode(f.get_internal_type()) == 'OneToOneField':
obj_data[field_name + "_id"] = data[field_name]
elif unicode(f.get_internal_type()) == 'DateTimeField':
obj_data[field_name] = parse(data[field_name])
else:
obj_data[field_name] = data[field_name]
obj = self.get_object().__class__(**obj_data)
return obj
def get_object_version_url(self):
try:
return self.get_version_object().get_version_url(self.hash)
except:
return ''
| gpl-3.0 |
yiannist/ganeti | lib/cmdlib/instance_migration.py | 1 | 49981 | #
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Logical units dealing with instance migration an failover."""
import logging
import time
from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import hypervisor
from ganeti.masterd import iallocator
from ganeti import utils
from ganeti.cmdlib.base import LogicalUnit, Tasklet
from ganeti.cmdlib.common import ExpandInstanceUuidAndName, \
CheckIAllocatorOrNode, ExpandNodeUuidAndName
from ganeti.cmdlib.instance_storage import CheckDiskConsistency, \
ExpandCheckDisks, ShutdownInstanceDisks, AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
CheckTargetNodeIPolicy, ReleaseLocks, CheckNodeNotDrained, \
CopyLockList, CheckNodeFreeMemory, CheckInstanceBridgesExist
import ganeti.masterd.instance
def _ExpandNamesForMigration(lu):
"""Expands names for use with L{TLMigrateInstance}.
@type lu: L{LogicalUnit}
"""
if lu.op.target_node is not None:
(lu.op.target_node_uuid, lu.op.target_node) = \
ExpandNodeUuidAndName(lu.cfg, lu.op.target_node_uuid, lu.op.target_node)
lu.needed_locks[locking.LEVEL_NODE] = []
lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
lu.dont_collate_locks[locking.LEVEL_NODE] = True
lu.needed_locks[locking.LEVEL_NODE_RES] = []
lu.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
lu.dont_collate_locks[locking.LEVEL_NODE_RES] = True
def _DeclareLocksForMigration(lu, level):
"""Declares locks for L{TLMigrateInstance}.
@type lu: L{LogicalUnit}
@param level: Lock level
"""
if level == locking.LEVEL_NODE:
assert lu.op.instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
instance = lu.cfg.GetInstanceInfo(lu.op.instance_uuid)
disks = lu.cfg.GetInstanceDisks(instance.uuid)
if utils.AnyDiskOfType(disks, constants.DTS_EXT_MIRROR):
if lu.op.target_node is None:
lu.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
else:
lu.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
lu.op.target_node_uuid]
else:
lu._LockInstancesNodes() # pylint: disable=W0212
assert (lu.needed_locks[locking.LEVEL_NODE] or
lu.needed_locks[locking.LEVEL_NODE] is locking.ALL_SET)
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
lu.needed_locks[locking.LEVEL_NODE_RES] = \
CopyLockList(lu.needed_locks[locking.LEVEL_NODE])
class LUInstanceFailover(LogicalUnit):
"""Failover an instance.
This is migration by shutting the instance down, but with the disks
of the instance already available on the new node.
See also:
L{LUInstanceMove} for moving an instance by copying the data.
L{LUInstanceMigrate} for the live migration of an instance (no shutdown
required).
"""
HPATH = "instance-failover"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def CheckArguments(self):
"""Check the arguments.
"""
self.iallocator = getattr(self.op, "iallocator", None)
self.target_node = getattr(self.op, "target_node", None)
def ExpandNames(self):
self._ExpandAndLockInstance(allow_forthcoming=True)
_ExpandNamesForMigration(self)
self._migrater = \
TLMigrateInstance(self, self.op.instance_uuid, self.op.instance_name,
self.op.cleanup, True, False,
self.op.ignore_consistency, True,
self.op.shutdown_timeout, self.op.ignore_ipolicy, True)
self.tasklets = [self._migrater]
def DeclareLocks(self, level):
_DeclareLocksForMigration(self, level)
def BuildHooksEnv(self):
"""Build hooks env.
This runs on master, primary and secondary nodes of the instance.
"""
instance = self._migrater.instance
source_node_uuid = instance.primary_node
target_node_uuid = self._migrater.target_node_uuid
env = {
"IGNORE_CONSISTENCY": self.op.ignore_consistency,
"SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
"OLD_PRIMARY": self.cfg.GetNodeName(source_node_uuid),
"NEW_PRIMARY": self.cfg.GetNodeName(target_node_uuid),
"FAILOVER_CLEANUP": self.op.cleanup,
}
disks = self.cfg.GetInstanceDisks(instance.uuid)
if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
env["OLD_SECONDARY"] = self.cfg.GetNodeName(secondary_nodes[0])
env["NEW_SECONDARY"] = self.cfg.GetNodeName(source_node_uuid)
else:
env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
env.update(BuildInstanceHookEnvByObject(self, instance))
return env
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
instance = self._migrater.instance
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
nl = [self.cfg.GetMasterNode()] + list(secondary_nodes)
nl.append(self._migrater.target_node_uuid)
return (nl, nl + [instance.primary_node])
class LUInstanceMigrate(LogicalUnit):
"""Migrate an instance.
This is migration without shutting down (live migration) and the disks are
already available on the new node.
See also:
L{LUInstanceMove} for moving an instance by copying the data.
L{LUInstanceFailover} for the migration of an instance where a shutdown is
required.
"""
HPATH = "instance-migrate"
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
def ExpandNames(self):
self._ExpandAndLockInstance()
_ExpandNamesForMigration(self)
self._migrater = \
TLMigrateInstance(self, self.op.instance_uuid, self.op.instance_name,
self.op.cleanup, False, self.op.allow_failover, False,
self.op.allow_runtime_changes,
constants.DEFAULT_SHUTDOWN_TIMEOUT,
self.op.ignore_ipolicy, self.op.ignore_hvversions)
self.tasklets = [self._migrater]
def DeclareLocks(self, level):
_DeclareLocksForMigration(self, level)
def BuildHooksEnv(self):
"""Build hooks env.
This runs on master, primary and secondary nodes of the instance.
"""
instance = self._migrater.instance
source_node_uuid = instance.primary_node
target_node_uuid = self._migrater.target_node_uuid
env = BuildInstanceHookEnvByObject(self, instance)
env.update({
"MIGRATE_LIVE": self._migrater.live,
"MIGRATE_CLEANUP": self.op.cleanup,
"OLD_PRIMARY": self.cfg.GetNodeName(source_node_uuid),
"NEW_PRIMARY": self.cfg.GetNodeName(target_node_uuid),
"ALLOW_RUNTIME_CHANGES": self.op.allow_runtime_changes,
})
disks = self.cfg.GetInstanceDisks(instance.uuid)
if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
env["OLD_SECONDARY"] = self.cfg.GetNodeName(secondary_nodes[0])
env["NEW_SECONDARY"] = self.cfg.GetNodeName(source_node_uuid)
else:
env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
return env
def BuildHooksNodes(self):
"""Build hooks nodes.
"""
instance = self._migrater.instance
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
snode_uuids = list(secondary_nodes)
nl = [self.cfg.GetMasterNode(), instance.primary_node] + snode_uuids
nl.append(self._migrater.target_node_uuid)
return (nl, nl)
class TLMigrateInstance(Tasklet):
"""Tasklet class for instance migration.
@type live: boolean
@ivar live: whether the migration will be done live or non-live;
this variable is initialized only after CheckPrereq has run
@type cleanup: boolean
@ivar cleanup: Whether we clean up from a failed migration
@type iallocator: string
@ivar iallocator: The iallocator used to determine target_node
@type target_node_uuid: string
@ivar target_node_uuid: If given, the target node UUID to reallocate the
instance to
@type failover: boolean
@ivar failover: Whether operation results in failover or migration
@type fallback: boolean
@ivar fallback: Whether fallback to failover is allowed if migration is not
possible
@type ignore_consistency: boolean
@ivar ignore_consistency: Whether we should ignore consistency between source
and target node
@type shutdown_timeout: int
@ivar shutdown_timeout: In case of failover, the timeout for the instance shutdown
@type ignore_ipolicy: bool
@ivar ignore_ipolicy: If true, we can ignore instance policy when migrating
@type ignore_hvversions: bool
@ivar ignore_hvversions: If true, accept incompatible hypervisor versions
"""
# Constants
_MIGRATION_POLL_INTERVAL = 1 # seconds
_MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
_POSTCOPY_SYNC_COUNT_THRESHOLD = 2 # Precopy passes before enabling postcopy
def __init__(self, lu, instance_uuid, instance_name, cleanup, failover,
fallback, ignore_consistency, allow_runtime_changes,
shutdown_timeout, ignore_ipolicy, ignore_hvversions):
"""Initializes this class.
"""
Tasklet.__init__(self, lu)
# Parameters
self.instance_uuid = instance_uuid
self.instance_name = instance_name
self.cleanup = cleanup
self.live = False # will be overridden later
self.failover = failover
self.fallback = fallback
self.ignore_consistency = ignore_consistency
self.shutdown_timeout = shutdown_timeout
self.ignore_ipolicy = ignore_ipolicy
self.allow_runtime_changes = allow_runtime_changes
self.ignore_hvversions = ignore_hvversions
def CheckPrereq(self):
"""Check prerequisites.
This checks that the instance is in the cluster.
"""
(self.instance_uuid, self.instance_name) = \
ExpandInstanceUuidAndName(self.lu.cfg, self.instance_uuid,
self.instance_name)
self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
assert self.instance is not None
cluster = self.cfg.GetClusterInfo()
if (not self.cleanup and
not self.instance.admin_state == constants.ADMINST_UP and
not self.failover and self.fallback):
self.lu.LogInfo("Instance is marked down or offline, fallback allowed,"
" switching to failover")
self.failover = True
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
if not utils.AllDiskOfType(disks, constants.DTS_MIRRORED):
if self.failover:
text = "failovers"
else:
text = "migrations"
invalid_disks = set(d.dev_type for d in disks
if d.dev_type not in constants.DTS_MIRRORED)
raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
" %s" % (utils.CommaJoin(invalid_disks), text),
errors.ECODE_STATE)
# TODO allow heterogeneous disk types if all are mirrored in some way.
if utils.AllDiskOfType(disks, constants.DTS_EXT_MIRROR):
CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
if self.lu.op.iallocator:
self._RunAllocator()
else:
# We set self.target_node_uuid as it is required by
# BuildHooksEnv
self.target_node_uuid = self.lu.op.target_node_uuid
# Check that the target node is correct in terms of instance policy
nodeinfo = self.cfg.GetNodeInfo(self.target_node_uuid)
group_info = self.cfg.GetNodeGroup(nodeinfo.group)
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
group_info)
CheckTargetNodeIPolicy(self.lu, ipolicy, self.instance, nodeinfo,
self.cfg, ignore=self.ignore_ipolicy)
# self.target_node is already populated, either directly or by the
# iallocator run
target_node_uuid = self.target_node_uuid
if self.target_node_uuid == self.instance.primary_node:
raise errors.OpPrereqError(
"Cannot migrate instance %s to its primary (%s)" %
(self.instance.name,
self.cfg.GetNodeName(self.instance.primary_node)),
errors.ECODE_STATE)
if len(self.lu.tasklets) == 1:
# It is safe to release locks only when we're the only tasklet
# in the LU
ReleaseLocks(self.lu, locking.LEVEL_NODE,
keep=[self.instance.primary_node, self.target_node_uuid])
elif utils.AllDiskOfType(disks, constants.DTS_INT_MIRROR):
templates = [d.dev_type for d in disks]
secondary_node_uuids = \
self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
if not secondary_node_uuids:
raise errors.ConfigurationError("No secondary node but using"
" %s disk types" %
utils.CommaJoin(set(templates)))
self.target_node_uuid = target_node_uuid = secondary_node_uuids[0]
if self.lu.op.iallocator or \
(self.lu.op.target_node_uuid and
self.lu.op.target_node_uuid != target_node_uuid):
if self.failover:
text = "failed over"
else:
text = "migrated"
raise errors.OpPrereqError("Instances with disk types %s cannot"
" be %s to arbitrary nodes"
" (neither an iallocator nor a target"
" node can be passed)" %
(utils.CommaJoin(set(templates)), text),
errors.ECODE_INVAL)
nodeinfo = self.cfg.GetNodeInfo(target_node_uuid)
group_info = self.cfg.GetNodeGroup(nodeinfo.group)
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
group_info)
CheckTargetNodeIPolicy(self.lu, ipolicy, self.instance, nodeinfo,
self.cfg, ignore=self.ignore_ipolicy)
else:
raise errors.OpPrereqError("Instance mixes internal and external "
"mirroring. This is not currently supported.")
i_be = cluster.FillBE(self.instance)
# check memory requirements on the secondary node
if (not self.cleanup and
(not self.failover or
self.instance.admin_state == constants.ADMINST_UP)):
self.tgt_free_mem = CheckNodeFreeMemory(
self.lu, target_node_uuid,
"migrating instance %s" % self.instance.name,
i_be[constants.BE_MINMEM], self.instance.hypervisor,
self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
else:
self.lu.LogInfo("Not checking memory on the secondary node as"
" instance will not be started")
# check if failover must be forced instead of migration
if (not self.cleanup and not self.failover and
i_be[constants.BE_ALWAYS_FAILOVER]):
self.lu.LogInfo("Instance configured to always failover; fallback"
" to failover")
self.failover = True
# check bridge existence
CheckInstanceBridgesExist(self.lu, self.instance,
node_uuid=target_node_uuid)
if not self.cleanup:
CheckNodeNotDrained(self.lu, target_node_uuid)
if not self.failover:
result = self.rpc.call_instance_migratable(self.instance.primary_node,
self.instance)
if result.fail_msg and self.fallback:
self.lu.LogInfo("Can't migrate, instance offline, fallback to"
" failover")
self.failover = True
else:
result.Raise("Can't migrate, please use failover",
prereq=True, ecode=errors.ECODE_STATE)
assert not (self.failover and self.cleanup)
if not self.failover:
if self.lu.op.live is not None and self.lu.op.mode is not None:
raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
" parameters are accepted",
errors.ECODE_INVAL)
if self.lu.op.live is not None:
if self.lu.op.live:
self.lu.op.mode = constants.HT_MIGRATION_LIVE
else:
self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
# reset the 'live' parameter to None so that repeated
# invocations of CheckPrereq do not raise an exception
self.lu.op.live = None
elif self.lu.op.mode is None:
# read the default value from the hypervisor
i_hv = cluster.FillHV(self.instance, skip_globals=False)
self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
else:
# Failover is never live
self.live = False
if not (self.failover or self.cleanup):
remote_info = self.rpc.call_instance_info(
self.instance.primary_node, self.instance.name,
self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
remote_info.Raise("Error checking instance on node %s" %
self.cfg.GetNodeName(self.instance.primary_node),
prereq=True)
instance_running = bool(remote_info.payload)
if instance_running:
self.current_mem = int(remote_info.payload["memory"])
def _RunAllocator(self):
"""Run the allocator based on input opcode.
"""
# FIXME: add a self.ignore_ipolicy option
req = iallocator.IAReqRelocate(
inst_uuid=self.instance_uuid,
relocate_from_node_uuids=[self.instance.primary_node])
ial = iallocator.IAllocator(self.cfg, self.rpc, req)
ial.Run(self.lu.op.iallocator)
if not ial.success:
raise errors.OpPrereqError("Can't compute nodes using"
" iallocator '%s': %s" %
(self.lu.op.iallocator, ial.info),
errors.ECODE_NORES)
self.target_node_uuid = self.cfg.GetNodeInfoByName(
ial.result[0]).uuid # pylint: disable=E1136
self.lu.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
self.instance_name, self.lu.op.iallocator,
utils.CommaJoin(ial.result))
def _WaitUntilSync(self):
"""Poll with custom rpc for disk sync.
This uses our own step-based rpc call.
"""
self.feedback_fn("* wait until resync is done")
all_done = False
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
while not all_done:
all_done = True
result = self.rpc.call_drbd_wait_sync(self.all_node_uuids,
(disks, self.instance))
min_percent = 100
for node_uuid, nres in result.items():
nres.Raise("Cannot resync disks on node %s" %
self.cfg.GetNodeName(node_uuid))
node_done, node_percent = nres.payload
all_done = all_done and node_done
if node_percent is not None:
min_percent = min(min_percent, node_percent)
if not all_done:
if min_percent < 100:
self.feedback_fn(" - progress: %.1f%%" % min_percent)
time.sleep(2)
def _OpenInstanceDisks(self, node_uuid, exclusive):
"""Open instance disks.
"""
if exclusive:
mode = "in exclusive mode"
else:
mode = "in shared mode"
node_name = self.cfg.GetNodeName(node_uuid)
self.feedback_fn("* opening instance disks on node %s %s" %
(node_name, mode))
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
result = self.rpc.call_blockdev_open(node_uuid, self.instance.name,
(disks, self.instance), exclusive)
result.Raise("Cannot open instance disks on node %s" % node_name)
def _CloseInstanceDisks(self, node_uuid):
"""Close instance disks.
"""
node_name = self.cfg.GetNodeName(node_uuid)
self.feedback_fn("* closing instance disks on node %s" % node_name)
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
result = self.rpc.call_blockdev_close(node_uuid, self.instance.name,
(disks, self.instance))
msg = result.fail_msg
if msg:
if result.offline or self.ignore_consistency:
self.lu.LogWarning("Could not close instance disks on node %s,"
" proceeding anyway" % node_name)
else:
raise errors.OpExecError("Cannot close instance disks on node %s: %s" %
(node_name, msg))
def _GoStandalone(self):
"""Disconnect from the network.
"""
self.feedback_fn("* changing into standalone mode")
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
result = self.rpc.call_drbd_disconnect_net(
self.all_node_uuids, (disks, self.instance))
for node_uuid, nres in result.items():
nres.Raise("Cannot disconnect disks node %s" %
self.cfg.GetNodeName(node_uuid))
def _GoReconnect(self, multimaster):
"""Reconnect to the network.
"""
if multimaster:
msg = "dual-master"
else:
msg = "single-master"
self.feedback_fn("* changing disks into %s mode" % msg)
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
result = self.rpc.call_drbd_attach_net(self.all_node_uuids,
(disks, self.instance),
multimaster)
for node_uuid, nres in result.items():
nres.Raise("Cannot change disks config on node %s" %
self.cfg.GetNodeName(node_uuid))
def _FindInstanceLocations(self, name):
"""Returns a list of nodes that have the given instance running
Args:
name: string, instance name to search for
Returns:
list of strings, node uuids
"""
self.feedback_fn("* checking where the instance actually runs (if this"
" hangs, the hypervisor might be in a bad state)")
cluster_hvparams = self.cfg.GetClusterInfo().hvparams
online_node_uuids = self.cfg.GetOnlineNodeList()
instance_list = self.rpc.call_instance_list(
online_node_uuids, [self.instance.hypervisor], cluster_hvparams)
# Verify each result and raise an exception if failed
for node_uuid, result in instance_list.items():
result.Raise("Can't contact node %s" % self.cfg.GetNodeName(node_uuid))
# Xen renames the instance during migration, unfortunately we don't have
# a nicer way of identifying that it's the same instance. This is an awful
# leaking abstraction.
# xm and xl have different (undocumented) naming conventions
# xm: (in tools/python/xen/xend/XendCheckpoint.py save() & restore())
# source dom name target dom name
# during copy: migrating-$DOM $DOM
# finalize migrate: <none> $DOM
# finished: <none> $DOM
#
# xl: (in tools/libxl/xl_cmdimpl.c migrate_domain() & migrate_receive())
# source dom name target dom name
# during copy: $DOM $DOM--incoming
# finalize migrate: $DOM--migratedaway $DOM
# finished: <none> $DOM
variants = [
name, 'migrating-' + name, name + '--incoming', name + '--migratedaway']
node_uuids = [node for node, data in instance_list.items()
if any(var in data.payload for var in variants)]
self.feedback_fn("* instance running on: %s" % ','.join(
self.cfg.GetNodeName(uuid) for uuid in node_uuids))
return node_uuids
def _ExecCleanup(self):
"""Try to cleanup after a failed migration.
The cleanup is done by:
- check that the instance is running only on one node
- try 'aborting' migration if it is running on two nodes
- update the config if needed
- change disks on its secondary node to secondary
- wait until disks are fully synchronized
- disconnect from the network
- change disks into single-master mode
- wait again until disks are fully synchronized
"""
instance_locations = self._FindInstanceLocations(self.instance.name)
runningon_source = self.source_node_uuid in instance_locations
runningon_target = self.target_node_uuid in instance_locations
if runningon_source and runningon_target:
# If we have an instance on both the source and the destination, we know
# that instance migration was interrupted in the middle, we can try to
# do effectively the same as when aborting an interrupted migration.
self.feedback_fn("Trying to cleanup after failed migration")
result = self.rpc.call_migration_info(
self.source_node_uuid, self.instance)
if result.fail_msg:
raise errors.OpExecError(
"Failed fetching source migration information from %s: %s" %
(self.cfg.GetNodeName(self.source_node_uuid), result.fail_msg))
self.migration_info = result.payload
abort_results = self._AbortMigration()
if abort_results[0].fail_msg or abort_results[1].fail_msg:
raise errors.OpExecError(
"Instance migration cleanup failed: %s" % ','.join([
abort_results[0].fail_msg, abort_results[1].fail_msg]))
# AbortMigration() should have fixed instance locations, so query again
instance_locations = self._FindInstanceLocations(self.instance.name)
runningon_source = self.source_node_uuid in instance_locations
runningon_target = self.target_node_uuid in instance_locations
# Abort didn't work, manual intervention required
if runningon_source and runningon_target:
raise errors.OpExecError("Instance seems to be running on two nodes,"
" or the hypervisor is confused; you will have"
" to ensure manually that it runs only on one"
" and restart this operation")
if not (runningon_source or runningon_target):
if len(instance_locations) == 1:
# The instance is running on a different node than expected, let's
# adopt it as if it was running on the secondary
self.target_node_uuid = instance_locations[0]
self.feedback_fn("* instance running on unexpected node (%s),"
" updating as the new secondary" %
self.cfg.GetNodeName(self.target_node_uuid))
runningon_target = True
else:
raise errors.OpExecError("Instance does not seem to be running at all;"
" in this case it's safer to repair by"
" running 'gnt-instance stop' to ensure disk"
" shutdown, and then restarting it")
if runningon_target:
# the migration has actually succeeded, we need to update the config
self.feedback_fn("* instance running on secondary node (%s),"
" updating config" %
self.cfg.GetNodeName(self.target_node_uuid))
self.cfg.SetInstancePrimaryNode(self.instance.uuid,
self.target_node_uuid)
demoted_node_uuid = self.source_node_uuid
else:
self.feedback_fn("* instance confirmed to be running on its"
" primary node (%s)" %
self.cfg.GetNodeName(self.source_node_uuid))
demoted_node_uuid = self.target_node_uuid
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
# TODO: Cleanup code duplication of _RevertDiskStatus()
self._CloseInstanceDisks(demoted_node_uuid)
if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
try:
self._WaitUntilSync()
except errors.OpExecError:
# we ignore errors here, since if the device is standalone, it
# won't be able to sync
pass
self._GoStandalone()
self._GoReconnect(False)
self._WaitUntilSync()
elif utils.AnyDiskOfType(disks, constants.DTS_EXT_MIRROR):
self._OpenInstanceDisks(self.instance.primary_node, True)
self.feedback_fn("* done")
def _RevertDiskStatus(self):
"""Try to revert the disk status after a failed migration.
"""
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
self._CloseInstanceDisks(self.target_node_uuid)
unmap_types = (constants.DT_RBD, constants.DT_EXT)
if utils.AnyDiskOfType(disks, unmap_types):
# If the instance's disk template is `rbd' or `ext' and there was an
# unsuccessful migration, unmap the device from the target node.
unmap_disks = [d for d in disks if d.dev_type in unmap_types]
disks = ExpandCheckDisks(unmap_disks, unmap_disks)
self.feedback_fn("* unmapping instance's disks %s from %s" %
(utils.CommaJoin(d.name for d in unmap_disks),
self.cfg.GetNodeName(self.target_node_uuid)))
for disk in disks:
result = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
(disk, self.instance))
msg = result.fail_msg
if msg:
logging.error("Migration failed and I couldn't unmap the block device"
" %s on target node %s: %s", disk.iv_name,
self.cfg.GetNodeName(self.target_node_uuid), msg)
logging.error("You need to unmap the device %s manually on %s",
disk.iv_name,
self.cfg.GetNodeName(self.target_node_uuid))
if utils.AllDiskOfType(disks, constants.DTS_EXT_MIRROR):
self._OpenInstanceDisks(self.source_node_uuid, True)
return
try:
self._GoStandalone()
self._GoReconnect(False)
self._WaitUntilSync()
except errors.OpExecError, err:
self.lu.LogWarning("Migration failed and I can't reconnect the drives,"
" please try to recover the instance manually;"
" error '%s'" % str(err))
def _AbortMigration(self):
"""Call the hypervisor code to abort a started migration.
Returns:
tuple of rpc call results
"""
src_result = self.rpc.call_instance_finalize_migration_dst(
self.target_node_uuid, self.instance, self.migration_info, False)
abort_msg = src_result.fail_msg
if abort_msg:
logging.error("Aborting migration failed on target node %s: %s",
self.cfg.GetNodeName(self.target_node_uuid), abort_msg)
# Don't raise an exception here, as we still have to try to revert the
# disk status, even if this step failed.
dst_result = self.rpc.call_instance_finalize_migration_src(
self.source_node_uuid, self.instance, False, self.live)
abort_msg = dst_result.fail_msg
if abort_msg:
logging.error("Aborting migration failed on source node %s: %s",
self.cfg.GetNodeName(self.source_node_uuid), abort_msg)
return src_result, dst_result
def _ExecMigration(self):
"""Migrate an instance.
The migrate is done by:
- change the disks into dual-master mode
- wait until disks are fully synchronized again
- migrate the instance
- change disks on the new secondary node (the old primary) to secondary
- wait until disks are fully synchronized
- change disks into single-master mode
"""
# Check for hypervisor version mismatch and warn the user.
hvspecs = [(self.instance.hypervisor,
self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])]
nodeinfo = self.rpc.call_node_info(
[self.source_node_uuid, self.target_node_uuid], None, hvspecs)
for ninfo in nodeinfo.values():
ninfo.Raise("Unable to retrieve node information from node '%s'" %
ninfo.node)
(_, _, (src_info, )) = nodeinfo[self.source_node_uuid].payload
(_, _, (dst_info, )) = nodeinfo[self.target_node_uuid].payload
if ((constants.HV_NODEINFO_KEY_VERSION in src_info) and
(constants.HV_NODEINFO_KEY_VERSION in dst_info)):
src_version = src_info[constants.HV_NODEINFO_KEY_VERSION]
dst_version = dst_info[constants.HV_NODEINFO_KEY_VERSION]
if src_version != dst_version:
self.feedback_fn("* warning: hypervisor version mismatch between"
" source (%s) and target (%s) node" %
(src_version, dst_version))
hv = hypervisor.GetHypervisorClass(self.instance.hypervisor)
if hv.VersionsSafeForMigration(src_version, dst_version):
self.feedback_fn(" migrating from hypervisor version %s to %s should"
" be safe" % (src_version, dst_version))
else:
self.feedback_fn(" migrating from hypervisor version %s to %s is"
" likely unsupported" % (src_version, dst_version))
if self.ignore_hvversions:
self.feedback_fn(" continuing anyway (told to ignore version"
" mismatch)")
else:
raise errors.OpExecError("Unsupported migration between hypervisor"
" versions (%s to %s)" %
(src_version, dst_version))
self.feedback_fn("* checking disk consistency between source and target")
for (idx, dev) in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)):
if not CheckDiskConsistency(self.lu, self.instance, dev,
self.target_node_uuid,
False):
raise errors.OpExecError("Disk %s is degraded or not fully"
" synchronized on target node,"
" aborting migration" % idx)
if self.current_mem > self.tgt_free_mem:
if not self.allow_runtime_changes:
raise errors.OpExecError("Memory ballooning not allowed and not enough"
" free memory to fit instance %s on target"
" node %s (have %dMB, need %dMB)" %
(self.instance.name,
self.cfg.GetNodeName(self.target_node_uuid),
self.tgt_free_mem, self.current_mem))
self.feedback_fn("* setting instance memory to %s" % self.tgt_free_mem)
rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
self.instance,
self.tgt_free_mem)
rpcres.Raise("Cannot modify instance runtime memory")
# First get the migration information from the remote node
result = self.rpc.call_migration_info(self.source_node_uuid, self.instance)
msg = result.fail_msg
if msg:
log_err = ("Failed fetching source migration information from %s: %s" %
(self.cfg.GetNodeName(self.source_node_uuid), msg))
logging.error(log_err)
raise errors.OpExecError(log_err)
self.migration_info = migration_info = result.payload
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
self._CloseInstanceDisks(self.target_node_uuid)
if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
# Then switch the disks to master/master mode
self._GoStandalone()
self._GoReconnect(True)
self._WaitUntilSync()
self._OpenInstanceDisks(self.source_node_uuid, False)
self._OpenInstanceDisks(self.target_node_uuid, False)
self.feedback_fn("* preparing %s to accept the instance" %
self.cfg.GetNodeName(self.target_node_uuid))
result = self.rpc.call_accept_instance(self.target_node_uuid,
self.instance,
migration_info,
self.nodes_ip[self.target_node_uuid])
msg = result.fail_msg
if msg:
logging.error("Instance pre-migration failed, trying to revert"
" disk status: %s", msg)
self.feedback_fn("Pre-migration failed, aborting")
self._AbortMigration()
self._RevertDiskStatus()
raise errors.OpExecError("Could not pre-migrate instance %s: %s" %
(self.instance.name, msg))
self.feedback_fn("* migrating instance to %s" %
self.cfg.GetNodeName(self.target_node_uuid))
cluster = self.cfg.GetClusterInfo()
result = self.rpc.call_instance_migrate(
self.source_node_uuid, cluster.cluster_name, self.instance,
self.nodes_ip[self.target_node_uuid], self.live)
msg = result.fail_msg
if msg:
logging.error("Instance migration failed, trying to revert"
" disk status: %s", msg)
self.feedback_fn("Migration failed, aborting")
self._AbortMigration()
self._RevertDiskStatus()
raise errors.OpExecError("Could not migrate instance %s: %s" %
(self.instance.name, msg))
self.feedback_fn("* starting memory transfer")
last_feedback = time.time()
cluster_migration_caps = \
cluster.hvparams.get("kvm", {}).get(constants.HV_KVM_MIGRATION_CAPS, "")
migration_caps = \
self.instance.hvparams.get(constants.HV_KVM_MIGRATION_CAPS,
cluster_migration_caps)
# migration_caps is a ':' delimited string, so checking
# if 'postcopy-ram' is a substring also covers using
# x-postcopy-ram for QEMU 2.5
postcopy_enabled = "postcopy-ram" in migration_caps
while True:
result = self.rpc.call_instance_get_migration_status(
self.source_node_uuid, self.instance)
msg = result.fail_msg
ms = result.payload # MigrationStatus instance
if msg or (ms.status in constants.HV_MIGRATION_FAILED_STATUSES):
logging.error("Instance migration failed, trying to revert"
" disk status: %s", msg)
self.feedback_fn("Migration failed, aborting")
self._AbortMigration()
self._RevertDiskStatus()
if not msg:
msg = "hypervisor returned failure"
raise errors.OpExecError("Could not migrate instance %s: %s" %
(self.instance.name, msg))
if (postcopy_enabled
and ms.status == constants.HV_MIGRATION_ACTIVE
and int(ms.dirty_sync_count) >= self._POSTCOPY_SYNC_COUNT_THRESHOLD):
self.feedback_fn("* finishing memory transfer with postcopy")
self.rpc.call_instance_start_postcopy(self.source_node_uuid,
self.instance)
if self.instance.hypervisor == 'kvm':
migration_active = \
ms.status in constants.HV_KVM_MIGRATION_ACTIVE_STATUSES
else:
migration_active = \
ms.status == constants.HV_MIGRATION_ACTIVE
if not migration_active:
self.feedback_fn("* memory transfer complete")
break
if (utils.TimeoutExpired(last_feedback,
self._MIGRATION_FEEDBACK_INTERVAL) and
ms.transferred_ram is not None):
mem_progress = 100 * float(ms.transferred_ram) / float(ms.total_ram)
self.feedback_fn("* memory transfer progress: %.2f %%" % mem_progress)
last_feedback = time.time()
time.sleep(self._MIGRATION_POLL_INTERVAL)
# Always call finalize on both source and target, they should compose
# a single operation, consisting of (potentially) parallel steps, that
# should always be attempted/retried together (like in _AbortMigration)
# without setting any expectations about the order in which they execute.
result_src = self.rpc.call_instance_finalize_migration_src(
self.source_node_uuid, self.instance, True, self.live)
result_dst = self.rpc.call_instance_finalize_migration_dst(
self.target_node_uuid, self.instance, migration_info, True)
err_msg = []
if result_src.fail_msg:
logging.error("Instance migration succeeded, but finalization failed"
" on the source node: %s", result_src.fail_msg)
err_msg.append(self.cfg.GetNodeName(self.source_node_uuid) + ': '
+ result_src.fail_msg)
if result_dst.fail_msg:
logging.error("Instance migration succeeded, but finalization failed"
" on the target node: %s", result_dst.fail_msg)
err_msg.append(self.cfg.GetNodeName(self.target_node_uuid) + ': '
+ result_dst.fail_msg)
if err_msg:
raise errors.OpExecError(
"Could not finalize instance migration: %s" % ' '.join(err_msg))
# Update instance location only after finalize completed. This way, if
# either finalize fails, the config still stores the old primary location,
# so we can know which instance to delete if we need to (manually) clean up.
self.cfg.SetInstancePrimaryNode(self.instance.uuid, self.target_node_uuid)
self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
self._CloseInstanceDisks(self.source_node_uuid)
disks = self.cfg.GetInstanceDisks(self.instance_uuid)
if utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR):
self._WaitUntilSync()
self._GoStandalone()
self._GoReconnect(False)
self._WaitUntilSync()
elif utils.AnyDiskOfType(disks, constants.DTS_EXT_MIRROR):
self._OpenInstanceDisks(self.target_node_uuid, True)
# If the instance's disk template is `rbd' or `ext' and there was a
# successful migration, unmap the device from the source node.
unmap_types = (constants.DT_RBD, constants.DT_EXT)
if utils.AnyDiskOfType(disks, unmap_types):
unmap_disks = [d for d in disks if d.dev_type in unmap_types]
disks = ExpandCheckDisks(unmap_disks, unmap_disks)
self.feedback_fn("* unmapping instance's disks %s from %s" %
(utils.CommaJoin(d.name for d in unmap_disks),
self.cfg.GetNodeName(self.source_node_uuid)))
for disk in disks:
result = self.rpc.call_blockdev_shutdown(self.source_node_uuid,
(disk, self.instance))
msg = result.fail_msg
if msg:
logging.error("Migration was successful, but couldn't unmap the"
" block device %s on source node %s: %s",
disk.iv_name,
self.cfg.GetNodeName(self.source_node_uuid), msg)
logging.error("You need to unmap the device %s manually on %s",
disk.iv_name,
self.cfg.GetNodeName(self.source_node_uuid))
self.feedback_fn("* done")
def _ExecFailover(self):
"""Failover an instance.
The failover is done by shutting it down on its present node and
starting it on the secondary.
"""
if self.instance.forthcoming:
self.feedback_fn("Instance is forthcoming, just updating the"
" configuration")
self.cfg.SetInstancePrimaryNode(self.instance.uuid,
self.target_node_uuid)
return
primary_node = self.cfg.GetNodeInfo(self.instance.primary_node)
source_node_uuid = self.instance.primary_node
if self.instance.disks_active:
self.feedback_fn("* checking disk consistency between source and target")
inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
for (idx, dev) in enumerate(inst_disks):
# for drbd, these are drbd over lvm
if not CheckDiskConsistency(self.lu, self.instance, dev,
self.target_node_uuid, False):
if primary_node.offline:
self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
" target node %s" %
(primary_node.name, idx,
self.cfg.GetNodeName(self.target_node_uuid)))
elif not self.ignore_consistency:
raise errors.OpExecError("Disk %s is degraded on target node,"
" aborting failover" % idx)
else:
self.feedback_fn("* not checking disk consistency as instance is not"
" running")
self.feedback_fn("* shutting down instance on source node")
logging.info("Shutting down instance %s on node %s",
self.instance.name, self.cfg.GetNodeName(source_node_uuid))
result = self.rpc.call_instance_shutdown(source_node_uuid, self.instance,
self.shutdown_timeout,
self.lu.op.reason)
msg = result.fail_msg
if msg:
if self.ignore_consistency or primary_node.offline:
self.lu.LogWarning("Could not shutdown instance %s on node %s,"
" proceeding anyway; please make sure node"
" %s is down; error details: %s",
self.instance.name,
self.cfg.GetNodeName(source_node_uuid),
self.cfg.GetNodeName(source_node_uuid), msg)
else:
raise errors.OpExecError("Could not shutdown instance %s on"
" node %s: %s" %
(self.instance.name,
self.cfg.GetNodeName(source_node_uuid), msg))
disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
if disk_template in constants.DTS_EXT_MIRROR:
self._CloseInstanceDisks(source_node_uuid)
self.feedback_fn("* deactivating the instance's disks on source node")
if not ShutdownInstanceDisks(self.lu, self.instance, ignore_primary=True):
raise errors.OpExecError("Can't shut down the instance's disks")
self.cfg.SetInstancePrimaryNode(self.instance.uuid, self.target_node_uuid)
self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
# Only start the instance if it's marked as up
if self.instance.admin_state == constants.ADMINST_UP:
self.feedback_fn("* activating the instance's disks on target node %s" %
self.cfg.GetNodeName(self.target_node_uuid))
logging.info("Starting instance %s on node %s", self.instance.name,
self.cfg.GetNodeName(self.target_node_uuid))
disks_ok, _, _ = AssembleInstanceDisks(self.lu, self.instance,
ignore_secondaries=True)
if not disks_ok:
ShutdownInstanceDisks(self.lu, self.instance)
raise errors.OpExecError("Can't activate the instance's disks")
self.feedback_fn("* starting the instance on the target node %s" %
self.cfg.GetNodeName(self.target_node_uuid))
result = self.rpc.call_instance_start(self.target_node_uuid,
(self.instance, None, None), False,
self.lu.op.reason)
msg = result.fail_msg
if msg:
ShutdownInstanceDisks(self.lu, self.instance)
raise errors.OpExecError("Could not start instance %s on node %s: %s" %
(self.instance.name,
self.cfg.GetNodeName(self.target_node_uuid),
msg))
def Exec(self, feedback_fn):
"""Perform the migration.
"""
self.feedback_fn = feedback_fn
self.source_node_uuid = self.instance.primary_node
# FIXME: if we implement migrate-to-any in DRBD, this needs fixing
disks = self.cfg.GetInstanceDisks(self.instance.uuid)
# TODO allow mixed disks
if utils.AllDiskOfType(disks, constants.DTS_INT_MIRROR):
secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
self.target_node_uuid = secondary_nodes[0]
# Otherwise self.target_node has been populated either
# directly, or through an iallocator.
self.all_node_uuids = [self.source_node_uuid, self.target_node_uuid]
self.nodes_ip = dict((uuid, node.secondary_ip) for (uuid, node)
in self.cfg.GetMultiNodeInfo(self.all_node_uuids))
if self.failover:
feedback_fn("Failover instance %s" % self.instance.name)
self._ExecFailover()
else:
feedback_fn("Migrating instance %s" % self.instance.name)
if self.cleanup:
return self._ExecCleanup()
else:
return self._ExecMigration()
| bsd-2-clause |
robobrobro/ballin-octo-shame | lib/Python-3.4.3/Lib/idlelib/EditorWindow.py | 9 | 66988 | import importlib
import importlib.abc
import importlib.util
import os
import platform
import re
import string
import sys
from tkinter import *
import tkinter.simpledialog as tkSimpleDialog
import tkinter.messagebox as tkMessageBox
import traceback
import webbrowser
from idlelib.MultiCall import MultiCallCreator
from idlelib import idlever
from idlelib import WindowList
from idlelib import SearchDialog
from idlelib import GrepDialog
from idlelib import ReplaceDialog
from idlelib import PyParse
from idlelib.configHandler import idleConf
from idlelib import aboutDialog, textView, configDialog
from idlelib import macosxSupport
# The default tab setting for a Text widget, in average-width characters.
TK_TABWIDTH_DEFAULT = 8
_py_version = ' (%s)' % platform.python_version()
def _sphinx_version():
"Format sys.version_info to produce the Sphinx version string used to install the chm docs"
major, minor, micro, level, serial = sys.version_info
release = '%s%s' % (major, minor)
release += '%s' % (micro,)
if level == 'candidate':
release += 'rc%s' % (serial,)
elif level != 'final':
release += '%s%s' % (level[0], serial)
return release
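# As a rough illustration of the format above (values assumed, not taken from
# a live interpreter): a sys.version_info of (3, 4, 3, 'final', 0) would map
# to '343', while (3, 5, 0, 'candidate', 1) would map to '350rc1'.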
class HelpDialog(object):
def __init__(self):
self.parent = None # parent of help window
        self.dlg = None  # the help window itself
def display(self, parent, near=None):
""" Display the help dialog.
parent - parent widget for the help window
near - a Toplevel widget (e.g. EditorWindow or PyShell)
to use as a reference for placing the help window
"""
if self.dlg is None:
self.show_dialog(parent)
if near:
self.nearwindow(near)
def show_dialog(self, parent):
self.parent = parent
fn=os.path.join(os.path.abspath(os.path.dirname(__file__)),'help.txt')
self.dlg = dlg = textView.view_file(parent,'Help',fn, modal=False)
dlg.bind('<Destroy>', self.destroy, '+')
def nearwindow(self, near):
        # Place the help dialog near the window given by the 'near' argument.
# Note - this may not reposition the window in Metacity
# if "/apps/metacity/general/disable_workarounds" is enabled
dlg = self.dlg
geom = (near.winfo_rootx() + 10, near.winfo_rooty() + 10)
dlg.withdraw()
dlg.geometry("=+%d+%d" % geom)
dlg.deiconify()
dlg.lift()
def destroy(self, ev=None):
self.dlg = None
self.parent = None
helpDialog = HelpDialog() # singleton instance
def _help_dialog(parent): # wrapper for htest
helpDialog.show_dialog(parent)
class EditorWindow(object):
from idlelib.Percolator import Percolator
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.IOBinding import IOBinding, filesystemencoding, encoding
from idlelib import Bindings
from tkinter import Toplevel
from idlelib.MultiStatusBar import MultiStatusBar
help_url = None
def __init__(self, flist=None, filename=None, key=None, root=None):
if EditorWindow.help_url is None:
dochome = os.path.join(sys.base_prefix, 'Doc', 'index.html')
if sys.platform.count('linux'):
# look for html docs in a couple of standard places
pyver = 'python-docs-' + '%s.%s.%s' % sys.version_info[:3]
if os.path.isdir('/var/www/html/python/'): # "python2" rpm
dochome = '/var/www/html/python/index.html'
else:
basepath = '/usr/share/doc/' # standard location
dochome = os.path.join(basepath, pyver,
'Doc', 'index.html')
elif sys.platform[:3] == 'win':
chmfile = os.path.join(sys.base_prefix, 'Doc',
'Python%s.chm' % _sphinx_version())
if os.path.isfile(chmfile):
dochome = chmfile
elif sys.platform == 'darwin':
# documentation may be stored inside a python framework
dochome = os.path.join(sys.base_prefix,
'Resources/English.lproj/Documentation/index.html')
dochome = os.path.normpath(dochome)
if os.path.isfile(dochome):
EditorWindow.help_url = dochome
if sys.platform == 'darwin':
# Safari requires real file:-URLs
EditorWindow.help_url = 'file://' + EditorWindow.help_url
else:
EditorWindow.help_url = "https://docs.python.org/%d.%d/" % sys.version_info[:2]
currentTheme=idleConf.CurrentTheme()
self.flist = flist
root = root or flist.root
self.root = root
try:
sys.ps1
except AttributeError:
sys.ps1 = '>>> '
self.menubar = Menu(root)
self.top = top = WindowList.ListedToplevel(root, menu=self.menubar)
if flist:
self.tkinter_vars = flist.vars
#self.top.instance_dict makes flist.inversedict available to
#configDialog.py so it can access all EditorWindow instances
self.top.instance_dict = flist.inversedict
else:
self.tkinter_vars = {} # keys: Tkinter event names
# values: Tkinter variable instances
self.top.instance_dict = {}
self.recent_files_path = os.path.join(idleConf.GetUserCfgDir(),
'recent-files.lst')
self.text_frame = text_frame = Frame(top)
self.vbar = vbar = Scrollbar(text_frame, name='vbar')
self.width = idleConf.GetOption('main', 'EditorWindow',
'width', type='int')
text_options = {
'name': 'text',
'padx': 5,
'wrap': 'none',
'width': self.width,
'height': idleConf.GetOption('main', 'EditorWindow',
'height', type='int')}
if TkVersion >= 8.5:
# Starting with tk 8.5 we have to set the new tabstyle option
# to 'wordprocessor' to achieve the same display of tabs as in
# older tk versions.
text_options['tabstyle'] = 'wordprocessor'
self.text = text = MultiCallCreator(Text)(text_frame, **text_options)
self.top.focused_widget = self.text
self.createmenubar()
self.apply_bindings()
self.top.protocol("WM_DELETE_WINDOW", self.close)
self.top.bind("<<close-window>>", self.close_event)
if macosxSupport.isAquaTk():
# Command-W on editorwindows doesn't work without this.
text.bind('<<close-window>>', self.close_event)
# Some OS X systems have only one mouse button,
# so use control-click for pulldown menus there.
# (Note, AquaTk defines <2> as the right button if
# present and the Tk Text widget already binds <2>.)
text.bind("<Control-Button-1>",self.right_menu_event)
else:
# Elsewhere, use right-click for pulldown menus.
text.bind("<3>",self.right_menu_event)
text.bind("<<cut>>", self.cut)
text.bind("<<copy>>", self.copy)
text.bind("<<paste>>", self.paste)
text.bind("<<center-insert>>", self.center_insert_event)
text.bind("<<help>>", self.help_dialog)
text.bind("<<python-docs>>", self.python_docs)
text.bind("<<about-idle>>", self.about_dialog)
text.bind("<<open-config-dialog>>", self.config_dialog)
text.bind("<<open-config-extensions-dialog>>",
self.config_extensions_dialog)
text.bind("<<open-module>>", self.open_module)
text.bind("<<do-nothing>>", lambda event: "break")
text.bind("<<select-all>>", self.select_all)
text.bind("<<remove-selection>>", self.remove_selection)
text.bind("<<find>>", self.find_event)
text.bind("<<find-again>>", self.find_again_event)
text.bind("<<find-in-files>>", self.find_in_files_event)
text.bind("<<find-selection>>", self.find_selection_event)
text.bind("<<replace>>", self.replace_event)
text.bind("<<goto-line>>", self.goto_line_event)
text.bind("<<smart-backspace>>",self.smart_backspace_event)
text.bind("<<newline-and-indent>>",self.newline_and_indent_event)
text.bind("<<smart-indent>>",self.smart_indent_event)
text.bind("<<indent-region>>",self.indent_region_event)
text.bind("<<dedent-region>>",self.dedent_region_event)
text.bind("<<comment-region>>",self.comment_region_event)
text.bind("<<uncomment-region>>",self.uncomment_region_event)
text.bind("<<tabify-region>>",self.tabify_region_event)
text.bind("<<untabify-region>>",self.untabify_region_event)
text.bind("<<toggle-tabs>>",self.toggle_tabs_event)
text.bind("<<change-indentwidth>>",self.change_indentwidth_event)
text.bind("<Left>", self.move_at_edge_if_selection(0))
text.bind("<Right>", self.move_at_edge_if_selection(1))
text.bind("<<del-word-left>>", self.del_word_left)
text.bind("<<del-word-right>>", self.del_word_right)
text.bind("<<beginning-of-line>>", self.home_callback)
if flist:
flist.inversedict[self] = key
if key:
flist.dict[key] = self
text.bind("<<open-new-window>>", self.new_callback)
text.bind("<<close-all-windows>>", self.flist.close_all_callback)
text.bind("<<open-class-browser>>", self.open_class_browser)
text.bind("<<open-path-browser>>", self.open_path_browser)
text.bind("<<open-turtle-demo>>", self.open_turtle_demo)
self.set_status_bar()
vbar['command'] = text.yview
vbar.pack(side=RIGHT, fill=Y)
text['yscrollcommand'] = vbar.set
fontWeight = 'normal'
if idleConf.GetOption('main', 'EditorWindow', 'font-bold', type='bool'):
fontWeight='bold'
text.config(font=(idleConf.GetOption('main', 'EditorWindow', 'font'),
idleConf.GetOption('main', 'EditorWindow',
'font-size', type='int'),
fontWeight))
text_frame.pack(side=LEFT, fill=BOTH, expand=1)
text.pack(side=TOP, fill=BOTH, expand=1)
text.focus_set()
# usetabs true -> literal tab characters are used by indent and
# dedent cmds, possibly mixed with spaces if
# indentwidth is not a multiple of tabwidth,
# which will cause Tabnanny to nag!
# false -> tab characters are converted to spaces by indent
# and dedent cmds, and ditto TAB keystrokes
# Although use-spaces=0 can be configured manually in config-main.def,
# configuration of tabs v. spaces is not supported in the configuration
# dialog. IDLE promotes the preferred Python indentation: use spaces!
usespaces = idleConf.GetOption('main', 'Indent',
'use-spaces', type='bool')
self.usetabs = not usespaces
# tabwidth is the display width of a literal tab character.
# CAUTION: telling Tk to use anything other than its default
# tab setting causes it to use an entirely different tabbing algorithm,
# treating tab stops as fixed distances from the left margin.
# Nobody expects this, so for now tabwidth should never be changed.
self.tabwidth = 8 # must remain 8 until Tk is fixed.
# indentwidth is the number of screen characters per indent level.
# The recommended Python indentation is four spaces.
self.indentwidth = self.tabwidth
self.set_notabs_indentwidth()
# If context_use_ps1 is true, parsing searches back for a ps1 line;
# else searches for a popular (if, def, ...) Python stmt.
self.context_use_ps1 = False
# When searching backwards for a reliable place to begin parsing,
# first start num_context_lines[0] lines back, then
# num_context_lines[1] lines back if that didn't work, and so on.
# The last value should be huge (larger than the # of lines in a
# conceivable file).
# Making the initial values larger slows things down more often.
self.num_context_lines = 50, 500, 5000000
self.per = per = self.Percolator(text)
self.undo = undo = self.UndoDelegator()
per.insertfilter(undo)
text.undo_block_start = undo.undo_block_start
text.undo_block_stop = undo.undo_block_stop
undo.set_saved_change_hook(self.saved_change_hook)
# IOBinding implements file I/O and printing functionality
self.io = io = self.IOBinding(self)
io.set_filename_change_hook(self.filename_change_hook)
self.good_load = False
self.set_indentation_params(False)
self.color = None # initialized below in self.ResetColorizer
if filename:
if os.path.exists(filename) and not os.path.isdir(filename):
if io.loadfile(filename):
self.good_load = True
is_py_src = self.ispythonsource(filename)
self.set_indentation_params(is_py_src)
else:
io.set_filename(filename)
self.good_load = True
self.ResetColorizer()
self.saved_change_hook()
self.update_recent_files_list()
self.load_extensions()
menu = self.menudict.get('windows')
if menu:
end = menu.index("end")
if end is None:
end = -1
if end >= 0:
menu.add_separator()
end = end + 1
self.wmenu_end = end
WindowList.register_callback(self.postwindowsmenu)
# Some abstractions so IDLE extensions are cross-IDE
self.askyesno = tkMessageBox.askyesno
self.askinteger = tkSimpleDialog.askinteger
self.showerror = tkMessageBox.showerror
self._highlight_workaround() # Fix selection tags on Windows
def _highlight_workaround(self):
# On Windows, Tk removes painting of the selection
# tags which is different behavior than on Linux and Mac.
# See issue14146 for more information.
if not sys.platform.startswith('win'):
return
text = self.text
text.event_add("<<Highlight-FocusOut>>", "<FocusOut>")
text.event_add("<<Highlight-FocusIn>>", "<FocusIn>")
def highlight_fix(focus):
sel_range = text.tag_ranges("sel")
if sel_range:
if focus == 'out':
HILITE_CONFIG = idleConf.GetHighlight(
idleConf.CurrentTheme(), 'hilite')
text.tag_config("sel_fix", HILITE_CONFIG)
text.tag_raise("sel_fix")
text.tag_add("sel_fix", *sel_range)
elif focus == 'in':
text.tag_remove("sel_fix", "1.0", "end")
text.bind("<<Highlight-FocusOut>>",
lambda ev: highlight_fix("out"))
text.bind("<<Highlight-FocusIn>>",
lambda ev: highlight_fix("in"))
def _filename_to_unicode(self, filename):
"""convert filename to unicode in order to display it in Tk"""
if isinstance(filename, str) or not filename:
return filename
else:
try:
return filename.decode(self.filesystemencoding)
except UnicodeDecodeError:
# XXX
try:
return filename.decode(self.encoding)
except UnicodeDecodeError:
# byte-to-byte conversion
return filename.decode('iso8859-1')
def new_callback(self, event):
dirname, basename = self.io.defaultfilename()
self.flist.new(dirname)
return "break"
def home_callback(self, event):
if (event.state & 4) != 0 and event.keysym == "Home":
# state&4==Control. If <Control-Home>, use the Tk binding.
return
if self.text.index("iomark") and \
self.text.compare("iomark", "<=", "insert lineend") and \
self.text.compare("insert linestart", "<=", "iomark"):
# In Shell on input line, go to just after prompt
insertpt = int(self.text.index("iomark").split(".")[1])
else:
line = self.text.get("insert linestart", "insert lineend")
for insertpt in range(len(line)):
if line[insertpt] not in (' ','\t'):
break
else:
insertpt=len(line)
lineat = int(self.text.index("insert").split('.')[1])
if insertpt == lineat:
insertpt = 0
dest = "insert linestart+"+str(insertpt)+"c"
if (event.state&1) == 0:
# shift was not pressed
self.text.tag_remove("sel", "1.0", "end")
else:
if not self.text.index("sel.first"):
# there was no previous selection
self.text.mark_set("my_anchor", "insert")
else:
if self.text.compare(self.text.index("sel.first"), "<",
self.text.index("insert")):
self.text.mark_set("my_anchor", "sel.first") # extend back
else:
self.text.mark_set("my_anchor", "sel.last") # extend forward
first = self.text.index(dest)
last = self.text.index("my_anchor")
if self.text.compare(first,">",last):
first,last = last,first
self.text.tag_remove("sel", "1.0", "end")
self.text.tag_add("sel", first, last)
self.text.mark_set("insert", dest)
self.text.see("insert")
return "break"
def set_status_bar(self):
self.status_bar = self.MultiStatusBar(self.top)
if sys.platform == "darwin":
# Insert some padding to avoid obscuring some of the statusbar
# by the resize widget.
self.status_bar.set_label('_padding1', ' ', side=RIGHT)
self.status_bar.set_label('column', 'Col: ?', side=RIGHT)
self.status_bar.set_label('line', 'Ln: ?', side=RIGHT)
self.status_bar.pack(side=BOTTOM, fill=X)
self.text.bind("<<set-line-and-column>>", self.set_line_and_column)
self.text.event_add("<<set-line-and-column>>",
"<KeyRelease>", "<ButtonRelease>")
self.text.after_idle(self.set_line_and_column)
def set_line_and_column(self, event=None):
line, column = self.text.index(INSERT).split('.')
self.status_bar.set_label('column', 'Col: %s' % column)
self.status_bar.set_label('line', 'Ln: %s' % line)
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("format", "F_ormat"),
("run", "_Run"),
("options", "_Options"),
("windows", "_Window"),
("help", "_Help"),
]
def createmenubar(self):
mbar = self.menubar
self.menudict = menudict = {}
for name, label in self.menu_specs:
underline, label = prepstr(label)
menudict[name] = menu = Menu(mbar, name=name)
mbar.add_cascade(label=label, menu=menu, underline=underline)
if macosxSupport.isCarbonTk():
# Insert the application menu
menudict['application'] = menu = Menu(mbar, name='apple')
mbar.add_cascade(label='IDLE', menu=menu)
self.fill_menus()
self.recent_files_menu = Menu(self.menubar)
self.menudict['file'].insert_cascade(3, label='Recent Files',
underline=0,
menu=self.recent_files_menu)
self.base_helpmenu_length = self.menudict['help'].index(END)
self.reset_help_menu_entries()
def postwindowsmenu(self):
# Only called when Windows menu exists
menu = self.menudict['windows']
end = menu.index("end")
if end is None:
end = -1
if end > self.wmenu_end:
menu.delete(self.wmenu_end+1, end)
WindowList.add_windows_to_menu(menu)
rmenu = None
def right_menu_event(self, event):
self.text.mark_set("insert", "@%d,%d" % (event.x, event.y))
if not self.rmenu:
self.make_rmenu()
rmenu = self.rmenu
self.event = event
iswin = sys.platform[:3] == 'win'
if iswin:
self.text.config(cursor="arrow")
for item in self.rmenu_specs:
try:
label, eventname, verify_state = item
except ValueError: # see issue1207589
continue
if verify_state is None:
continue
state = getattr(self, verify_state)()
rmenu.entryconfigure(label, state=state)
rmenu.tk_popup(event.x_root, event.y_root)
if iswin:
self.text.config(cursor="ibeam")
rmenu_specs = [
# ("Label", "<<virtual-event>>", "statefuncname"), ...
("Close", "<<close-window>>", None), # Example
]
def make_rmenu(self):
rmenu = Menu(self.text, tearoff=0)
for item in self.rmenu_specs:
label, eventname = item[0], item[1]
if label is not None:
def command(text=self.text, eventname=eventname):
text.event_generate(eventname)
rmenu.add_command(label=label, command=command)
else:
rmenu.add_separator()
self.rmenu = rmenu
def rmenu_check_cut(self):
return self.rmenu_check_copy()
def rmenu_check_copy(self):
try:
indx = self.text.index('sel.first')
except TclError:
return 'disabled'
else:
return 'normal' if indx else 'disabled'
def rmenu_check_paste(self):
try:
self.text.tk.call('tk::GetSelection', self.text, 'CLIPBOARD')
except TclError:
return 'disabled'
else:
return 'normal'
def about_dialog(self, event=None):
aboutDialog.AboutDialog(self.top,'About IDLE')
def config_dialog(self, event=None):
configDialog.ConfigDialog(self.top,'Settings')
def config_extensions_dialog(self, event=None):
configDialog.ConfigExtensionsDialog(self.top)
def help_dialog(self, event=None):
if self.root:
parent = self.root
else:
parent = self.top
helpDialog.display(parent, near=self.top)
def python_docs(self, event=None):
if sys.platform[:3] == 'win':
try:
os.startfile(self.help_url)
except OSError as why:
tkMessageBox.showerror(title='Document Start Failure',
message=str(why), parent=self.text)
else:
webbrowser.open(self.help_url)
return "break"
def cut(self,event):
self.text.event_generate("<<Cut>>")
return "break"
def copy(self,event):
if not self.text.tag_ranges("sel"):
# There is no selection, so do nothing and maybe interrupt.
return
self.text.event_generate("<<Copy>>")
return "break"
def paste(self,event):
self.text.event_generate("<<Paste>>")
self.text.see("insert")
return "break"
def select_all(self, event=None):
self.text.tag_add("sel", "1.0", "end-1c")
self.text.mark_set("insert", "1.0")
self.text.see("insert")
return "break"
def remove_selection(self, event=None):
self.text.tag_remove("sel", "1.0", "end")
self.text.see("insert")
def move_at_edge_if_selection(self, edge_index):
"""Cursor move begins at start or end of selection
When a left/right cursor key is pressed create and return to Tkinter a
function which causes a cursor move from the associated edge of the
selection.
"""
self_text_index = self.text.index
self_text_mark_set = self.text.mark_set
edges_table = ("sel.first+1c", "sel.last-1c")
def move_at_edge(event):
if (event.state & 5) == 0: # no shift(==1) or control(==4) pressed
try:
self_text_index("sel.first")
self_text_mark_set("insert", edges_table[edge_index])
except TclError:
pass
return move_at_edge
def del_word_left(self, event):
self.text.event_generate('<Meta-Delete>')
return "break"
def del_word_right(self, event):
self.text.event_generate('<Meta-d>')
return "break"
def find_event(self, event):
SearchDialog.find(self.text)
return "break"
def find_again_event(self, event):
SearchDialog.find_again(self.text)
return "break"
def find_selection_event(self, event):
SearchDialog.find_selection(self.text)
return "break"
def find_in_files_event(self, event):
GrepDialog.grep(self.text, self.io, self.flist)
return "break"
def replace_event(self, event):
ReplaceDialog.replace(self.text)
return "break"
def goto_line_event(self, event):
text = self.text
lineno = tkSimpleDialog.askinteger("Goto",
"Go to line number:",parent=text)
if lineno is None:
return "break"
if lineno <= 0:
text.bell()
return "break"
text.mark_set("insert", "%d.0" % lineno)
text.see("insert")
def open_module(self, event=None):
# XXX Shouldn't this be in IOBinding?
try:
name = self.text.get("sel.first", "sel.last")
except TclError:
name = ""
else:
name = name.strip()
name = tkSimpleDialog.askstring("Module",
"Enter the name of a Python module\n"
"to search on sys.path and open:",
parent=self.text, initialvalue=name)
if name:
name = name.strip()
if not name:
return
# XXX Ought to insert current file's directory in front of path
try:
spec = importlib.util.find_spec(name)
except (ValueError, ImportError) as msg:
tkMessageBox.showerror("Import error", str(msg), parent=self.text)
return
if spec is None:
tkMessageBox.showerror("Import error", "module not found",
parent=self.text)
return
if not isinstance(spec.loader, importlib.abc.SourceLoader):
tkMessageBox.showerror("Import error", "not a source-based module",
parent=self.text)
return
try:
file_path = spec.loader.get_filename(name)
except AttributeError:
tkMessageBox.showerror("Import error",
"loader does not support get_filename",
parent=self.text)
return
if self.flist:
self.flist.open(file_path)
else:
self.io.loadfile(file_path)
return file_path
def open_class_browser(self, event=None):
filename = self.io.filename
if not (self.__class__.__name__ == 'PyShellEditorWindow'
and filename):
filename = self.open_module()
if filename is None:
return
head, tail = os.path.split(filename)
base, ext = os.path.splitext(tail)
from idlelib import ClassBrowser
ClassBrowser.ClassBrowser(self.flist, base, [head])
def open_path_browser(self, event=None):
from idlelib import PathBrowser
PathBrowser.PathBrowser(self.flist)
def open_turtle_demo(self, event = None):
import subprocess
cmd = [sys.executable,
'-c',
'from turtledemo.__main__ import main; main()']
p = subprocess.Popen(cmd, shell=False)
def gotoline(self, lineno):
if lineno is not None and lineno > 0:
self.text.mark_set("insert", "%d.0" % lineno)
self.text.tag_remove("sel", "1.0", "end")
self.text.tag_add("sel", "insert", "insert +1l")
self.center()
def ispythonsource(self, filename):
if not filename or os.path.isdir(filename):
return True
base, ext = os.path.splitext(os.path.basename(filename))
if os.path.normcase(ext) in (".py", ".pyw"):
return True
line = self.text.get('1.0', '1.0 lineend')
return line.startswith('#!') and 'python' in line
def close_hook(self):
if self.flist:
self.flist.unregister_maybe_terminate(self)
self.flist = None
def set_close_hook(self, close_hook):
self.close_hook = close_hook
def filename_change_hook(self):
if self.flist:
self.flist.filename_changed_edit(self)
self.saved_change_hook()
self.top.update_windowlist_registry(self)
self.ResetColorizer()
def _addcolorizer(self):
if self.color:
return
if self.ispythonsource(self.io.filename):
self.color = self.ColorDelegator()
# can add more colorizers here...
if self.color:
self.per.removefilter(self.undo)
self.per.insertfilter(self.color)
self.per.insertfilter(self.undo)
def _rmcolorizer(self):
if not self.color:
return
self.color.removecolors()
self.per.removefilter(self.color)
self.color = None
def ResetColorizer(self):
"Update the color theme"
# Called from self.filename_change_hook and from configDialog.py
self._rmcolorizer()
self._addcolorizer()
theme = idleConf.GetOption('main','Theme','name')
normal_colors = idleConf.GetHighlight(theme, 'normal')
cursor_color = idleConf.GetHighlight(theme, 'cursor', fgBg='fg')
select_colors = idleConf.GetHighlight(theme, 'hilite')
self.text.config(
foreground=normal_colors['foreground'],
background=normal_colors['background'],
insertbackground=cursor_color,
selectforeground=select_colors['foreground'],
selectbackground=select_colors['background'],
)
IDENTCHARS = string.ascii_letters + string.digits + "_"
def colorize_syntax_error(self, text, pos):
text.tag_add("ERROR", pos)
char = text.get(pos)
if char and char in self.IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
if '\n' == text.get(pos): # error at line end
text.mark_set("insert", pos)
else:
text.mark_set("insert", pos + "+1c")
text.see(pos)
def ResetFont(self):
"Update the text widgets' font if it is changed"
# Called from configDialog.py
fontWeight='normal'
if idleConf.GetOption('main','EditorWindow','font-bold',type='bool'):
fontWeight='bold'
self.text.config(font=(idleConf.GetOption('main','EditorWindow','font'),
idleConf.GetOption('main','EditorWindow','font-size',
type='int'),
fontWeight))
def RemoveKeybindings(self):
"Remove the keybindings before they are changed."
# Called from configDialog.py
self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
for event, keylist in keydefs.items():
self.text.event_delete(event, *keylist)
for extensionName in self.get_standard_extension_names():
xkeydefs = idleConf.GetExtensionBindings(extensionName)
if xkeydefs:
for event, keylist in xkeydefs.items():
self.text.event_delete(event, *keylist)
def ApplyKeybindings(self):
"Update the keybindings after they are changed"
# Called from configDialog.py
self.Bindings.default_keydefs = keydefs = idleConf.GetCurrentKeySet()
self.apply_bindings()
for extensionName in self.get_standard_extension_names():
xkeydefs = idleConf.GetExtensionBindings(extensionName)
if xkeydefs:
self.apply_bindings(xkeydefs)
#update menu accelerators
menuEventDict = {}
for menu in self.Bindings.menudefs:
menuEventDict[menu[0]] = {}
for item in menu[1]:
if item:
menuEventDict[menu[0]][prepstr(item[0])[1]] = item[1]
for menubarItem in self.menudict:
menu = self.menudict[menubarItem]
end = menu.index(END)
if end is None:
# Skip empty menus
continue
end += 1
for index in range(0, end):
if menu.type(index) == 'command':
accel = menu.entrycget(index, 'accelerator')
if accel:
itemName = menu.entrycget(index, 'label')
event = ''
if menubarItem in menuEventDict:
if itemName in menuEventDict[menubarItem]:
event = menuEventDict[menubarItem][itemName]
if event:
accel = get_accelerator(keydefs, event)
menu.entryconfig(index, accelerator=accel)
def set_notabs_indentwidth(self):
"Update the indentwidth if changed and not using tabs in this window"
# Called from configDialog.py
if not self.usetabs:
self.indentwidth = idleConf.GetOption('main', 'Indent','num-spaces',
type='int')
def reset_help_menu_entries(self):
"Update the additional help entries on the Help menu"
help_list = idleConf.GetAllExtraHelpSourcesList()
helpmenu = self.menudict['help']
# first delete the extra help entries, if any
helpmenu_length = helpmenu.index(END)
if helpmenu_length > self.base_helpmenu_length:
helpmenu.delete((self.base_helpmenu_length + 1), helpmenu_length)
# then rebuild them
if help_list:
helpmenu.add_separator()
for entry in help_list:
cmd = self.__extra_help_callback(entry[1])
helpmenu.add_command(label=entry[0], command=cmd)
# and update the menu dictionary
self.menudict['help'] = helpmenu
def __extra_help_callback(self, helpfile):
"Create a callback with the helpfile value frozen at definition time"
def display_extra_help(helpfile=helpfile):
if not helpfile.startswith(('www', 'http')):
helpfile = os.path.normpath(helpfile)
if sys.platform[:3] == 'win':
try:
os.startfile(helpfile)
except OSError as why:
tkMessageBox.showerror(title='Document Start Failure',
message=str(why), parent=self.text)
else:
webbrowser.open(helpfile)
return display_extra_help
def update_recent_files_list(self, new_file=None):
"Load and update the recent files list and menus"
rf_list = []
if os.path.exists(self.recent_files_path):
with open(self.recent_files_path, 'r',
encoding='utf_8', errors='replace') as rf_list_file:
rf_list = rf_list_file.readlines()
if new_file:
new_file = os.path.abspath(new_file) + '\n'
if new_file in rf_list:
rf_list.remove(new_file) # move to top
rf_list.insert(0, new_file)
# clean and save the recent files list
bad_paths = []
for path in rf_list:
if '\0' in path or not os.path.exists(path[0:-1]):
bad_paths.append(path)
rf_list = [path for path in rf_list if path not in bad_paths]
ulchars = "1234567890ABCDEFGHIJK"
rf_list = rf_list[0:len(ulchars)]
try:
with open(self.recent_files_path, 'w',
encoding='utf_8', errors='replace') as rf_file:
rf_file.writelines(rf_list)
except OSError as err:
if not getattr(self.root, "recentfilelist_error_displayed", False):
self.root.recentfilelist_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update Recent Files list:\n%s'
% str(err),
parent=self.text)
# for each edit window instance, construct the recent files menu
for instance in self.top.instance_dict:
menu = instance.recent_files_menu
menu.delete(0, END) # clear, and rebuild:
for i, file_name in enumerate(rf_list):
file_name = file_name.rstrip() # zap \n
# make unicode string to display non-ASCII chars correctly
ufile_name = self._filename_to_unicode(file_name)
callback = instance.__recent_file_callback(file_name)
menu.add_command(label=ulchars[i] + " " + ufile_name,
command=callback,
underline=0)
def __recent_file_callback(self, file_name):
def open_recent_file(fn_closure=file_name):
self.io.open(editFile=fn_closure)
return open_recent_file
def saved_change_hook(self):
short = self.short_title()
long = self.long_title()
if short and long:
title = short + " - " + long + _py_version
elif short:
title = short
elif long:
title = long
else:
title = "Untitled"
icon = short or long or title
if not self.get_saved():
title = "*%s*" % title
icon = "*%s" % icon
self.top.wm_title(title)
self.top.wm_iconname(icon)
def get_saved(self):
return self.undo.get_saved()
def set_saved(self, flag):
self.undo.set_saved(flag)
def reset_undo(self):
self.undo.reset_undo()
def short_title(self):
filename = self.io.filename
if filename:
filename = os.path.basename(filename)
else:
filename = "Untitled"
# return unicode string to display non-ASCII chars correctly
return self._filename_to_unicode(filename)
def long_title(self):
# return unicode string to display non-ASCII chars correctly
return self._filename_to_unicode(self.io.filename or "")
def center_insert_event(self, event):
self.center()
def center(self, mark="insert"):
text = self.text
top, bot = self.getwindowlines()
lineno = self.getlineno(mark)
height = bot - top
newtop = max(1, lineno - height//2)
text.yview(float(newtop))
def getwindowlines(self):
text = self.text
top = self.getlineno("@0,0")
bot = self.getlineno("@0,65535")
if top == bot and text.winfo_height() == 1:
# Geometry manager hasn't run yet
height = int(text['height'])
bot = top + height - 1
return top, bot
def getlineno(self, mark="insert"):
text = self.text
return int(float(text.index(mark)))
def get_geometry(self):
"Return (width, height, x, y)"
geom = self.top.wm_geometry()
m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
return list(map(int, m.groups()))
def close_event(self, event):
self.close()
def maybesave(self):
if self.io:
if not self.get_saved():
if self.top.state()!='normal':
self.top.deiconify()
self.top.lower()
self.top.lift()
return self.io.maybesave()
def close(self):
reply = self.maybesave()
if str(reply) != "cancel":
self._close()
return reply
def _close(self):
if self.io.filename:
self.update_recent_files_list(new_file=self.io.filename)
WindowList.unregister_callback(self.postwindowsmenu)
self.unload_extensions()
self.io.close()
self.io = None
self.undo = None
if self.color:
self.color.close(False)
self.color = None
self.text = None
self.tkinter_vars = None
self.per.close()
self.per = None
self.top.destroy()
if self.close_hook:
# unless override: unregister from flist, terminate if last window
self.close_hook()
def load_extensions(self):
self.extensions = {}
self.load_standard_extensions()
def unload_extensions(self):
for ins in list(self.extensions.values()):
if hasattr(ins, "close"):
ins.close()
self.extensions = {}
def load_standard_extensions(self):
for name in self.get_standard_extension_names():
try:
self.load_extension(name)
except:
print("Failed to load extension", repr(name))
traceback.print_exc()
def get_standard_extension_names(self):
return idleConf.GetExtensions(editor_only=True)
def load_extension(self, name):
try:
try:
mod = importlib.import_module('.' + name, package=__package__)
except (ImportError, TypeError):
mod = importlib.import_module(name)
except ImportError:
print("\nFailed to import extension: ", name)
raise
cls = getattr(mod, name)
keydefs = idleConf.GetExtensionBindings(name)
if hasattr(cls, "menudefs"):
self.fill_menus(cls.menudefs, keydefs)
ins = cls(self)
self.extensions[name] = ins
if keydefs:
self.apply_bindings(keydefs)
for vevent in keydefs:
methodname = vevent.replace("-", "_")
while methodname[:1] == '<':
methodname = methodname[1:]
while methodname[-1:] == '>':
methodname = methodname[:-1]
methodname = methodname + "_event"
if hasattr(ins, methodname):
self.text.bind(vevent, getattr(ins, methodname))
def apply_bindings(self, keydefs=None):
if keydefs is None:
keydefs = self.Bindings.default_keydefs
text = self.text
text.keydefs = keydefs
for event, keylist in keydefs.items():
if keylist:
text.event_add(event, *keylist)
def fill_menus(self, menudefs=None, keydefs=None):
"""Add appropriate entries to the menus and submenus
Menus that are absent or None in self.menudict are ignored.
"""
if menudefs is None:
menudefs = self.Bindings.menudefs
if keydefs is None:
keydefs = self.Bindings.default_keydefs
menudict = self.menudict
text = self.text
for mname, entrylist in menudefs:
menu = menudict.get(mname)
if not menu:
continue
for entry in entrylist:
if not entry:
menu.add_separator()
else:
label, eventname = entry
checkbutton = (label[:1] == '!')
if checkbutton:
label = label[1:]
underline, label = prepstr(label)
accelerator = get_accelerator(keydefs, eventname)
def command(text=text, eventname=eventname):
text.event_generate(eventname)
if checkbutton:
var = self.get_var_obj(eventname, BooleanVar)
menu.add_checkbutton(label=label, underline=underline,
command=command, accelerator=accelerator,
variable=var)
else:
menu.add_command(label=label, underline=underline,
command=command,
accelerator=accelerator)
def getvar(self, name):
var = self.get_var_obj(name)
if var:
value = var.get()
return value
else:
raise NameError(name)
def setvar(self, name, value, vartype=None):
var = self.get_var_obj(name, vartype)
if var:
var.set(value)
else:
raise NameError(name)
def get_var_obj(self, name, vartype=None):
var = self.tkinter_vars.get(name)
if not var and vartype:
# create a Tkinter variable object with self.text as master:
self.tkinter_vars[name] = var = vartype(self.text)
return var
# Tk implementations of "virtual text methods" -- each platform
# reusing IDLE's support code needs to define these for its GUI's
# flavor of widget.
# Is character at text_index in a Python string? Return 0 for
# "guaranteed no", true for anything else. This info is expensive
# to compute ab initio, but is probably already known by the
# platform's colorizer.
def is_char_in_string(self, text_index):
if self.color:
# Return true iff colorizer hasn't (re)gotten this far
# yet, or the character is tagged as being in a string
return self.text.tag_prevrange("TODO", text_index) or \
"STRING" in self.text.tag_names(text_index)
else:
# The colorizer is missing: assume the worst
return 1
# If a selection is defined in the text widget, return (start,
# end) as Tkinter text indices, otherwise return (None, None)
def get_selection_indices(self):
try:
first = self.text.index("sel.first")
last = self.text.index("sel.last")
return first, last
except TclError:
return None, None
# Return the text widget's current view of what a tab stop means
# (equivalent width in spaces).
def get_tk_tabwidth(self):
current = self.text['tabs'] or TK_TABWIDTH_DEFAULT
return int(current)
# Set the text widget's current view of what a tab stop means.
def set_tk_tabwidth(self, newtabwidth):
text = self.text
if self.get_tk_tabwidth() != newtabwidth:
# Set text widget tab width
pixels = text.tk.call("font", "measure", text["font"],
"-displayof", text.master,
"n" * newtabwidth)
text.configure(tabs=pixels)
### begin autoindent code ### (configuration was moved to beginning of class)
def set_indentation_params(self, is_py_src, guess=True):
if is_py_src and guess:
i = self.guess_indent()
if 2 <= i <= 8:
self.indentwidth = i
if self.indentwidth != self.tabwidth:
self.usetabs = False
self.set_tk_tabwidth(self.tabwidth)
def smart_backspace_event(self, event):
text = self.text
first, last = self.get_selection_indices()
if first and last:
text.delete(first, last)
text.mark_set("insert", first)
return "break"
# Delete whitespace left, until hitting a real char or closest
# preceding virtual tab stop.
chars = text.get("insert linestart", "insert")
if chars == '':
if text.compare("insert", ">", "1.0"):
# easy: delete preceding newline
text.delete("insert-1c")
else:
text.bell() # at start of buffer
return "break"
if chars[-1] not in " \t":
# easy: delete preceding real char
text.delete("insert-1c")
return "break"
# Ick. It may require *inserting* spaces if we back up over a
# tab character! This is written to be clear, not fast.
tabwidth = self.tabwidth
have = len(chars.expandtabs(tabwidth))
assert have > 0
want = ((have - 1) // self.indentwidth) * self.indentwidth
# Debug prompt is multilined....
if self.context_use_ps1:
last_line_of_prompt = sys.ps1.split('\n')[-1]
else:
last_line_of_prompt = ''
ncharsdeleted = 0
while 1:
if chars == last_line_of_prompt:
break
chars = chars[:-1]
ncharsdeleted = ncharsdeleted + 1
have = len(chars.expandtabs(tabwidth))
if have <= want or chars[-1] not in " \t":
break
text.undo_block_start()
text.delete("insert-%dc" % ncharsdeleted, "insert")
if have < want:
text.insert("insert", ' ' * (want - have))
text.undo_block_stop()
return "break"
def smart_indent_event(self, event):
# if intraline selection:
# delete it
# elif multiline selection:
# do indent-region
# else:
# indent one level
text = self.text
first, last = self.get_selection_indices()
text.undo_block_start()
try:
if first and last:
if index2line(first) != index2line(last):
return self.indent_region_event(event)
text.delete(first, last)
text.mark_set("insert", first)
prefix = text.get("insert linestart", "insert")
raw, effective = classifyws(prefix, self.tabwidth)
if raw == len(prefix):
# only whitespace to the left
self.reindent_to(effective + self.indentwidth)
else:
# tab to the next 'stop' within or to right of line's text:
if self.usetabs:
pad = '\t'
else:
effective = len(prefix.expandtabs(self.tabwidth))
n = self.indentwidth
pad = ' ' * (n - effective % n)
text.insert("insert", pad)
text.see("insert")
return "break"
finally:
text.undo_block_stop()
def newline_and_indent_event(self, event):
text = self.text
first, last = self.get_selection_indices()
text.undo_block_start()
try:
if first and last:
text.delete(first, last)
text.mark_set("insert", first)
line = text.get("insert linestart", "insert")
i, n = 0, len(line)
while i < n and line[i] in " \t":
i = i+1
if i == n:
# the cursor is in or at leading indentation in a continuation
# line; just inject an empty line at the start
text.insert("insert linestart", '\n')
return "break"
indent = line[:i]
# strip whitespace before insert point unless it's in the prompt
i = 0
last_line_of_prompt = sys.ps1.split('\n')[-1]
while line and line[-1] in " \t" and line != last_line_of_prompt:
line = line[:-1]
i = i+1
if i:
text.delete("insert - %d chars" % i, "insert")
# strip whitespace after insert point
while text.get("insert") in " \t":
text.delete("insert")
# start new line
text.insert("insert", '\n')
            # adjust indentation for continuations and block open/close;
            # first we need to find the last stmt
lno = index2line(text.index('insert'))
y = PyParse.Parser(self.indentwidth, self.tabwidth)
if not self.context_use_ps1:
for context in self.num_context_lines:
startat = max(lno - context, 1)
startatindex = repr(startat) + ".0"
rawtext = text.get(startatindex, "insert")
y.set_str(rawtext)
bod = y.find_good_parse_start(
self.context_use_ps1,
self._build_char_in_string_func(startatindex))
if bod is not None or startat == 1:
break
y.set_lo(bod or 0)
else:
r = text.tag_prevrange("console", "insert")
if r:
startatindex = r[1]
else:
startatindex = "1.0"
rawtext = text.get(startatindex, "insert")
y.set_str(rawtext)
y.set_lo(0)
c = y.get_continuation_type()
if c != PyParse.C_NONE:
# The current stmt hasn't ended yet.
if c == PyParse.C_STRING_FIRST_LINE:
# after the first line of a string; do not indent at all
pass
elif c == PyParse.C_STRING_NEXT_LINES:
# inside a string which started before this line;
# just mimic the current indent
text.insert("insert", indent)
elif c == PyParse.C_BRACKET:
# line up with the first (if any) element of the
# last open bracket structure; else indent one
# level beyond the indent of the line with the
# last open bracket
self.reindent_to(y.compute_bracket_indent())
elif c == PyParse.C_BACKSLASH:
# if more than one line in this stmt already, just
# mimic the current indent; else if initial line
# has a start on an assignment stmt, indent to
# beyond leftmost =; else to beyond first chunk of
# non-whitespace on initial line
if y.get_num_lines_in_stmt() > 1:
text.insert("insert", indent)
else:
self.reindent_to(y.compute_backslash_indent())
else:
assert 0, "bogus continuation type %r" % (c,)
return "break"
# This line starts a brand new stmt; indent relative to
# indentation of initial line of closest preceding
# interesting stmt.
indent = y.get_base_indent_string()
text.insert("insert", indent)
if y.is_block_opener():
self.smart_indent_event(event)
elif indent and y.is_block_closer():
self.smart_backspace_event(event)
return "break"
finally:
text.see("insert")
text.undo_block_stop()
# Our editwin provides a is_char_in_string function that works
# with a Tk text index, but PyParse only knows about offsets into
# a string. This builds a function for PyParse that accepts an
# offset.
def _build_char_in_string_func(self, startindex):
def inner(offset, _startindex=startindex,
_icis=self.is_char_in_string):
return _icis(_startindex + "+%dc" % offset)
return inner
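    # As an illustration (example values only): the closure built with
    # startindex "10.0" maps offset 5 to the Tk index "10.0+5c", so inner(5)
    # is equivalent to self.is_char_in_string("10.0+5c").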
def indent_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines)):
line = lines[pos]
if line:
raw, effective = classifyws(line, self.tabwidth)
effective = effective + self.indentwidth
lines[pos] = self._make_blanks(effective) + line[raw:]
self.set_region(head, tail, chars, lines)
return "break"
def dedent_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines)):
line = lines[pos]
if line:
raw, effective = classifyws(line, self.tabwidth)
effective = max(effective - self.indentwidth, 0)
lines[pos] = self._make_blanks(effective) + line[raw:]
self.set_region(head, tail, chars, lines)
return "break"
def comment_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines) - 1):
line = lines[pos]
lines[pos] = '##' + line
self.set_region(head, tail, chars, lines)
def uncomment_region_event(self, event):
head, tail, chars, lines = self.get_region()
for pos in range(len(lines)):
line = lines[pos]
if not line:
continue
if line[:2] == '##':
line = line[2:]
elif line[:1] == '#':
line = line[1:]
lines[pos] = line
self.set_region(head, tail, chars, lines)
def tabify_region_event(self, event):
head, tail, chars, lines = self.get_region()
tabwidth = self._asktabwidth()
if tabwidth is None: return
for pos in range(len(lines)):
line = lines[pos]
if line:
raw, effective = classifyws(line, tabwidth)
ntabs, nspaces = divmod(effective, tabwidth)
lines[pos] = '\t' * ntabs + ' ' * nspaces + line[raw:]
self.set_region(head, tail, chars, lines)
def untabify_region_event(self, event):
head, tail, chars, lines = self.get_region()
tabwidth = self._asktabwidth()
if tabwidth is None: return
for pos in range(len(lines)):
lines[pos] = lines[pos].expandtabs(tabwidth)
self.set_region(head, tail, chars, lines)
def toggle_tabs_event(self, event):
if self.askyesno(
"Toggle tabs",
"Turn tabs " + ("on", "off")[self.usetabs] +
"?\nIndent width " +
("will be", "remains at")[self.usetabs] + " 8." +
"\n Note: a tab is always 8 columns",
parent=self.text):
self.usetabs = not self.usetabs
# Try to prevent inconsistent indentation.
# User must change indent width manually after using tabs.
self.indentwidth = 8
return "break"
# XXX this isn't bound to anything -- see tabwidth comments
## def change_tabwidth_event(self, event):
## new = self._asktabwidth()
## if new != self.tabwidth:
## self.tabwidth = new
## self.set_indentation_params(0, guess=0)
## return "break"
def change_indentwidth_event(self, event):
new = self.askinteger(
"Indent width",
"New indent width (2-16)\n(Always use 8 when using tabs)",
parent=self.text,
initialvalue=self.indentwidth,
minvalue=2,
maxvalue=16)
if new and new != self.indentwidth and not self.usetabs:
self.indentwidth = new
return "break"
def get_region(self):
text = self.text
first, last = self.get_selection_indices()
if first and last:
head = text.index(first + " linestart")
tail = text.index(last + "-1c lineend +1c")
else:
head = text.index("insert linestart")
tail = text.index("insert lineend +1c")
chars = text.get(head, tail)
lines = chars.split("\n")
return head, tail, chars, lines
def set_region(self, head, tail, chars, lines):
text = self.text
newchars = "\n".join(lines)
if newchars == chars:
text.bell()
return
text.tag_remove("sel", "1.0", "end")
text.mark_set("insert", head)
text.undo_block_start()
text.delete(head, tail)
text.insert(head, newchars)
text.undo_block_stop()
text.tag_add("sel", head, "insert")
# Make string that displays as n leading blanks.
def _make_blanks(self, n):
if self.usetabs:
ntabs, nspaces = divmod(n, self.tabwidth)
return '\t' * ntabs + ' ' * nspaces
else:
return ' ' * n
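    # For example, with tabwidth == 8: when usetabs is true, _make_blanks(10)
    # yields '\t' + '  ' (one tab plus two spaces); when usetabs is false it
    # yields ten literal spaces.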
    # Delete from the beginning of the line to the insert point, then
    # reinsert 'column' logical spaces (using tabs where appropriate).
def reindent_to(self, column):
text = self.text
text.undo_block_start()
if text.compare("insert linestart", "!=", "insert"):
text.delete("insert linestart", "insert")
if column:
text.insert("insert", self._make_blanks(column))
text.undo_block_stop()
def _asktabwidth(self):
return self.askinteger(
"Tab width",
"Columns per tab? (2-16)",
parent=self.text,
initialvalue=self.indentwidth,
minvalue=2,
maxvalue=16)
# Guess indentwidth from text content.
# Return guessed indentwidth. This should not be believed unless
# it's in a reasonable range (e.g., it will be 0 if no indented
# blocks are found).
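    # As a rough sketch: if the first block opener found is an unindented
    # "if" line and its body line is indented with four spaces, classifyws
    # reports effective widths 0 and 4, so the guessed indentwidth is 4.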
def guess_indent(self):
opener, indented = IndentSearcher(self.text, self.tabwidth).run()
if opener and indented:
raw, indentsmall = classifyws(opener, self.tabwidth)
raw, indentlarge = classifyws(indented, self.tabwidth)
else:
indentsmall = indentlarge = 0
return indentlarge - indentsmall
# "line.col" -> line, as an int
def index2line(index):
return int(float(index))
# Look at the leading whitespace in s.
# Return pair (# of leading ws characters,
# effective # of leading blanks after expanding
# tabs to width tabwidth)
def classifyws(s, tabwidth):
raw = effective = 0
for ch in s:
if ch == ' ':
raw = raw + 1
effective = effective + 1
elif ch == '\t':
raw = raw + 1
effective = (effective // tabwidth + 1) * tabwidth
else:
break
return raw, effective
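# For example, with tabwidth == 8, classifyws("\t  x", 8) returns (3, 10):
# three leading whitespace characters that occupy ten columns once the tab
# is expanded.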
import tokenize
_tokenize = tokenize
del tokenize
class IndentSearcher(object):
# .run() chews over the Text widget, looking for a block opener
# and the stmt following it. Returns a pair,
# (line containing block opener, line containing stmt)
# Either or both may be None.
def __init__(self, text, tabwidth):
self.text = text
self.tabwidth = tabwidth
self.i = self.finished = 0
self.blkopenline = self.indentedline = None
def readline(self):
if self.finished:
return ""
i = self.i = self.i + 1
mark = repr(i) + ".0"
if self.text.compare(mark, ">=", "end"):
return ""
return self.text.get(mark, mark + " lineend+1c")
def tokeneater(self, type, token, start, end, line,
INDENT=_tokenize.INDENT,
NAME=_tokenize.NAME,
OPENERS=('class', 'def', 'for', 'if', 'try', 'while')):
if self.finished:
pass
elif type == NAME and token in OPENERS:
self.blkopenline = line
elif type == INDENT and self.blkopenline:
self.indentedline = line
self.finished = 1
def run(self):
save_tabsize = _tokenize.tabsize
_tokenize.tabsize = self.tabwidth
try:
try:
tokens = _tokenize.generate_tokens(self.readline)
for token in tokens:
self.tokeneater(*token)
except (_tokenize.TokenError, SyntaxError):
# since we cut off the tokenizer early, we can trigger
# spurious errors
pass
finally:
_tokenize.tabsize = save_tabsize
return self.blkopenline, self.indentedline
### end autoindent code ###
def prepstr(s):
# Helper to extract the underscore from a string, e.g.
# prepstr("Co_py") returns (2, "Copy").
i = s.find('_')
if i >= 0:
s = s[:i] + s[i+1:]
return i, s
keynames = {
'bracketleft': '[',
'bracketright': ']',
'slash': '/',
}
def get_accelerator(keydefs, eventname):
keylist = keydefs.get(eventname)
# issue10940: temporary workaround to prevent hang with OS X Cocoa Tk 8.5
# if not keylist:
if (not keylist) or (macosxSupport.isCocoaTk() and eventname in {
"<<open-module>>",
"<<goto-line>>",
"<<change-indentwidth>>"}):
return ""
s = keylist[0]
s = re.sub(r"-[a-z]\b", lambda m: m.group().upper(), s)
s = re.sub(r"\b\w+\b", lambda m: keynames.get(m.group(), m.group()), s)
s = re.sub("Key-", "", s)
s = re.sub("Cancel","Ctrl-Break",s) # [email protected]
s = re.sub("Control-", "Ctrl-", s)
s = re.sub("-", "+", s)
s = re.sub("><", " ", s)
s = re.sub("<", "", s)
s = re.sub(">", "", s)
return s
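# Illustrative example (assumed keydefs value): given
# keydefs == {'<<find>>': ['<Control-Key-f>']},
# get_accelerator(keydefs, '<<find>>') would produce 'Ctrl+F'.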
def fixwordbreaks(root):
# Make sure that Tk's double-click and next/previous word
# operations use our definition of a word (i.e. an identifier)
tk = root.tk
tk.call('tcl_wordBreakAfter', 'a b', 0) # make sure word.tcl is loaded
tk.call('set', 'tcl_wordchars', '[a-zA-Z0-9_]')
tk.call('set', 'tcl_nonwordchars', '[^a-zA-Z0-9_]')
def _editor_window(parent): # htest #
# error if close master window first - timer event, after script
root = parent
fixwordbreaks(root)
if sys.argv[1:]:
filename = sys.argv[1]
else:
filename = None
macosxSupport.setupApp(root, None)
edit = EditorWindow(root=root, filename=filename)
edit.text.bind("<<close-all-windows>>", edit.close_event)
# Does not stop error, neither does following
# edit.text.bind("<<close-window>>", edit.close_event)
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(_help_dialog, _editor_window)
| mit |
boonchu/pykickstart | pykickstart/handlers/f19.py | 10 | 4960 | #
# Chris Lumens <[email protected]>
#
# Copyright 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
__all__ = ["F19Handler"]
from pykickstart import commands
from pykickstart.base import BaseHandler
from pykickstart.version import F19
class F19Handler(BaseHandler):
version = F19
commandMap = {
"auth": commands.authconfig.FC3_Authconfig,
"authconfig": commands.authconfig.FC3_Authconfig,
"autopart": commands.autopart.F18_AutoPart,
"autostep": commands.autostep.FC3_AutoStep,
"bootloader": commands.bootloader.F19_Bootloader,
"btrfs": commands.btrfs.F17_BTRFS,
"cdrom": commands.cdrom.FC3_Cdrom,
"clearpart": commands.clearpart.F17_ClearPart,
"cmdline": commands.displaymode.FC3_DisplayMode,
"device": commands.device.F8_Device,
"deviceprobe": commands.deviceprobe.FC3_DeviceProbe,
"dmraid": commands.dmraid.FC6_DmRaid,
"driverdisk": commands.driverdisk.F14_DriverDisk,
"fcoe": commands.fcoe.F13_Fcoe,
"firewall": commands.firewall.F14_Firewall,
"firstboot": commands.firstboot.FC3_Firstboot,
"graphical": commands.displaymode.FC3_DisplayMode,
"group": commands.group.F12_Group,
"halt": commands.reboot.F18_Reboot,
"harddrive": commands.harddrive.FC3_HardDrive,
"ignoredisk": commands.ignoredisk.F14_IgnoreDisk,
"install": commands.upgrade.F11_Upgrade,
"iscsi": commands.iscsi.F17_Iscsi,
"iscsiname": commands.iscsiname.FC6_IscsiName,
"keyboard": commands.keyboard.F18_Keyboard,
"lang": commands.lang.F19_Lang,
"liveimg": commands.liveimg.F19_Liveimg,
"logging": commands.logging.FC6_Logging,
"logvol": commands.logvol.F18_LogVol,
"mediacheck": commands.mediacheck.FC4_MediaCheck,
"method": commands.method.F19_Method,
"multipath": commands.multipath.FC6_MultiPath,
"network": commands.network.F19_Network,
"nfs": commands.nfs.FC6_NFS,
"part": commands.partition.F18_Partition,
"partition": commands.partition.F18_Partition,
"poweroff": commands.reboot.F18_Reboot,
"raid": commands.raid.F19_Raid,
"realm": commands.realm.F19_Realm,
"reboot": commands.reboot.F18_Reboot,
"repo": commands.repo.F15_Repo,
"rescue": commands.rescue.F10_Rescue,
"rootpw": commands.rootpw.F18_RootPw,
"selinux": commands.selinux.FC3_SELinux,
"services": commands.services.FC6_Services,
"shutdown": commands.reboot.F18_Reboot,
"skipx": commands.skipx.FC3_SkipX,
"sshpw": commands.sshpw.F13_SshPw,
"text": commands.displaymode.FC3_DisplayMode,
"timezone": commands.timezone.F18_Timezone,
"updates": commands.updates.F7_Updates,
"upgrade": commands.upgrade.F11_Upgrade,
"url": commands.url.F18_Url,
"user": commands.user.F19_User,
"vnc": commands.vnc.F9_Vnc,
"volgroup": commands.volgroup.FC16_VolGroup,
"xconfig": commands.xconfig.F14_XConfig,
"zerombr": commands.zerombr.F9_ZeroMbr,
"zfcp": commands.zfcp.F14_ZFCP,
}
dataMap = {
"BTRFSData": commands.btrfs.F17_BTRFSData,
"DriverDiskData": commands.driverdisk.F14_DriverDiskData,
"DeviceData": commands.device.F8_DeviceData,
"DmRaidData": commands.dmraid.FC6_DmRaidData,
"FcoeData": commands.fcoe.F13_FcoeData,
"GroupData": commands.group.F12_GroupData,
"IscsiData": commands.iscsi.F17_IscsiData,
"LogVolData": commands.logvol.F20_LogVolData,
"MultiPathData": commands.multipath.FC6_MultiPathData,
"NetworkData": commands.network.F19_NetworkData,
"PartData": commands.partition.F18_PartData,
"RaidData": commands.raid.F18_RaidData,
"RepoData": commands.repo.F15_RepoData,
"SshPwData": commands.sshpw.F13_SshPwData,
"UserData": commands.user.F19_UserData,
"VolGroupData": commands.volgroup.FC16_VolGroupData,
"ZFCPData": commands.zfcp.F14_ZFCPData,
}
| gpl-2.0 |
openconnectome/open-connectome | django/resource/channelview.py | 2 | 2523 | # Copyright 2014 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.views.generic import View
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound
from rest_framework.authentication import SessionAuthentication, TokenAuthentication
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework import generics
from ndauth.authentication import PublicAuthentication
from ndproj.ndproject import NDProject
from nduser.models import Channel
from ndproj.ndchannel import NDChannel
# @authentication_classes((SessionAuthentication, TokenAuthentication))
# @permission_classes((IsAuthenticated,))
# class ChannelView(generics.GenericAPIView):
class ChannelView(View):
def get(self, request, dataset_name, project_name, channel_name):
try:
pr = NDProject.fromName(project_name)
ch = pr.getChannelObj(channel_name)
return HttpResponse(ch.serialize(), content_type='application/json')
except Channel.DoesNotExist as e:
return HttpResponseNotFound()
except Exception as e:
return HttpResponseBadRequest()
def post(self, request, dataset_name, project_name, channel_name):
try:
ch = NDChannel.fromJson(project_name, request.body)
if request.user.is_authenticated():
ch.user_id = request.user.id
else:
ch.user_id = User.objects.get(username='neurodata').id
ch.create()
return HttpResponse(status=201)
except Exception as e:
return HttpResponseBadRequest()
def put(self, request, web_args):
return NotImplemented
def delete(self, request, dataset_name, project_name, channel_name):
try:
pr = NDProject.fromName(project_name)
ch = pr.getChannelObj(channel_name)
ch.delete()
return HttpResponse(status=204)
except Exception as e:
return HttpResponseBadRequest()
| apache-2.0 |
hassanabidpk/django | tests/flatpages_tests/test_middleware.py | 290 | 8134 | from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from .settings import FLATPAGES_TEMPLATES
class TestDataMixin(object):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
cls.fp1 = FlatPage.objects.create(
url='/flatpage/', title='A Flatpage', content="Isn't it flat!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp2 = FlatPage.objects.create(
url='/location/flatpage/', title='A Nested Flatpage', content="Isn't it flat and deep!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp3 = FlatPage.objects.create(
url='/sekrit/', title='Sekrit Flatpage', content="Isn't it sekrit!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp4 = FlatPage.objects.create(
url='/location/sekrit/', title='Sekrit Nested Flatpage', content="Isn't it sekrit and deep!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp1.sites.add(cls.site1)
cls.fp2.sites.add(cls.site1)
cls.fp3.sites.add(cls.site1)
cls.fp4.sites.add(cls.site1)
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageMiddlewareTests(TestDataMixin, TestCase):
def test_view_flatpage(self):
"A flatpage can be served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
User.objects.create_user('testuser', '[email protected]', 's3krit')
self.client.login(username='testuser', password='s3krit')
response = self.client.get('/flatpage_root/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware"
response = self.client.get('/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_fallback_non_existent_flatpage(self):
"A non-existent flatpage raises a 404 when served by the fallback middleware"
response = self.client.get('/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_fallback_authenticated_flatpage(self):
"A flatpage served by the middleware can require authentication"
response = self.client.get('/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/sekrit/')
User.objects.create_user('testuser', '[email protected]', 's3krit')
self.client.login(username='testuser', password='s3krit')
response = self.client.get('/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served by the fallback middleware"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/some.very_special~chars-here/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it special!</p>")
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
APPEND_SLASH=True,
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageMiddlewareAppendSlashTests(TestDataMixin, TestCase):
def test_redirect_view_flatpage(self):
"A flatpage can be served through a view and should add a slash"
response = self.client.get('/flatpage_root/flatpage')
self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301)
def test_redirect_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view and should not add a slash"
response = self.client.get('/flatpage_root/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware and should add a slash"
response = self.client.get('/flatpage')
self.assertRedirects(response, '/flatpage/', status_code=301)
def test_redirect_fallback_non_existent_flatpage(self):
"A non-existent flatpage raises a 404 when served by the fallback middleware and should not add a slash"
response = self.client.get('/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served by the fallback middleware and should add a slash"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/some.very_special~chars-here')
self.assertRedirects(response, '/some.very_special~chars-here/', status_code=301)
def test_redirect_fallback_flatpage_root(self):
"A flatpage at / should not cause a redirect loop when APPEND_SLASH is set"
fp = FlatPage.objects.create(
url="/",
title="Root",
content="Root",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Root</p>")
| bsd-3-clause |
SlimRoms/kernel_samsung_smdk4412 | scripts/tracing/draw_functrace.py | 14676 | 3560 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <[email protected]>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more
human-readable view of the call stack by drawing a textual but hierarchical
tree of calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for a while, but not too long; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
        has the name given by func. If func is not found among the
        parents, create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
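# Illustrative sketch (not from the original script): a raw function-tracer line
# looks roughly like
#     "            bash-4251  [001]  9013.874613: update_curr <-task_tick_fair"
# for which parseLine() returns ('9013.874613', 'update_curr', 'task_tick_fair');
# the exact prefix before the ']' varies between kernel versions.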
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
| gpl-2.0 |
dongguangming/django-filebrowser | filebrowser/templatetags/fb_versions.py | 13 | 5502 | # coding: utf-8
# PYTHON IMPORTS
import os
import re
from time import gmtime
# DJANGO IMPORTS
from django.template import Library, Node, Variable, VariableDoesNotExist, TemplateSyntaxError
from django.conf import settings
from django.core.files import File
# FILEBROWSER IMPORTS
from filebrowser.settings import VERSIONS, PLACEHOLDER, SHOW_PLACEHOLDER, FORCE_PLACEHOLDER
from filebrowser.base import FileObject
from filebrowser.sites import get_default_site
register = Library()
class VersionNode(Node):
def __init__(self, src, suffix):
self.src = src
self.suffix = suffix
def render(self, context):
try:
version_suffix = self.suffix.resolve(context)
source = self.src.resolve(context)
except VariableDoesNotExist:
return ""
if version_suffix not in VERSIONS:
return "" # FIXME: should this throw an error?
if isinstance(source, FileObject):
source = source.path
elif isinstance(source, File):
source = source.name
else: # string
source = source
site = context.get('filebrowser_site', get_default_site())
if FORCE_PLACEHOLDER or (SHOW_PLACEHOLDER and not site.storage.isfile(source)):
source = PLACEHOLDER
fileobject = FileObject(source, site=site)
try:
version = fileobject.version_generate(version_suffix)
return version.url
except Exception as e:
if settings.TEMPLATE_DEBUG:
raise e
return ""
def version(parser, token):
"""
Displaying a version of an existing Image according to the predefined VERSIONS settings (see filebrowser settings).
{% version fileobject version_suffix %}
Use {% version fileobject 'medium' %} in order to
display the medium-size version of an image.
    version_suffix can be a string or a variable. If version_suffix is a string, use quotes.
"""
bits = token.split_contents()
if len(bits) != 3:
        raise TemplateSyntaxError("'version' tag takes 2 arguments")
return VersionNode(parser.compile_filter(bits[1]), parser.compile_filter(bits[2]))
class VersionObjectNode(Node):
def __init__(self, src, suffix, var_name):
self.src = src
self.suffix = suffix
self.var_name = var_name
def render(self, context):
try:
version_suffix = self.suffix.resolve(context)
source = self.src.resolve(context)
except VariableDoesNotExist:
return None
if version_suffix not in VERSIONS:
return "" # FIXME: should this throw an error?
if isinstance(source, FileObject):
source = source.path
elif isinstance(source, File):
source = source.name
else: # string
source = source
site = context.get('filebrowser_site', get_default_site())
if FORCE_PLACEHOLDER or (SHOW_PLACEHOLDER and not site.storage.isfile(source)):
source = PLACEHOLDER
fileobject = FileObject(source, site=site)
try:
version = fileobject.version_generate(version_suffix)
context[self.var_name] = version
except Exception as e:
if settings.TEMPLATE_DEBUG:
raise e
context[self.var_name] = ""
return ""
def version_object(parser, token):
"""
Returns a context variable 'var_name' with the FileObject
{% version_object fileobject version_suffix as var_name %}
Use {% version_object fileobject 'medium' as version_medium %} in order to
retrieve the medium version of an image stored in a variable version_medium.
version_suffix can be a string or a variable. If version_suffix is a string, use quotes.
"""
bits = token.split_contents()
if len(bits) != 5:
raise TemplateSyntaxError("'version_object' tag takes 4 arguments")
if bits[3] != 'as':
raise TemplateSyntaxError("second argument to 'version_object' tag must be 'as'")
return VersionObjectNode(parser.compile_filter(bits[1]), parser.compile_filter(bits[2]), bits[4])
class VersionSettingNode(Node):
def __init__(self, version_suffix):
if (version_suffix[0] == version_suffix[-1] and version_suffix[0] in ('"', "'")):
self.version_suffix = version_suffix[1:-1]
else:
self.version_suffix = None
self.version_suffix_var = Variable(version_suffix)
def render(self, context):
if self.version_suffix:
version_suffix = self.version_suffix
else:
try:
version_suffix = self.version_suffix_var.resolve(context)
except VariableDoesNotExist:
return None
context['version_setting'] = VERSIONS[version_suffix]
return ''
def version_setting(parser, token):
"""
Get Information about a version setting.
"""
try:
tag, version_suffix = token.split_contents()
except:
raise TemplateSyntaxError("%s tag requires 1 argument" % token.contents.split()[0])
if (version_suffix[0] == version_suffix[-1] and version_suffix[0] in ('"', "'")) and version_suffix.lower()[1:-1] not in VERSIONS:
raise TemplateSyntaxError("%s tag received bad version_suffix %s" % (tag, version_suffix))
return VersionSettingNode(version_suffix)
register.tag(version)
register.tag(version_object)
register.tag(version_setting)
| bsd-3-clause |
divya-csekar/flask-microblog-server | flask/Lib/site-packages/whoosh/lang/snowball/bases.py | 96 | 4874 | # Base classes
class _ScandinavianStemmer(object):
"""
This subclass encapsulates a method for defining the string region R1.
It is used by the Danish, Norwegian, and Swedish stemmer.
"""
def _r1_scandinavian(self, word, vowels):
"""
Return the region R1 that is used by the Scandinavian stemmers.
R1 is the region after the first non-vowel following a vowel,
or is the null region at the end of the word if there is no
such non-vowel. But then R1 is adjusted so that the region
before it contains at least three letters.
:param word: The word whose region R1 is determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the region R1.
:type vowels: unicode
:return: the region R1 for the respective word.
:rtype: unicode
:note: This helper method is invoked by the respective stem method of
the subclasses DanishStemmer, NorwegianStemmer, and
SwedishStemmer. It is not to be invoked directly!
"""
r1 = ""
for i in range(1, len(word)):
if word[i] not in vowels and word[i - 1] in vowels:
if len(word[:i + 1]) < 3 and len(word[:i + 1]) > 0:
r1 = word[3:]
elif len(word[:i + 1]) >= 3:
r1 = word[i + 1:]
else:
return word
break
return r1
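    # Illustrative example (not part of the original module), using a synthetic
    # word and vowel set rather than a real Scandinavian one; ``stemmer`` is any
    # instance of a subclass:
    #
    #     stemmer._r1_scandinavian("abcde", "ae")  # -> "de"
    #
    # Without the three-letter adjustment R1 would start right after the "b"
    # (giving "cde"); the adjustment moves its start to index 3.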
class _StandardStemmer(object):
"""
This subclass encapsulates two methods for defining the standard versions
of the string regions R1, R2, and RV.
"""
def _r1r2_standard(self, word, vowels):
"""
Return the standard interpretations of the string regions R1 and R2.
R1 is the region after the first non-vowel following a vowel,
or is the null region at the end of the word if there is no
such non-vowel.
R2 is the region after the first non-vowel following a vowel
in R1, or is the null region at the end of the word if there
is no such non-vowel.
:param word: The word whose regions R1 and R2 are determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the regions R1 and R2.
:type vowels: unicode
:return: (r1,r2), the regions R1 and R2 for the respective word.
:rtype: tuple
:note: This helper method is invoked by the respective stem method of
the subclasses DutchStemmer, FinnishStemmer,
FrenchStemmer, GermanStemmer, ItalianStemmer,
PortugueseStemmer, RomanianStemmer, and SpanishStemmer.
It is not to be invoked directly!
:note: A detailed description of how to define R1 and R2
can be found at http://snowball.tartarus.org/texts/r1r2.html
"""
r1 = ""
r2 = ""
for i in range(1, len(word)):
if word[i] not in vowels and word[i - 1] in vowels:
r1 = word[i + 1:]
break
for i in range(1, len(r1)):
if r1[i] not in vowels and r1[i - 1] in vowels:
r2 = r1[i + 1:]
break
return (r1, r2)
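    # Worked example (illustrative comment): for the word "beautiful" with the
    # English Snowball vowels "aeiouy", the first non-vowel following a vowel is
    # the "t", so R1 = "iful"; repeating the rule inside R1 gives R2 = "ul".
    # This matches the reference definition at
    # http://snowball.tartarus.org/texts/r1r2.html.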
def _rv_standard(self, word, vowels):
"""
Return the standard interpretation of the string region RV.
If the second letter is a consonant, RV is the region after the
next following vowel. If the first two letters are vowels, RV is
the region after the next following consonant. Otherwise, RV is
the region after the third letter.
:param word: The word whose region RV is determined.
:type word: str or unicode
:param vowels: The vowels of the respective language that are
used to determine the region RV.
:type vowels: unicode
:return: the region RV for the respective word.
:rtype: unicode
:note: This helper method is invoked by the respective stem method of
the subclasses ItalianStemmer, PortugueseStemmer,
RomanianStemmer, and SpanishStemmer. It is not to be
invoked directly!
"""
rv = ""
if len(word) >= 2:
if word[1] not in vowels:
for i in range(2, len(word)):
if word[i] in vowels:
rv = word[i + 1:]
break
elif word[:2] in vowels:
for i in range(2, len(word)):
if word[i] not in vowels:
rv = word[i + 1:]
break
else:
rv = word[3:]
return rv
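    # Worked examples (illustrative comment), following the standard Snowball RV
    # rules; ``stemmer`` is any instance of a subclass:
    #
    #     stemmer._rv_standard("macho", "aeiou")    # -> "ho"   (consonant-vowel start)
    #     stemmer._rv_standard("oliva", "aeiou")    # -> "va"   (second letter is a consonant)
    #     stemmer._rv_standard("trabajo", "aeiou")  # -> "bajo"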
| bsd-3-clause |
wanghongjuan/meta-iotqa-1 | lib/oeqa/runtime/sensor/test_gyro_mpu6050.py | 2 | 2458 | """
@file test_gyro_mpu6050.py
"""
##
# @addtogroup soletta sensor
# @brief This is sensor test based on soletta app
# @brief test sensor mpu6050 on Galileo/MinnowMax/Edison
##
import os
import time
from oeqa.utils.helper import shell_cmd
from oeqa.oetest import oeRuntimeTest
from oeqa.runtime.sensor.EnvirSetup import EnvirSetup
from oeqa.utils.decorators import tag
@tag(TestType="FVT", FeatureID="IOTOS-757")
class TestGyroMPU6050(oeRuntimeTest):
"""
@class TestGyroMPU6050
"""
def setUp(self):
'''Generate fbp file on target
@fn setUp
@param self
@return'''
print ('start!\n')
#connect sensor and DUT through board
#shell_cmd("sudo python "+ os.path.dirname(__file__) + "/Connector.py mpu6050")
envir = EnvirSetup(self.target)
envir.envirSetup("mpu6050","gyro")
def tearDown(self):
'''unload mpu6050 driver
@fn tearDown
@param self
@return'''
(status, output) = self.target.run("cat /sys/devices/virtual/dmi/id/board_name")
if "Minnow" in output:
(status, output) = self.target.run(
"rmmod i2c-minnow-mpu6050")
if "Galileo" in output:
(status, output) = self.target.run(
"rmmod i2c-quark-mpu6050")
if "BODEGA" in output:
(status, output) = self.target.run(
"rmmod i2c-edison-mpu6050")
def test_Gyro_MPU6050(self):
'''Execute the test app and verify sensor data
@fn test_Gyro_MPU6050
@param self
@return'''
print ('start reading data!')
(status, output) = self.target.run(
"chmod 777 /opt/apps/test_gyro_mpu6050.fbp")
(status, output) = self.target.run(
"cd /opt/apps; ./test_gyro_mpu6050.fbp >re.log")
error = output
(status, output) = self.target.run(
"cp /opt/apps/re.log /home/root/mpu6050.log")
(status, output) = self.target.run("cat /opt/apps/re.log|grep direction-vector")
print (output + "\n")
self.assertEqual(status, 0, msg="Error messages: %s" % error)
#make sure sensor data is valid
(status, output) = self.target.run("cat /opt/apps/re.log|grep '0.000000, 0.000000, 0.000000'")
self.assertEqual(status, 1, msg="Error messages: %s" % output)
| mit |
Giftingnation/GN-Oscar-Custom | oscar/apps/dashboard/views.py | 4 | 7404 | from datetime import timedelta
from decimal import Decimal as D, ROUND_UP
from django.utils.timezone import now
from django.views.generic import TemplateView
from django.db.models.loading import get_model
from django.db.models import Avg, Sum, Count
from oscar.core.compat import get_user_model
from oscar.apps.promotions.models import AbstractPromotion
ConditionalOffer = get_model('offer', 'ConditionalOffer')
Voucher = get_model('voucher', 'Voucher')
Basket = get_model('basket', 'Basket')
StockAlert = get_model('partner', 'StockAlert')
Product = get_model('catalogue', 'Product')
Order = get_model('order', 'Order')
Line = get_model('order', 'Line')
User = get_user_model()
class IndexView(TemplateView):
"""
An overview view which displays several reports about the shop.
    Supports the permission-based dashboard. It is recommended to add an
index_nonstaff.html template because Oscar's default template will
display potentially sensitive store information.
"""
def get_template_names(self):
if self.request.user.is_staff:
return ['dashboard/index.html', ]
else:
return ['dashboard/index_nonstaff.html', 'dashboard/index.html']
def get_context_data(self, **kwargs):
ctx = super(IndexView, self).get_context_data(**kwargs)
ctx.update(self.get_stats())
return ctx
def get_active_site_offers(self):
"""
Return active conditional offers of type "site offer". The returned
``Queryset`` of site offers is filtered by end date greater then
the current date.
"""
return ConditionalOffer.objects.filter(end_datetime__gt=now(),
offer_type=ConditionalOffer.SITE)
def get_active_vouchers(self):
"""
Get all active vouchers. The returned ``Queryset`` of vouchers
is filtered by end date greater then the current date.
"""
return Voucher.objects.filter(end_date__gt=now())
def get_number_of_promotions(self, abstract_base=AbstractPromotion):
"""
Get the number of promotions for all promotions derived from
*abstract_base*. All subclasses of *abstract_base* are queried
and if another abstract base class is found this method is executed
recursively.
"""
total = 0
for cls in abstract_base.__subclasses__():
if cls._meta.abstract:
total += self.get_number_of_promotions(cls)
else:
total += cls.objects.count()
return total
def get_open_baskets(self, filters=None):
"""
Get all open baskets. If *filters* dictionary is provided they will
be applied on all open baskets and return only filtered results.
"""
if filters is None:
filters = {}
filters['status'] = Basket.OPEN
return Basket.objects.filter(**filters)
def get_hourly_report(self, hours=24, segments=10):
"""
Get report of order revenue split up in hourly chunks. A report is
generated for the last *hours* (default=24) from the current time.
        The report provides ``max_revenue``, the maximum of the hourly order
        revenue sums, ``y_range``, the labeling for the y-axis in a template,
        and ``order_total_hourly``, a list of properties for hourly chunks.
*segments* defines the number of labeling segments used for the y-axis
when generating the y-axis labels (default=10).
"""
        # Get the datetime for 24 hours ago
time_now = now().replace(minute=0, second=0)
start_time = time_now - timedelta(hours=hours-1)
orders_last_day = Order.objects.filter(date_placed__gt=start_time)
order_total_hourly = []
for hour in range(0, hours, 2):
end_time = start_time + timedelta(hours=2)
hourly_orders = orders_last_day.filter(date_placed__gt=start_time,
date_placed__lt=end_time)
total = hourly_orders.aggregate(
Sum('total_incl_tax')
)['total_incl_tax__sum'] or D('0.0')
order_total_hourly.append({
'end_time': end_time,
'total_incl_tax': total
})
start_time = end_time
max_value = max([x['total_incl_tax'] for x in order_total_hourly])
divisor = 1
while divisor < max_value / 50:
divisor *= 10
max_value = (max_value / divisor).quantize(D('1'), rounding=ROUND_UP)
max_value *= divisor
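        # Worked example (illustrative): with a peak two-hour revenue of
        # D('1234.00'), the loop above stops at divisor == 100 (since
        # 100 >= 1234/50), and the quantized maximum becomes 13 * 100 == 1300,
        # i.e. the chart headroom is rounded up to a "nice" number.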
if max_value:
segment_size = (max_value) / D('100.0')
for item in order_total_hourly:
item['percentage'] = int(item['total_incl_tax'] / segment_size)
y_range = []
y_axis_steps = max_value / D(str(segments))
for idx in reversed(range(segments+1)):
y_range.append(idx * y_axis_steps)
else:
y_range = []
for item in order_total_hourly:
item['percentage'] = 0
ctx = {
'order_total_hourly': order_total_hourly,
'max_revenue': max_value,
'y_range': y_range,
}
return ctx
def get_stats(self):
datetime_24hrs_ago = now() - timedelta(hours=24)
orders = Order.objects.filter()
orders_last_day = orders.filter(date_placed__gt=datetime_24hrs_ago)
open_alerts = StockAlert.objects.filter(status=StockAlert.OPEN)
closed_alerts = StockAlert.objects.filter(status=StockAlert.CLOSED)
stats = {
'total_orders_last_day': orders_last_day.count(),
'total_lines_last_day': Line.objects.filter(order__in=orders_last_day).count(),
'average_order_costs': orders_last_day.aggregate(
Avg('total_incl_tax')
)['total_incl_tax__avg'] or D('0.00'),
'total_revenue_last_day': orders_last_day.aggregate(
Sum('total_incl_tax')
)['total_incl_tax__sum'] or D('0.00'),
'hourly_report_dict': self.get_hourly_report(hours=24),
'total_customers_last_day': User.objects.filter(
date_joined__gt=datetime_24hrs_ago,
).count(),
'total_open_baskets_last_day': self.get_open_baskets({
'date_created__gt': datetime_24hrs_ago
}).count(),
'total_products': Product.objects.count(),
'total_open_stock_alerts': open_alerts.count(),
'total_closed_stock_alerts': closed_alerts.count(),
'total_site_offers': self.get_active_site_offers().count(),
'total_vouchers': self.get_active_vouchers().count(),
'total_promotions': self.get_number_of_promotions(),
'total_customers': User.objects.count(),
'total_open_baskets': self.get_open_baskets().count(),
'total_orders': orders.count(),
'total_lines': Line.objects.filter(order__in=orders).count(),
'total_revenue': orders.aggregate(
Sum('total_incl_tax')
)['total_incl_tax__sum'] or D('0.00'),
'order_status_breakdown': orders.order_by(
'status'
).values('status').annotate(freq=Count('id'))
}
return stats
| bsd-3-clause |
argonemyth/sentry | src/sentry/web/forms/invite_organization_member.py | 4 | 1598 | from __future__ import absolute_import
from django import forms
from django.db import transaction, IntegrityError
from sentry.models import (
AuditLogEntry, AuditLogEntryEvent, OrganizationMember,
OrganizationMemberType
)
class InviteOrganizationMemberForm(forms.ModelForm):
class Meta:
fields = ('email',)
model = OrganizationMember
def save(self, actor, organization, ip_address):
om = super(InviteOrganizationMemberForm, self).save(commit=False)
om.organization = organization
om.type = OrganizationMemberType.MEMBER
try:
existing = OrganizationMember.objects.get(
organization=organization,
user__email__iexact=om.email,
)
except OrganizationMember.DoesNotExist:
pass
else:
return existing, False
sid = transaction.savepoint(using='default')
try:
om.save()
except IntegrityError:
transaction.savepoint_rollback(sid, using='default')
return OrganizationMember.objects.get(
email__iexact=om.email,
organization=organization,
), False
transaction.savepoint_commit(sid, using='default')
AuditLogEntry.objects.create(
organization=organization,
actor=actor,
ip_address=ip_address,
target_object=om.id,
event=AuditLogEntryEvent.MEMBER_INVITE,
data=om.get_audit_log_data(),
)
om.send_invite_email()
return om, True
| bsd-3-clause |
edx/js-test-tool | js_test_tool/result_report.py | 3 | 7720 | """
Generate report of test results.
"""
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from jinja2 import Environment, PackageLoader
from xml.sax.saxutils import escape, quoteattr
import logging
LOGGER = logging.getLogger(__name__)
# Set up the template environment
TEMPLATE_LOADER = PackageLoader(__package__)
TEMPLATE_ENV = Environment(
loader=TEMPLATE_LOADER,
trim_blocks=True,
lstrip_blocks=True,
extensions=['jinja2.ext.with_']
)
class ResultData(object):
"""
Test result data.
"""
def __init__(self):
"""
Initialize an empty `ResultData` object.
"""
self._result_dict = defaultdict(list)
def add_results(self, browser_name, test_results):
"""
Add a new set of test results to the `ResultData` object.
`browser_name` is the name of the browser the tests
were run under.
`test_results` is a list of dictionaries of the form:
{
'test_group': TEST_GROUP_NAME,
'test_name': TEST_NAME,
'status': pass | fail | error | skip,
'detail': DETAILS
}
"""
self._result_dict[browser_name].extend(test_results)
def browsers(self):
"""
Return a list of browsers for which we have test results.
"""
return self._result_dict.keys()
def test_results(self, browser_name):
"""
Return test results for the browser named `browser_name`.
This is a list of dictionaries of the form:
{
'test_group': TEST_GROUP_NAME,
'test_name': TEST_NAME,
'status': pass | fail | error | skip,
'detail': DETAILS
}
If no results are available for `browser_name`, returns
an empty list.
"""
return self._result_dict[browser_name]
# Dict mapping status values to the counter
# that should be incremented in the stats dict
STATS_KEY_MAP = {
'pass': 'num_passed',
'fail': 'num_failed',
'error': 'num_error',
'skip': 'num_skipped'
}
def stats(self, browser_name):
"""
Return summary statistics for the test results
for the browser `browser_name`. This is a dict
of the form:
{
'num_failed': NUM_FAILED,
'num_error': NUM_ERROR,
'num_skipped': NUM_SKIPPED,
'num_passed': NUM_PASSED
'num_tests': NUM_TESTS
}
`NUM_TESTS` is the total number of tests (the sum
of failed, errored, skipped, and passed tests).
If there are no test results for the browser,
returns counts that are all 0.
If `browser_name` is `None`, returns aggregate
results for all browers.
"""
stats = {
'num_failed': 0, 'num_error': 0,
'num_skipped': 0, 'num_passed': 0,
'num_tests': 0
}
# If no browser name specified, aggregate results
# across all browsers.
if browser_name is None:
browser_list = self.browsers()
# Otherwise, get results only for the specified browser
else:
browser_list = [browser_name]
for browser in browser_list:
for test_result in self._result_dict[browser]:
status = test_result.get('status')
stats_key = self.STATS_KEY_MAP.get(status)
if stats_key is not None:
stats[stats_key] += 1
stats['num_tests'] += 1
else:
msg = "Invalid test result status: '{0}'".format(status)
LOGGER.warning(msg)
return stats
def all_passed(self):
"""
Return True only if all tests passed in all browsers.
Otherwise, return False.
If no results are available, return True.
"""
for browser_name in self.browsers():
stats = self.stats(browser_name)
if (stats.get('num_failed', 0) + stats.get('num_error', 0)) > 0:
return False
return True
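# Illustrative usage sketch (not part of the original module):
#
#     data = ResultData()
#     data.add_results("chrome", [{'test_group': 'suite', 'test_name': 'adds',
#                                  'status': 'pass', 'detail': ''}])
#     data.stats("chrome")   # -> {'num_passed': 1, 'num_failed': 0, 'num_error': 0,
#                            #     'num_skipped': 0, 'num_tests': 1}
#     data.all_passed()      # -> True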
class BaseResultReporter(object):
"""
Base class for generating test result reports.
"""
__metaclass__ = ABCMeta
def __init__(self, output_file):
"""
Initialize the reporter to write its
report to `output_file` (a file-like object).
"""
self._output_file = output_file
def write_report(self, result_data):
"""
Create a report of test results.
`result_data` is a `ResultData` object.
Writes the report to the `output_file` configured
in the initializer
"""
report_str = self.generate_report(result_data)
self._output_file.write(report_str)
@abstractmethod
def generate_report(self, results_data):
"""
Return a unicode string representation of
`results_data`, a `ResultData` object.
Concrete subclasses implement this.
"""
pass
class ConsoleResultReporter(BaseResultReporter):
"""
Generate a report that can be printed to the console.
"""
REPORT_TEMPLATE_NAME = 'console_report.txt'
def generate_report(self, results_data):
"""
See base class.
"""
context_dict = {
'browser_results': [
{
'browser_name': browser_name,
'test_results': results_data.test_results(browser_name),
'stats': results_data.stats(browser_name)
} for browser_name in results_data.browsers()
],
'stats': results_data.stats(None),
'all_passed': results_data.all_passed()
}
template = TEMPLATE_ENV.get_template(self.REPORT_TEMPLATE_NAME)
return template.render(context_dict)
class XUnitResultReporter(BaseResultReporter):
"""
Generate an XUnit XML report.
"""
REPORT_TEMPLATE_NAME = 'xunit_report.txt'
def generate_report(self, results_data):
"""
See base class.
"""
context_dict = {
'browser_results': [
{
'browser_name': browser_name,
'test_results': results_data.test_results(browser_name),
'stats': results_data.stats(browser_name)
} for browser_name in results_data.browsers()
],
'stats': results_data.stats(None),
'all_passed': results_data.all_passed()
}
template = TEMPLATE_ENV.get_template(self.REPORT_TEMPLATE_NAME)
return template.render(self._sanitize_context_dict(context_dict))
def _sanitize_context_dict(self, context):
"""
Sanitize the strings in the context dict for XML.
"""
for browser_dict in context.get('browser_results', []):
browser_dict['browser_name'] = self._sanitize_attr(browser_dict['browser_name'])
for result_dict in browser_dict.get('test_results', []):
result_dict['test_group'] = self._sanitize_attr(result_dict['test_group'])
result_dict['test_name'] = self._sanitize_attr(result_dict['test_name'])
return context
def _sanitize_attr(self, string):
"""
Replace characters that can cause XML parse errors in attributes.
"""
# Escape <, &, and > to corresponding entity references
# Escape quotes (for attributes)
return escape(string, {'"': '"', "'": """})
| apache-2.0 |
denisff/python-for-android | python-modules/twisted/twisted/internet/threads.py | 49 | 3861 | # Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Extended thread dispatching support.
For basic support see reactor threading API docs.
Maintainer: Itamar Shtull-Trauring
"""
import Queue
from twisted.python import failure
from twisted.internet import defer
def deferToThreadPool(reactor, threadpool, f, *args, **kwargs):
"""
Call the function C{f} using a thread from the given threadpool and return
the result as a Deferred.
This function is only used by client code which is maintaining its own
threadpool. To run a function in the reactor's threadpool, use
C{deferToThread}.
@param reactor: The reactor in whose main thread the Deferred will be
invoked.
@param threadpool: An object which supports the C{callInThreadWithCallback}
method of C{twisted.python.threadpool.ThreadPool}.
@param f: The function to call.
@param *args: positional arguments to pass to f.
@param **kwargs: keyword arguments to pass to f.
@return: A Deferred which fires a callback with the result of f, or an
errback with a L{twisted.python.failure.Failure} if f throws an
exception.
"""
d = defer.Deferred()
def onResult(success, result):
if success:
reactor.callFromThread(d.callback, result)
else:
reactor.callFromThread(d.errback, result)
threadpool.callInThreadWithCallback(onResult, f, *args, **kwargs)
return d
def deferToThread(f, *args, **kwargs):
"""
Run a function in a thread and return the result as a Deferred.
@param f: The function to call.
@param *args: positional arguments to pass to f.
@param **kwargs: keyword arguments to pass to f.
@return: A Deferred which fires a callback with the result of f,
or an errback with a L{twisted.python.failure.Failure} if f throws
an exception.
"""
from twisted.internet import reactor
return deferToThreadPool(reactor, reactor.getThreadPool(),
f, *args, **kwargs)
def _runMultiple(tupleList):
"""
Run a list of functions.
"""
for f, args, kwargs in tupleList:
f(*args, **kwargs)
def callMultipleInThread(tupleList):
"""
Run a list of functions in the same thread.
tupleList should be a list of (function, argsList, kwargsDict) tuples.
"""
from twisted.internet import reactor
reactor.callInThread(_runMultiple, tupleList)
def blockingCallFromThread(reactor, f, *a, **kw):
"""
Run a function in the reactor from a thread, and wait for the result
synchronously. If the function returns a L{Deferred}, wait for its
result and return that.
@param reactor: The L{IReactorThreads} provider which will be used to
schedule the function call.
@param f: the callable to run in the reactor thread
@type f: any callable.
@param a: the arguments to pass to C{f}.
@param kw: the keyword arguments to pass to C{f}.
@return: the result of the L{Deferred} returned by C{f}, or the result
of C{f} if it returns anything other than a L{Deferred}.
@raise: If C{f} raises a synchronous exception,
C{blockingCallFromThread} will raise that exception. If C{f}
returns a L{Deferred} which fires with a L{Failure},
C{blockingCallFromThread} will raise that failure's exception (see
L{Failure.raiseException}).
"""
queue = Queue.Queue()
def _callFromThread():
result = defer.maybeDeferred(f, *a, **kw)
result.addBoth(queue.put)
reactor.callFromThread(_callFromThread)
result = queue.get()
if isinstance(result, failure.Failure):
result.raiseException()
return result
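# Illustrative sketch (not part of the original module): from a worker thread,
# with the reactor running elsewhere, a Deferred-returning call can be driven
# synchronously. ``fetch_page`` is a hypothetical function returning a Deferred:
#
#     body = blockingCallFromThread(reactor, fetch_page, "http://example.com")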
__all__ = ["deferToThread", "deferToThreadPool", "callMultipleInThread",
"blockingCallFromThread"]
| apache-2.0 |
mirzawaqasahmed/avocado-vt | examples/tests/ls_disk.py | 35 | 1224 | """
Shows all existing disk partitions.
This test requires test-provider to be qemu.
Try this test without config, then put ls_disk.cfg into $tests/cfg/ directory
and see the difference.
Additionally you might put ls_disk_v2.cfg into $tests/cfg/ directory and
execute ls_disk_v2 test (which also uses this script!) and watch for even
bigger differences.
:difficulty: advanced
:copyright: 2014 Red Hat Inc.
"""
import logging
def run(test, params, env):
"""
Logs guest's disk partitions
:param test: QEMU test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
vm = env.get_vm(params["main_vm"])
session = vm.wait_for_login()
output = session.cmd_output("ls /dev/[hsv]d* -1")
logging.info("Guest disks are:\n%s", output)
# Let's get some monitor data
monitor = vm.monitor
# Following two provides different output for HMP and QMP monitors
# output = monitor.cmd("info block", debug=False)
# output = monitor.info("block", debug=False)
# Following command unifies the response no matter which monitor is used
output = monitor.info_block(debug=False)
logging.info("info block:\n%s", output)
| gpl-2.0 |
guke001/QMarkdowner | markdown/util.py | 30 | 4224 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import sys
"""
Python 3 Stuff
=============================================================================
"""
PY3 = sys.version_info[0] == 3
if PY3:
string_type = str
text_type = str
int2str = chr
else:
string_type = basestring
text_type = unicode
int2str = unichr
"""
Constants you might want to modify
-----------------------------------------------------------------------------
"""
BLOCK_LEVEL_ELEMENTS = re.compile("^(p|div|h[1-6]|blockquote|pre|table|dl|ol|ul"
"|script|noscript|form|fieldset|iframe|math"
"|hr|hr/|style|li|dt|dd|thead|tbody"
"|tr|th|td|section|footer|header|group|figure"
"|figcaption|aside|article|canvas|output"
"|progress|video)$", re.IGNORECASE)
# Placeholders
STX = '\u0002' # Use STX ("Start of text") for start-of-placeholder
ETX = '\u0003' # Use ETX ("End of text") for end-of-placeholder
INLINE_PLACEHOLDER_PREFIX = STX+"klzzwxh:"
INLINE_PLACEHOLDER = INLINE_PLACEHOLDER_PREFIX + "%s" + ETX
INLINE_PLACEHOLDER_RE = re.compile(INLINE_PLACEHOLDER % r'([0-9]{4})')
AMP_SUBSTITUTE = STX+"amp"+ETX
"""
Constants you probably do not need to change
-----------------------------------------------------------------------------
"""
RTL_BIDI_RANGES = ( ('\u0590', '\u07FF'),
# Hebrew (0590-05FF), Arabic (0600-06FF),
# Syriac (0700-074F), Arabic supplement (0750-077F),
# Thaana (0780-07BF), Nko (07C0-07FF).
('\u2D30', '\u2D7F'), # Tifinagh
)
# Extensions should use "markdown.util.etree" instead of "etree" (or do `from
# markdown.util import etree`). Do not import it by yourself.
try: # Is the C implemenation of ElementTree available?
import xml.etree.cElementTree as etree
from xml.etree.ElementTree import Comment
# Serializers (including ours) test with non-c Comment
etree.test_comment = Comment
if etree.VERSION < "1.0.5":
raise RuntimeError("cElementTree version 1.0.5 or higher is required.")
except (ImportError, RuntimeError):
# Use the Python implementation of ElementTree?
import xml.etree.ElementTree as etree
if etree.VERSION < "1.1":
raise RuntimeError("ElementTree version 1.1 or higher is required")
"""
AUXILIARY GLOBAL FUNCTIONS
=============================================================================
"""
def isBlockLevel(tag):
"""Check if the tag is a block level HTML tag."""
if isinstance(tag, string_type):
return BLOCK_LEVEL_ELEMENTS.match(tag)
# Some ElementTree tags are not strings, so return False.
return False
"""
MISC AUXILIARY CLASSES
=============================================================================
"""
class AtomicString(text_type):
"""A string which should not be further processed."""
pass
class Processor(object):
def __init__(self, markdown_instance=None):
if markdown_instance:
self.markdown = markdown_instance
class HtmlStash(object):
"""
This class is used for stashing HTML objects that we extract
in the beginning and replace with place-holders.
"""
def __init__ (self):
""" Create a HtmlStash. """
self.html_counter = 0 # for counting inline html segments
self.rawHtmlBlocks=[]
def store(self, html, safe=False):
"""
Saves an HTML segment for later reinsertion. Returns a
placeholder string that needs to be inserted into the
document.
Keyword arguments:
* html: an html segment
* safe: label an html segment as safe for safemode
Returns : a placeholder string
"""
self.rawHtmlBlocks.append((html, safe))
placeholder = self.get_placeholder(self.html_counter)
self.html_counter += 1
return placeholder
def reset(self):
self.html_counter = 0
self.rawHtmlBlocks = []
def get_placeholder(self, key):
return "%swzxhzdk:%d%s" % (STX, key, ETX)
| mit |
jw2100/beginning.github.io | DeepLearning/wuenda/01_NeuralNetworksAndDeepLearning/week2-02.py | 1 | 12615 | import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
%matplotlib inline
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
# Example of a picture
index = 1
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
### START CODE HERE ### (≈ 3 lines of code)
m_train = None
m_test = None
num_px = None
m_train = train_set_x_orig.shape[0]
m_test = test_set_x_orig.shape[0]
num_px = train_set_x_orig.shape[1]
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
train_set_x_flatten = None
test_set_x_flatten = None
train_set_x_flatten = train_set_x_orig.reshape(m_train, -1).T
test_set_x_flatten = test_set_x_orig.reshape(m_test, -1).T
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
# GRADED FUNCTION: sigmoid
def sigmoid(z):
"""
Compute the sigmoid of z
Arguments:
z -- A scalar or numpy array of any size.
Return:
s -- sigmoid(z)
"""
    ### START CODE HERE ### (≈ 1 line of code)
s = None
s = 1.0 / (1+np.exp(-z))
### END CODE HERE ###
return s
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
"""
This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
Argument:
dim -- size of the w vector we want (or number of parameters in this case)
Returns:
w -- initialized vector of shape (dim, 1)
b -- initialized scalar (corresponds to the bias)
"""
    ### START CODE HERE ### (≈ 1 line of code)
w = None
b = None
w = np.zeros((dim, 1))
b = 0
### END CODE HERE ###
assert(w.shape == (dim, 1))
assert(isinstance(b, float) or isinstance(b, int))
return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
"""
Implement the cost function and its gradient for the propagation explained above
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
Return:
cost -- negative log-likelihood cost for logistic regression
dw -- gradient of the loss with respect to w, thus same shape as w
db -- gradient of the loss with respect to b, thus same shape as b
Tips:
- Write your code step by step for the propagation. np.log(), np.dot()
"""
m = X.shape[1]
# FORWARD PROPAGATION (FROM X TO COST)
    ### START CODE HERE ### (≈ 2 lines of code)
A = None # compute activation
cost = None # compute cost
A = sigmoid(np.dot(w.T, X) + b) # 1*m
cost = (-1.0/m) * sum(sum(Y*np.log(A) + (1-Y)*np.log(1-A)))
print(cost.shape)
### END CODE HERE ###
# BACKWARD PROPAGATION (TO FIND GRAD)
    ### START CODE HERE ### (≈ 2 lines of code)
dw = None
db = None
dw = np.dot(X, (A-Y).T) / m
db = sum(sum(A-Y)) / m
### END CODE HERE ###
assert(dw.shape == w.shape)
assert(db.dtype == float)
cost = np.squeeze(cost)
assert(cost.shape == ())
grads = {"dw": dw,"db": db}
return grads, cost
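# Note (illustrative): the cost computed in propagate() is the logistic
# cross-entropy
#     J = -(1/m) * sum_i [ y_i*log(a_i) + (1 - y_i)*log(1 - a_i) ]
# and the gradients are dw = X.(A - Y)^T / m and db = mean(A - Y), which is what
# the optimize() loop below uses for its gradient-descent updates.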
w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
"""
This function optimizes w and b by running a gradient descent algorithm
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- True to print the loss every 100 steps
Returns:
params -- dictionary containing the weights w and bias b
grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
Tips:
You basically need to write down two steps and iterate through them:
1) Calculate the cost and the gradient for the current parameters. Use propagate().
2) Update the parameters using gradient descent rule for w and b.
"""
costs = []
for i in range(num_iterations):
        # Cost and gradient calculation (≈ 1-4 lines of code)
### START CODE HERE ###
grads, cost = propagate(w, b, X, Y)
### END CODE HERE ###
# Retrieve derivatives from grads
dw = grads["dw"]
db = grads["db"]
        # update rule (≈ 2 lines of code)
### START CODE HERE ###
#w = None
#b = None
w = w - learning_rate*dw
b = b - learning_rate*db
### END CODE HERE ###
# Record the costs
if i % 100 == 0:
costs.append(cost)
        # Print the cost every 100 iterations
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
params = {"w": w,
"b": b}
grads = {"dw": dw,
"db": db}
return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
# GRADED FUNCTION: predict
def predict(w, b, X):
'''
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
'''
m = X.shape[1]
Y_prediction = np.zeros((1,m))
w = w.reshape(X.shape[0], 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
    ### START CODE HERE ### (≈ 1 line of code)
A = None
A = sigmoid(np.dot(w.T, X) + b)
### END CODE HERE ###
for i in range(A.shape[1]):
# Convert probabilities A[0,i] to actual predictions p[0,i]
        ### START CODE HERE ### (≈ 4 lines of code)
#pass
#Y_prediction
Y_prediction[0,i] = A[0, i] > 0.5 and 1 or 0
### END CODE HERE ###
assert(Y_prediction.shape == (1, m))
return Y_prediction
w = np.array([[0.1124579],[0.23106775]])
b = -0.3
X = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])
print ("predictions = " + str(predict(w, b, X)))
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
"""
Builds the logistic regression model by calling the function you've implemented previously
Arguments:
X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
print_cost -- Set to true to print the cost every 100 iterations
Returns:
d -- dictionary containing information about the model.
"""
### START CODE HERE ###
    # initialize parameters with zeros (≈ 1 line of code)
w, b = initialize_with_zeros(X_train.shape[0])
    # Gradient descent (≈ 1 line of code)
parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
# Retrieve parameters w and b from dictionary "parameters"
w = parameters["w"]
b = parameters["b"]
    # Predict test/train set examples (≈ 2 lines of code)
Y_prediction_test = predict(w, b, X_test)
Y_prediction_train = predict(w, b, X_train)
### END CODE HERE ###
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train" : Y_prediction_train,
"w" : w,
"b" : b,
"learning_rate" : learning_rate,
"num_iterations": num_iterations}
return d
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
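# Compare training with several learning rates: a rate that is too large can overshoot
# and diverge, while one that is too small converges slowly.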
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "my_image.jpg" # change this to the name of your image file
## END CODE HERE ##
# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.") | gpl-3.0 |
aashish24/tangelo | data-processing/charitynet-fixup.py | 1 | 2153 | import sys
import pymongo
geonames = pymongo.Connection("mongo")["xdata"]["geonames"]
donors = pymongo.Connection("mongo")["xdata"]["charitynet.normalized.donors"]
pr = 0
city_country_state = 0
alternate_city_country_state = 0
not_found = 0
count = 0
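# Geocode each donor by falling back through three geonames lookups:
# (1) Puerto Rico, where the state code doubles as the geonames country code,
# (2) an exact city + US state match, and (3) an alternate-name match.
# Every query prefers the most populous matching place.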
for donor in donors.find():
if (count > 0 and count % 1000 == 0):
sys.stderr.write("%d\n" % count)
count = count + 1
country = "US"
state = donor["state"]
city = donor["city"]
lat = 0
lng = 0
found = False
if not found and state == "PR":
for d in geonames.find({"name": city, "country_code": state,
"feature_class": "P"}, sort=[("population", -1)],
fields=["latitude", "longitude"]):
lat = d["latitude"]
lng = d["longitude"]
pr = pr + 1
found = True
break
if not found:
query = {"name": city, "country_code": country,
"feature_class": "P",
"admin1_code": state}
for d in geonames.find(query, sort=[("population", -1)],
fields=["latitude", "longitude"]):
lat = d["latitude"]
lng = d["longitude"]
city_country_state = city_country_state + 1
found = True
break
if not found:
for d in geonames.find({"alternate": city, "country_code": country,
"feature_class": "P",
"admin1_code": state}, sort=[("population", -1)],
fields=["latitude", "longitude"]):
lat = d["latitude"]
lng = d["longitude"]
alternate_city_country_state = alternate_city_country_state + 1
found = True
break
if not found:
not_found = not_found + 1
sys.stderr.write("not found: \"%s\" \"%s\"\n" % (city, state))
else:
donor["loc"] = [lng, lat]
donors.save(donor)
sys.stderr.write("== %d pr\n" % pr)
sys.stderr.write("== %d city_country_state\n" % city_country_state)
sys.stderr.write("== %d alternate_city_country_state\n" % alternate_city_country_state)
sys.stderr.write("== %d not_found\n" % not_found)
| apache-2.0 |
joymarquis/mscc | projects/swtec/utils/fwdl/lib-python/pexpect-4.0.1/tests/test_ansi.py | 19 | 11822 | #!/usr/bin/env python
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <[email protected]>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
from pexpect import ANSI
import unittest
from . import PexpectTestCase
import sys
PY3 = (sys.version_info[0] >= 3)
write_target = 'I\'ve got a ferret sticking up my nose. \n' +\
'(He\'s got a ferret sticking up his nose.) \n' +\
'How it got there I can\'t tell \n' +\
'But now it\'s there it hurts like hell \n' +\
'And what is more it radically affects my sense of smell. \n' +\
'(His sense of smell.) '
write_text = 'I\'ve got a ferret sticking up my nose.\n' + \
'(He\'s got a ferret sticking up his nose.)\n' + \
'How it got there I can\'t tell\n' + \
'But now it\'s there it hurts like hell\n' + \
'And what is more it radically affects my sense of smell.\n' + \
'(His sense of smell.)\n' + \
'I can see a bare-bottomed mandril.\n' + \
'(Slyly eyeing his other nostril.)\n' + \
'If it jumps inside there too I really don\'t know what to do\n' + \
'I\'ll be the proud posessor of a kind of nasal zoo.\n' + \
'(A nasal zoo.)\n' + \
'I\'ve got a ferret sticking up my nose.\n' + \
'(And what is worst of all it constantly explodes.)\n' + \
'"Ferrets don\'t explode," you say\n' + \
'But it happened nine times yesterday\n' + \
'And I should know for each time I was standing in the way.\n' + \
'I\'ve got a ferret sticking up my nose.\n' + \
'(He\'s got a ferret sticking up his nose.)\n' + \
'How it got there I can\'t tell\n' + \
'But now it\'s there it hurts like hell\n' + \
'And what is more it radically affects my sense of smell.\n' + \
'(His sense of smell.)'
tetris_target=' XX XXXX XX \n' +\
' XXXXXX XXXXXXXX XX \n' +\
' XXXXXX XXXXXXXX XX \n' +\
' XX XX XX XXXX XX \n' +\
' XXXXXX XXXX XXXX XX \n' +\
' XXXXXXXXXX XXXX XX \n' +\
' XX XXXXXX XX XX \n' +\
' XXXXXX XX XX \n' +\
' XXXX XXXXXX XX XX \n' +\
' XXXXXX XXXX XX XX \n' +\
' XX XX XXXX XX XX \n' +\
' XX XX XX XX XX \n' +\
' XX XX XXXX XXXX XX \n' +\
' XXXXXXXX XXXX XXXX XX \n' +\
' XXXXXXXXXXXXXX XXXXXXXX \n' +\
' XX XXXXXXXX XX XX \n' +\
' XXXXXXXXXXXXXX XX XX \n' +\
' XX XXXX XXXXXX XX \n' +\
' XXXXXX XXXXXXXX \n' +\
' XXXXXXXXXX XX XX \n' +\
' XXXXXXXXXXXXXXXXXXXXXXXX \n' +\
' \n' +\
' J->LEFT K->ROTATE L->RIGHT SPACE->DROP P->PAUSE Q->QUIT \n' +\
' '
torture_target='+--------------------------------------------------------------------------------+\n' +\
'|a`opqrs` This is the `srqpo`a |\n' +\
'|VT100 series Torture Test Demonstration. |\n' +\
'|VT100 series Torture Test Demonstration. |\n' +\
'|This is a normal line __________________________________________________y_ |\n' +\
'|This is a bold line (normal unless the Advanced Video Option is installed) |\n' +\
'|This line is underlined _ " " " " " " _y_ |\n' +\
'|This is a blinking line _ " " " " " " _y_ |\n' +\
'|This is inverse video _ (underlined if no AVO and cursor is underline) _y_ |\n' +\
'|Normal gjpqy Underline Blink Underline+Blink gjpqy |\n' +\
'|Bold gjpqy Underline Blink Underline+Blink gjpqy |\n' +\
'|Inverse Underline Blink Underline+Blink |\n' +\
'|Bold+Inverse Underline Blink Underline+Blink |\n' +\
'|This is double width |\n' +\
'|This is double height |\n' +\
'|This is double height |\n' +\
'|_ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ioy |\n' +\
'|_ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ioy |\n' +\
'|_ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ioy |\n' +\
'|`abcdefghijklmnopqrstuvwxyz{|}~ lqwqk |\n' +\
'|`abcdefghijklmnopqrstuvwxyz{|}~ tqnqu |\n' +\
'|`abcdefghijklmnopqrstuvwxyz{|}~ tqnqu |\n' +\
'|`abcdefghijklmnopqrstuvwxyz{|}~ mqvqj |\n' +\
'| This test created by Joe Smith, 8-May-85 |\n' +\
'| |\n' +\
'+--------------------------------------------------------------------------------+\n'
class ansiTestCase (PexpectTestCase.PexpectTestCase):
def test_write (self):
s = ANSI.ANSI (6,65)
s.fill('.')
s.cursor_home()
for c in write_text:
s.write (c)
assert str(s) == write_target
def test_torturet (self):
s = ANSI.ANSI (24,80)
with open('torturet.vt') as f:
sample_text = f.read()
for c in sample_text:
s.process (c)
assert s.pretty() == torture_target, 'processed: \n' + s.pretty() + '\nexpected:\n' + torture_target
def test_tetris (self):
s = ANSI.ANSI (24,80)
with open('tetris.data') as f:
tetris_text = f.read()
for c in tetris_text:
s.process (c)
assert str(s) == tetris_target
def test_lines(self):
s = ANSI.ANSI(5, 5)
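# Exercise line wrapping ('a'*6 overflows the 5-column screen), backspace (\b)
# overwrite and carriage-return (\r) overwrite handling.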
s.write('a'*6 + '\n')
s.write('ab\bcd\n')
s.write('ab\rcd\n')
assert str(s) == ('aaaaa\n'
'a \n'
'acd \n'
'cd \n'
' ')
def test_number_x(self):
"""Test the FSM state used to handle more than 2 numeric parameters."""
class TestANSI(ANSI.ANSI):
captured_memory = None
def do_sgr(self, fsm):
assert self.captured_memory is None
self.captured_memory = fsm.memory
s = TestANSI(1, 20)
s.write('\x1b[0;1;32;45mtest')
assert str(s) == ('test ')
assert s.captured_memory is not None
assert s.captured_memory == [s, '0', '1', '32', '45']
def test_fsm_memory(self):
"""Test the FSM stack/memory does not have numbers left on it
after some sequences with numbers are passed in."""
s = ANSI.ANSI(1, 20)
s.write('\x1b[0;1;2;3m\x1b[4;5;6;7q\x1b[?8h\x1b[?9ltest')
assert str(s) == ('test ')
assert s.state.memory == [s]
def test_utf8_bytes(self):
"""Test that when bytes are passed in containing UTF-8 encoded
characters, where the encoding of each character consists of
multiple bytes, the characters are correctly decoded.
Incremental decoding is also tested."""
s = ANSI.ANSI(2, 10, encoding='utf-8')
# This is the UTF-8 encoding of the UCS character "HOURGLASS"
# followed by the UTF-8 encoding of the UCS character
# "KEYBOARD". These characters can't be encoded in cp437 or
# latin-1. The "KEYBOARD" character is split into two
# separate writes.
s.write(b'\xe2\x8c\x9b')
s.write(b'\xe2\x8c')
s.write(b'\xa8')
if PY3:
assert str(s) == u'\u231b\u2328 \n '
else:
assert unicode(s) == u'\u231b\u2328 \n '
assert str(s) == b'\xe2\x8c\x9b\xe2\x8c\xa8 \n '
assert s.dump() == u'\u231b\u2328 '
assert s.pretty() == u'+----------+\n|\u231b\u2328 |\n| |\n+----------+\n'
assert s.get_abs(1, 1) == u'\u231b'
assert s.get_region(1, 1, 1, 5) == [u'\u231b\u2328 ']
def test_unicode(self):
"""Test passing in of a unicode string."""
s = ANSI.ANSI(2, 10, encoding="utf-8")
s.write(u'\u231b\u2328')
if PY3:
assert str(s) == u'\u231b\u2328 \n '
else:
assert unicode(s) == u'\u231b\u2328 \n '
assert str(s) == b'\xe2\x8c\x9b\xe2\x8c\xa8 \n '
assert s.dump() == u'\u231b\u2328 '
assert s.pretty() == u'+----------+\n|\u231b\u2328 |\n| |\n+----------+\n'
assert s.get_abs(1, 1) == u'\u231b'
assert s.get_region(1, 1, 1, 5) == [u'\u231b\u2328 ']
def test_decode_error(self):
"""Test that default handling of decode errors replaces the
invalid characters."""
s = ANSI.ANSI(2, 10, encoding="ascii")
s.write(b'\xff') # a non-ASCII character
# In unicode, the non-ASCII character is replaced with
# REPLACEMENT CHARACTER.
if PY3:
assert str(s) == u'\ufffd \n '
else:
assert unicode(s) == u'\ufffd \n '
assert str(s) == b'? \n '
assert s.dump() == u'\ufffd '
assert s.pretty() == u'+----------+\n|\ufffd |\n| |\n+----------+\n'
assert s.get_abs(1, 1) == u'\ufffd'
assert s.get_region(1, 1, 1, 5) == [u'\ufffd ']
if __name__ == '__main__':
unittest.main()
suite = unittest.makeSuite(ansiTestCase,'test')
| gpl-3.0 |
grimmjow8/ansible | lib/ansible/playbook/conditional.py | 5 | 10147 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import re
from jinja2.compiler import generate
from jinja2.exceptions import UndefinedError
from ansible.compat.six import text_type
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
from ansible.template.safe_eval import safe_eval
from ansible.module_utils._text import to_native
DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')
LOOKUP_REGEX = re.compile(r'lookup\s*\(')
VALID_VAR_REGEX = re.compile("^[_A-Za-z][_a-zA-Z0-9]*$")
class Conditional:
'''
This is a mix-in class, to be used with Base to allow the object
to be run conditionally when a condition is met or skipped.
'''
_when = FieldAttribute(isa='list', default=[])
def __init__(self, loader=None):
# when used directly, this class needs a loader, but we want to
# make sure we don't trample on the existing one if this class
# is used as a mix-in with a playbook base class
if not hasattr(self, '_loader'):
if loader is None:
raise AnsibleError("a loader must be specified when using Conditional() directly")
else:
self._loader = loader
super(Conditional, self).__init__()
def _validate_when(self, attr, name, value):
if not isinstance(value, list):
setattr(self, name, [ value ])
def _get_attr_when(self):
'''
Override for the 'tags' getattr fetcher, used from Base.
'''
when = self._attributes['when']
if when is None:
when = []
if hasattr(self, '_get_parent_attribute'):
when = self._get_parent_attribute('when', extend=True, prepend=True)
return when
def extract_defined_undefined(self, conditional):
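# Scan the conditional for every "<var> is [not] defined/undefined" style test
# and collect the (variable, logic, state) groups matched by DEFINED_REGEX.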
results = []
cond = conditional
m = DEFINED_REGEX.search(cond)
while m:
results.append(m.groups())
cond = cond[m.end():]
m = DEFINED_REGEX.search(cond)
return results
def evaluate_conditional(self, templar, all_vars):
'''
Loops through the conditionals set on this object, returning
False if any of them evaluate as such.
'''
# since this is a mix-in, it may not have an underlying datastructure
# associated with it, so we pull it out now in case we need it for
# error reporting below
ds = None
if hasattr(self, '_ds'):
ds = getattr(self, '_ds')
try:
# this allows for direct boolean assignments to conditionals "when: False"
if isinstance(self.when, bool):
return self.when
for conditional in self.when:
if not self._check_conditional(conditional, templar, all_vars):
return False
except Exception as e:
raise AnsibleError(
"The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)), obj=ds
)
return True
def _check_conditional(self, conditional, templar, all_vars):
'''
This method does the low-level evaluation of each conditional
set on this object, using jinja2 to wrap the conditionals for
evaluation.
'''
original = conditional
if conditional is None or conditional == '':
return True
# pull the "bare" var out, which allows for nested conditionals
# and things like:
# - assert:
# that:
# - item
# with_items:
# - 1 == 1
if conditional in all_vars and VALID_VAR_REGEX.match(conditional):
conditional = all_vars[conditional]
# make sure the templar is using the variables specified with this method
templar.set_available_variables(variables=all_vars)
try:
# if the conditional is "unsafe", disable lookups
disable_lookups = hasattr(conditional, '__UNSAFE__')
conditional = templar.template(conditional, disable_lookups=disable_lookups)
if not isinstance(conditional, text_type) or conditional == "":
return conditional
# update the lookups flag, as the string returned above may now be unsafe
# and we don't want future templating calls to do unsafe things
disable_lookups |= hasattr(conditional, '__UNSAFE__')
# First, we do some low-level jinja2 parsing involving the AST format of the
# statement to ensure we don't do anything unsafe (using the disable_lookup flag above)
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False, inside_yield=False):
if isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Yield):
inside_yield = True
elif isinstance(node, ast.Str):
if disable_lookups:
if inside_call and node.s.startswith("__"):
# calling things with a dunder is generally bad at this point...
raise AnsibleError(
"Invalid access found in the conditional: '%s'" % conditional
)
elif inside_yield:
# we're inside a yield, so recursively parse and traverse the AST
# of the result to catch forbidden syntax from executing
parsed = ast.parse(node.s, mode='exec')
cnv = CleansingNodeVisitor()
cnv.visit(parsed)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(
child_node,
inside_call=inside_call,
inside_yield=inside_yield
)
try:
e = templar.environment.overlay()
e.filters.update(templar._get_filters())
e.tests.update(templar._get_tests())
res = e._parse(conditional, None, None)
res = generate(res, e, None, None)
parsed = ast.parse(res, mode='exec')
cnv = CleansingNodeVisitor()
cnv.visit(parsed)
except Exception as e:
raise AnsibleError("Invalid conditional detected: %s" % to_native(e))
# and finally we generate and template the presented string and look at the resulting string
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
val = templar.template(presented, disable_lookups=disable_lookups).strip()
if val == "True":
return True
elif val == "False":
return False
else:
raise AnsibleError("unable to evaluate conditional: %s" % original)
except (AnsibleUndefinedVariable, UndefinedError) as e:
# the templating failed, meaning most likely a variable was undefined. If we happened
# to be looking for an undefined variable, return True, otherwise fail
try:
# first we extract the variable name from the error message
var_name = re.compile(r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(str(e)).groups()[0]
# next we extract all defined/undefined tests from the conditional string
def_undef = self.extract_defined_undefined(conditional)
# then we loop through these, comparing the error variable name against
# each def/undef test we found above. If there is a match, we determine
# whether the logic/state mean the variable should exist or not and return
# the corresponding True/False
for (du_var, logic, state) in def_undef:
# when we compare the var names, normalize quotes because something
# like hostvars['foo'] may be tested against hostvars["foo"]
if var_name.replace("'", '"') == du_var.replace("'", '"'):
# the should exist is a xor test between a negation in the logic portion
# against the state (defined or undefined)
should_exist = ('not' in logic) != (state == 'defined')
if should_exist:
return False
else:
return True
# as nothing above matched the failed var name, re-raise here to
# trigger the AnsibleUndefinedVariable exception again below
raise
except Exception as new_e:
raise AnsibleUndefinedVariable(
"error while evaluating conditional (%s): %s" % (original, e)
)
| gpl-3.0 |
devdelay/home-assistant | homeassistant/components/zone.py | 12 | 4624 | """
Support for the definition of zones.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zone/
"""
import logging
from homeassistant.const import (
ATTR_HIDDEN, ATTR_ICON, ATTR_LATITUDE, ATTR_LONGITUDE, CONF_NAME)
from homeassistant.helpers import extract_domain_configs
from homeassistant.helpers.entity import Entity, generate_entity_id
from homeassistant.util.location import distance
from homeassistant.util import convert
DOMAIN = "zone"
ENTITY_ID_FORMAT = 'zone.{}'
ENTITY_ID_HOME = ENTITY_ID_FORMAT.format('home')
STATE = 'zoning'
DEFAULT_NAME = 'Unnamed zone'
ATTR_RADIUS = 'radius'
DEFAULT_RADIUS = 100
ATTR_PASSIVE = 'passive'
DEFAULT_PASSIVE = False
ICON_HOME = 'mdi:home'
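# Illustrative configuration.yaml entry; the keys mirror the attributes read in setup():
#
# zone:
#   - name: Work
#     latitude: 52.37
#     longitude: 4.89
#     radius: 250
#     icon: mdi:briefcase
#     passive: false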
def active_zone(hass, latitude, longitude, radius=0):
"""Find the active zone for given latitude, longitude."""
# Sort entity IDs so that we are deterministic if equal distance to 2 zones
zones = (hass.states.get(entity_id) for entity_id
in sorted(hass.states.entity_ids(DOMAIN)))
min_dist = None
closest = None
for zone in zones:
if zone.attributes.get(ATTR_PASSIVE):
continue
zone_dist = distance(
latitude, longitude,
zone.attributes[ATTR_LATITUDE], zone.attributes[ATTR_LONGITUDE])
within_zone = zone_dist - radius < zone.attributes[ATTR_RADIUS]
closer_zone = closest is None or zone_dist < min_dist
smaller_zone = (zone_dist == min_dist and
zone.attributes[ATTR_RADIUS] <
closest.attributes[ATTR_RADIUS])
if within_zone and (closer_zone or smaller_zone):
min_dist = zone_dist
closest = zone
return closest
def in_zone(zone, latitude, longitude, radius=0):
"""Test if given latitude, longitude is in given zone."""
zone_dist = distance(
latitude, longitude,
zone.attributes[ATTR_LATITUDE], zone.attributes[ATTR_LONGITUDE])
return zone_dist - radius < zone.attributes[ATTR_RADIUS]
def setup(hass, config):
"""Setup zone."""
entities = set()
for key in extract_domain_configs(config, DOMAIN):
entries = config[key]
if not isinstance(entries, list):
entries = entries,
for entry in entries:
name = entry.get(CONF_NAME, DEFAULT_NAME)
latitude = convert(entry.get(ATTR_LATITUDE), float)
longitude = convert(entry.get(ATTR_LONGITUDE), float)
radius = convert(entry.get(ATTR_RADIUS, DEFAULT_RADIUS), float)
icon = entry.get(ATTR_ICON)
passive = entry.get(ATTR_PASSIVE, DEFAULT_PASSIVE)
if None in (latitude, longitude):
logging.getLogger(__name__).error(
'Each zone needs a latitude and longitude.')
continue
zone = Zone(hass, name, latitude, longitude, radius, icon, passive)
zone.entity_id = generate_entity_id(ENTITY_ID_FORMAT, name,
entities)
zone.update_ha_state()
entities.add(zone.entity_id)
if ENTITY_ID_HOME not in entities:
zone = Zone(hass, hass.config.location_name, hass.config.latitude,
hass.config.longitude, DEFAULT_RADIUS, ICON_HOME, False)
zone.entity_id = ENTITY_ID_HOME
zone.update_ha_state()
return True
class Zone(Entity):
"""Representation of a Zone."""
# pylint: disable=too-many-arguments, too-many-instance-attributes
def __init__(self, hass, name, latitude, longitude, radius, icon, passive):
"""Initialize the zone."""
self.hass = hass
self._name = name
self._latitude = latitude
self._longitude = longitude
self._radius = radius
self._icon = icon
self._passive = passive
@property
def name(self):
"""Return the name of the zone."""
return self._name
@property
def state(self):
"""Return the state property really does nothing for a zone."""
return STATE
@property
def icon(self):
"""Return the icon if any."""
return self._icon
@property
def state_attributes(self):
"""Return the state attributes of the zone."""
data = {
ATTR_HIDDEN: True,
ATTR_LATITUDE: self._latitude,
ATTR_LONGITUDE: self._longitude,
ATTR_RADIUS: self._radius,
}
if self._passive:
data[ATTR_PASSIVE] = self._passive
return data
| mit |
eayunstack/fuel-ostf | fuel_health/tests/smoke/test_neutron_actions.py | 2 | 5679 | # Copyright 2014 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from fuel_health.common.utils.data_utils import rand_name
from fuel_health import neutronmanager
LOG = logging.getLogger(__name__)
class TestNeutron(neutronmanager.NeutronBaseTest):
"""Test suite verifies:
- router creation
- network creation
- subnet creation
- opportunity to attach network to router
- instance creation in created network
- instance network connectivity
"""
def test_check_neutron_objects_creation(self):
"""Check network connectivity from instance via floating IP
Target component: Neutron
Scenario:
1. Create a new security group (if it doesn`t exist yet).
2. Create router
3. Create network
4. Create subnet
5. Uplink subnet to router.
6. Create an instance using the new security group
in created subnet.
7. Create a new floating IP
8. Assign the new floating IP to the instance.
9. Check connectivity to the floating IP using ping command.
10. Check that public IP 8.8.8.8 can be pinged from instance.
11. Disassociate server floating ip.
12. Delete floating ip
13. Delete server.
14. Remove router.
15. Remove subnet
16. Remove network
Duration: 300 s.
Deployment tags: neutron
"""
if not self.config.compute.compute_nodes:
self.skipTest('There are no compute nodes')
self.check_image_exists()
if not self.security_groups:
self.security_groups[self.tenant_id] = self.verify(
25, self._create_security_group, 1,
"Security group can not be created.",
'security group creation',
self.compute_client)
name = rand_name('ost1_test-server-smoke-')
security_groups = [self.security_groups[self.tenant_id].name]
router = self.verify(30, self.create_router, 2,
'Router can not be created', 'Router creation',
name)
network = self.verify(20, self.create_network, 3,
'Network can not be created',
'Network creation', name)
subnet = self.verify(20, self.create_subnet, 4,
'Subnet can not be created',
'Subnet creation', network)
self.verify(20, self.uplink_subnet_to_router, 5,
'Can not uplink subnet to router',
'Uplink subnet to router', router, subnet)
server = self.verify(200, self._create_server, 6,
"Server can not be created.",
"server creation",
self.compute_client, name, security_groups,
net_id=network['id'])
floating_ip = self.verify(
20,
self._create_floating_ip,
7,
"Floating IP can not be created.",
'floating IP creation')
self.verify(20, self._assign_floating_ip_to_instance,
8, "Floating IP can not be assigned.",
'floating IP assignment',
self.compute_client, server, floating_ip)
self.floating_ips.append(floating_ip)
ip_address = floating_ip.ip
LOG.info('ip address is {0}'.format(ip_address))
LOG.debug(ip_address)
self.verify(600, self._check_vm_connectivity, 9,
"VM connectivity doesn`t function properly.",
'VM connectivity checking', ip_address,
30, (9, 60))
self.verify(600, self._check_connectivity_from_vm,
10, ("Connectivity to 8.8.8.8 from the VM doesn`t "
"function properly."),
'public connectivity checking from VM', ip_address,
30, (9, 60))
self.verify(20, self.compute_client.servers.remove_floating_ip,
11, "Floating IP cannot be removed.",
"removing floating IP", server, floating_ip)
self.verify(20, self.compute_client.floating_ips.delete,
12, "Floating IP cannot be deleted.",
"floating IP deletion", floating_ip)
if self.floating_ips:
self.floating_ips.remove(floating_ip)
self.verify(40, self._delete_server, 13,
"Server can not be deleted. ",
"server deletion", server)
self.verify(40, self._remove_router, 14, "Router can not be deleted",
"router deletion", router, [subnet['id']])
self.verify(20, self._remove_subnet, 15, "Subnet can not be deleted",
"Subnet deletion", subnet)
self.verify(20, self._remove_network, 16,
"Network can not be deleted", "Network deletion", network)
| apache-2.0 |