# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for Google Connection classes.
"""
import datetime
import mock
import os
import sys
import unittest
try:
import simplejson as json
except ImportError:
import json
from libcloud.common.google import (
GoogleAuthError,
GoogleAuthType,
GoogleBaseAuthConnection,
GoogleInstalledAppAuthConnection,
GoogleServiceAcctAuthConnection,
GoogleGCEServiceAcctAuthConnection,
GoogleOAuth2Credential,
GoogleBaseConnection,
_utcnow,
_utc_timestamp,
)
from libcloud.test import MockHttp, LibcloudTestCase
from libcloud.utils.py3 import httplib
# Skip some tests if cryptography is unavailable
try:
from cryptography.hazmat.primitives.hashes import SHA256
except ImportError:
SHA256 = None
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
PEM_KEY = os.path.join(SCRIPT_PATH, "fixtures", "google", "pkey.pem")
JSON_KEY = os.path.join(SCRIPT_PATH, "fixtures", "google", "pkey.json")
JSON_KEY_INVALID = os.path.join(SCRIPT_PATH, "fixtures", "google", "pkey_invalid.json")
with open(JSON_KEY, "r") as f:
KEY_STR = json.loads(f.read())["private_key"]
GCE_PARAMS = ("[email protected]", "key")
GCE_PARAMS_PEM_KEY = ("[email protected]", PEM_KEY)
GCE_PARAMS_JSON_KEY = ("[email protected]", JSON_KEY)
GCE_PARAMS_KEY = ("[email protected]", KEY_STR)
GCE_PARAMS_IA = ("client_id", "client_secret")
GCE_PARAMS_IA_2 = ("[email protected]", "client_secret")
GCE_PARAMS_GCE = ("foo", "bar")
# GOOG + 16 alphanumeric chars
GCS_S3_PARAMS_20 = (
"GOOG0123456789ABCXYZ",
# 40 base64 chars
"0102030405060708091011121314151617181920",
)
# GOOG + 20 alphanumeric chars
GCS_S3_PARAMS_24 = (
"GOOGDF5OVRRGU4APFNSTVCXI",
# 40 base64 chars
"0102030405060708091011121314151617181920",
)
# GOOG + 57 alphanumeric chars
GCS_S3_PARAMS_61 = (
"GOOGDF5OVRRGU4APFNSTVCXIRRGU4AP56789ABCX56789ABCXRRGU4APFNSTV",
# 40 base64 chars
"0102030405060708091011121314151617181920",
)
PEM_KEY_FILE = os.path.join(SCRIPT_PATH, "fixtures", "google", "pkey.pem")
PEM_KEY_FILE_INVALID = os.path.join(
SCRIPT_PATH, "fixtures", "google", "pkey_invalid.pem"
)
JSON_KEY_FILE = os.path.join(SCRIPT_PATH, "fixtures", "google", "pkey.json")
with open(JSON_KEY_FILE, "r") as f:
PEM_KEY_STR = json.loads(f.read())["private_key"]
with open(JSON_KEY_FILE, "r") as f:
JSON_KEY_STR = f.read()
JSON_KEY = json.loads(JSON_KEY_STR)
GCE_USERID_EMAIL = "[email protected]"
GCE_PARAMS = (GCE_USERID_EMAIL, "key")
GCE_PARAMS_PEM_KEY_FILE = (GCE_USERID_EMAIL, PEM_KEY_FILE)
GCE_PARAMS_PEM_KEY_FILE_INVALID = (GCE_USERID_EMAIL, PEM_KEY_FILE_INVALID)
GCE_PARAMS_PEM_KEY = (GCE_USERID_EMAIL, PEM_KEY_STR)
GCE_PARAMS_JSON_KEY_FILE = (GCE_USERID_EMAIL, JSON_KEY_FILE)
GCE_PARAMS_JSON_KEY = (GCE_USERID_EMAIL, JSON_KEY)
GCE_PARAMS_JSON_KEY_INVALID = (GCE_USERID_EMAIL, JSON_KEY_INVALID)
GCE_PARAMS_JSON_KEY_STR = (GCE_USERID_EMAIL, JSON_KEY_STR)
GCE_PARAMS_IA = ("client_id", "client_secret")
GCE_PARAMS_GCE = ("foo", "bar")
GCS_S3_PARAMS_20 = (
"GOOG0123456789ABCXYZ", # GOOG + 16 alphanumeric chars
"0102030405060708091011121314151617181920",
) # 40 base64 chars
GCS_S3_PARAMS_24 = (
"GOOGDF5OVRRGU4APFNSTVCXI", # GOOG + 20 alphanumeric chars
"0102030405060708091011121314151617181920",
) # 40 base64 chars
STUB_UTCNOW = _utcnow()
STUB_TOKEN = {"access_token": "tokentoken", "token_type": "Bearer", "expires_in": 3600}
STUB_IA_TOKEN = {
"access_token": "installedapp",
"token_type": "Bearer",
"expires_in": 3600,
"refresh_token": "refreshrefresh",
}
STUB_REFRESH_TOKEN = {
"access_token": "refreshrefresh",
"token_type": "Bearer",
"expires_in": 3600,
}
STUB_TOKEN_FROM_FILE = {
"access_token": "token_from_file",
"token_type": "Bearer",
"expire_time": _utc_timestamp(STUB_UTCNOW + datetime.timedelta(seconds=3600)),
"expires_in": 3600,
}
class MockJsonResponse(object):
def __init__(self, body):
self.object = body
class GoogleTestCase(LibcloudTestCase):
"""
Assists in making Google tests hermetic and deterministic.
Add anything that needs to be mocked here. Create a patcher with the
suffix '_patcher'.
e.g.
_foo_patcher = mock.patch('module.submodule.class.foo', ...)
Patchers are started at setUpClass and stopped at tearDownClass.
Ideally, you should make a note in the thing being mocked, for clarity.
"""
PATCHER_SUFFIX = "_patcher"
_utcnow_patcher = mock.patch(
"libcloud.common.google._utcnow", return_value=STUB_UTCNOW
)
_authtype_is_gce_patcher = mock.patch(
"libcloud.common.google.GoogleAuthType._is_gce", return_value=False
)
_read_token_file_patcher = mock.patch(
"libcloud.common.google.GoogleOAuth2Credential._get_token_from_file",
return_value=STUB_TOKEN_FROM_FILE,
)
_write_token_file_patcher = mock.patch(
"libcloud.common.google.GoogleOAuth2Credential._write_token_to_file"
)
_ia_get_code_patcher = mock.patch(
"libcloud.common.google.GoogleInstalledAppAuthConnection.get_code",
return_value=1234,
)
@classmethod
def setUpClass(cls):
super(GoogleTestCase, cls).setUpClass()
for patcher in [a for a in dir(cls) if a.endswith(cls.PATCHER_SUFFIX)]:
getattr(cls, patcher).start()
@classmethod
def tearDownClass(cls):
super(GoogleTestCase, cls).tearDownClass()
for patcher in [a for a in dir(cls) if a.endswith(cls.PATCHER_SUFFIX)]:
getattr(cls, patcher).stop()
class GoogleBaseAuthConnectionTest(GoogleTestCase):
"""
Tests for GoogleBaseAuthConnection
"""
def setUp(self):
GoogleBaseAuthConnection.conn_class = GoogleAuthMockHttp
self.mock_scopes = ["foo", "bar"]
kwargs = {"scopes": self.mock_scopes}
self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS, **kwargs)
def test_scopes(self):
self.assertEqual(self.conn.scopes, "foo bar")
def test_add_default_headers(self):
old_headers = {}
expected_headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Host": "accounts.google.com",
}
new_headers = self.conn.add_default_headers(old_headers)
self.assertEqual(new_headers, expected_headers)
def test_token_request(self):
request_body = {
"code": "asdf",
"client_id": self.conn.user_id,
"client_secret": self.conn.key,
"redirect_uri": self.conn.redirect_uri,
"grant_type": "authorization_code",
}
new_token = self.conn._token_request(request_body)
self.assertEqual(new_token["access_token"], STUB_IA_TOKEN["access_token"])
exp = STUB_UTCNOW + datetime.timedelta(seconds=STUB_IA_TOKEN["expires_in"])
self.assertEqual(new_token["expire_time"], _utc_timestamp(exp))
class GoogleInstalledAppAuthConnectionTest(GoogleTestCase):
"""
Tests for GoogleInstalledAppAuthConnection
"""
def setUp(self):
GoogleInstalledAppAuthConnection.conn_class = GoogleAuthMockHttp
self.mock_scopes = ["https://www.googleapis.com/auth/foo"]
kwargs = {"scopes": self.mock_scopes}
self.conn = GoogleInstalledAppAuthConnection(*GCE_PARAMS, **kwargs)
def test_refresh_token(self):
# This token info doesn't have a refresh token, so a new token will be
# requested
token_info1 = {
"access_token": "tokentoken",
"token_type": "Bearer",
"expires_in": 3600,
}
new_token1 = self.conn.refresh_token(token_info1)
self.assertEqual(new_token1["access_token"], STUB_IA_TOKEN["access_token"])
# This token info has a refresh token, so it will be able to be
# refreshed.
token_info2 = {
"access_token": "tokentoken",
"token_type": "Bearer",
"expires_in": 3600,
"refresh_token": "refreshrefresh",
}
new_token2 = self.conn.refresh_token(token_info2)
self.assertEqual(new_token2["access_token"], STUB_REFRESH_TOKEN["access_token"])
# Both sets should have refresh info
self.assertTrue("refresh_token" in new_token1)
self.assertTrue("refresh_token" in new_token2)
class GoogleAuthTypeTest(GoogleTestCase):
def test_guess(self):
self.assertEqual(GoogleAuthType.guess_type(GCE_PARAMS_IA[0]), GoogleAuthType.IA)
with mock.patch.object(GoogleAuthType, "_is_gce", return_value=True):
# Since _is_gce currently depends on the environment, not on
# parameters, other auths should override GCE. It does not make
# sense for IA auth to happen on GCE, which is why it's left out.
self.assertEqual(
GoogleAuthType.guess_type(GCE_PARAMS[0]), GoogleAuthType.SA
)
self.assertEqual(
GoogleAuthType.guess_type(GCS_S3_PARAMS_20[0]), GoogleAuthType.GCS_S3
)
self.assertEqual(
GoogleAuthType.guess_type(GCS_S3_PARAMS_24[0]), GoogleAuthType.GCS_S3
)
self.assertEqual(
GoogleAuthType.guess_type(GCS_S3_PARAMS_61[0]), GoogleAuthType.GCS_S3
)
self.assertEqual(
GoogleAuthType.guess_type(GCE_PARAMS_GCE[0]), GoogleAuthType.GCE
)
def test_guess_gce_metadata_server_not_called_for_ia(self):
# Verify that we don't try to contact GCE metadata server in case IA
# credentials are used
with mock.patch.object(GoogleAuthType, "_is_gce", return_value=False):
self.assertEqual(GoogleAuthType._is_gce.call_count, 0)
self.assertEqual(
GoogleAuthType.guess_type(GCE_PARAMS_IA_2[0]), GoogleAuthType.IA
)
self.assertEqual(GoogleAuthType._is_gce.call_count, 0)
class GoogleOAuth2CredentialTest(GoogleTestCase):
def test_init_oauth2(self):
kwargs = {"auth_type": GoogleAuthType.IA}
cred = GoogleOAuth2Credential(*GCE_PARAMS, **kwargs)
# If there is a viable token file, this gets used first
self.assertEqual(cred.token, STUB_TOKEN_FROM_FILE)
# No token file, get a new token. Check that it gets written to file.
with mock.patch.object(
GoogleOAuth2Credential, "_get_token_from_file", return_value=None
):
cred = GoogleOAuth2Credential(*GCE_PARAMS, **kwargs)
expected = STUB_IA_TOKEN
expected["expire_time"] = cred.token["expire_time"]
self.assertEqual(cred.token, expected)
cred._write_token_to_file.assert_called_once_with()
def test_refresh(self):
args = list(GCE_PARAMS) + [GoogleAuthType.GCE]
cred = GoogleOAuth2Credential(*args)
cred._refresh_token = mock.Mock()
# Test getting an unexpired access token.
tomorrow = datetime.datetime.now() + datetime.timedelta(days=1)
cred.token = {
"access_token": "Access Token!",
"expire_time": _utc_timestamp(tomorrow),
}
cred.access_token
self.assertFalse(cred._refresh_token.called)
# Test getting an expired access token.
yesterday = datetime.datetime.now() - datetime.timedelta(days=1)
cred.token = {
"access_token": "Access Token!",
"expire_time": _utc_timestamp(yesterday),
}
cred.access_token
self.assertTrue(cred._refresh_token.called)
def test_auth_connection(self):
# Test a bogus auth type
self.assertRaises(
GoogleAuthError, GoogleOAuth2Credential, *GCE_PARAMS, **{"auth_type": "XX"}
)
# Try to create an OAuth2 credential when dealing with a GCS S3
# interoperability auth type.
self.assertRaises(
GoogleAuthError,
GoogleOAuth2Credential,
*GCE_PARAMS,
**{"auth_type": GoogleAuthType.GCS_S3},
)
kwargs = {}
if SHA256:
kwargs["auth_type"] = GoogleAuthType.SA
cred1 = GoogleOAuth2Credential(*GCE_PARAMS_PEM_KEY_FILE, **kwargs)
self.assertTrue(
isinstance(cred1.oauth2_conn, GoogleServiceAcctAuthConnection)
)
cred1 = GoogleOAuth2Credential(*GCE_PARAMS_JSON_KEY_FILE, **kwargs)
self.assertTrue(
isinstance(cred1.oauth2_conn, GoogleServiceAcctAuthConnection)
)
cred1 = GoogleOAuth2Credential(*GCE_PARAMS_PEM_KEY, **kwargs)
self.assertTrue(
isinstance(cred1.oauth2_conn, GoogleServiceAcctAuthConnection)
)
cred1 = GoogleOAuth2Credential(*GCE_PARAMS_JSON_KEY, **kwargs)
self.assertTrue(
isinstance(cred1.oauth2_conn, GoogleServiceAcctAuthConnection)
)
cred1 = GoogleOAuth2Credential(*GCE_PARAMS_KEY, **kwargs)
self.assertTrue(
isinstance(cred1.oauth2_conn, GoogleServiceAcctAuthConnection)
)
kwargs["auth_type"] = GoogleAuthType.SA
cred1 = GoogleOAuth2Credential(*GCE_PARAMS_JSON_KEY_STR, **kwargs)
self.assertTrue(
isinstance(cred1.oauth2_conn, GoogleServiceAcctAuthConnection)
)
self.assertRaises(
GoogleAuthError, GoogleOAuth2Credential, *GCE_PARAMS, **kwargs
)
# Invalid pem key
kwargs["auth_type"] = GoogleAuthType.SA
expected_msg = "Unable to decode provided PEM key:"
self.assertRaisesRegex(
GoogleAuthError,
expected_msg,
GoogleOAuth2Credential,
*GCE_PARAMS_PEM_KEY_FILE_INVALID,
**kwargs,
)
kwargs["auth_type"] = GoogleAuthType.SA
expected_msg = "Unable to decode provided PEM key:"
self.assertRaisesRegex(
GoogleAuthError,
expected_msg,
GoogleOAuth2Credential,
*GCE_PARAMS_JSON_KEY_INVALID,
**kwargs,
)
kwargs["auth_type"] = GoogleAuthType.IA
cred2 = GoogleOAuth2Credential(*GCE_PARAMS_IA, **kwargs)
self.assertTrue(isinstance(cred2.oauth2_conn, GoogleInstalledAppAuthConnection))
kwargs["auth_type"] = GoogleAuthType.GCE
cred3 = GoogleOAuth2Credential(*GCE_PARAMS_GCE, **kwargs)
self.assertTrue(
isinstance(cred3.oauth2_conn, GoogleGCEServiceAcctAuthConnection)
)
class GoogleBaseConnectionTest(GoogleTestCase):
"""
Tests for GoogleBaseConnection
"""
def setUp(self):
GoogleBaseAuthConnection.conn_class = GoogleAuthMockHttp
self.mock_scopes = ["https://www.googleapis.com/auth/foo"]
kwargs = {"scopes": self.mock_scopes, "auth_type": GoogleAuthType.IA}
self.conn = GoogleBaseConnection(*GCE_PARAMS, **kwargs)
def test_add_default_headers(self):
old_headers = {}
new_expected_headers = {
"Content-Type": "application/json",
"Host": "www.googleapis.com",
}
new_headers = self.conn.add_default_headers(old_headers)
self.assertEqual(new_headers, new_expected_headers)
def test_pre_connect_hook(self):
old_params = {}
old_headers = {}
auth_str = "%s %s" % (
STUB_TOKEN_FROM_FILE["token_type"],
STUB_TOKEN_FROM_FILE["access_token"],
)
new_expected_params = {}
new_expected_headers = {"Authorization": auth_str}
new_params, new_headers = self.conn.pre_connect_hook(old_params, old_headers)
self.assertEqual(new_params, new_expected_params)
self.assertEqual(new_headers, new_expected_headers)
def test_encode_data(self):
data = {"key": "value"}
json_data = '{"key": "value"}'
encoded_data = self.conn.encode_data(data)
self.assertEqual(encoded_data, json_data)
def test_has_completed(self):
body1 = {
"endTime": "2013-06-26T10:05:07.630-07:00",
"id": "3681664092089171723",
"kind": "compute#operation",
"status": "DONE",
"targetId": "16211908079305042870",
}
body2 = {
"endTime": "2013-06-26T10:05:07.630-07:00",
"id": "3681664092089171723",
"kind": "compute#operation",
"status": "RUNNING",
"targetId": "16211908079305042870",
}
response1 = MockJsonResponse(body1)
response2 = MockJsonResponse(body2)
self.assertTrue(self.conn.has_completed(response1))
self.assertFalse(self.conn.has_completed(response2))
def test_get_poll_request_kwargs(self):
body = {
"endTime": "2013-06-26T10:05:07.630-07:00",
"id": "3681664092089171723",
"kind": "compute#operation",
"selfLink": "https://www.googleapis.com/operations-test",
}
response = MockJsonResponse(body)
expected_kwargs = {"action": "https://www.googleapis.com/operations-test"}
kwargs = self.conn.get_poll_request_kwargs(response, None, {})
self.assertEqual(kwargs, expected_kwargs)
def test_morph_action_hook(self):
self.conn.request_path = "/compute/apiver/project/project-name"
action1 = (
"https://www.googleapis.com/compute/apiver/project"
"/project-name/instances"
)
action2 = "/instances"
expected_request = "/compute/apiver/project/project-name/instances"
request1 = self.conn.morph_action_hook(action1)
request2 = self.conn.morph_action_hook(action2)
self.assertEqual(request1, expected_request)
self.assertEqual(request2, expected_request)
class GoogleAuthMockHttp(MockHttp):
"""
Mock HTTP Class for Google Auth Connections.
"""
json_hdr = {"content-type": "application/json; charset=UTF-8"}
def _o_oauth2_token(self, method, url, body, headers):
if "code" in body:
body = json.dumps(STUB_IA_TOKEN)
elif "refresh_token" in body:
body = json.dumps(STUB_REFRESH_TOKEN)
else:
body = json.dumps(STUB_TOKEN)
return (httplib.OK, body, self.json_hdr, httplib.responses[httplib.OK])
if __name__ == "__main__":
sys.exit(unittest.main())
from typing import List, Dict, Set, Iterable, Iterator, Union, Optional
from pathlib import Path
import numpy
from numpy import ndarray
import zlib
import srsly
from thinc.api import NumpyOps
from .doc import Doc
from ..vocab import Vocab
from ..compat import copy_reg
from ..attrs import SPACY, ORTH, intify_attr, IDS
from ..errors import Errors
from ..util import ensure_path, SimpleFrozenList
from ._dict_proxies import SpanGroups
# fmt: off
ALL_ATTRS = ("ORTH", "NORM", "TAG", "HEAD", "DEP", "ENT_IOB", "ENT_TYPE", "ENT_KB_ID", "ENT_ID", "LEMMA", "MORPH", "POS", "SENT_START")
# fmt: on
class DocBin:
"""Pack Doc objects for binary serialization.
The DocBin class lets you efficiently serialize the information from a
collection of Doc objects. You can control which information is serialized
by passing a list of attribute IDs, and optionally also specify whether the
user data is serialized. The DocBin is faster and produces smaller data
sizes than pickle, and allows you to deserialize without executing arbitrary
Python code.
The serialization format is zlib-compressed msgpack, where the msgpack object has
the following structure:
{
"attrs": List[uint64], # e.g. [TAG, HEAD, ENT_IOB, ENT_TYPE]
"tokens": bytes, # Serialized numpy uint64 array with the token data
"spans": List[Dict[str, bytes]], # SpanGroups data for each doc
"spaces": bytes, # Serialized numpy boolean array with spaces data
"lengths": bytes, # Serialized numpy int32 array with the doc lengths
"strings": List[str] # List of unique strings in the token data
"version": str, # DocBin version number
}
Strings for the words, tags, labels, etc. are represented by 64-bit hashes in
the token data, and every string that occurs at least once is passed via the
strings object. This means the storage is more efficient if you pack more
documents together, because you have less duplication in the strings.
A notable downside to this format is that you can't easily extract just one
document from the DocBin.
"""
def __init__(
self,
attrs: Iterable[str] = ALL_ATTRS,
store_user_data: bool = False,
docs: Iterable[Doc] = SimpleFrozenList(),
) -> None:
"""Create a DocBin object to hold serialized annotations.
attrs (Iterable[str]): List of attributes to serialize. 'orth' and
'spacy' are always serialized, so they're not required.
store_user_data (bool): Whether to write the `Doc.user_data` to bytes/file.
docs (Iterable[Doc]): Docs to add.
DOCS: https://spacy.io/api/docbin#init
"""
int_attrs = [intify_attr(attr) for attr in attrs]
if None in int_attrs:
non_valid = [attr for attr in attrs if intify_attr(attr) is None]
raise KeyError(
Errors.E983.format(dict="attrs", key=non_valid, keys=IDS.keys())
) from None
attrs = sorted(int_attrs)
self.version = "0.1"
self.attrs = [attr for attr in attrs if attr != ORTH and attr != SPACY]
self.attrs.insert(0, ORTH) # Ensure ORTH is always attrs[0]
self.tokens: List[ndarray] = []
self.spaces: List[ndarray] = []
self.cats: List[Dict] = []
self.span_groups: List[bytes] = []
self.user_data: List[Optional[bytes]] = []
self.flags: List[Dict] = []
self.strings: Set[str] = set()
self.store_user_data = store_user_data
for doc in docs:
self.add(doc)
def __len__(self) -> int:
"""RETURNS: The number of Doc objects added to the DocBin."""
return len(self.tokens)
def add(self, doc: Doc) -> None:
"""Add a Doc's annotations to the DocBin for serialization.
doc (Doc): The Doc object to add.
DOCS: https://spacy.io/api/docbin#add
"""
array = doc.to_array(self.attrs)
if len(array.shape) == 1:
array = array.reshape((array.shape[0], 1))
self.tokens.append(array)
spaces = doc.to_array(SPACY)
assert array.shape[0] == spaces.shape[0] # this should never happen
spaces = spaces.reshape((spaces.shape[0], 1))
self.spaces.append(numpy.asarray(spaces, dtype=bool))
self.flags.append({"has_unknown_spaces": doc.has_unknown_spaces})
for token in doc:
self.strings.add(token.text)
self.strings.add(token.tag_)
self.strings.add(token.lemma_)
self.strings.add(token.norm_)
self.strings.add(str(token.morph))
self.strings.add(token.dep_)
self.strings.add(token.ent_type_)
self.strings.add(token.ent_kb_id_)
self.strings.add(token.ent_id_)
self.cats.append(doc.cats)
if self.store_user_data:
self.user_data.append(srsly.msgpack_dumps(doc.user_data))
self.span_groups.append(doc.spans.to_bytes())
for key, group in doc.spans.items():
for span in group:
self.strings.add(span.label_)
def get_docs(self, vocab: Vocab) -> Iterator[Doc]:
"""Recover Doc objects from the annotations, using the given vocab.
Note that the user data of each doc will be read (if available) and returned,
regardless of the setting of 'self.store_user_data'.
vocab (Vocab): The shared vocab.
YIELDS (Doc): The Doc objects.
DOCS: https://spacy.io/api/docbin#get_docs
"""
for string in self.strings:
vocab[string]
orth_col = self.attrs.index(ORTH)
for i in range(len(self.tokens)):
flags = self.flags[i]
tokens = self.tokens[i]
spaces: Optional[ndarray] = self.spaces[i]
if flags.get("has_unknown_spaces"):
spaces = None
doc = Doc(vocab, words=tokens[:, orth_col], spaces=spaces) # type: ignore
doc = doc.from_array(self.attrs, tokens) # type: ignore
doc.cats = self.cats[i]
if self.span_groups[i] != SpanGroups._EMPTY_BYTES:
doc.spans.from_bytes(self.span_groups[i])
else:
doc.spans.clear()
if i < len(self.user_data) and self.user_data[i] is not None:
user_data = srsly.msgpack_loads(self.user_data[i], use_list=False)
doc.user_data.update(user_data)
yield doc
def merge(self, other: "DocBin") -> None:
"""Extend the annotations of this DocBin with the annotations from
another. Will raise an error if the pre-defined attrs of the two
DocBins don't match, or if they differ in whether or not to store
user data.
other (DocBin): The DocBin to merge into the current bin.
DOCS: https://spacy.io/api/docbin#merge
"""
if self.attrs != other.attrs:
raise ValueError(
Errors.E166.format(param="attrs", current=self.attrs, other=other.attrs)
)
if self.store_user_data != other.store_user_data:
raise ValueError(
Errors.E166.format(
param="store_user_data",
current=self.store_user_data,
other=other.store_user_data,
)
)
self.tokens.extend(other.tokens)
self.spaces.extend(other.spaces)
self.strings.update(other.strings)
self.cats.extend(other.cats)
self.span_groups.extend(other.span_groups)
self.flags.extend(other.flags)
self.user_data.extend(other.user_data)
def to_bytes(self) -> bytes:
"""Serialize the DocBin's annotations to a bytestring.
RETURNS (bytes): The serialized DocBin.
DOCS: https://spacy.io/api/docbin#to_bytes
"""
for tokens in self.tokens:
assert len(tokens.shape) == 2, tokens.shape # this should never happen
lengths = [len(tokens) for tokens in self.tokens]
tokens = numpy.vstack(self.tokens) if self.tokens else numpy.asarray([])
spaces = numpy.vstack(self.spaces) if self.spaces else numpy.asarray([])
msg = {
"version": self.version,
"attrs": self.attrs,
"tokens": tokens.tobytes("C"),
"spaces": spaces.tobytes("C"),
"lengths": numpy.asarray(lengths, dtype="int32").tobytes("C"),
"strings": list(sorted(self.strings)),
"cats": self.cats,
"flags": self.flags,
"span_groups": self.span_groups,
}
if self.store_user_data:
msg["user_data"] = self.user_data
return zlib.compress(srsly.msgpack_dumps(msg))
def from_bytes(self, bytes_data: bytes) -> "DocBin":
"""Deserialize the DocBin's annotations from a bytestring.
bytes_data (bytes): The data to load from.
RETURNS (DocBin): The loaded DocBin.
DOCS: https://spacy.io/api/docbin#from_bytes
"""
try:
msg = srsly.msgpack_loads(zlib.decompress(bytes_data))
except zlib.error:
raise ValueError(Errors.E1014)
self.attrs = msg["attrs"]
self.strings = set(msg["strings"])
lengths = numpy.frombuffer(msg["lengths"], dtype="int32")
flat_spaces = numpy.frombuffer(msg["spaces"], dtype=bool)
flat_tokens = numpy.frombuffer(msg["tokens"], dtype="uint64")
shape = (flat_tokens.size // len(self.attrs), len(self.attrs))
flat_tokens = flat_tokens.reshape(shape)
flat_spaces = flat_spaces.reshape((flat_spaces.size, 1))
self.tokens = NumpyOps().unflatten(flat_tokens, lengths)
self.spaces = NumpyOps().unflatten(flat_spaces, lengths)
self.cats = msg["cats"]
self.span_groups = msg.get("span_groups", [b"" for _ in lengths])
self.flags = msg.get("flags", [{} for _ in lengths])
if "user_data" in msg:
self.user_data = list(msg["user_data"])
else:
self.user_data = [None] * len(self)
for tokens in self.tokens:
assert len(tokens.shape) == 2, tokens.shape # this should never happen
return self
def to_disk(self, path: Union[str, Path]) -> None:
"""Save the DocBin to a file (typically called .spacy).
path (str / Path): The file path.
DOCS: https://spacy.io/api/docbin#to_disk
"""
path = ensure_path(path)
with path.open("wb") as file_:
try:
file_.write(self.to_bytes())
except ValueError:
raise ValueError(Errors.E870)
def from_disk(self, path: Union[str, Path]) -> "DocBin":
"""Load the DocBin from a file (typically called .spacy).
path (str / Path): The file path.
RETURNS (DocBin): The loaded DocBin.
DOCS: https://spacy.io/api/docbin#to_disk
"""
path = ensure_path(path)
with path.open("rb") as file_:
self.from_bytes(file_.read())
return self
def merge_bins(bins):
merged = None
for byte_string in bins:
if byte_string is not None:
doc_bin = DocBin(store_user_data=True).from_bytes(byte_string)
if merged is None:
merged = doc_bin
else:
merged.merge(doc_bin)
if merged is not None:
return merged.to_bytes()
else:
return b""
def pickle_bin(doc_bin):
return (unpickle_bin, (doc_bin.to_bytes(),))
def unpickle_bin(byte_string):
return DocBin().from_bytes(byte_string)
copy_reg.pickle(DocBin, pickle_bin, unpickle_bin)
# Compatibility, as we had named it this previously.
Binder = DocBin
__all__ = ["DocBin"]
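# Hedged usage sketch (not part of the original module): round-trips two Docs through
# DocBin, exercising add(), to_bytes(), from_bytes() and get_docs(). The helper name and
# the blank "en" pipeline are illustrative assumptions; it requires spaCy to be installed
# and is never called at import time.
def _docbin_roundtrip_example():
    import spacy  # local import so the sketch has no module-level side effects

    nlp = spacy.blank("en")
    doc_bin = DocBin(attrs=["ORTH", "TAG"], store_user_data=False)
    doc_bin.add(nlp("Hello world"))
    doc_bin.add(nlp("Another document"))
    data = doc_bin.to_bytes()  # zlib-compressed msgpack payload
    restored = list(DocBin().from_bytes(data).get_docs(nlp.vocab))
    assert len(restored) == 2
    return restored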
import math
import numpy
import random
"""Helper classes."""
"""Begin data input functions"""
def read_ibrl_data(data_file):
"""Reads IBRL data from file and returns dict mapping
temp./humidity sensor data to the node that collected them
:param data_file: string representing path to ibrl dataset
:return: dictionary mapping sensor node to list of tuples containing sensor data
"""
with open(data_file, 'r') as fp:
row_count = 0
bad_count = 0
measurements = {}
for line in fp:
row_count = row_count + 1
line = line.strip() # remove edge whitespace
tokens = line.split(',') # segregate each section
try:
if len(tokens) != 5: # dump incomplete sensor readings
bad_count = bad_count + 1
elif tokens[3] in measurements: # if sensor id is in the sensor dict
# append new temp/humidity tuple
measurements[tokens[3]].append((float(tokens[0]), float(tokens[1])))
else:
# else create a new entry in measurements and add its respective sensor data
measurements[tokens[3]] = [(float(tokens[0]), float(tokens[1]))]
except Exception as e:
raise e
print "Total rows: %s" % row_count
print "Total incomplete rows: %s" % bad_count
return measurements
"""Begin data transformation functions"""
def randomize_readings(dictionary):
"""For each list mapped to a sensor, randomize the tuples within and returns the resulting dictionary
:param dictionary: Dictionary of sensors whose lists will be shuffled
:return: Dictionary mapping sensors to randomized lists of temp. and humidity readings
"""
for sensor in dictionary:
random.shuffle(dictionary[sensor])
return dictionary
def generate_differences(dictionary):
"""Generates a dictionary that maps each sensor to a list of length n and containing tuples of temp. and humidity
data to a new list of tuples size n-1 where each tuple is the difference between the original list at index n+1 and
the original list at index n
:param dictionary: dictionary mapping sensors to original tuples of temp. and humidity data.
:return: tuple containing dictionary mapping sensors to new list of tuple differences and a lookup table containing
back references to the raw measurements used to calculate the new measurements in the differences dict
"""
differences = {}
lookup_table = {}
for sensor in dictionary:
for index in range(len(dictionary[sensor]) - 1):
difference_tuple = (
dictionary[sensor][index + 1][0] - dictionary[sensor][index][0],
dictionary[sensor][index + 1][1] - dictionary[sensor][index][1]
)
if sensor in differences:
differences[sensor].append(difference_tuple)
else:
differences[sensor] = [difference_tuple]
return (differences, lookup_table)
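# Worked example for generate_differences: readings [(20.0, 45.0), (21.5, 44.0), (21.0, 46.5)]
# yield the differences [(1.5, -1.0), (-0.5, 2.5)] -- consecutive readings subtracted element-wise.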
def standardize_readings(sensor_readings):
"""Standardize sensor readings
:param dictionary: dictionary of sensors whose readings need to be normalized
:return: dictionary mapping sensors to normalized lists of temp .and humidity readings
"""
for sensor, readings in sensor_readings.items():
# Calculate temperature and humidity means
temp_mean = numpy.mean([reading[0] for reading in readings])
humidity_mean = numpy.mean([reading[1] for reading in readings])
# Calculate temperature and humidity standard deviations
temp_sd = numpy.std([reading[0] for reading in readings])
humidity_sd = numpy.std([reading[1] for reading in readings])
standardized_readings = []
for reading in readings:
standardized_readings.append(
(((reading[0] - temp_mean) / temp_sd),
((reading[1] - humidity_mean) / humidity_sd)))
sensor_readings[sensor] = standardized_readings
return sensor_readings
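# Worked example for standardize_readings: with temp_mean=20.0, temp_sd=2.0,
# humidity_mean=45.0 and humidity_sd=5.0, the reading (22.0, 50.0) becomes (1.0, 1.0),
# i.e. each component is expressed as a z-score for its own sensor.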
"""Begin ellipsoid modeling functions"""
def generate_regional_ellipsoid_parameters(sensors_ellipsoid_parameters):
""" Generates the aggregate ellipsoid parameters from a list of ellipsoids
within a region
:param ellipsoid_parameters: list of dictionaries representing ellipsoid
parameters from individual sensors
:return: dictionary representing the aggregate ellipsoid parameters for a
given region
"""
num_of_ellipsoids = len(sensors_ellipsoid_parameters)
ave_a = sum([sensors_ellipsoid_parameters[ellipsoid]['a'] for ellipsoid in sensors_ellipsoid_parameters]) / num_of_ellipsoids
ave_b = sum([sensors_ellipsoid_parameters[ellipsoid]['b'] for ellipsoid in sensors_ellipsoid_parameters]) / num_of_ellipsoids
ave_theta = sum([sensors_ellipsoid_parameters[ellipsoid]['theta'] for ellipsoid in sensors_ellipsoid_parameters]) / num_of_ellipsoids
return (ave_a, ave_b, ave_theta)
def generate_ellipsoid(sensor_readings, a, b, theta=None):
"""Calculates points representing an ellipsoid for a given a and b
over a set of sensor readings.
:param sensor_readings: list of tuples representing sensor readings
:param a: a parameter used in calculating ellipsoid parameters
:param b: b parameter used in calculating ellipsoid parameters
:param theta: optional hardcoded theta value
:return: ellipsoid_parameters: dictionary containing the parameters used to create the
ellipsoid as well as the resulting modeled boundary points
"""
if theta is None:
theta = calculate_ellipsoid_orientation(sensor_readings)
A = calc_A(a, b, theta) # A is independent of the temperatures
ellipsoid_parameters = {
'a': a,
'b': b,
'theta': theta,
'original_sensor_readings': sensor_readings,
'ellipsoid_points': []
}
for reading in sensor_readings:
#print "Temp: %s" % temp
B = calc_B(a, b, reading[0], theta)
C = calc_C(a, b, reading[0], theta)
hi1 = calc_hi1(A, B, C)
ellipsoid_parameters['ellipsoid_points'].append((reading[0], hi1))
hi2 = calc_hi2(A, B, C)
ellipsoid_parameters['ellipsoid_points'].append((reading[0], hi2))
return ellipsoid_parameters
def calculate_ellipsoid_orientation(sensor_readings):
"""
:param sensor_readings: list of tuples (temp., humidity) representing readings
:return: float, theta of ellipsoid orientation
"""
n = len(sensor_readings)
temperature_readings = [reading[0] for reading in sensor_readings]
humidity_readings = [reading[1] for reading in sensor_readings]
#FIXME(hrybacki): Come up with a better way of breaking these components down
# part_one
part_one_multiplicands = [temperature_readings[i]*humidity_readings[i] for i in range(n)]
part_one_value = n * sum(part_one_multiplicands)
# part two
part_two_value = sum(temperature_readings) * sum(humidity_readings)
# part three
part_three_value = n * sum([math.pow(temp, 2) for temp in temperature_readings])
# part four
part_four_value = math.pow(sum(temperature_readings), 2)
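# Note (added): the ratio below is the ordinary least-squares slope of humidity
# regressed on temperature, (n*sum(t*h) - sum(t)*sum(h)) / (n*sum(t^2) - (sum(t))^2),
# so theta is simply the angle of that best-fit line.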
# tan(theta)
tan_theta = (part_one_value - part_two_value) / (part_three_value - part_four_value)
#return math.atan(tan_theta)
# @FIXME(hrybacki): Dr. Shan wants this to be absolute value. Do we need that? Why?
#return math.fabs(math.atan(tan_theta))
return math.atan(tan_theta)
def calc_A(a, b, theta):
""" Returns the A value used in ellipsoid boundary modeling
:param a: represents the major axis of the ellipsoid
:param b: represents the minor axis of the ellipsoid
:param theta: represents the orientation of the raw measurements
:return: A value used in ellipsoid boundary modeling
"""
A = (math.pow(math.sin(theta), 2) / math.pow(a, 2)) + (math.pow(math.cos(theta), 2) / math.pow(b, 2))
return A
def calc_B(a, b, ti, theta):
""" Returns the B value used in ellipsoid boundary modeling
:param a: represents the major axis of the ellipsoid
:param b: represents the minor axis of the ellipsoid
:param ti: temperature (independent variable) used in calculation
:param theta: represents the orientation of the raw measurements
:return: B value used in ellipsoid boundary modeling
"""
B = ((1/math.pow(a, 2)) - (1/math.pow(b, 2))) * ti * math.sin(2*theta)
return B
def calc_C(a, b, ti, theta):
""" Returns the C value used in ellipsoid boundary modeling
:param a: represents the major axis of the ellipsoid
:param b: represents the minor axis of the ellipsoid
:param ti: temperature (independent variable) used in calculation
:param theta: represents the orientation of the raw measurements
:return: C value used in ellipsoid boundary modeling
"""
C = ((math.pow(ti, 2) * math.pow(math.cos(theta), 2)) / math.pow(a, 2)) + \
((math.pow(ti, 2) * math.pow(math.sin(theta), 2)) / math.pow(b, 2)) - 1
return C
def calc_hi1(A, B, C):
""" Calculates the upper point for a given temp modeling an ellipsoid
:param A: A value used in ellipsoid boundary modeling
:param B: B value used in ellipsoid boundary modeling
:param C: C value used in ellipsoid boundary modeling
:return: Upper point for given temperature
"""
try:
return (-B + math.sqrt(math.pow(B, 2) - (4*A*C))) / (2*A)
except ValueError:
pass  # negative discriminant (no real root): implicitly returns None
def calc_hi2(A, B, C):
""" Calculates the lower point for a given temp modeling an ellipsoid
:param A: A value used in ellipsoid boundary modeling
:param B: B value used in ellipsoid boundary modeling
:param C: C value used in ellipsoid boundary modeling
:return: Lower point for given temperature
"""
try:
return (-B - math.sqrt(math.pow(B, 2) - (4*A*C))) / (2*A)
except ValueError:
pass  # negative discriminant (no real root): implicitly returns None
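# Added note: calc_A/calc_B/calc_C are the coefficients of the quadratic A*h^2 + B*h + C = 0
# obtained by writing the rotated ellipse ((t*cos(theta) + h*sin(theta))/a)^2 +
# ((t*sin(theta) - h*cos(theta))/b)^2 = 1 as a polynomial in h for a fixed temperature t;
# calc_hi1 and calc_hi2 are its two roots. Quick check: for a unit circle (a=b=1, theta=0)
# at t=0 we get A=1, B=0, C=-1, so hi1=+1 and hi2=-1, the top and bottom of the circle.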
"""Begin misc. functions"""
# FIXME: Are we picking the correct values here? Why are the sigmas
# FIXME: 'swapped' in the calculations?
# FIXME: Flip the h's and t's
def calculate_dist(point_one, point_two, sigma_one, sigma_two):
""" Calculates the distance between two points
d(pi, pj) = (h1-h2)^2*sigma_one+(t1-t2)^2*sigma_two + 2*(h1-h2)(t1-t2)*sigma_one*sigma_two
:param point_one: first tuple (temp., humidity)
:param point_two: second tuple (temp., humidity)
:param sigma_one: std. dev. of temperature readings
:param sigma_two: std. dev. of humidity readings
:return: distance
"""
t1, h1 = point_one
t2, h2 = point_two
return math.fabs(math.pow(h1-h2, 2)*sigma_one + math.pow(t1-t2, 2)*sigma_two + 2*(h1-h2)*(t1-t2)*sigma_one*sigma_two)
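# Worked example for calculate_dist: with point_one=(1, 2), point_two=(3, 5) and
# sigma_one=sigma_two=1, the terms are (2-5)^2*1 + (1-3)^2*1 + 2*(2-5)*(1-3)*1*1
# = 9 + 4 + 12, so the distance is 25.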
def calculate_humidity_mean(sensor_readings):
"""Calculates the mean humidity of a given sensors list of readings
:param sensor_readings: list of tuples representing sensor readings (temp., humidity)
:return: mean
"""
return numpy.mean([reading[1] for reading in sensor_readings])
def calculate_temp_mean(sensor_readings):
"""Calculates the mean temp. of a given sensors list of readings
:param sensor_readings: list of tuples representing sensor readings (temp., humidity)
:return: mean
"""
return numpy.mean([reading[0] for reading in sensor_readings])
"""Begin incomplete functions"""
def model_ellipsoid(sensor_data):
"""Generates and returns a three tuple of ellipsoid parameter for a single sensor
:param sensor_data: Dictionary mapping a sensor to it's normalized readings
:return: 3-tuple with ellipsoid parameters
"""
pass
def inverse_transformation(lookup_table, aggregate_ellipsoid):
""" Generates a tuple of two dicts mapping sensors to anomalies and true measurements
:param lookup_table: dictionary mapping difference readings to their raw measurements
:param aggregate_ellipsoid: 3-tuple containing aggregate ellipsoid parameters
:return: tuple containing two dicts, one of true measurements and another of anomalies
each mapped to their original sensors
"""
true_measurements = {}
anomalies = {}
for sensor in lookup_table:
for reading in lookup_table[sensor]:
if is_anomaly(reading, aggregate_ellipsoid):
anomalies.setdefault(sensor, []).append(reading)
else:
true_measurements.setdefault(sensor, []).append(reading)
return (true_measurements, anomalies)
def is_anomaly(reading, aggregate_ellipsoid):
""" Determines if reading is anomaly with respect to an ellipsoid
:param reading: temperature and humidity readings
:param aggregate_ellipsoid: parameters for aggregate ellipsoid
:return: True if an anomaly, else False
"""
pass
from typing import Any, Dict, Optional
from urllib.parse import urljoin
from django.conf import settings
from django.http import HttpRequest
from version import (
LATEST_MAJOR_VERSION,
LATEST_RELEASE_ANNOUNCEMENT,
LATEST_RELEASE_VERSION,
ZULIP_VERSION,
)
from zerver.decorator import get_client_name
from zerver.lib.realm_description import get_realm_rendered_description, get_realm_text_description
from zerver.lib.realm_icon import get_realm_icon_url
from zerver.lib.send_email import FromAddress
from zerver.lib.subdomains import get_subdomain
from zerver.models import Realm, UserProfile, get_realm
from zproject.backends import (
AUTH_BACKEND_NAME_MAP,
any_social_backend_enabled,
auth_enabled_helper,
get_external_method_dicts,
password_auth_enabled,
require_email_format_usernames,
)
def common_context(user: UserProfile) -> Dict[str, Any]:
"""Common context used for things like outgoing emails that don't
have a request.
"""
return {
'realm_uri': user.realm.uri,
'realm_name': user.realm.name,
'root_domain_uri': settings.ROOT_DOMAIN_URI,
'external_uri_scheme': settings.EXTERNAL_URI_SCHEME,
'external_host': settings.EXTERNAL_HOST,
'user_name': user.full_name,
}
def get_realm_from_request(request: HttpRequest) -> Optional[Realm]:
if hasattr(request, "user") and hasattr(request.user, "realm"):
return request.user.realm
if not hasattr(request, "realm"):
# We cache the realm object from this function on the request,
# so that functions that call get_realm_from_request don't
# need to do duplicate queries on the same realm while
# processing a single request.
subdomain = get_subdomain(request)
try:
request.realm = get_realm(subdomain)
except Realm.DoesNotExist:
request.realm = None
return request.realm
def zulip_default_context(request: HttpRequest) -> Dict[str, Any]:
"""Context available to all Zulip Jinja2 templates that have a request
passed in. Designed to provide the long list of variables at the
bottom of this function in a wide range of situations: logged-in
or logged-out, subdomains or not, etc.
The main variable in the below is whether we know what realm the
user is trying to interact with.
"""
realm = get_realm_from_request(request)
if realm is None:
realm_uri = settings.ROOT_DOMAIN_URI
realm_name = None
realm_icon = None
else:
realm_uri = realm.uri
realm_name = realm.name
realm_icon = get_realm_icon_url(realm)
register_link_disabled = settings.REGISTER_LINK_DISABLED
login_link_disabled = settings.LOGIN_LINK_DISABLED
find_team_link_disabled = settings.FIND_TEAM_LINK_DISABLED
allow_search_engine_indexing = False
if (settings.ROOT_DOMAIN_LANDING_PAGE
and get_subdomain(request) == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN):
register_link_disabled = True
login_link_disabled = True
find_team_link_disabled = False
allow_search_engine_indexing = True
apps_page_url = 'https://zulip.com/apps/'
if settings.ZILENCER_ENABLED:
apps_page_url = '/apps/'
apps_page_web = settings.ROOT_DOMAIN_URI + '/accounts/go/'
user_is_authenticated = False
if hasattr(request, 'user') and hasattr(request.user, 'is_authenticated'):
user_is_authenticated = request.user.is_authenticated
if settings.DEVELOPMENT:
secrets_path = "zproject/dev-secrets.conf"
settings_path = "zproject/dev_settings.py"
settings_comments_path = "zproject/prod_settings_template.py"
else:
secrets_path = "/etc/zulip/zulip-secrets.conf"
settings_path = "/etc/zulip/settings.py"
settings_comments_path = "/etc/zulip/settings.py"
# We can't use request.client here because we might not be using
# an auth decorator that sets it, but we can call its helper to
# get the same result.
platform = get_client_name(request)
context = {
'root_domain_landing_page': settings.ROOT_DOMAIN_LANDING_PAGE,
'custom_logo_url': settings.CUSTOM_LOGO_URL,
'register_link_disabled': register_link_disabled,
'login_link_disabled': login_link_disabled,
'terms_of_service': settings.TERMS_OF_SERVICE,
'privacy_policy': settings.PRIVACY_POLICY,
'login_url': settings.HOME_NOT_LOGGED_IN,
'only_sso': settings.ONLY_SSO,
'external_host': settings.EXTERNAL_HOST,
'external_uri_scheme': settings.EXTERNAL_URI_SCHEME,
'realm_uri': realm_uri,
'realm_name': realm_name,
'realm_icon': realm_icon,
'root_domain_uri': settings.ROOT_DOMAIN_URI,
'apps_page_url': apps_page_url,
'apps_page_web': apps_page_web,
'open_realm_creation': settings.OPEN_REALM_CREATION,
'development_environment': settings.DEVELOPMENT,
'support_email': FromAddress.SUPPORT,
'find_team_link_disabled': find_team_link_disabled,
'password_min_length': settings.PASSWORD_MIN_LENGTH,
'password_min_guesses': settings.PASSWORD_MIN_GUESSES,
'jitsi_server_url': settings.JITSI_SERVER_URL,
'zulip_version': ZULIP_VERSION,
'user_is_authenticated': user_is_authenticated,
'settings_path': settings_path,
'secrets_path': secrets_path,
'settings_comments_path': settings_comments_path,
'platform': platform,
'allow_search_engine_indexing': allow_search_engine_indexing,
'landing_page_navbar_message': settings.LANDING_PAGE_NAVBAR_MESSAGE,
}
context['OPEN_GRAPH_URL'] = f'{realm_uri}{request.path}'
if realm is not None and realm.icon_source == realm.ICON_UPLOADED:
context['OPEN_GRAPH_IMAGE'] = urljoin(realm_uri, realm_icon)
return context
def login_context(request: HttpRequest) -> Dict[str, Any]:
realm = get_realm_from_request(request)
if realm is None:
realm_description = None
realm_invite_required = False
else:
realm_description = get_realm_rendered_description(realm)
realm_invite_required = realm.invite_required
context: Dict[str, Any] = {
'realm_invite_required': realm_invite_required,
'realm_description': realm_description,
'require_email_format_usernames': require_email_format_usernames(realm),
'password_auth_enabled': password_auth_enabled(realm),
'any_social_backend_enabled': any_social_backend_enabled(realm),
'two_factor_authentication_enabled': settings.TWO_FACTOR_AUTHENTICATION_ENABLED,
}
if realm is not None and realm.description:
context['OPEN_GRAPH_TITLE'] = realm.name
context['OPEN_GRAPH_DESCRIPTION'] = get_realm_text_description(realm)
# Add the keys for our standard authentication backends.
no_auth_enabled = True
for auth_backend_name in AUTH_BACKEND_NAME_MAP:
name_lower = auth_backend_name.lower()
key = f"{name_lower}_auth_enabled"
is_enabled = auth_enabled_helper([auth_backend_name], realm)
context[key] = is_enabled
if is_enabled:
no_auth_enabled = False
context['external_authentication_methods'] = get_external_method_dicts(realm)
context['no_auth_enabled'] = no_auth_enabled
# Include another copy of external_authentication_methods in page_params for use
# by the desktop client. We expand it with IDs of the <button> elements corresponding
# to the authentication methods.
context['page_params'] = dict(
external_authentication_methods = get_external_method_dicts(realm),
)
for auth_dict in context['page_params']['external_authentication_methods']:
auth_dict['button_id_suffix'] = "auth_button_{}".format(auth_dict['name'])
return context
def latest_info_context() -> Dict[str, str]:
context = {
'latest_release_version': LATEST_RELEASE_VERSION,
'latest_major_version': LATEST_MAJOR_VERSION,
'latest_release_announcement': LATEST_RELEASE_ANNOUNCEMENT,
}
return context
# vim:ts=4:sw=4:et:
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from watchman.integration.lib import WatchmanEdenTestCase
def possible_cookie(name):
return ".watchman-cookie-" in name
class TestEdenSubscribe(WatchmanEdenTestCase.WatchmanEdenTestCase):
def requiresPersistentSession(self):
return True
def test_eden_subscribe(self):
commits = []
def populate(repo):
# We ignore ".hg" here just so some of the tests that list files don't have
# to explicitly filter out the contents of this directory. However, in most
# situations the .hg directory normally should not be ignored.
repo.write_file(".watchmanconfig", '{"ignore_dirs":[".buckd", ".hg"]}')
repo.write_file("hello", "hola\n")
commits.append(repo.commit("initial commit."))
repo.write_file("welcome", "bienvenue\n")
commits.append(repo.commit("commit 2"))
repo.write_file("readme.txt", "important docs\n")
commits.append(repo.commit("commit 3"))
# Switch back to the first commit at the start of the test
repo.update(commits[0])
root = self.makeEdenMount(populate)
repo = self.repoForPath(root)
res = self.watchmanCommand("watch", root)
self.assertEqual("eden", res["watcher"])
self.watchmanCommand(
"subscribe",
root,
"myname",
{"fields": ["name"], "expression": ["not", ["match", ".watchman-cookie*"]]},
)
dat = self.waitForSub("myname", root=root)[0]
self.assertTrue(dat["is_fresh_instance"])
self.assertFileListsEqual(
dat["files"], self.eden_dir_entries + [".eden", ".watchmanconfig", "hello"]
)
self.touchRelative(root, "w0000t")
dat = self.waitForSub("myname", root=root)[0]
self.assertEqual(False, dat["is_fresh_instance"])
self.assertFileListsEqual(dat["files"], ["w0000t"])
# we should not observe .buckd in the subscription results
# because it is listed in the ignore_dirs config section.
os.mkdir(os.path.join(root, ".buckd"))
self.touchRelative(root, "hello")
dat = self.waitForSub("myname", root=root)[0]
self.assertEqual(False, dat["is_fresh_instance"])
self.assertFileListsEqual(dat["files"], ["hello"])
# performing an hg checkout should notify us of the files changed between
# commits
repo.update(commits[2])
dat = self.waitForSub("myname", root=root)[0]
self.assertEqual(False, dat["is_fresh_instance"])
self.assertFileListsEqual(dat["files"], ["welcome", "readme.txt"])
# make another subscription and assert that we get a fresh
# instance result with all the files in it
self.watchmanCommand(
"subscribe",
root,
"othersub",
{"fields": ["name"], "expression": ["not", ["match", ".watchman-cookie*"]]},
)
dat = self.waitForSub("othersub", root=root)[0]
self.assertEqual(True, dat["is_fresh_instance"])
self.assertFileListsEqual(
dat["files"],
self.eden_dir_entries
+ [".eden", ".watchmanconfig", "hello", "w0000t", "welcome", "readme.txt"],
)
def assertWaitForAssertedStates(self, root, states):
def sortStates(states):
"""Deterministically sort the states for comparison.
We sort by name and rely on the sort being stable as the
relative ordering of the potentially multiple queued
entries per name is important to preserve"""
return sorted(states, key=lambda x: x["name"])
states = sortStates(states)
def getStates():
res = self.watchmanCommand("debug-get-asserted-states", root)
return sortStates(res["states"])
self.assertWaitForEqual(states, getStates)
def test_state_enter_leave(self):
"""Check that state-enter and state-leave are basically working.
This is a subset of the tests that are performed in test_subscribe.py;
we only strictly need to check the basic plumbing here and need not
replicate the entire set of tests"""
def populate(repo):
repo.write_file("hello", "hola\n")
repo.commit("initial commit.")
root = self.makeEdenMount(populate)
res = self.watchmanCommand("watch", root)
self.assertEqual("eden", res["watcher"])
result = self.watchmanCommand("debug-get-asserted-states", root)
self.assertEqual([], result["states"])
self.watchmanCommand("state-enter", root, "foo")
self.watchmanCommand("state-enter", root, "bar")
self.assertWaitForAssertedStates(
root,
[
{"name": "bar", "state": "Asserted"},
{"name": "foo", "state": "Asserted"},
],
)
self.watchmanCommand("state-leave", root, "foo")
self.assertWaitForAssertedStates(root, [{"name": "bar", "state": "Asserted"}])
self.watchmanCommand("state-leave", root, "bar")
self.assertWaitForAssertedStates(root, [])
def test_hg_failure(self):
commits = []
def populate(repo):
# We ignore ".hg" here just so some of the tests that list files don't have
# to explicitly filter out the contents of this directory. However, in most
# situations the .hg directory normally should not be ignored.
repo.write_file(".watchmanconfig", '{"ignore_dirs":[".buckd", ".hg"]}')
repo.write_file("hello", "hola\n")
commits.append(repo.commit("initial commit."))
repo.write_file("hello", "aloha\n")
commits.append(repo.commit("commit2."))
repo.write_file("welcome", "bienvenue\n")
commits.append(repo.commit("commit3."))
root = self.makeEdenMount(populate)
repo = self.repoForPath(root)
# Point EDEN_HG_BINARY to /bin/false so that watchman will be unable to
# successfully query hg.
watchman_env = {"EDEN_HG_BINARY": "/bin/false"}
self.eden_watchman.stop()
self.eden_watchman.start(extra_env=watchman_env)
self.client = self.getClient(self.eden_watchman)
res = self.watchmanCommand("watch", root)
self.assertEqual("eden", res["watcher"])
self.watchmanCommand("subscribe", root, "myname", {"fields": ["name"]})
dat = self.waitForSub("myname", root=root)[0]
self.assertTrue(dat["is_fresh_instance"])
self.assertFileListsEqual(
self.eden_dir_entries + [".eden", ".watchmanconfig", "hello", "welcome"],
dat["files"],
)
# Sanity check that the subscription is working
self.touchRelative(root, "w0000t")
dat = self.waitForSub("myname", root=root)[0]
self.assertEqual(False, dat["is_fresh_instance"])
self.assertFileListsEqual(dat["files"], ["w0000t"])
# Do a checkout, and make sure the subscription is updated.
# Since watchman won't be able to run hg to query the list of files changed
# between commits, it will generate a fresh instance result.
repo.update(commits[0])
# hg update may issue multiple subscription changes. Wait for the first one that is a fresh instance.
while True:
dat = self.waitForSub("myname", root=root)[0]
if "is_fresh_instance" not in dat:
print("dat", dat)
if dat["is_fresh_instance"]:
break
self.assertEqual(True, dat["is_fresh_instance"])
self.assertFileListsEqual(
self.eden_dir_entries + [".eden", ".watchmanconfig", "hello", "w0000t"],
[x for x in dat["files"] if not possible_cookie(x)],
)
# Make sure the subscription still delivers normal file update events
self.touchRelative(root, "new2")
while True:
dat = self.waitForSub("myname", root=root)[0]
self.assertEqual(False, dat["is_fresh_instance"])
if "new2" in dat["files"]:
break
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from permabots.models import Bot, TelegramBot, KikBot, MessengerBot
from rest_framework import status
from permabots.views import BotDetail, TelegramBotDetail, KikBotDetail, MessengerBotDetail
import json
from tests.api.base import BaseTestAPI
from unittest import skip
class TestBotAPI(BaseTestAPI):
def assertBot(self, id, created_at, updated_at, name, telegram_bot_token=None, kik_bot_api_key=None, messenger_bot_token=None, bot=None):
if not bot:
bot = self.bot
self.assertEqual(bot.name, name)
if bot.telegram_bot:
self.assertEqual(telegram_bot_token, bot.telegram_bot.token)
if bot.kik_bot:
self.assertEqual(kik_bot_api_key, bot.kik_bot.api_key)
if bot.messenger_bot:
self.assertEqual(messenger_bot_token, bot.messenger_bot.token)
self.assertPermabotsModel(id, created_at, updated_at, bot)
def _bot_list_url(self):
return '%s/bots/' % self.api
def _bot_detail_url(self, bot_pk=None):
if not bot_pk:
bot_pk = self.bot.pk
return '%s/bots/%s/' % (self.api, bot_pk)
def test_get_bots_ok(self):
data = self._test_get_list_ok(self._bot_list_url())
self.assertBot(data[0]['id'], data[0]['created_at'], data[0]['updated_at'], data[0]['name'],
data[0]['telegram_bot']['token'], data[0]['kik_bot']['api_key'], data[0]['messenger_bot']['token'], None)
def test_get_bots_not_auth(self):
self._test_get_list_not_auth(self._bot_list_url())
def test_post_bots_ok(self):
data = self._test_post_list_ok(self._bot_list_url(), Bot, {'name': 'new_name'})
new_bot = Bot.objects.all()[0]
self.assertEqual(new_bot.name, 'new_name')
self.assertBot(data['id'], data['created_at'], data['updated_at'], data['name'], None, None, None, new_bot)
def test_post_bots_not_auth(self):
self._test_post_list_not_auth(self._bot_list_url(), {'name': 'new_name'})
def test_get_bot_ok(self):
data = self._test_get_detail_ok(self._bot_detail_url())
self.assertBot(data['id'], data['created_at'], data['updated_at'], data['name'], data['telegram_bot']['token'], data['kik_bot']['api_key'],
data['messenger_bot']['token'])
def test_get_bot_not_auth(self):
self._test_get_detail_not_auth(self._bot_detail_url())
def test_get_bot_not_found(self):
self._test_get_detail_not_found(self._bot_detail_url(self.unlikely_id))
def test_put_bot_ok(self):
data = self._test_put_detail_ok(self._bot_detail_url(), {'name': 'new_name'}, BotDetail, self.bot.pk)
updated = Bot.objects.get(pk=self.bot.pk)
self.assertEqual(updated.name, 'new_name')
self.assertBot(data['id'], data['created_at'], data['updated_at'], data['name'], data['telegram_bot']['token'],
data['kik_bot']['api_key'], data['messenger_bot']['token'], updated)
def test_put_bot_not_auth(self):
self._test_put_detail_not_auth(self._bot_detail_url(), {'name': 'new_name'}, BotDetail, self.bot.pk)
def test_put_bot_not_found(self):
self._test_put_detail_not_found(self._bot_detail_url(self.unlikely_id), {'name': 'new_name'}, BotDetail, self.unlikely_id)
def test_delete_bot_ok(self):
self._test_delete_detail_ok(self._bot_detail_url(), BotDetail, self.bot.pk)
self.assertEqual(Bot.objects.count(), 0)
def test_delete_bot_not_auth(self):
self._test_delete_detail_not_auth(self._bot_detail_url(), BotDetail, self.bot.pk)
def test_delete_bot_not_found(self):
self._test_delete_detail_not_found(self._bot_detail_url(self.unlikely_id), BotDetail, self.unlikely_id)
class TestTelegramBotAPI(BaseTestAPI):
def assertTelegramBot(self, id, created_at, updated_at, token, enabled, username, first_name, last_name, telegram_bot=None):
if not telegram_bot:
telegram_bot = self.bot.telegram_bot
self.assertEqual(telegram_bot.token, token)
self.assertEqual(telegram_bot.enabled, enabled)
self.assertEqual(telegram_bot.user_api.username, username)
self.assertEqual(telegram_bot.user_api.first_name, first_name)
self.assertEqual(telegram_bot.user_api.last_name, last_name)
self.assertPermabotsModel(id, created_at, updated_at, telegram_bot)
def _telegram_bot_list_url(self, bot_pk=None):
if not bot_pk:
bot_pk = self.bot.pk
return '%s/bots/%s/telegram/' % (self.api, bot_pk)
def _telegram_bot_detail_url(self, bot_pk=None, telegram_bot_pk=None):
if not bot_pk:
bot_pk = self.bot.pk
if not telegram_bot_pk:
telegram_bot_pk = self.bot.telegram_bot.pk
return '%s/bots/%s/telegram/%s/' % (self.api, bot_pk, telegram_bot_pk)
def test_get_telegram_bots_ok(self):
data = self._test_get_list_ok(self._telegram_bot_list_url())
self.assertTelegramBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'], data['info']['username'],
data['info']['first_name'], data['info']['last_name'], None)
def test_get_telegram_bots_not_auth(self):
self._test_get_list_not_auth(self._telegram_bot_list_url())
def test_telegram_post_bots_ok(self):
data = self._test_post_list_ok(self._telegram_bot_list_url(), TelegramBot, {'token': self.mytoken, 'enabled': 'True'})
new_bot = TelegramBot.objects.get(token=self.mytoken)
self.assertEqual(new_bot.token, self.mytoken)
self.assertTrue(new_bot.enabled)
self.assertTelegramBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'],
data['info']['username'], data['info']['first_name'], data['info']['last_name'], new_bot)
def test_telegram_post_bots_with_no_enabled_field(self):
data = self._test_post_list_ok(self._telegram_bot_list_url(), TelegramBot, {'token': self.mytoken})
new_bot = TelegramBot.objects.get(token=self.mytoken)
self.assertEqual(new_bot.token, self.mytoken)
self.assertTrue(new_bot.enabled)
self.assertTelegramBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'],
data['info']['username'], data['info']['first_name'], data['info']['last_name'], new_bot)
def test_post_telegram_bots_token_not_valid(self):
TelegramBot.objects.all().delete()
response = self.client.post(self._telegram_bot_list_url(),
data=json.dumps({"token": 'invalidtoken', "enabled": True}),
content_type='application/json',
HTTP_AUTHORIZATION=self._gen_token(self.bot.owner.auth_token))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('not a valid token', response.data['token'][0])
self.assertEqual(TelegramBot.objects.count(), 0)
def test_post_telegram_bots_token_not_exists_in_telegram(self):
TelegramBot.objects.all().delete()
response = self.client.post(self._telegram_bot_list_url(),
data=json.dumps({"token": self.mytoken + 'a', "enabled": True}),
content_type='application/json',
HTTP_AUTHORIZATION=self._gen_token(self.bot.owner.auth_token))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('Telegram Error', response.data['error'])
self.assertEqual(TelegramBot.objects.count(), 0)
def test_post_telegram_bots_not_auth(self):
self._test_post_list_not_auth(self._telegram_bot_list_url(), {'token': self.mytoken, 'enabled': 'True'})
def test_get_telegram_bot_ok(self):
data = self._test_get_detail_ok(self._telegram_bot_detail_url())
self.assertTelegramBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'], data['info']['username'],
data['info']['first_name'], data['info']['last_name'])
def test_get_telegram_bot_not_auth(self):
self._test_get_detail_not_auth(self._telegram_bot_detail_url())
def test_get_telegram_bot_not_found(self):
self._test_get_detail_not_found(self._telegram_bot_detail_url(telegram_bot_pk=self.unlikely_id))
def test_put_telegram_bot_ok(self):
data = self._test_put_detail_ok(self._telegram_bot_detail_url(), {'enabled': 'False'}, TelegramBotDetail, self.bot.pk, self.bot.telegram_bot.pk)
updated = TelegramBot.objects.get(pk=self.bot.telegram_bot.pk)
self.assertFalse(updated.enabled)
self.assertTelegramBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'],
data['info']['username'], data['info']['first_name'], data['info']['last_name'], updated)
def test_put_telegram_bot_not_auth(self):
self._test_put_detail_not_auth(self._telegram_bot_detail_url(),
{'token': self.mytoken, 'enabled': 'False'}, TelegramBotDetail, self.bot.pk, self.bot.telegram_bot.pk)
def test_put_telegram_bot_not_found(self):
self._test_put_detail_not_found(self._telegram_bot_detail_url(telegram_bot_pk=self.unlikely_id),
{'token': self.mytoken, 'enabled': 'False'}, TelegramBotDetail, self.bot.pk, self.unlikely_id)
def test_delete_telegram_bot_ok(self):
self._test_delete_detail_ok(self._telegram_bot_detail_url(), TelegramBotDetail, self.bot.pk, self.bot.telegram_bot.pk)
self.assertEqual(TelegramBot.objects.count(), 0)
def test_delete_telegram_bot_not_auth(self):
self._test_delete_detail_not_auth(self._telegram_bot_detail_url(), TelegramBotDetail, self.bot.pk, self.bot.telegram_bot.pk)
def test_delete_telegram_bot_not_found(self):
self._test_delete_detail_not_found(self._telegram_bot_detail_url(telegram_bot_pk=self.unlikely_id), TelegramBotDetail, self.bot.pk, self.unlikely_id)
class TestKikBotAPI(BaseTestAPI):
def assertKikBot(self, id, created_at, updated_at, api_key, enabled, username, kik_bot=None):
if not kik_bot:
kik_bot = self.bot.kik_bot
self.assertEqual(kik_bot.api_key, api_key)
self.assertEqual(kik_bot.enabled, enabled)
self.assertEqual(kik_bot.username, username)
self.assertPermabotsModel(id, created_at, updated_at, kik_bot)
def _kik_bot_list_url(self, bot_pk=None):
if not bot_pk:
bot_pk = self.bot.pk
return '%s/bots/%s/kik/' % (self.api, bot_pk)
def _kik_bot_detail_url(self, bot_pk=None, kik_bot_pk=None):
if not bot_pk:
bot_pk = self.bot.pk
if not kik_bot_pk:
kik_bot_pk = self.bot.kik_bot.pk
return '%s/bots/%s/kik/%s/' % (self.api, bot_pk, kik_bot_pk)
def test_get_kik_bots_ok(self):
data = self._test_get_list_ok(self._kik_bot_list_url())
self.assertKikBot(data['id'], data['created_at'], data['updated_at'], data['api_key'], data['enabled'], data['username'], None)
def test_get_kik_bots_not_auth(self):
self._test_get_list_not_auth(self._kik_bot_list_url())
def test_kik_post_bots_ok(self):
data = self._test_post_list_ok(self._kik_bot_list_url(), KikBot, {'api_key': self.my_api_key, 'username': self.my_username, 'enabled': 'True'})
new_bot = KikBot.objects.get(api_key=self.my_api_key, username=self.my_username)
self.assertEqual(new_bot.api_key, self.my_api_key)
self.assertEqual(new_bot.username, self.my_username)
self.assertTrue(new_bot.enabled)
self.assertKikBot(data['id'], data['created_at'], data['updated_at'], data['api_key'], data['enabled'],
data['username'], new_bot)
def test_kik_post_bots_ok_with_no_enabled_field(self):
data = self._test_post_list_ok(self._kik_bot_list_url(), KikBot, {'api_key': self.my_api_key, 'username': self.my_username})
new_bot = KikBot.objects.get(api_key=self.my_api_key, username=self.my_username)
self.assertEqual(new_bot.api_key, self.my_api_key)
self.assertEqual(new_bot.username, self.my_username)
self.assertTrue(new_bot.enabled)
self.assertKikBot(data['id'], data['created_at'], data['updated_at'], data['api_key'], data['enabled'],
data['username'], new_bot)
def test_post_kik_bots_api_not_exists_in_kik(self):
TelegramBot.objects.all().delete()
response = self.client.post(self._kik_bot_list_url(),
data=json.dumps({"api_key": self.my_api_key + 'a', "enabled": True, 'username': self.my_username}),
content_type='application/json',
HTTP_AUTHORIZATION=self._gen_token(self.bot.owner.auth_token))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('Kik Error', response.data['error'])
self.assertEqual(TelegramBot.objects.count(), 0)
def test_post_kik_bots_user_not_exists_in_kik(self):
TelegramBot.objects.all().delete()
response = self.client.post(self._kik_bot_list_url(),
data=json.dumps({"api_key": self.my_api_key, "enabled": True, 'username': self.my_username + 'o'}),
content_type='application/json',
HTTP_AUTHORIZATION=self._gen_token(self.bot.owner.auth_token))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('Kik Error', response.data['error'])
self.assertEqual(TelegramBot.objects.count(), 0)
def test_post_kik_bots_not_auth(self):
self._test_post_list_not_auth(self._kik_bot_list_url(), {'api_key': self.my_api_key, 'enabled': 'True', 'username': self.my_username})
def test_get_kik_bot_ok(self):
data = self._test_get_detail_ok(self._kik_bot_detail_url())
self.assertKikBot(data['id'], data['created_at'], data['updated_at'], data['api_key'], data['enabled'], data['username'],)
def test_get_kik_bot_not_auth(self):
self._test_get_detail_not_auth(self._kik_bot_detail_url())
def test_get_kik_bot_not_found(self):
self._test_get_detail_not_found(self._kik_bot_detail_url(kik_bot_pk=self.unlikely_id))
def test_put_kik_bot_ok(self):
data = self._test_put_detail_ok(self._kik_bot_detail_url(), {'enabled': 'False'}, KikBotDetail, self.bot.pk, self.bot.kik_bot.pk)
updated = KikBot.objects.get(pk=self.bot.kik_bot.pk)
self.assertFalse(updated.enabled)
self.assertKikBot(data['id'], data['created_at'], data['updated_at'], data['api_key'], data['enabled'],
data['username'], updated)
def test_put_kik_bot_not_auth(self):
self._test_put_detail_not_auth(self._kik_bot_detail_url(),
{'api_key': self.my_api_key, 'username': self.my_username, 'enabled': 'False'},
KikBotDetail, self.bot.pk, self.bot.kik_bot.pk)
def test_put_kik_bot_not_found(self):
self._test_put_detail_not_found(self._kik_bot_detail_url(kik_bot_pk=self.unlikely_id),
{'api_key': self.my_api_key, 'username': self.my_username, 'enabled': 'False'},
KikBotDetail, self.bot.pk, self.unlikely_id)
def test_delete_kik_bot_ok(self):
self._test_delete_detail_ok(self._kik_bot_detail_url(), KikBotDetail, self.bot.pk, self.bot.kik_bot.pk)
self.assertEqual(KikBot.objects.count(), 0)
def test_delete_kik_bot_not_auth(self):
self._test_delete_detail_not_auth(self._kik_bot_detail_url(), KikBotDetail, self.bot.pk, self.bot.kik_bot.pk)
def test_delete_kik_bot_not_found(self):
self._test_delete_detail_not_found(self._kik_bot_detail_url(kik_bot_pk=self.unlikely_id), KikBotDetail, self.bot.pk, self.unlikely_id)
class TestMessengerBotAPI(BaseTestAPI):
def assertMessengerBot(self, id, created_at, updated_at, token, enabled, messenger_bot=None):
if not messenger_bot:
messenger_bot = self.bot.messenger_bot
self.assertEqual(messenger_bot.token, token)
self.assertEqual(messenger_bot.enabled, enabled)
self.assertPermabotsModel(id, created_at, updated_at, messenger_bot)
def _messenger_bot_list_url(self, bot_pk=None):
if not bot_pk:
bot_pk = self.bot.pk
return '%s/bots/%s/messenger/' % (self.api, bot_pk)
def _messenger_bot_detail_url(self, bot_pk=None, messenger_bot_pk=None):
if not bot_pk:
bot_pk = self.bot.pk
if not messenger_bot_pk:
messenger_bot_pk = self.bot.messenger_bot.pk
return '%s/bots/%s/messenger/%s/' % (self.api, bot_pk, messenger_bot_pk)
def test_get_messenger_bots_ok(self):
data = self._test_get_list_ok(self._messenger_bot_list_url())
self.assertMessengerBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'], None)
def test_get_messenger_bots_not_auth(self):
self._test_get_list_not_auth(self._messenger_bot_list_url())
def test_messenger_post_bots_ok(self):
data = self._test_post_list_ok(self._messenger_bot_list_url(), MessengerBot, {'token': self.my_messenger_token, 'enabled': 'True'})
new_bot = MessengerBot.objects.get(token=self.my_messenger_token)
self.assertEqual(new_bot.token, self.my_messenger_token)
self.assertTrue(new_bot.enabled)
self.assertMessengerBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'], new_bot)
def test_messenger_post_bots_ok_with_no_enabled_field(self):
data = self._test_post_list_ok(self._messenger_bot_list_url(), MessengerBot, {'token': self.my_messenger_token})
new_bot = MessengerBot.objects.get(token=self.my_messenger_token)
self.assertEqual(new_bot.token, self.my_messenger_token)
self.assertTrue(new_bot.enabled)
self.assertMessengerBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'], new_bot)
@skip("wait for real token")
def test_post_messenger_bots_token_not_exists_in_messenger(self):
TelegramBot.objects.all().delete()
response = self.client.post(self._messenger_bot_list_url(),
data=json.dumps({"token": self.my_messenger_token + 'a', "enabled": True}),
content_type='application/json',
HTTP_AUTHORIZATION=self._gen_token(self.bot.owner.auth_token))
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIn('Messenger Error', response.data['error'])
self.assertEqual(TelegramBot.objects.count(), 0)
def test_post_messenger_bots_not_auth(self):
self._test_post_list_not_auth(self._messenger_bot_list_url(), {'token': self.my_messenger_token, 'enabled': 'True'})
def test_get_messenger_bot_ok(self):
data = self._test_get_detail_ok(self._messenger_bot_detail_url())
self.assertMessengerBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'])
def test_get_messenger_bot_not_auth(self):
self._test_get_detail_not_auth(self._messenger_bot_detail_url())
def test_get_messenger_bot_not_found(self):
self._test_get_detail_not_found(self._messenger_bot_detail_url(messenger_bot_pk=self.unlikely_id))
def test_put_messenger_bot_ok(self):
data = self._test_put_detail_ok(self._messenger_bot_detail_url(), {'enabled': 'False'}, MessengerBotDetail, self.bot.pk, self.bot.messenger_bot.pk)
updated = MessengerBot.objects.get(pk=self.bot.messenger_bot.pk)
self.assertFalse(updated.enabled)
self.assertMessengerBot(data['id'], data['created_at'], data['updated_at'], data['token'], data['enabled'], updated)
def test_put_messenger_bot_not_auth(self):
self._test_put_detail_not_auth(self._messenger_bot_detail_url(),
{'token': self.my_api_key, 'enabled': 'False'},
MessengerBotDetail, self.bot.pk, self.bot.messenger_bot.pk)
def test_put_messenger_bot_not_found(self):
self._test_put_detail_not_found(self._messenger_bot_detail_url(messenger_bot_pk=self.unlikely_id),
{'token': self.my_api_key, 'enabled': 'False'},
MessengerBotDetail, self.bot.pk, self.unlikely_id)
def test_delete_messenger_bot_ok(self):
self._test_delete_detail_ok(self._messenger_bot_detail_url(), MessengerBotDetail, self.bot.pk, self.bot.messenger_bot.pk)
self.assertEqual(MessengerBot.objects.count(), 0)
def test_delete_messenger_bot_not_auth(self):
self._test_delete_detail_not_auth(self._messenger_bot_detail_url(), MessengerBotDetail, self.bot.pk, self.bot.messenger_bot.pk)
def test_delete_messenger_bot_not_found(self):
self._test_delete_detail_not_found(self._messenger_bot_detail_url(messenger_bot_pk=self.unlikely_id), MessengerBotDetail, self.bot.pk, self.unlikely_id)
|
|
#!/usr/bin/env python
import sys
import numpy
import h5py
import logging
logger = logging.getLogger(__file__)
import kaldi_io, kaldi_argparse
from fuel.datasets.hdf5 import H5PYDataset
def get_parser(datasets={}):
parser = kaldi_argparse.KaldiArgumentParser(description="""Exchange data between Kaldi and Fuel's hdf5 dataset""", )
parser.add_argument("h5file")
subparsers = parser.add_subparsers(help="action")
parser_add_data = subparsers.add_parser('add', help="add data to the hdf5 file from a Kaldi archive")
parser_add_data.add_argument("rxfilename")
parser_add_data.add_argument("sourcename")
parser_add_data.add_argument("--type", default="BaseFloatMatrix",
help="Kaldi reader type, the value type can be later changed via the --transform argument")
parser_add_data.add_argument("--transform", default=None,
help="string whose eval()uation should produce a lambda function to porcess elements")
parser_add_data.add_argument("--applymap", default=None,
help="path to file which converts data into numeric values. If a transform function is given, the data is first transformtd, then mapped")
parser_add_data.set_defaults(func=add_data)
parser_add_raw_text = subparsers.add_parser('add_raw_text', help="add raw text to the hdf5 file from a Kaldi text file")
parser_add_raw_text.add_argument("textfilename")
parser_add_raw_text.add_argument("sourcename")
parser_add_raw_text.set_defaults(func=add_raw_text, transform=None, applymap=None)
parser_readdata = subparsers.add_parser('read_raw_text', help="read data from the hdf5 as text")
parser_readdata.add_argument("sourcename")
parser_readdata.add_argument("wxfilename")
parser_readdata.add_argument("--subset", default=None,
help="Which subset to read, by default read all data")
parser_readdata.set_defaults(func=read_raw_text)
parser_add_text = subparsers.add_parser('add_text', help="add text, mapped to numeric values, to the hdf5 file from a Kaldi text file")
parser_add_text.add_argument("--applymap", default=None, required=True,
help="path to file which converts data into numeric values. If a transform function is given, the data is first transformtd, then mapped")
parser_add_text.add_argument("textfilename")
parser_add_text.add_argument("sourcename")
parser_add_text.set_defaults(func=add_text, transform=None, applymap=None)
parser_readdata = subparsers.add_parser('read_text', help="read data from the hdf5 and convert to text")
parser_readdata.add_argument("sourcename")
parser_readdata.add_argument("wxfilename")
parser_readdata.add_argument("--subset", default=None,
help="Which subset to read, by default read all data")
parser_readdata.set_defaults(func=read_text)
parser_readdata = subparsers.add_parser('read', help="read data from the hdf5 into a kaldi archive")
parser_readdata.add_argument("type")
parser_readdata.add_argument("sourcename")
parser_readdata.add_argument("rxfilename")
parser_readdata.add_argument("--subset", default=None,
help="Which subset to read, by default read all data")
parser_readdata.add_argument("--transform", default=None,
help="string whose eval()uation should produce a lambda function to porcess elements")
parser_readdata.set_defaults(func=read_data)
parser_read_symbols = subparsers.add_parser('read_symbols', help="read a symbol table")
parser_read_symbols.add_argument('sourcename')
parser_read_symbols.add_argument('outfilename', default='-',
help="file to which write the extracted symbol table")
parser_read_symbols.set_defaults(func=read_symbols)
parser_adddata = subparsers.add_parser('split', help="Write down the split table.",
description="""
Provide split names along with files whose first column is treated as the list of utterance ids belonging to that split.
Note: this has to be performed after each source addition.
"""
)
parser_adddata.add_argument("sets", nargs="*", help="Subset definitions", default="")
parser_adddata.set_defaults(func=add_sets)
parser.add_standard_arguments()
return parser
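# A hypothetical invocation sketch (the archive, file and source names below
# are illustrative, not fixed by this script):
#
#     parser = get_parser()
#     args = parser.parse_args(['data.h5', 'add', 'ark:feats.ark', 'features'])
#     args.func(args)
#
# which corresponds to a command line such as:
#     <this_script> data.h5 add ark:feats.ark features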
def add_from_iter(args, data_iter, peeked_val):
"""
Add data from the data_iter iterator. Will work for 1D and 2D numpy arrays and strings.
"""
if args.transform is None:
T = lambda x:x
else:
T = eval(args.transform)
if args.applymap is not None:
with open(args.applymap ,'r') as mf:
value_map = {}
for l in mf:
val, num = l.strip().split()
value_map[val] = int(num)
_oldT = T
T = lambda x: numpy.asarray([value_map[e] for e in _oldT(x)])
with h5py.File(args.h5file, 'a') as h5file:
if 'uttids' in h5file:
has_uttids=True
uttids = h5file['uttids']
else:
has_uttids=False
uttids = h5file.create_dataset("uttids", (0,),
dtype=h5py.special_dtype(vlen=unicode),
maxshape=(None,))
uttids.dims[0].label = 'batch'
if has_uttids:
num_utts = uttids.shape[0]
max_utts = num_utts
else:
num_utts = 0
max_utts = None
peeked_val = T(peeked_val)
if isinstance(peeked_val, numpy.ndarray):
shapes = h5file.create_dataset("{}_shapes".format(args.sourcename), (num_utts,peeked_val.ndim),
dtype='int32',
maxshape=(max_utts,peeked_val.ndim))
shape_labels = h5file.create_dataset("{}_shape_labels".format(args.sourcename), (peeked_val.ndim,),
dtype='S7')
shape_labels[...] = ['frame'.encode('utf8'),
'feature'.encode('utf8')][:peeked_val.ndim]
dataset = h5file.create_dataset(args.sourcename, (num_utts,),
dtype=h5py.special_dtype(vlen=peeked_val.dtype),
maxshape=(max_utts,))
dataset.dims[0].label = 'batch'
dataset.dims.create_scale(shapes, 'shapes')
dataset.dims[0].attach_scale(shapes)
dataset.dims.create_scale(shape_labels, 'shape_labels')
dataset.dims[0].attach_scale(shape_labels)
elif isinstance(peeked_val, (str, unicode)):
dataset = h5file.create_dataset(args.sourcename, (num_utts,),
dtype=h5py.special_dtype(vlen=unicode),
maxshape=(max_utts,))
dataset.dims[0].label = 'batch'
else:
raise Exception('Can only add numpy arrays and strings')
if args.applymap is not None:
value_map_arr = numpy.fromiter(value_map.iteritems(),
dtype=[('key','S{}'.format(max(len(k) for k in value_map.keys()))),
('val','int32')])
dataset.attrs['value_map'] = value_map_arr
for utt_num, (uttid, value) in enumerate(data_iter):
value = T(value)
if dataset.shape[0]<=utt_num:
dataset.resize((utt_num+1,))
if isinstance(value, numpy.ndarray):
if shapes.shape[0]<=utt_num:
shapes.resize((utt_num+1, shapes.shape[1]))
shapes[utt_num,:] = value.shape
dataset[utt_num] = value.ravel()
else:
dataset[utt_num] = value
if has_uttids:
if uttids[utt_num] != uttid:
raise Exception("Warning, read uttid: {}, expected: {}".format(uttid, uttids[utt_num]))
else:
uttids.resize((utt_num+1,))
uttids[utt_num] = uttid
if has_uttids:
if utt_num != uttids.shape[0]-1:
raise Exception("Too few values provided: got {}, expected: {}".format(utt_num+1, uttids.shape[0]))
def read_data(args):
raise NotImplementedError()
def add_data(args):
kaldi_reader = getattr(kaldi_io, "Sequential{}Reader".format(args.type))
with kaldi_reader(args.rxfilename) as data_iter:
return add_from_iter(args, data_iter, data_iter._kaldi_value())
def add_raw_text(args):
if args.textfilename == '-':
tf = sys.stdin
else:
tf = open(args.textfilename)
try:
line_iter = iter(tf)
first_line = next(line_iter)
all_lines = (l.strip().split(None, 1) for g in ([first_line], line_iter) for l in g)
uttid, rest = first_line.strip().split(None, 1)
return add_from_iter(args, all_lines, rest)
finally:
if tf != sys.stdin:
tf.close()
def add_text(args):
if args.textfilename == '-':
tf = sys.stdin
else:
tf = open(args.textfilename)
try:
line_iter = iter(tf)
first_line = next(line_iter)
all_lines = (l.strip().split(None, 1) for g in ([first_line], line_iter) for l in g)
split_lines = ((uttid, r.split()) for (uttid,r) in all_lines)
first_line = first_line.strip().split()
return add_from_iter(args, split_lines, first_line[1:])
finally:
if tf != sys.stdin:
tf.close()
def add_sets(args):
with h5py.File(args.h5file, 'a') as h5file:
sources = []
for dataset in h5file:
if (dataset.endswith('_indices') or dataset.endswith('_shapes') or
dataset.endswith('_shape_labels')):
continue
sources.append(dataset)
uttid2idx = {uttid:idx for (idx,uttid) in enumerate(h5file['uttids']) }
split_dict = {}
for subset in args.sets:
name, uttids_fname = subset.split('=')
idxs = []
with open(uttids_fname) as uf:
for l in uf:
uttid = l.strip().split()[0]
idxs.append(uttid2idx[uttid])
indices_name = '{}_indices'.format(name)
if indices_name in h5file:
del h5file[indices_name]
#
# Note: ideally, we would sort the indices and do:
# h5file[indices_name] = numpy.array(sorted(idxs))
# but this would cause incompatibility with Kaldi, which keeps utterances sorted by uttid!
#
h5file[indices_name] = numpy.array(idxs)
indices_ref = h5file[indices_name].ref
split_dict[name] = {source : (-1, -1, indices_ref) for source in sources}
h5file.attrs['split'] = H5PYDataset.create_split_array(split_dict)
def read_symbols(args):
with h5py.File(args.h5file, 'r') as h5file:
value_map = h5file[args.sourcename].attrs['value_map']
if args.outfilename == '-':
out_file = sys.stdout
else:
out_file=args.outfilename
value_map.sort(order=('val',))
numpy.savetxt(out_file, value_map, fmt="%s %d")
def get_indices(h5file, subset=None):
if subset is None:
return range(h5file['uttids'].shape[0])
else:
return h5file[subset + '_indices']
def read_raw_text(args):
out_file = sys.stdout
h5file = None
try:
if args.wxfilename != '-':
out_file=open(args.wxfilename, 'w')
h5file = h5py.File(args.h5file, 'r')
indices = get_indices(h5file, args.subset)
uttids = h5file['uttids']
data = h5file[args.sourcename]
for idx in indices:
out_file.write("{} {}\n".format(uttids[idx], data[idx]))
finally:
if out_file != sys.stdout:
out_file.close()
if h5file is not None:
h5file.close()
def read_text(args):
h5file = None
out_file = sys.stdout
try:
if args.wxfilename != '-':
out_file=open(args.wxfilename, 'w')
h5file = h5py.File(args.h5file, 'r')
indices = get_indices(h5file, args.subset)
uttids = h5file['uttids']
data = h5file[args.sourcename]
value_map = lambda x: x
if 'value_map' in data.attrs:
_map = dict((v,k) for k,v in data.attrs['value_map'])
value_map = lambda x: _map[x]
for idx in indices:
chars = data[idx]
chars = [value_map(c) for c in chars]
out_file.write("{} {}\n".format(uttids[idx], ' '.join(chars)))
finally:
if out_file != sys.stdout:
out_file.close()
if h5file is not None:
h5file.close()
if __name__=="__main__":
logging.basicConfig(level=logging.INFO)
parser = get_parser()
args = parser.parse_args()
args.func(args)
|
|
"""Python utilities required by Keras."""
from __future__ import absolute_import
import numpy as np
import time
import sys
import six
import marshal
import types as python_types
import inspect
_GLOBAL_CUSTOM_OBJECTS = {}
class CustomObjectScope(object):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
# Example
Consider a custom object `MyObject`
```python
with CustomObjectScope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
"""
def __init__(self, *args):
self.custom_objects = args
self.backup = None
def __enter__(self):
self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
for objects in self.custom_objects:
_GLOBAL_CUSTOM_OBJECTS.update(objects)
return self
def __exit__(self, *args, **kwargs):
_GLOBAL_CUSTOM_OBJECTS.clear()
_GLOBAL_CUSTOM_OBJECTS.update(self.backup)
def custom_object_scope(*args):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Convenience wrapper for `CustomObjectScope`.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
# Example
Consider a custom object `MyObject`
```python
with custom_object_scope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
# Arguments
*args: Variable length list of dictionaries of name,
class pairs to add to custom objects.
# Returns
Object of type `CustomObjectScope`.
"""
return CustomObjectScope(*args)
def get_custom_objects():
"""Retrieves a live reference to the global dictionary of custom objects.
Updating and clearing custom objects using `custom_object_scope`
is preferred, but `get_custom_objects` can
be used to directly access `_GLOBAL_CUSTOM_OBJECTS`.
# Example
```python
get_custom_objects().clear()
get_custom_objects()['MyObject'] = MyObject
```
# Returns
Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
"""
return _GLOBAL_CUSTOM_OBJECTS
def serialize_keras_object(instance):
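"""Serializes a Keras object to a `{'class_name': ..., 'config': ...}` dict
if it has a `get_config` method, otherwise falls back to its `__name__`;
`None` passes through unchanged."""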
if instance is None:
return None
if hasattr(instance, 'get_config'):
return {
'class_name': instance.__class__.__name__,
'config': instance.get_config()
}
if hasattr(instance, '__name__'):
return instance.__name__
else:
raise ValueError('Cannot serialize', instance)
def deserialize_keras_object(identifier, module_objects=None,
custom_objects=None,
printable_module_name='object'):
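"""Inverse of `serialize_keras_object`: turns a config dict into a class
instance (via `from_config` or the callable itself), or resolves a string
identifier to a function, consulting `custom_objects`,
`_GLOBAL_CUSTOM_OBJECTS` and `module_objects` in that order."""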
if isinstance(identifier, dict):
# In this case we are dealing with a Keras config dictionary.
config = identifier
if 'class_name' not in config or 'config' not in config:
raise ValueError('Improper config format: ' + str(config))
class_name = config['class_name']
if custom_objects and class_name in custom_objects:
cls = custom_objects[class_name]
elif class_name in _GLOBAL_CUSTOM_OBJECTS:
cls = _GLOBAL_CUSTOM_OBJECTS[class_name]
else:
module_objects = module_objects or {}
cls = module_objects.get(class_name)
if cls is None:
raise ValueError('Unknown ' + printable_module_name +
': ' + class_name)
if hasattr(cls, 'from_config'):
arg_spec = inspect.getargspec(cls.from_config)
if 'custom_objects' in arg_spec.args:
custom_objects = custom_objects or {}
return cls.from_config(config['config'],
custom_objects=dict(list(_GLOBAL_CUSTOM_OBJECTS.items()) +
list(custom_objects.items())))
return cls.from_config(config['config'])
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
return cls(**config['config'])
elif isinstance(identifier, six.string_types):
function_name = identifier
if custom_objects and function_name in custom_objects:
fn = custom_objects.get(function_name)
elif function_name in _GLOBAL_CUSTOM_OBJECTS:
fn = _GLOBAL_CUSTOM_OBJECTS[function_name]
else:
fn = module_objects.get(function_name)
if fn is None:
raise ValueError('Unknown ' + printable_module_name +
':' + function_name)
return fn
else:
raise ValueError('Could not interpret serialized ' +
printable_module_name + ': ' + identifier)
def func_dump(func):
"""Serializes a user defined function.
# Arguments
func: the function to serialize.
# Returns
A tuple `(code, defaults, closure)`.
"""
code = marshal.dumps(func.__code__).decode('raw_unicode_escape')
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
# Arguments
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
# Returns
A function object.
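# Example
A hypothetical round trip through `func_dump` (illustrative only):
```python
code, defaults, closure = func_dump(lambda x: x + 1)
f = func_load(code, defaults, closure)
assert f(1) == 2
```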
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
code = marshal.loads(code.encode('raw_unicode_escape'))
if globs is None:
globs = globals()
return python_types.FunctionType(code, globs,
name=code.co_name,
argdefs=defaults,
closure=closure)
class Progbar(object):
"""Displays a progress bar.
# Arguments
target: Total number of steps expected.
interval: Minimum visual progress update interval (in seconds).
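# Example
A minimal usage sketch (illustrative only):
```python
bar = Progbar(target=10)
for step in range(10):
    bar.update(step + 1, values=[('loss', 0.1)])
```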
"""
def __init__(self, target, width=30, verbose=1, interval=0.05):
self.width = width
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.last_update = 0
self.interval = interval
self.total_width = 0
self.seen_so_far = 0
self.verbose = verbose
def update(self, current, values=None, force=False):
"""Updates the progress bar.
# Arguments
current: Index of current step.
values: List of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
force: Whether to force visual progress update.
"""
values = values or []
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (current - self.seen_so_far),
current - self.seen_so_far]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
self.seen_so_far = current
now = time.time()
if self.verbose == 1:
if not force and (now - self.last_update) < self.interval:
return
prev_total_width = self.total_width
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit * (self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s:' % k
if isinstance(self.sum_values[k], list):
avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self.sum_values[k]
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width - self.total_width) * ' ')
sys.stdout.write(info)
sys.stdout.flush()
if current >= self.target:
sys.stdout.write('\n')
if self.verbose == 2:
if current >= self.target:
info = '%ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s:' % k
avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
if avg > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
sys.stdout.write(info + "\n")
self.last_update = now
def add(self, n, values=None):
self.update(self.seen_so_far + n, values)
|
|
""" Basic Clousmesh finctions.
.. role:: strikethrough
This file contains some very basic utility functions that must not
need any import from cloudmesh. That is no statement such as
* :strikethrough:`import cloudmesh`
must occur in the list of import. If functions are needed hat need to
be used they must be decleard without dependencies in::
import cloudmesh.util
The reasonong is that during th einitialization where cloudmesh is not
yet installed, the __init__ function for cloudmesh may include some
configuration files that are not yet present at the tome of the first
instalation.
"""
from __future__ import print_function
from cloudmesh_base.util import path_expand
import inspect
import os
import sys
import uuid
import functools
# import warnings
import string
import random
from cloudmesh_base.util import banner
from cloudmesh_base.util import grep
from cloudmesh_base.locations import config_file
try:
from progress.bar import Bar
except:
try:
os.system("pip install progress")
from progress.bar import Bar
except Exception, e:
print("ERROR: can not install progress")
print(e)
print(70 * "=")
print("please make sure that a virtualenv and pip are installed")
sys.exit()
class PROGRESS(object):
defined = False
bar = None
@classmethod
def set(cls, msg, limit):
if not cls.defined:
cls.bar = Bar(msg, max=limit)
cls.defined = True
@classmethod
def next(cls):
cls.bar.next()
@classmethod
def finish(cls):
cls.bar.finish()
@classmethod
def __del__(cls):
cls.bar.finish()
def cat(filename):
"""prints the contents of a file with the given name.
:param filename: name of the file, which can include ~ and $
environment variables
:type: string
"""
location = path_expand(filename)
banner(filename)
with open(location, 'r') as f:
print(f.read())
def not_implemented():
"""prins simply an error that this is not implemented. This can be
used when you protortype things."""
print("ERROR: not yet implemented")
def check_file_for_tabs(filename, verbose=True):
"""identifies if the file contains tabs and returns True if it
does. It also prints the location of the lines and columns. If
verbose is set to False, the location is not printed.
:param filename: the filename
:rtype: True if there are tabs in the file
"""
file_contains_tabs = False
with file(filename) as f:
lines = f.read().split("\n")
line_no = 1
for line in lines:
if "\t" in line:
file_contains_tabs = True
location = [
i for i in range(len(line)) if line.startswith('\t', i)]
if verbose:
print("Tab found in line", line_no, "and column(s)", location)
line_no = line_no + 1
return file_contains_tabs
def deprecated(func):
'''This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used. Just use @deprecated before
the definition.::
@deprecated
def my_func():
pass
@other_decorators_must_be_before
@deprecated
def my_func():
pass
'''
@functools.wraps(func)
def new_func(*args, **kwargs):
'''
warnings.warn_explicit(
"Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning,
filename=func.func_code.co_filename,
lineno=func.func_code.co_firstlineno + 1
)
'''
print()
print(70 * "-")
print("Warning: Call to deprecated function {}.".format(func.__name__))
print(" filename=", func.func_code.co_filename)
print(" lineno=", func.func_code.co_firstlineno + 1)
print(70 * "-")
return func(*args, **kwargs)
return new_func
def cond_decorator(flag, dec):
"""conditional decorator that is used if the flag is true.
:param flag: the boolean flag
:type flag: boolean
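A hypothetical use (illustrative), where ``DEBUG`` is any boolean flag::
@cond_decorator(DEBUG, deprecated)
def my_func():
    pass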
"""
def decorate(fn):
"""the internal decorator"""
return dec(fn) if flag else fn
return decorate
def status_color(status):
"""returns some predefined color values.
* ACTIVE ::= green
* BUILDING ::= blue
* ERROR ::= red
* default ::= black
:param status: 'ACTIVE', 'BUILDING', 'ERROR'
:rtype: string
"""
if status == 'ACTIVE':
return "green"
if status == 'BUILDING':
return "blue"
if status in ['ERROR']:
return "red"
return "black"
''' ref:
http://stackoverflow.com/questions/2257441/python-random-string-generation-with-upper-case-letters-and-digits
'''
def get_rand_string(size=6, chars=string.ascii_uppercase + string.digits):
"""generates a random string.
:param size: length of the string
:param chars: string of characters to choose from, by default a-zA-Z0-9
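Example (illustrative; output differs on each call)::
get_rand_string(8)   # e.g. 'X3F9K2QA'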
"""
return ''.join(random.choice(chars) for _ in range(size))
def get_unique_name(prefix="", **kargs):
"""Make a UUID without some characters such as '-', '_', ' ', '.'
:param prefix: a prefix added to the UUID
:param **kargs: keyword arguments for additional options
"""
change = ['-', '_', ' ', '.']
id = uuid.uuid1()
text = str(id).replace("-", "")
if 'change' in kargs:
change = kargs['change']
for character in change:
if character in prefix:
prefix = prefix.replace(character, "")
return str(prefix) + text
def address_string(content, labels=False):
"""content is a dict of the form::
{u'private': [{u'version': 4,
u'addr': u'10.35.23.30',
u'OS-EXT-IPS:kind':u'fixed'},
{u'version': 4,
u'addr': u'198.202.120.194',
u'OS-EXT-IPS:kind': u'floating'}]}
it will return::
"fixed: 10.35.23.30, floating: 198.202.120.194'
The 'private' could be any other string indicating the vlan.
E.g., HP_east cloud might return result which is not 'private'.
Not necessarilly vlan102 either.
For now we will assume an arbitry string exists as the name for vlan.
"""
try:
result = ""
vlan = content.keys()[0]
for address in content[vlan]:
if labels:
# 2/11/15 By Mark X. change "kind" which seems wrong to "type"
# address['OS-EXT-IPS:kind'] => address['OS-EXT-IPS:type']
result = result + address['OS-EXT-IPS:type'] + "="
result = result + address['addr']
result = result + ", "
result = result[:-2]
except:
# THIS SEEMS WRONG
# {u'vlan102': [{u'version': 4, u'addr': u'10.1.2.104'}, {
# u'version': 4, u'addr': u'149.165.158.34'}]}
# try:
# position = 0
# for address in content['vlan102']:
# if position == 0:
# kind = "fixed"
# else:
# kind = "floating"
# if labels:
# result = result + kind
# result = result + address['addr']
# result = result + ", "
# position = +1
# result = result[:-2]
# except:
result = content
return result
def dict_uni_to_ascii(d):
'''
convert mongodb document content from unicode to ascii
'''
if isinstance(d, dict):
d1 = {}
for k, v in d.iteritems():
k = k.encode("ascii")
if isinstance(v, dict):
v = dict_uni_to_ascii(v)
elif type(v) is list and v != []:
v1 = []
for item in v:
v1.append(dict_uni_to_ascii(item))
v = v1
elif isinstance(v, unicode):
v = v.encode("ascii")
d1[k] = v
return d1
elif type(d) is list:
if d == []:
return []
else:
d1 = []
for item in d:
d1.append(dict_uni_to_ascii(item))
return d1
else:
try:
return d.encode("ascii")
except:
return d
def _getFromDict(dataDict, mapList):
# ref:
# http://stackoverflow.com/questions/14692690/access-python-nested-dictionary-items-via-a-list-of-keys
return reduce(lambda d, k: d[k], mapList, dataDict)
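# Illustrative example:
#   _getFromDict({'a': {'b': 1}}, ['a', 'b']) == 1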
|
|
"""
A command-line interface for Ravel.
Ravel's CLI provides a user-friendly way to interact with the backend
PostgreSQL database and Mininet.
"""
import cmd
import getpass
import os
import sys
import time
from functools import partial
import ravel.mndeps
import ravel.profiling
from ravel.db import RavelDb, BASE_SQL
from ravel.env import Environment
from ravel.log import logger
from ravel.of import PoxInstance
from ravel.util import Config, resource_file
from ravel.cmdlog import cmdLogger
class RavelConsole(cmd.Cmd):
"Command line interface for Ravel."
prompt = "ravel> "
doc_header = "Commands (type help <topic>):"
def __init__(self, env, quiet=False):
self.env = env
if not quiet:
self.intro = "RavelConsole: interactive console for Ravel.\n" \
"Configuration:\n" + self.env.pprint()
cmd.Cmd.__init__(self)
self.env.set_cli(self)
self.logOn = False
def default(self, line):
"Check loaded applications before raising unknown command error"
# should we execute a script?
if os.path.isfile(line):
self.do_exec(line)
return
if "orch" in self.env.loaded:
auto_orch = self.env.loaded["orch"].console.auto
cmd = line.strip().split()[0]
if cmd in self.env.loaded:
self.env.loaded[cmd].cmd(line[len(cmd):])
if auto_orch:
self.env.loaded["orch"].console.onecmd("run")
else:
print "*** Unknown command:", line
def onecmd(self, line):
"Run command and report execution time for each execution line"
if line:
if self.logOn:
startTime = time.time()
stop = cmd.Cmd.onecmd(self, line)
endTime = time.time()
elapsed = round((endTime - startTime)*1000, 3)
cmdLogger.logline('cmd: '+line)
logger.info("Execution time: {0}ms".format(elapsed))
cmdLogger.logline('start time: {0}'.format(time.asctime(time.localtime(startTime))))
cmdLogger.logline('time span: {0}ms'.format(elapsed))
return stop
else:
return cmd.Cmd.onecmd(self, line)
def emptyline(self):
"Don't repeat the last line when hitting return on empty line"
return
def do_load(self, line):
"""Start one or more applications
Usage: load [app1] [app2] ..."""
apps = line.split()
for app in apps:
if app in self.env.apps:
self.env.load_app(app)
else:
print "Unknown application", app
def do_unload(self, line):
"""Stop one or more applications
Usage: unload [app1] [app2] ..."""
apps = line.split()
for app in apps:
if app in self.env.apps:
self.env.unload_app(app)
else:
print "Unknown application", app
def do_exec(self, line):
"Execute a Ravel script"
if os.path.isdir(line):
print "ravel: {0}: Is a directory".format(line)
return
if not os.path.isfile(line):
print "ravel: {0}: No such file or directory".format(line)
return
with open(line) as f:
for cmd in f.readlines():
cmd = cmd.strip()
if cmd == "" or cmd[0] == "#":
continue
print "{0}{1}".format(RavelConsole.prompt, cmd)
self.onecmd(cmd)
# may need to wait for flows/database changes
time.sleep(0.5)
def do_cmdlogger(self, line):
if str(line).lower() == 'on':
self.logOn = True
logger.info('Cmd logger on.')
elif str(line).lower() == 'off':
self.logOn = False
logger.info('Cmd logger off.')
else:
logger.info("Input 'on' to turn on cmd logger and 'off' to turn it off.")
def do_apps(self, line):
"List available applications and their status"
for app in self.env.apps.values():
shortcut = ""
description = ""
status = "\033[91m[offline]\033[0m"
if app.name in self.env.loaded:
status = "\033[92m[online]\033[0m"
if app.shortcut:
shortcut = " ({0})".format(app.shortcut)
if app.description:
description = ": {0}".format(app.description)
print " {0} {1}{2}{3}".format(status, app.name,
shortcut, description)
def do_profile(self, line):
"""Run command and report detailed execution time.
Note - if no counters are found, try enabling auto-orchestration
with orch auto on"""
if line:
pe = ravel.profiling.ProfiledExecution()
pe.start()
self.onecmd(line)
# wait for straggling counters to report
time.sleep(0.5)
pe.stop()
sys.stdout.write("\n")
pe.print_summary()
def do_reinit(self, line):
"Reinitialize the database, deleting all data except topology"
self.env.db.truncate()
def do_stat(self, line):
"Show running configuration, state"
print self.env.pprint()
def do_time(self, line):
"Run command and report execution time"
elapsed = time.time()
if line:
self.onecmd(line)
elapsed = time.time() - elapsed
print "\nTime: {0}ms".format(round(elapsed * 1000, 3))
def do_watch(self, line):
"""Launch an xterm window to watch database tables in real-time
Usage: watch [table1(,max_rows)] [table2(,max_rows)] ...
Example: watch hosts switches cf,5"""
if not line:
return
args = line.split()
if len(args) == 0:
print "Invalid syntax"
return
cmd, cmdfile = ravel.app.mk_watchcmd(self.env.db, args)
self.env.mkterm(cmd, cmdfile)
def do_EOF(self, line):
"Quit Ravel console"
sys.stdout.write("\n")
return True
def do_exit(self, line):
"Quit Ravel console"
return True
def do_help(self, arg):
"List available commands with 'help' or detailed help with 'help cmd'"
# extend to include loaded apps and their help methods
tokens = arg.split()
if len(tokens) > 0 and tokens[0] in self.env.loaded:
app = self.env.apps[tokens[0]]
if len(tokens) <= 1:
print app.description
app.console.do_help("")
else:
app.console.do_help(" ".join(tokens[1:]))
else:
cmd.Cmd.do_help(self, arg)
def completenames(self, text, *ignored):
"Add loaded application names/shortcuts to cmd name completions"
completions = cmd.Cmd.completenames(self, text, ignored)
apps = self.env.loaded.keys()
if not text:
completions.extend(apps)
else:
completions.extend([d for d in apps if d.startswith(text)])
return completions
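# A hypothetical scripted use of the console (building the Environment is
# elided; it depends on the local Ravel configuration):
#
#     console = RavelConsole(env, quiet=True)
#     console.onecmd("apps")
#     console.onecmd("load orch")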
def RavelCLI(opts):
"""Start a RavelConsole instance given a list of command line options
opts: parsed OptionParser object"""
if opts.custom:
ravel.mndeps.custom(opts.custom)
if opts.topo:
topo = ravel.mndeps.build(opts.topo)
if topo is None:
print "Invalid mininet topology", opts.topo
return
else:
topo = ravel.mndeps.build("empty")
if opts.script is not None and not os.path.isfile(opts.script):
print "{0}: no such script file".format(opts.script)
return
passwd = None
if opts.password:
passwd = getpass.getpass("Enter password: ")
raveldb = ravel.db.RavelDb(opts.db,
opts.user,
ravel.db.BASE_SQL,
passwd,
opts.reconnect)
if opts.noctl:
controller = None
else:
if PoxInstance.is_running():
print "Pox instance is already running. Please shut down " \
"existing controller first (or run ravel.py --clean)."
return
controller = PoxInstance("ravel.controller.poxmgr")
from ravel.network import MininetProvider, EmptyNetProvider
if opts.onlydb:
net = EmptyNetProvider(raveldb, topo)
else:
net = MininetProvider(raveldb, topo, controller)
if net is None:
print "Cannot start network"
env = Environment(raveldb, net, Config.AppDirs, opts)
env.start()
while True:
try:
if opts.script is not None:
RavelConsole(env).do_exec(opts.script)
if opts.exit:
break
RavelConsole(env, quiet=opts.script).cmdloop()
break
except Exception, e:
logger.warning("console crashed: %s", e)
env.stop()
|
|
import os
import sys
import errno
import itertools
import logging
import stat
import threading
from fuse import FuseOSError, Operations
from . import exceptions, utils
from .keys import Key
from .logs import Log
from .views import View
logger = logging.getLogger('basefs.fs')
class ViewToErrno():
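"""Context manager that maps view-layer exceptions raised in its body to
the corresponding FUSE errno errors (EACCES, ENOENT, EEXIST)."""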
def __enter__(self):
return self
def __exit__(self, exc_type, exc, exc_tb):
if exc_type is exceptions.PermissionDenied:
raise FuseOSError(errno.EACCES)
if exc_type is exceptions.DoesNotExist:
raise FuseOSError(errno.ENOENT)
if exc_type is exceptions.Exists:
raise FuseOSError(errno.EEXIST)
class FileSystem(Operations):
def __init__(self, view, serf=None, serf_agent=None, init_function=None):
self.view = view
self.cache = {}
self.dirty = {}
self.loaded = view.log.loaded
self.init_function = init_function
self.serf = serf
self.serf_agent = serf_agent
def __call__(self, op, path, *args):
logger.debug('-> %s %s %s', op, path, repr(args))
ret = '[Unhandled Exception]'
try:
ret = getattr(self, op)(path, *args)
return ret
except OSError as e:
ret = str(e)
raise
finally:
logger.debug('<- %s %s', op, repr(ret))
def init(self, path):
""" threads should start here, otherwise will not run when fuse is backgrounded """
if self.init_function:
self.init_function()
def destroy(self, path):
super().destroy(path)
if self.serf_agent:
self.serf_agent.stop()
def get_node(self, path):
# check if logfile has been modified
if self.loaded != self.view.log.loaded:
logger.debug('-> %s rebuild', path)
self.view.build()
self.loaded = self.view.log.loaded
with ViewToErrno():
node = self.view.get(path)
if node.entry.action == node.entry.DELETE:
raise FuseOSError(errno.ENOENT)
return node
def send(self, node):
if self.serf:
entry = node.entry
logger.debug("Sending entry %s '%s'", entry.hash, entry.name)
self.serf.send(node.entry)
# def access(self, path, mode):
# return super(FileSystem, self).access(path, mode)
# full_path = self._full_path(path)
# if not os.access(full_path, mode):
# raise FuseOSError(errno.EACCES)
# def chmod(self, path, mode):
# full_path = self._full_path(path)
# return os.chmod(full_path, mode)
# def chown(self, path, uid, gid):
# full_path = self._full_path(path)
# return os.chown(full_path, uid, gid)
def getattr(self, path, fh=None):
try:
content = self.cache[path]
except KeyError:
node = self.get_node(path)
has_perm = bool(self.view.get_key(path))
if node.entry.action == node.entry.MKDIR:
mode = stat.S_IFDIR | (0o0750 if has_perm else 0o0550)
else:
mode = stat.S_IFREG | (0o0640 if has_perm else 0o0440)
return {
'st_atime': node.entry.timestamp,
'st_ctime': node.entry.ctime,
'st_gid': os.getgid(),
'st_mode': mode,
'st_mtime': node.entry.timestamp,
'st_nlink': 1,
'st_size': len(node.content),
'st_uid': os.getuid(),
}
else:
import time
return {
'st_atime': time.time(),
'st_ctime': time.time(),
'st_gid': os.getgid(),
'st_mode': stat.S_IFREG | 0o0640,
'st_mtime': time.time(),
'st_nlink': 1,
'st_size': len(content),
'st_uid': os.getuid(),
}
# full_path = self._full_path(path)
# st = os.lstat(full_path)
# return dict((key, getattr(st, key)) for key in ())
def readdir(self, path, fh):
node = self.get_node(path)
entry = node.entry
dirs = ['.', '..']
for d in itertools.chain(dirs, [child.entry.name for child in node.childs if child.entry.action not in (entry.DELETE, entry.GRANT, entry.REVOKE)]):
yield d
# def readlink(self, path):
# pathname = os.readlink(self._full_path(path))
# if pathname.startswith("/"):
# # Path name is absolute, sanitize it.
# return os.path.relpath(pathname, self.root)
# else:
# return pathname
def mknod(self, path, mode, dev):
raise NotImplementedError
def rmdir(self, path):
with ViewToErrno():
node = self.view.delete(path)
self.send(node)
def mkdir(self, path, mode):
with ViewToErrno():
node = self.view.mkdir(path)
self.send(node)
return 0
# def statfs(self, path):
# full_path = self._full_path(path)
# stv = os.statvfs(full_path)
# return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
# 'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag',
# 'f_frsize', 'f_namemax'))
def unlink(self, path):
with ViewToErrno():
node = self.view.delete(path)
self.send(node)
# return os.unlink(self._full_path(path))
# def symlink(self, name, target):
# return os.symlink(name, self._full_path(target))
def rename(self, old, new):
raise NotImplementedError
# def link(self, target, name):
# return os.link(self._full_path(target), self._full_path(name))
# def utimens(self, path, times=None):
# return os.utime(self._full_path(path), times)
# # File methods
# # ============
def open(self, path, flags):
node = self.get_node(path)
id = int(node.entry.hash, 16)
if path not in self.cache:
self.cache[path] = node.content
self.dirty[path] = False
return id
def create(self, path, mode, fi=None):
self.cache[path] = b''
self.dirty[path] = True
return id(path)
def read(self, path, length, offset, fh):
try:
content = self.cache[path]
except KeyError:
node = self.get_node(path)
content = node.content
return content[offset:offset+length]
def write(self, path, buf, offset, fh):
# TODO check write permissions
try:
content = self.cache[path]
except KeyError:
node = self.get_node(path)
content = node.content
size = len(buf)
new_content = content[:offset] + buf + content[offset+size:]
if content != new_content:
self.dirty[path] = True
self.cache[path] = new_content
return size
def truncate(self, path, length, fh=None):
self.cache[path] = self.cache[path][:length]
self.dirty[path] = True
# def flush(self, path, fh):
# # TODO Filesystems shouldn't assume that flush will always be called after some writes, or that it will be called at all.
# content = self.cache.pop(path, None)
# dirty = self.dirty.pop(path, False)
# if content is not None and dirty:
# print('write')
# node = self.view.write(path, content)
## self.send(node)
def release(self, path, fh):
content = self.cache.pop(path, None)
dirty = self.dirty.pop(path, False)
if content is not None and dirty:
# TODO raise permission denied should happen in write() create().... not here
with ViewToErrno():
node = self.view.write(path, content)
self.send(node)
# def fsync(self, path, fdatasync, fh):
# return self.flush(path, fh)
# return None
|
|
# Copyright 2013 Rackspace Hosting Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Quark Pluggable IPAM
"""
import functools
import itertools
import random
import time
import uuid
import netaddr
from neutron.common import exceptions as n_exc_ext
from neutron_lib import exceptions as n_exc
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_db import exception as db_exception
from oslo_log import log as logging
from oslo_utils import timeutils
from quark import billing
from quark.db import api as db_api
from quark.db import ip_types
from quark.db import models
from quark.drivers import floating_ip_registry as registry
from quark import exceptions as q_exc
from quark import network_strategy
from quark import utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
STRATEGY = network_strategy.STRATEGY
quark_opts = [
cfg.IntOpt('v6_allocation_attempts',
default=10,
help=_('Number of times to retry generating v6 addresses'
' before failure. Also implicitly controls how many'
' v6 addresses we assign to any port, as the random'
' values generated will be the same every time.')),
cfg.IntOpt("mac_address_retry_max",
default=20,
help=_("Number of times to attempt to allocate a new MAC"
" address before giving up.")),
cfg.IntOpt("ip_address_retry_max",
default=20,
help=_("Number of times to attempt to allocate a new IP"
" address before giving up.")),
cfg.BoolOpt("ipam_use_synchronization",
default=False,
help=_("Configures whether or not to use the experimental"
" semaphore logic around IPAM")),
cfg.BoolOpt("ipam_select_subnet_v6_locking",
default=True,
help=_("Controls whether or not SELECT ... FOR UPDATE is used"
" when retrieving v6 subnets explicitly."))
]
CONF.register_opts(quark_opts, "QUARK")
# NOTE(mdietz): equivalent to the following line, but converting
# v6 addresses in netaddr is very slow.
# netaddr.IPAddress("::0200:0:0:0").value
MAGIC_INT = 144115188075855872
def no_synchronization(*args, **kwargs):
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
return f(*args, **kwargs)
return inner
return wrap
def named(sema):
return "%s.%s" % (__name__, sema)
if CONF.QUARK.ipam_use_synchronization:
synchronized = lockutils.synchronized
else:
synchronized = no_synchronization
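# When ipam_use_synchronization is False, `synchronized` is the no-op factory
# above, so a decorator such as @synchronized(named("allocate_mac_address"))
# wraps the method without acquiring any lockutils semaphore.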
def rfc2462_ip(mac, cidr):
# NOTE(mdietz): see RFC2462
int_val = netaddr.IPNetwork(cidr).value
mac = netaddr.EUI(mac)
LOG.info("Using RFC2462 method to generate a v6 with MAC %s" % mac)
int_val += mac.eui64().value
int_val ^= MAGIC_INT
return int_val
def rfc3041_ip(port_id, cidr):
if not port_id:
random_stuff = uuid.uuid4()
else:
random_stuff = uuid.UUID(port_id)
random.seed(int(random_stuff))
int_val = netaddr.IPNetwork(cidr).value
while True:
rand_bits = random.getrandbits(64)
LOG.info("Using RFC3041 method to generate a v6 with bits %s" %
rand_bits)
val = int_val + rand_bits
val ^= MAGIC_INT
yield val
def ip_address_failure(network_id):
if STRATEGY.is_provider_network(network_id):
return q_exc.ProviderNetworkOutOfIps(net_id=network_id)
else:
return n_exc.IpAddressGenerationFailure(net_id=network_id)
def generate_v6(mac, port_id, cidr):
# NOTE(mdietz): RM10879 - if we don't have a MAC, don't panic, defer to
# our magic rfc3041_ip method instead. If an IP is created
# by the ip_addresses controller, we wouldn't necessarily
# have a MAC to base our generator on in that case for
# example.
if mac is not None:
addr = rfc2462_ip(mac, cidr)
yield addr
for addr in rfc3041_ip(port_id, cidr):
yield addr
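# Illustrative use of the generator above (the MAC and cidr values are
# hypothetical):
#
#     gen = generate_v6("02:16:3e:11:22:33", None, "fe80::/64")
#     first = next(gen)   # RFC2462-style value derived from the MAC
#     second = next(gen)  # RFC3041-style pseudo-random values from here on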
def ipam_logged(fx):
def wrap(self, *args, **kwargs):
log = QuarkIPAMLog()
kwargs['ipam_log'] = log
try:
return fx(self, *args, **kwargs)
finally:
log.end()
return wrap
class QuarkIPAMLog(object):
def __init__(self):
self.entries = {}
self.success = True
def make_entry(self, fx_name):
if fx_name not in self.entries:
self.entries[fx_name] = []
entry = QuarkIPAMLogEntry(self, fx_name)
self.entries[fx_name].append(entry)
return entry
def _output(self, status, time_total, fails, successes):
statistics = ("TIME:%f ATTEMPTS:%d PASS:%d FAIL:%d" %
(time_total, fails + successes, successes, fails))
if not self.success:
LOG.warning("STATUS:FAILED %s" % statistics)
else:
LOG.debug("STATUS:SUCCESS %s" % statistics)
def end(self):
total = 0
fails = 0
successes = 0
for fx, entries in self.entries.items():
for entry in entries:
total += entry.get_time()
if entry.success:
successes += 1
else:
fails += 1
self._output(self.success, total, fails, successes)
def failed(self):
self.success = False
class QuarkIPAMLogEntry(object):
def __init__(self, log, name):
self.name = name
self.log = log
self.start_time = time.time()
self.success = True
def failed(self):
self.success = False
def end(self):
self.end_time = time.time()
def get_time(self):
if not hasattr(self, 'end_time'):
return 0
return self.end_time - self.start_time
class QuarkIpam(object):
@synchronized(named("allocate_mac_address"))
def allocate_mac_address(self, context, net_id, port_id, reuse_after,
mac_address=None,
use_forbidden_mac_range=False, **kwargs):
if mac_address:
mac_address = netaddr.EUI(mac_address).value
kwargs.update({"network_id": net_id, "port_id": port_id,
"mac_address": mac_address,
"use_forbidden_mac_range": use_forbidden_mac_range})
LOG.info(("Attempting to allocate a new MAC address "
"[{0}]").format(utils.pretty_kwargs(**kwargs)))
for retry in xrange(CONF.QUARK.mac_address_retry_max):
LOG.info("Attemping to reallocate deallocated MAC (step 1 of 3),"
" attempt {0} of {1}".format(
retry + 1, CONF.QUARK.mac_address_retry_max))
try:
with context.session.begin():
transaction = db_api.transaction_create(context)
update_kwargs = {
"deallocated": False,
"deallocated_at": None,
"transaction_id": transaction.id
}
filter_kwargs = {
"deallocated": True,
}
if mac_address is not None:
filter_kwargs["address"] = mac_address
if reuse_after is not None:
filter_kwargs["reuse_after"] = reuse_after
elevated = context.elevated()
result = db_api.mac_address_reallocate(
elevated, update_kwargs, **filter_kwargs)
if not result:
break
reallocated_mac = db_api.mac_address_reallocate_find(
elevated, transaction.id)
if reallocated_mac:
dealloc = netaddr.EUI(reallocated_mac["address"])
LOG.info("Found a suitable deallocated MAC {0}".format(
str(dealloc)))
LOG.info("MAC assignment for port ID {0} completed "
"with address {1}".format(port_id, dealloc))
return reallocated_mac
except Exception:
LOG.exception("Error in mac reallocate...")
continue
LOG.info("Couldn't find a suitable deallocated MAC, attempting "
"to create a new one")
# This could fail if a large chunk of MACs were chosen explicitly,
# but under concurrent load enough MAC creates should iterate without
# any given thread exhausting its retry count.
for retry in xrange(CONF.QUARK.mac_address_retry_max):
LOG.info("Attemping to find a range to create a new MAC in "
"(step 2 of 3), attempt {0} of {1}".format(
retry + 1, CONF.QUARK.mac_address_retry_max))
next_address = None
with context.session.begin():
try:
fn = db_api.mac_address_range_find_allocation_counts
mac_range = \
fn(context, address=mac_address,
use_forbidden_mac_range=use_forbidden_mac_range)
if not mac_range:
LOG.info("No MAC ranges could be found given "
"the criteria")
break
rng, addr_count = mac_range
LOG.info("Found a MAC range {0}".format(rng["cidr"]))
last = rng["last_address"]
first = rng["first_address"]
if (last - first + 1) <= addr_count:
# Somehow, the range got filled up without us
# knowing, so set the next_auto_assign to be -1
# so we never try to create new ones
# in this range
db_api.mac_range_update_set_full(context, rng)
LOG.info("MAC range {0} is full".format(rng["cidr"]))
continue
if mac_address:
next_address = mac_address
else:
next_address = rng["next_auto_assign_mac"]
if next_address + 1 > rng["last_address"]:
db_api.mac_range_update_set_full(context, rng)
else:
db_api.mac_range_update_next_auto_assign_mac(
context, rng)
context.session.refresh(rng)
except Exception:
LOG.exception("Error in updating mac range")
continue
# Based on the above, this should only fail if a MAC was
# explicitly chosen at some point. As such, fall through
# here and get in line for a new MAC address to try.
try:
mac_readable = str(netaddr.EUI(next_address))
LOG.info("Attempting to create new MAC {0} "
"(step 3 of 3)".format(mac_readable))
with context.session.begin():
address = db_api.mac_address_create(
context, address=next_address,
mac_address_range_id=rng["id"])
LOG.info("MAC assignment for port ID {0} completed with "
"address {1}".format(port_id, mac_readable))
return address
except Exception:
LOG.info("Failed to create new MAC {0}".format(mac_readable))
LOG.exception("Error in creating mac. MAC possibly duplicate")
continue
raise n_exc_ext.MacAddressGenerationFailure(net_id=net_id)
@synchronized(named("reallocate_ip"))
def attempt_to_reallocate_ip(self, context, net_id, port_id, reuse_after,
version=None, ip_address=None,
segment_id=None, subnets=None, **kwargs):
version = version or [4, 6]
elevated = context.elevated()
LOG.info("Attempting to reallocate an IP (step 1 of 3) - [{0}]".format(
utils.pretty_kwargs(network_id=net_id, port_id=port_id,
version=version, segment_id=segment_id,
subnets=subnets, ip_address=ip_address)))
if version == 6:
# Defers to the create case. The reason why is we'd have to look
# up subnets here to correctly generate the v6. If we split them
# up into reallocate and create, we'd be looking up the same
# subnets twice, which is a waste of time.
# TODO(mdietz): after reviewing this code, this block annoyingly
# doesn't trigger in the ANY case, since we end up
# using a list of [4, 6]. It works as expected most
# of the time, but we can anticipate that isolated
# networks will end up using sequential assignment.
# Probably want to rework this logic to compensate
# at some point. Considering they all come from the
# same MAC address pool, nothing bad will happen,
# just worth noticing and fixing.
LOG.info("Identified as v6 case, deferring to IP create path")
return []
sub_ids = []
if subnets:
sub_ids = subnets
elif segment_id:
subnets = db_api.subnet_find(elevated,
network_id=net_id,
segment_id=segment_id)
sub_ids = [s["id"] for s in subnets]
if not sub_ids:
LOG.info("No subnets matching segment_id {0} could be "
"found".format(segment_id))
raise ip_address_failure(net_id)
ip_kwargs = {
"network_id": net_id,
"deallocated": True,
"version": version,
"lock_id": None,
}
if reuse_after is not None:
ip_kwargs["reuse_after"] = reuse_after
if ip_address is not None:
ip_kwargs["ip_address"] = ip_address
del ip_kwargs["deallocated"]
if sub_ids:
ip_kwargs["subnet_id"] = sub_ids
ipam_log = kwargs.get('ipam_log', None)
for retry in xrange(CONF.QUARK.ip_address_retry_max):
attempt = None
if ipam_log:
attempt = ipam_log.make_entry("attempt_to_reallocate_ip")
LOG.info("Attempt {0} of {1}".format(
retry + 1, CONF.QUARK.ip_address_retry_max))
try:
with context.session.begin():
transaction = db_api.transaction_create(context)
m = models.IPAddress
update_kwargs = {
m.transaction_id: transaction.id,
m.address_type: kwargs.get("address_type", ip_types.FIXED),
m.deallocated: False,
m.deallocated_at: None,
m.used_by_tenant_id: context.tenant_id,
m.allocated_at: timeutils.utcnow(),
}
result = db_api.ip_address_reallocate(
elevated, update_kwargs, **ip_kwargs)
if not result:
LOG.info("Couldn't update any reallocatable addresses "
"given the criteria")
if attempt:
attempt.failed()
break
updated_address = db_api.ip_address_reallocate_find(
elevated, transaction.id)
if not updated_address:
if attempt:
attempt.failed()
continue
LOG.info("Address {0} is reallocated".format(
updated_address["address_readable"]))
return [updated_address]
except Exception:
if attempt:
attempt.failed()
LOG.exception("Error in reallocate ip...")
finally:
if attempt:
attempt.end()
return []
def is_strategy_satisfied(self, ip_addresses, allocate_complete=False):
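# Base behaviour: the strategy is satisfied as soon as any address has
# been allocated. The BOTH/BOTH_REQUIRED subclasses below override this
# to demand a v4/v6 pair.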
return ip_addresses
def _allocate_from_subnet(self, context, net_id, subnet,
port_id, reuse_after, ip_address=None, **kwargs):
LOG.info("Creating a new address in subnet {0} - [{1}]".format(
subnet["_cidr"], utils.pretty_kwargs(network_id=net_id,
subnet=subnet,
port_id=port_id,
ip_address=ip_address)))
if subnet and subnet["ip_policy"]:
ip_policy_cidrs = subnet["ip_policy"].get_cidrs_ip_set()
else:
ip_policy_cidrs = netaddr.IPSet([])
next_ip = ip_address
if not next_ip:
if subnet["next_auto_assign_ip"] != -1:
next_ip = netaddr.IPAddress(subnet["next_auto_assign_ip"] - 1)
else:
next_ip = netaddr.IPAddress(subnet["last_ip"])
if subnet["ip_version"] == 4:
next_ip = next_ip.ipv4()
LOG.info("Next IP is {0}".format(str(next_ip)))
if ip_policy_cidrs and next_ip in ip_policy_cidrs and not ip_address:
LOG.info("Next IP {0} violates policy".format(str(next_ip)))
raise q_exc.IPAddressPolicyRetryableFailure(ip_addr=next_ip,
net_id=net_id)
try:
with context.session.begin():
address = db_api.ip_address_create(
context, address=next_ip, subnet_id=subnet["id"],
deallocated=0, version=subnet["ip_version"],
network_id=net_id,
port_id=port_id,
address_type=kwargs.get('address_type', ip_types.FIXED))
address["deallocated"] = 0
# alexm: instead of notifying billing from here we notify from
# allocate_ip_address() when it's clear that the IP
# allocation was successful
except db_exception.DBDuplicateEntry:
raise n_exc.IpAddressInUse(ip_address=next_ip, net_id=net_id)
except db_exception.DBError:
raise q_exc.IPAddressRetryableFailure(ip_addr=next_ip,
net_id=net_id)
return address
def _allocate_from_v6_subnet(self, context, net_id, subnet,
port_id, reuse_after, ip_address=None,
**kwargs):
"""This attempts to allocate v6 addresses as per RFC2462 and RFC3041.
To accommodate this, we effectively treat all v6 assignment as a
first time allocation utilizing the MAC address of the VIF. Because
we recycle MACs, we will eventually attempt to recreate a previously
generated v6 address. Instead of failing, we've opted to handle
reallocating that address in this method.
This should provide a performance boost over checking every subnet
in the existing reallocate logic, where we would have to iterate
over each and every subnet returned.
"""
LOG.info("Attempting to allocate a v6 address - [{0}]".format(
utils.pretty_kwargs(network_id=net_id, subnet=subnet,
port_id=port_id, ip_address=ip_address)))
if ip_address:
LOG.info("IP %s explicitly requested, deferring to standard "
"allocation" % ip_address)
return self._allocate_from_subnet(context, net_id=net_id,
subnet=subnet, port_id=port_id,
reuse_after=reuse_after,
ip_address=ip_address, **kwargs)
else:
mac = kwargs.get("mac_address")
if mac:
mac = kwargs["mac_address"].get("address")
if subnet and subnet["ip_policy"]:
ip_policy_cidrs = subnet["ip_policy"].get_cidrs_ip_set()
else:
ip_policy_cidrs = netaddr.IPSet([])
for tries, ip_address in enumerate(
generate_v6(mac, port_id, subnet["cidr"])):
LOG.info("Attempt {0} of {1}".format(
tries + 1, CONF.QUARK.v6_allocation_attempts))
if tries > CONF.QUARK.v6_allocation_attempts - 1:
LOG.info("Exceeded v6 allocation attempts, bailing")
raise ip_address_failure(net_id)
ip_address = netaddr.IPAddress(ip_address).ipv6()
LOG.info("Generated a new v6 address {0}".format(
str(ip_address)))
if (ip_policy_cidrs is not None and
ip_address in ip_policy_cidrs):
LOG.info("Address {0} excluded by policy".format(
str(ip_address)))
continue
try:
with context.session.begin():
address = db_api.ip_address_create(
context, address=ip_address,
subnet_id=subnet["id"],
version=subnet["ip_version"], network_id=net_id,
address_type=kwargs.get('address_type',
ip_types.FIXED))
return address
except db_exception.DBDuplicateEntry:
# This shouldn't ever happen, since we hold a unique MAC
# address from the previous IPAM step.
LOG.info("{0} exists but was already "
"allocated".format(str(ip_address)))
LOG.debug("Duplicate entry found when inserting subnet_id"
" %s ip_address %s", subnet["id"], ip_address)
def _allocate_ips_from_subnets(self, context, new_addresses, net_id,
subnets, port_id, reuse_after,
ip_address=None, **kwargs):
LOG.info("Allocating IP(s) from chosen subnet(s) (step 3 of 3) - "
"[{0}]".format(utils.pretty_kwargs(
network_id=net_id, port_id=port_id,
new_addresses=new_addresses, ip_address=ip_address)))
subnets = subnets or []
allocated_ips = [ip.get("address_readable") for ip in new_addresses]
for subnet in subnets:
if not subnet:
continue
if str(ip_address) in allocated_ips:
continue
LOG.info("Attempting to allocate from {0} - {1}".format(
subnet["id"], subnet["_cidr"]))
address = None
if int(subnet["ip_version"]) == 4:
address = self._allocate_from_subnet(context, net_id,
subnet, port_id,
reuse_after,
ip_address, **kwargs)
else:
address = self._allocate_from_v6_subnet(context, net_id,
subnet, port_id,
reuse_after,
ip_address, **kwargs)
if address:
LOG.info("Created IP {0}".format(
address["address_readable"]))
new_addresses.append(address)
return new_addresses
@ipam_logged
def allocate_ip_address(self, context, new_addresses, net_id, port_id,
reuse_after, segment_id=None, version=None,
ip_addresses=None, subnets=None, **kwargs):
elevated = context.elevated()
subnets = subnets or []
ip_addresses = ip_addresses or []
ipam_log = kwargs.get('ipam_log', None)
LOG.info("Starting a new IP address(es) allocation. Strategy "
"is {0} - [{1}]".format(
self.get_name(),
utils.pretty_kwargs(network_id=net_id, port_id=port_id,
new_addresses=new_addresses,
ip_addresses=ip_addresses,
subnets=subnets,
segment_id=segment_id,
version=version)))
def _try_reallocate_ip_address(ipam_log, ip_addr=None):
new_addresses.extend(self.attempt_to_reallocate_ip(
context, net_id, port_id, reuse_after, version=version,
ip_address=ip_addr, segment_id=segment_id, subnets=subnets,
**kwargs))
def _try_allocate_ip_address(ipam_log, ip_addr=None, sub=None):
for retry in xrange(CONF.QUARK.ip_address_retry_max):
attempt = None
if ipam_log:
attempt = ipam_log.make_entry("_try_allocate_ip_address")
LOG.info("Allocating new IP attempt {0} of {1}".format(
retry + 1, CONF.QUARK.ip_address_retry_max))
if not sub:
subnets = self._choose_available_subnet(
elevated, net_id, version, segment_id=segment_id,
ip_address=ip_addr, reallocated_ips=new_addresses)
else:
subnets = [self.select_subnet(context, net_id,
ip_addr, segment_id,
subnet_ids=[sub])]
LOG.info("Subnet selection returned {0} viable subnet(s) - "
"IDs: {1}".format(len(subnets),
", ".join([str(s["id"])
for s in subnets if s])))
try:
self._allocate_ips_from_subnets(context, new_addresses,
net_id, subnets,
port_id, reuse_after,
ip_addr, **kwargs)
except q_exc.IPAddressRetryableFailure:
LOG.exception("Error in allocating IP")
if attempt:
LOG.debug("ATTEMPT FAILED")
attempt.failed()
remaining = CONF.QUARK.ip_address_retry_max - retry - 1
if remaining > 0:
LOG.info("{0} retries remain, retrying...".format(
remaining))
else:
LOG.info("No retries remaing, bailing")
continue
finally:
if attempt:
attempt.end()
break
ip_addresses = [netaddr.IPAddress(ip_address)
for ip_address in ip_addresses]
if ip_addresses:
for ip_address in ip_addresses:
_try_reallocate_ip_address(ipam_log, ip_address)
else:
_try_reallocate_ip_address(ipam_log)
if self.is_strategy_satisfied(new_addresses):
return
else:
LOG.info("Reallocated addresses {0} but still need more addresses "
"to satisfy strategy {1}. Falling back to creating "
"IPs".format(new_addresses, self.get_name()))
if ip_addresses or subnets:
for ip_address, subnet in itertools.izip_longest(ip_addresses,
subnets):
_try_allocate_ip_address(ipam_log, ip_address, subnet)
else:
_try_allocate_ip_address(ipam_log)
if self.is_strategy_satisfied(new_addresses, allocate_complete=True):
# Only notify when all went well
for address in new_addresses:
billing.notify(context, billing.IP_ADD, address, **kwargs)
LOG.info("IPAM for port ID {0} completed with addresses "
"{1}".format(port_id,
[a["address_readable"]
for a in new_addresses]))
return
ipam_log.failed()
raise ip_address_failure(net_id)
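# Illustrative call pattern only (not lifted from Quark's callers); the
# strategy name and reuse_after value are arbitrary examples:
#
#     ipam = IPAM_REGISTRY.get_strategy("BOTH")
#     allocated = []
#     ipam.allocate_ip_address(context, allocated, net_id, port_id,
#                              reuse_after=300)
#     # 'allocated' is populated in place with the IPAddress models.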
def deallocate_ip_address(self, context, address, **kwargs):
if address["version"] == 6:
db_api.ip_address_delete(context, address)
else:
address["deallocated"] = 1
address["address_type"] = None
billing.notify(context, billing.IP_DEL, address, send_usage=True,
**kwargs)
def deallocate_ips_by_port(self, context, port=None, **kwargs):
ips_to_remove = []
for addr in port["ip_addresses"]:
if "ip_address" in kwargs:
ip = kwargs["ip_address"]
if ip != netaddr.IPAddress(int(addr["address"])):
continue
# Note: only deallocate ip if this is the
# only port mapped
ips_to_remove.append(addr)
port["ip_addresses"] = list(
set(port["ip_addresses"]) - set(ips_to_remove))
# NCP-1541: We don't need to track v6 IPs the same way. Also, we can't
# delete them until we've removed the FK on the assoc record first, so
# we have to flush the current state of the transaction.
# NOTE(mdietz): this does increase traffic to the db because we need
# to flush, fetch the records again and potentially make
# another trip to deallocate each IP, but keeping our
# indices smaller probably provides more value than the
# cost
# NOTE(aquillin): For floating IPs associated with the port, we do not
# want to deallocate the IP or disassociate the IP from
# the tenant; instead we will disassociate the floating
# IP's fixed IP address.
context.session.flush()
deallocated_ips = []
flip = None
for ip in ips_to_remove:
if ip["address_type"] in (ip_types.FLOATING, ip_types.SCALING):
flip = ip
else:
if len(ip["ports"]) == 0:
self.deallocate_ip_address(context, ip)
deallocated_ips.append(ip.id)
if flip:
if flip.fixed_ips and len(flip.fixed_ips) == 1:
# This is a FLIP or SCIP that is only associated with one
# port and fixed_ip, so we can safely just disassociate all
# and remove the flip from unicorn.
db_api.floating_ip_disassociate_all_fixed_ips(context, flip)
# NOTE(blogan): I'm not too happy about having to do another
# flush, but some test runs showed inconsistent state based on
# SQLAlchemy caching.
context.session.add(flip)
context.session.flush()
billing.notify(context, billing.IP_DISASSOC, flip, **kwargs)
driver = registry.DRIVER_REGISTRY.get_driver()
driver.remove_floating_ip(flip)
elif len(flip.fixed_ips) > 1:
# This is a SCIP and we need to disassociate the one fixed_ip
# from the SCIP and update unicorn with the remaining
# ports and fixed_ips
remaining_fixed_ips = []
for fix_ip in flip.fixed_ips:
if fix_ip.id in deallocated_ips:
db_api.floating_ip_disassociate_fixed_ip(
context, flip, fix_ip)
context.session.add(flip)
context.session.flush()
billing.notify(context, billing.IP_DISASSOC, flip,
**kwargs)
else:
remaining_fixed_ips.append(fix_ip)
port_fixed_ips = {}
for fix_ip in remaining_fixed_ips:
# NOTE(blogan): Since this is the flip's fixed_ips it
# should be safe to assume there is only one port
# associated with it.
remaining_port = fix_ip.ports[0]
port_fixed_ips[remaining_port.id] = {
'port': remaining_port,
'fixed_ip': fix_ip
}
driver = registry.DRIVER_REGISTRY.get_driver()
driver.update_floating_ip(flip, port_fixed_ips)
# NCP-1509(roaet):
# - started using admin_context due to tenant not claiming when realloc
def deallocate_mac_address(self, context, address, **kwargs):
admin_context = context.elevated()
mac = db_api.mac_address_find(admin_context, address=address,
scope=db_api.ONE)
if not mac:
raise q_exc.MacAddressNotFound(
mac_address_id=address,
readable_mac=netaddr.EUI(address))
if (mac["mac_address_range"] is None or
mac["mac_address_range"]["do_not_use"]):
db_api.mac_address_delete(admin_context, mac)
else:
db_api.mac_address_update(admin_context, mac, deallocated=True,
deallocated_at=timeutils.utcnow())
def _select_subnet(self, context, net_id, ip_address, segment_id,
subnet_ids, **filters):
# NCP-1480: Don't need to lock V6 subnets, since we don't use
# next_auto_assign_ip for them. We already uniquely identified
# the V6 we're going to get by generating a MAC in a previous step.
# Also note that this only works under BOTH or BOTH_REQUIRED. ANY
# does not pass an ip_version
lock_subnets = True
if (not CONF.QUARK.ipam_select_subnet_v6_locking and
"ip_version" in filters and
int(filters["ip_version"]) == 6):
lock_subnets = False
select_api = db_api.subnet_find_ordered_by_most_full
# TODO(mdietz): Add configurable, alternate subnet selection here
subnets = select_api(context, net_id, lock_subnets=lock_subnets,
segment_id=segment_id, scope=db_api.ALL,
subnet_id=subnet_ids, **filters)
if not subnets:
LOG.info("No subnets found given the search criteria!")
return
# TODO(mdietz): Making this into an iterator because we want to move
# to selecting 1 subnet at a time and paginating rather
# than the bulk fetch. Without locks, we need to
# minimize looking at stale data to save ourselves
# some retries. Getting them 1 at a time will
# facilitate this.
for subnet, ips_in_subnet in subnets:
yield subnet, ips_in_subnet
def _should_mark_subnet_full(self, context, subnet, ipnet, ip_address,
ips_in_subnet):
ip = subnet["next_auto_assign_ip"]
# NOTE(mdietz): When atomically updated, this probably
# doesn't need the lower bounds check but
# I'm not comfortable removing it yet.
if (subnet["ip_version"] == 4 and ip < subnet["first_ip"] or
ip > subnet["last_ip"]):
return True
ip_policy = None
if not ip_address:
# Policies don't prevent explicit assignment, so we only
# need to check if we're allocating a new IP
ip_policy = subnet.get("ip_policy")
policy_size = ip_policy["size"] if ip_policy else 0
if ipnet.size > (ips_in_subnet + policy_size - 1):
return False
return True
def _ip_in_subnet(self, subnet, subnet_ids, ipnet, ip_address):
if ip_address:
requested_ip = netaddr.IPAddress(ip_address)
if ipnet.version == 4 and requested_ip.version != 4:
requested_ip = requested_ip.ipv4()
if requested_ip not in ipnet:
if subnet_ids is not None:
LOG.info("Requested IP {0} not in subnet {1}, "
"retrying".format(str(requested_ip),
str(ipnet)))
raise q_exc.IPAddressNotInSubnet(
ip_addr=ip_address, subnet_id=subnet["id"])
return False
return True
def select_subnet(self, context, net_id, ip_address, segment_id,
subnet_ids=None, **filters):
LOG.info("Selecting subnet(s) - (Step 2 of 3) [{0}]".format(
utils.pretty_kwargs(network_id=net_id, ip_address=ip_address,
segment_id=segment_id, subnet_ids=subnet_ids,
ip_version=filters.get("ip_version"))))
# TODO(mdietz): Invert the iterator and the session, should only be
# one subnet per attempt. We should also only be fetching
# the subnet and usage when we need to. Otherwise
# we're locking every subnet for a segment, and once
# we stop locking, we're looking at stale data.
with context.session.begin():
for subnet, ips_in_subnet in self._select_subnet(context, net_id,
ip_address,
segment_id,
subnet_ids,
**filters):
if subnet is None:
continue
ipnet = netaddr.IPNetwork(subnet["cidr"])
LOG.info("Trying subnet ID: {0} - CIDR: {1}".format(
subnet["id"], subnet["_cidr"]))
if not self._ip_in_subnet(subnet, subnet_ids, ipnet,
ip_address):
continue
if self._should_mark_subnet_full(context, subnet, ipnet,
ip_address, ips_in_subnet):
LOG.info("Marking subnet {0} as full".format(subnet["id"]))
updated = db_api.subnet_update_set_full(context, subnet)
# Ensure the session is aware of the changes to the subnet
if updated:
context.session.refresh(subnet)
continue
if not ip_address and subnet["ip_version"] == 4:
auto_inc = db_api.subnet_update_next_auto_assign_ip
updated = auto_inc(context, subnet)
if updated:
context.session.refresh(subnet)
else:
# This means the subnet was marked full
# while we were checking out policies.
# Fall out and go back to the outer retry
# loop.
return
LOG.info("Subnet {0} - {1} {2} looks viable, "
"returning".format(subnet["id"], subnet["_cidr"],
subnet["next_auto_assign_ip"]))
return subnet
class QuarkIpamANY(QuarkIpam):
@classmethod
def get_name(self):
return "ANY"
def _choose_available_subnet(self, context, net_id, version=None,
segment_id=None, ip_address=None,
reallocated_ips=None):
filters = {}
if version:
filters["ip_version"] = version
subnet = self.select_subnet(context, net_id, ip_address, segment_id,
**filters)
if subnet:
return [subnet]
raise ip_address_failure(net_id)
class QuarkIpamBOTH(QuarkIpam):
@classmethod
def get_name(self):
return "BOTH"
def is_strategy_satisfied(self, reallocated_ips, allocate_complete=False):
req = [4, 6]
for ip in reallocated_ips:
if ip is not None:
req.remove(ip["version"])
ips_allocated = len(req)
if ips_allocated == 0:
return True
elif ips_allocated == 1 and allocate_complete:
return True
return False
def attempt_to_reallocate_ip(self, context, net_id, port_id,
reuse_after, version=None,
ip_address=None, segment_id=None,
subnets=None, **kwargs):
ip_address_version = 4 if not ip_address else ip_address.version
# NOTE(quade): We do not attempt to reallocate ipv6, so just return
if ip_address_version == 6:
return []
return super(QuarkIpamBOTH, self).attempt_to_reallocate_ip(
context, net_id, port_id, reuse_after, ip_address_version,
ip_address, segment_id, subnets=subnets, **kwargs)
def _choose_available_subnet(self, context, net_id, version=None,
segment_id=None, ip_address=None,
reallocated_ips=None):
both_subnet_versions = []
need_versions = [4, 6]
for i in reallocated_ips:
if i["version"] in need_versions:
need_versions.remove(i["version"])
filters = {}
for ver in need_versions:
filters["ip_version"] = ver
sub = self.select_subnet(context, net_id, ip_address, segment_id,
**filters)
if sub:
both_subnet_versions.append(sub)
if not reallocated_ips and not both_subnet_versions:
raise ip_address_failure(net_id)
return both_subnet_versions
class QuarkIpamBOTHREQ(QuarkIpamBOTH):
@classmethod
def get_name(self):
return "BOTH_REQUIRED"
def is_strategy_satisfied(self, reallocated_ips, allocate_complete=False):
req = [4, 6]
for ip in reallocated_ips:
if ip is not None:
req.remove(ip["version"])
ips_allocated = len(req)
if ips_allocated == 0:
return True
return False
def _choose_available_subnet(self, context, net_id, version=None,
segment_id=None, ip_address=None,
reallocated_ips=None):
subnets = super(QuarkIpamBOTHREQ, self)._choose_available_subnet(
context, net_id, version, segment_id, ip_address, reallocated_ips)
if len(reallocated_ips) + len(subnets) < 2:
raise ip_address_failure(net_id)
return subnets
class IronicIpam(QuarkIpam):
"""IPAM base class for the Ironic driver.
The idea here is that there are many small subnets created for a
particular segment for a provider network. The Ironic IPAM
family selects unused ones, and only allows a single allocation
per subnet.
"""
def _select_subnet(self, context, net_id, ip_address, segment_id,
subnet_ids, **filters):
lock_subnets = True
select_api = db_api.subnet_find_unused
subnets = select_api(context, net_id, lock_subnets=lock_subnets,
segment_id=segment_id, scope=db_api.ALL,
subnet_id=subnet_ids, **filters)
if not subnets:
LOG.info("No subnets found given the search criteria!")
return
for subnet, ips_in_subnet in subnets:
# make sure we don't select subnets that have allocated ips.
if ips_in_subnet:
continue
yield subnet, ips_in_subnet
class IronicIpamANY(IronicIpam, QuarkIpamANY):
@classmethod
def get_name(self):
return "IRONIC_ANY"
class IronicIpamBOTH(IronicIpam, QuarkIpamBOTH):
@classmethod
def get_name(self):
return "IRONIC_BOTH"
class IronicIpamBOTHREQ(IronicIpam, QuarkIpamBOTHREQ):
@classmethod
def get_name(self):
return "IRONIC_BOTH_REQUIRED"
class IpamRegistry(object):
def __init__(self):
self.strategies = {
QuarkIpamANY.get_name(): QuarkIpamANY(),
QuarkIpamBOTH.get_name(): QuarkIpamBOTH(),
QuarkIpamBOTHREQ.get_name(): QuarkIpamBOTHREQ(),
IronicIpamANY.get_name(): IronicIpamANY(),
IronicIpamBOTH.get_name(): IronicIpamBOTH(),
IronicIpamBOTHREQ.get_name(): IronicIpamBOTHREQ()
}
def is_valid_strategy(self, strategy_name):
if strategy_name in self.strategies:
return True
return False
def get_strategy(self, strategy_name):
if self.is_valid_strategy(strategy_name):
return self.strategies[strategy_name]
fallback = CONF.QUARK.default_ipam_strategy
LOG.warn("IPAM strategy %s not found, "
"using default %s" % (strategy_name, fallback))
return self.strategies[fallback]
IPAM_REGISTRY = IpamRegistry()
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the compute extra resources framework."""
from oslo_config import cfg
from stevedore import extension
from stevedore import named
from jacket.compute.cloud import resources
from jacket.compute.cloud.resources import base
from jacket import context
from jacket.objects.compute import flavor as flavor_obj
from jacket.compute import test
CONF = cfg.CONF
class FakeResourceHandler(resources.ResourceHandler):
def __init__(self, extensions):
self._mgr = \
named.NamedExtensionManager.make_test_instance(extensions)
class FakeResource(base.Resource):
def __init__(self):
self.total_res = 0
self.used_res = 0
def _get_requested(self, usage):
if 'extra_specs' not in usage:
return
if self.resource_name not in usage['extra_specs']:
return
req = usage['extra_specs'][self.resource_name]
return int(req)
def _get_limit(self, limits):
if self.resource_name not in limits:
return
limit = limits[self.resource_name]
return int(limit)
def reset(self, resources, driver):
self.total_res = 0
self.used_res = 0
def test(self, usage, limits):
requested = self._get_requested(usage)
if not requested:
return
limit = self._get_limit(limits)
if not limit:
return
free = limit - self.used_res
if requested <= free:
return
else:
return ('Free %(free)d < requested %(requested)d ' %
{'free': free, 'requested': requested})
def add_instance(self, usage):
requested = self._get_requested(usage)
if requested:
self.used_res += requested
def remove_instance(self, usage):
requested = self._get_requested(usage)
if requested:
self.used_res -= requested
def write(self, resources):
pass
def report_free(self):
return "Free %s" % (self.total_res - self.used_res)
class ResourceA(FakeResource):
def reset(self, resources, driver):
# ResourceA uses a configuration option
self.total_res = int(CONF.resA)
self.used_res = 0
self.resource_name = 'resource:resA'
def write(self, resources):
resources['resA'] = self.total_res
resources['used_resA'] = self.used_res
class ResourceB(FakeResource):
def reset(self, resources, driver):
# ResourceB uses resource details passed in parameter resources
self.total_res = resources['resB']
self.used_res = 0
self.resource_name = 'resource:resB'
def write(self, resources):
resources['resB'] = self.total_res
resources['used_resB'] = self.used_res
def fake_flavor_obj(**updates):
flavor = flavor_obj.Flavor()
flavor.id = 1
flavor.name = 'fakeflavor'
flavor.memory_mb = 8000
flavor.vcpus = 3
flavor.root_gb = 11
flavor.ephemeral_gb = 4
flavor.swap = 0
flavor.rxtx_factor = 1.0
flavor.vcpu_weight = 1
if updates:
flavor.update(updates)
return flavor
class BaseTestCase(test.NoDBTestCase):
def _initialize_used_res_counter(self):
# Initialize the value for the used resource
for ext in self.r_handler._mgr.extensions:
ext.obj.used_res = 0
def setUp(self):
super(BaseTestCase, self).setUp()
# initialize flavors and stub get_by_id to
# get flavors from here
self._flavors = {}
self.ctxt = context.get_admin_context()
# Create a flavor without extra_specs defined
_flavor_id = 1
_flavor = fake_flavor_obj(id=_flavor_id)
self._flavors[_flavor_id] = _flavor
# Create a flavor with extra_specs defined
_flavor_id = 2
requested_resA = 5
requested_resB = 7
requested_resC = 7
_extra_specs = {'resource:resA': requested_resA,
'resource:resB': requested_resB,
'resource:resC': requested_resC}
_flavor = fake_flavor_obj(id=_flavor_id,
extra_specs=_extra_specs)
self._flavors[_flavor_id] = _flavor
# create fake resource extensions and resource handler
_extensions = [
extension.Extension('resA', None, ResourceA, ResourceA()),
extension.Extension('resB', None, ResourceB, ResourceB()),
]
self.r_handler = FakeResourceHandler(_extensions)
# Resource details can be passed to each plugin or can be specified as
# configuration options
driver_resources = {'resB': 5}
CONF.resA = '10'
# initialise the resources
self.r_handler.reset_resources(driver_resources, None)
def test_update_from_instance_with_extra_specs(self):
# Flavor with extra_specs
_flavor_id = 2
sign = 1
self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)
expected_resA = self._flavors[_flavor_id].extra_specs['resource:resA']
expected_resB = self._flavors[_flavor_id].extra_specs['resource:resB']
self.assertEqual(int(expected_resA),
self.r_handler._mgr['resA'].obj.used_res)
self.assertEqual(int(expected_resB),
self.r_handler._mgr['resB'].obj.used_res)
def test_update_from_instance_without_extra_specs(self):
# Flavor id without extra spec
_flavor_id = 1
self._initialize_used_res_counter()
self.r_handler.resource_list = []
sign = 1
self.r_handler.update_from_instance(self._flavors[_flavor_id], sign)
self.assertEqual(0, self.r_handler._mgr['resA'].obj.used_res)
self.assertEqual(0, self.r_handler._mgr['resB'].obj.used_res)
def test_write_resources(self):
self._initialize_used_res_counter()
extra_resources = {}
expected = {'resA': 10, 'used_resA': 0, 'resB': 5, 'used_resB': 0}
self.r_handler.write_resources(extra_resources)
self.assertEqual(expected, extra_resources)
def test_test_resources_without_extra_specs(self):
limits = {}
# Flavor id without extra_specs
flavor = self._flavors[1]
result = self.r_handler.test_resources(flavor, limits)
self.assertEqual([None, None], result)
def test_test_resources_with_limits_for_different_resource(self):
limits = {'resource:resC': 20}
# Flavor id with extra_specs
flavor = self._flavors[2]
result = self.r_handler.test_resources(flavor, limits)
self.assertEqual([None, None], result)
def test_passing_test_resources(self):
limits = {'resource:resA': 10, 'resource:resB': 20}
# Flavor id with extra_specs
flavor = self._flavors[2]
self._initialize_used_res_counter()
result = self.r_handler.test_resources(flavor, limits)
self.assertEqual([None, None], result)
def test_failing_test_resources_for_single_resource(self):
limits = {'resource:resA': 4, 'resource:resB': 20}
# Flavor id with extra_specs
flavor = self._flavors[2]
self._initialize_used_res_counter()
result = self.r_handler.test_resources(flavor, limits)
expected = ['Free 4 < requested 5 ', None]
self.assertEqual(sorted(expected, key=str),
sorted(result, key=str))
def test_empty_resource_handler(self):
"""An empty resource handler has no resource extensions,
should have no effect, and should raise no exceptions.
"""
empty_r_handler = FakeResourceHandler([])
resources = {}
empty_r_handler.reset_resources(resources, None)
flavor = self._flavors[1]
sign = 1
empty_r_handler.update_from_instance(flavor, sign)
limits = {}
test_result = empty_r_handler.test_resources(flavor, limits)
self.assertEqual([], test_result)
sign = -1
empty_r_handler.update_from_instance(flavor, sign)
extra_resources = {}
expected_extra_resources = extra_resources
empty_r_handler.write_resources(extra_resources)
self.assertEqual(expected_extra_resources, extra_resources)
empty_r_handler.report_free_resources()
|
|
#!/usr/bin/env python
__author__ = "waroquiers"
import os
import unittest
import numpy as np
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometries import (
AllCoordinationGeometries,
)
from pymatgen.analysis.chemenv.coordination_environments.coordination_geometry_finder import (
AbstractGeometry,
LocalGeometryFinder,
symmetry_measure,
)
from pymatgen.util.testing import PymatgenTest
json_files_dir = os.path.join(
os.path.dirname(__file__),
"..",
"..",
"..",
"..",
"..",
"test_files",
"chemenv",
"json_test_files",
)
class CoordinationGeometryFinderTest(PymatgenTest):
def setUp(self):
self.lgf = LocalGeometryFinder()
self.lgf.setup_parameters(
centering_type="standard",
structure_refinement=self.lgf.STRUCTURE_REFINEMENT_NONE,
)
# self.strategies = [SimplestChemenvStrategy(), SimpleAbundanceChemenvStrategy()]
def test_abstract_geometry(self):
cg_ts3 = self.lgf.allcg["TS:3"]
cg_tet = self.lgf.allcg["T:4"]
abstract_geom = AbstractGeometry.from_cg(cg=cg_ts3, centering_type="central_site")
self.assertArrayAlmostEqual(abstract_geom.centre, [0.0, 0.0, 0.0])
abstract_geom = AbstractGeometry.from_cg(cg=cg_ts3, centering_type="centroid")
self.assertArrayAlmostEqual(abstract_geom.centre, [0.0, 0.0, 0.33333333333])
with self.assertRaises(ValueError) as cm:
AbstractGeometry.from_cg(
cg=cg_ts3,
centering_type="central_site",
include_central_site_in_centroid=True,
)
self.assertEqual(
str(cm.exception),
"The center is the central site, no calculation of the centroid, "
"variable include_central_site_in_centroid should be set to False",
)
abstract_geom = AbstractGeometry.from_cg(
cg=cg_ts3, centering_type="centroid", include_central_site_in_centroid=True
)
self.assertArrayAlmostEqual(abstract_geom.centre, [0.0, 0.0, 0.25])
# WHY ARE WE TESTING STRINGS????
# self.assertEqual(abstract_geom.__str__(),
# '\nAbstract Geometry with 3 points :\n'
# ' [-1. 0. -0.25]\n'
# ' [ 1. 0. -0.25]\n'
# ' [ 0. 0. 0.75]\n'
# 'Points are referenced to the centroid (calculated with the central site) :\n'
# ' [ 0. 0. 0.25]\n')
symm_dict = symmetry_measure([[0.0, 0.0, 0.0]], [1.1, 2.2, 3.3])
self.assertAlmostEqual(symm_dict["symmetry_measure"], 0.0)
self.assertEqual(symm_dict["scaling_factor"], None)
self.assertEqual(symm_dict["rotation_matrix"], None)
tio2_struct = self.get_structure("TiO2")
envs = self.lgf.compute_coordination_environments(structure=tio2_struct, indices=[0])
self.assertAlmostEqual(envs[0][0]["csm"], 1.5309987846957258)
self.assertAlmostEqual(envs[0][0]["ce_fraction"], 1.0)
self.assertEqual(envs[0][0]["ce_symbol"], "O:6")
self.assertEqual(sorted(envs[0][0]["permutation"]), sorted([0, 4, 1, 5, 2, 3]))
self.lgf.setup_random_structure(coordination=5)
self.assertEqual(len(self.lgf.structure), 6)
self.lgf.setup_random_indices_local_geometry(coordination=5)
self.assertEqual(self.lgf.icentral_site, 0)
self.assertEqual(len(self.lgf.indices), 5)
self.lgf.setup_ordered_indices_local_geometry(coordination=5)
self.assertEqual(self.lgf.icentral_site, 0)
self.assertEqual(self.lgf.indices, list(range(1, 6)))
self.lgf.setup_explicit_indices_local_geometry(explicit_indices=[3, 5, 2, 0, 1, 4])
self.assertEqual(self.lgf.icentral_site, 0)
self.assertEqual(self.lgf.indices, [4, 6, 3, 1, 2, 5])
LiFePO4_struct = self.get_structure("LiFePO4")
isite = 10
envs_LiFePO4 = self.lgf.compute_coordination_environments(structure=LiFePO4_struct, indices=[isite])
self.assertAlmostEqual(envs_LiFePO4[isite][0]["csm"], 0.140355832317)
nbs_coords = [
np.array([6.16700437, -4.55194317, -5.89031356]),
np.array([4.71588167, -4.54248093, -3.75553856]),
np.array([6.88012571, -5.79877503, -3.73177541]),
np.array([6.90041188, -3.32797839, -3.71812416]),
]
self.lgf.setup_structure(LiFePO4_struct)
self.lgf.setup_local_geometry(isite, coords=nbs_coords)
perfect_tet = AbstractGeometry.from_cg(
cg=cg_tet, centering_type="centroid", include_central_site_in_centroid=False
)
points_perfect_tet = perfect_tet.points_wcs_ctwcc()
res = self.lgf.coordination_geometry_symmetry_measures_fallback_random(
coordination_geometry=cg_tet, NRANDOM=5, points_perfect=points_perfect_tet
)
(
permutations_symmetry_measures,
permutations,
algos,
local2perfect_maps,
perfect2local_maps,
) = res
for perm_csm_dict in permutations_symmetry_measures:
self.assertAlmostEqual(perm_csm_dict["symmetry_measure"], 0.140355832317)
#
# def _strategy_test(self, strategy):
# files = []
# for (dirpath, dirnames, filenames) in os.walk(json_files_dir):
# files.extend(filenames)
# break
#
# for ifile, json_file in enumerate(files):
# with self.subTest(json_file=json_file):
# f = open("{}/{}".format(json_files_dir, json_file), 'r')
# dd = json.load(f)
# f.close()
#
# atom_indices = dd['atom_indices']
# expected_geoms = dd['expected_geoms']
#
# struct = Structure.from_dict(dd['structure'])
#
# struct = self.lgf.setup_structure(struct)
# se = self.lgf.compute_structure_environments_detailed_voronoi(only_indices=atom_indices,
# maximum_distance_factor=1.5)
#
# #All strategies should get the correct environment with their default parameters
# strategy.set_structure_environments(se)
# for ienv, isite in enumerate(atom_indices):
# ce = strategy.get_site_coordination_environment(struct[isite])
# try:
# coord_env = ce[0]
# except TypeError:
# coord_env = ce
# #Check that the environment found is the expected one
# self.assertEqual(coord_env, expected_geoms[ienv])
#
# def test_simplest_chemenv_strategy(self):
# strategy = SimplestChemenvStrategy()
# self._strategy_test(strategy)
#
# def test_simple_abundance_chemenv_strategy(self):
# strategy = SimpleAbundanceChemenvStrategy()
# self._strategy_test(strategy)
def test_perfect_environments(self):
allcg = AllCoordinationGeometries()
indices_CN = {
1: [0],
2: [1, 0],
3: [1, 0, 2],
4: [2, 0, 3, 1],
5: [2, 3, 1, 0, 4],
6: [0, 2, 3, 1, 5, 4],
7: [2, 6, 0, 3, 4, 5, 1],
8: [1, 2, 6, 3, 7, 0, 4, 5],
9: [5, 2, 6, 0, 4, 7, 3, 8, 1],
10: [8, 5, 6, 3, 0, 7, 2, 4, 9, 1],
11: [7, 6, 4, 1, 2, 5, 0, 8, 9, 10, 3],
12: [5, 8, 9, 0, 3, 1, 4, 2, 6, 11, 10, 7],
13: [4, 11, 5, 12, 1, 2, 8, 3, 0, 6, 9, 7, 10],
20: [8, 12, 11, 0, 14, 10, 13, 6, 18, 1, 9, 17, 3, 19, 5, 7, 15, 2, 16, 4],
}
for coordination in range(1, 21):
for mp_symbol in allcg.get_implemented_geometries(coordination=coordination, returned="mp_symbol"):
cg = allcg.get_geometry_from_mp_symbol(mp_symbol=mp_symbol)
self.lgf.allcg = AllCoordinationGeometries(only_symbols=[mp_symbol])
self.lgf.setup_test_perfect_environment(
mp_symbol,
randomness=False,
indices=indices_CN[coordination],
random_translation="NONE",
random_rotation="NONE",
random_scale="NONE",
)
se = self.lgf.compute_structure_environments(
only_indices=[0],
maximum_distance_factor=1.01 * cg.distfactor_max,
min_cn=cg.coordination_number,
max_cn=cg.coordination_number,
only_symbols=[mp_symbol],
)
self.assertAlmostEqual(
se.get_csm(0, mp_symbol)["symmetry_measure"],
0.0,
delta=1e-8,
msg="Failed to get perfect environment with mp_symbol {}".format(mp_symbol),
)
def test_disable_hints(self):
allcg = AllCoordinationGeometries()
mp_symbol = "SH:13"
mp_symbols = ["SH:13", "HP:12"]
cg = allcg.get_geometry_from_mp_symbol(mp_symbol=mp_symbol)
mypoints = cg.points
mypoints[-1] = [0.9 * cc for cc in mypoints[-1]]
self.lgf.allcg = AllCoordinationGeometries(only_symbols=[mp_symbol])
self.lgf.setup_test_perfect_environment(
mp_symbol,
randomness=False,
indices=[4, 11, 5, 12, 1, 2, 8, 3, 0, 6, 9, 7, 10],
random_translation="NONE",
random_rotation="NONE",
random_scale="NONE",
points=mypoints,
)
se_nohints = self.lgf.compute_structure_environments(
only_indices=[0],
maximum_distance_factor=1.02 * cg.distfactor_max,
min_cn=12,
max_cn=13,
only_symbols=mp_symbols,
get_from_hints=False,
)
se_hints = self.lgf.compute_structure_environments(
only_indices=[0],
maximum_distance_factor=1.02 * cg.distfactor_max,
min_cn=12,
max_cn=13,
only_symbols=mp_symbols,
get_from_hints=True,
)
with self.assertRaises(KeyError):
abc = se_nohints.ce_list[0][12]
abc.minimum_geometries()
self.assertAlmostEqual(se_hints.ce_list[0][13][0], se_nohints.ce_list[0][13][0])
self.assertTrue(set(se_nohints.ce_list[0].keys()).issubset(set(se_hints.ce_list[0].keys())))
if __name__ == "__main__":
unittest.main()
|
|
"""
intersections.py
------------------
Primarily mesh-plane intersections (slicing).
"""
import numpy as np
from .constants import log, tol
from . import util
from . import geometry
from . import grouping
from . import transformations
def mesh_plane(mesh,
plane_normal,
plane_origin,
return_faces=False,
cached_dots=None):
"""
Find the intersections between a mesh and a plane,
returning a set of line segments on that plane.
Parameters
---------
mesh : Trimesh object
Source mesh to slice
plane_normal : (3,) float
Normal vector of plane to intersect with mesh
plane_origin: (3,) float
Point on plane to intersect with mesh
return_faces: bool
If True return face index each line is from
cached_dots : (n, 3) float
If an external function has stored dot
products pass them here to avoid recomputing
Returns
----------
lines : (m, 2, 3) float
List of 3D line segments in space
face_index : (m,) int
Index of mesh.faces for each line
Only returned if return_faces was True
"""
def triangle_cases(signs):
"""
Figure out which faces correspond to which intersection
case from the signs of the dot product of each vertex.
Does this by bit-banging each row of signs into an 8-bit
integer.
code : signs : intersects
0 : [-1 -1 -1] : No
2 : [-1 -1 0] : No
4 : [-1 -1 1] : Yes; 2 on one side, 1 on the other
6 : [-1 0 0] : Yes; one edge fully on plane
8 : [-1 0 1] : Yes; one vertex on plane, 2 on different sides
12 : [-1 1 1] : Yes; 2 on one side, 1 on the other
14 : [0 0 0] : No (on plane fully)
16 : [0 0 1] : Yes; one edge fully on plane
20 : [0 1 1] : No
28 : [1 1 1] : No
Parameters
----------
signs: (n,3) int, all values are -1,0, or 1
Each row contains the dot product of all three vertices
in a face with respect to the plane
Returns
---------
basic: (n,) bool, which faces are in the basic intersection case
one_vertex: (n,) bool, which faces are in the one vertex case
one_edge: (n,) bool, which faces are in the one edge case
"""
signs_sorted = np.sort(signs, axis=1)
coded = np.zeros(len(signs_sorted), dtype=np.int8) + 14
for i in range(3):
coded += signs_sorted[:, i] << 3 - i
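# the +14 offset maps the sorted sign triples onto non-negative codes:
# [-1 -1 -1] -> 14 - 8 - 4 - 2 = 0 and [1 1 1] -> 14 + 8 + 4 + 2 = 28,
# so every case fits the 0-28 lookup tables built below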
# one edge fully on the plane
# note that we are only accepting *one* of the on-edge cases,
# where the other vertex has a positive dot product (16), instead
# of both on-edge cases ([6, 16])
# this is so that for regions that are co-planar with the section plane
# we don't end up with an invalid boundary
key = np.zeros(29, dtype=bool)
key[16] = True
one_edge = key[coded]
# one vertex on plane, other two on different sides
key[:] = False
key[8] = True
one_vertex = key[coded]
# one vertex on one side of the plane, two on the other
key[:] = False
key[[4, 12]] = True
basic = key[coded]
return basic, one_vertex, one_edge
def handle_on_vertex(signs, faces, vertices):
# case where one vertex is on plane, two are on different sides
vertex_plane = faces[signs == 0]
edge_thru = faces[signs != 0].reshape((-1, 2))
point_intersect, valid = plane_lines(plane_origin,
plane_normal,
vertices[edge_thru.T],
line_segments=False)
lines = np.column_stack((vertices[vertex_plane[valid]],
point_intersect)).reshape((-1, 2, 3))
return lines
def handle_on_edge(signs, faces, vertices):
# case where two vertices are on the plane and one is off
edges = faces[signs == 0].reshape((-1, 2))
points = vertices[edges]
return points
def handle_basic(signs, faces, vertices):
# case where one vertex is on one side and two are on the other
unique_element = grouping.unique_value_in_row(
signs, unique=[-1, 1])
edges = np.column_stack(
(faces[unique_element],
faces[np.roll(unique_element, 1, axis=1)],
faces[unique_element],
faces[np.roll(unique_element, 2, axis=1)])).reshape(
(-1, 2))
intersections, valid = plane_lines(plane_origin,
plane_normal,
vertices[edges.T],
line_segments=False)
# since the data has been pre-culled, any invalid intersections at all
# mean the culling was done incorrectly and the results cannot
# be trusted
assert valid.all()
return intersections.reshape((-1, 2, 3))
# check input plane
plane_normal = np.asanyarray(plane_normal,
dtype=np.float64)
plane_origin = np.asanyarray(plane_origin,
dtype=np.float64)
if plane_origin.shape != (3,) or plane_normal.shape != (3,):
raise ValueError('Plane origin and normal must be (3,)!')
if cached_dots is not None:
dots = cached_dots
else:
# dot product of each vertex with the plane normal indexed by face
# so for each face the dot product of each vertex is a row
# shape is the same as mesh.faces (n,3)
dots = np.einsum('i,ij->j', plane_normal,
(mesh.vertices - plane_origin).T)[mesh.faces]
# sign of the dot product is -1, 0, or 1
# shape is the same as mesh.faces (n,3)
signs = np.zeros(mesh.faces.shape, dtype=np.int8)
signs[dots < -tol.merge] = -1
signs[dots > tol.merge] = 1
# figure out which triangles are in the cross section,
# and which of the three intersection cases they are in
cases = triangle_cases(signs)
# handlers for each case
handlers = (handle_basic,
handle_on_vertex,
handle_on_edge)
# the (m, 2, 3) line segments
lines = np.vstack([h(signs[c],
mesh.faces[c],
mesh.vertices)
for c, h in zip(cases, handlers)])
log.debug('mesh_cross_section found %i intersections',
len(lines))
if return_faces:
face_index = np.hstack([np.nonzero(c)[0] for c in cases])
return lines, face_index
return lines
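# A minimal, self-contained sketch of how mesh_plane is typically called
# (illustrative only; not part of the public API).
def _example_mesh_plane():  # pragma: no cover
    """
    Illustrative sketch: slice a unit box through its center. Assumes
    trimesh.creation is importable from this package; the plane values
    below are arbitrary.
    """
    from . import creation
    box = creation.box(extents=[1.0, 1.0, 1.0])
    lines, face_index = mesh_plane(box,
                                   plane_normal=[0.0, 0.0, 1.0],
                                   plane_origin=[0.0, 0.0, 0.0],
                                   return_faces=True)
    # every returned segment lies in the z=0 plane
    return lines, face_index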
def mesh_multiplane(mesh,
plane_origin,
plane_normal,
heights):
"""
A utility function for slicing a mesh by multiple
parallel planes, which caches the dot product operation.
Parameters
-------------
mesh : trimesh.Trimesh
Geometry to be sliced by planes
plane_normal : (3,) float
Normal vector of plane
plane_origin : (3,) float
Point on a plane
heights : (m,) float
Offset distances from plane to slice at
Returns
--------------
lines : (m,) sequence of (n, 2, 2) float
Lines in space for m planes
to_3D : (m, 4, 4) float
Transform to move each section back to 3D
face_index : (m,) sequence of (n,) int
Indexes of mesh.faces for each segment
"""
# check input plane
plane_normal = util.unitize(plane_normal)
plane_origin = np.asanyarray(plane_origin,
dtype=np.float64)
heights = np.asanyarray(heights, dtype=np.float64)
# dot product of every vertex with plane
vertex_dots = np.dot(plane_normal,
(mesh.vertices - plane_origin).T)
# reconstruct transforms for each 2D section
base_transform = geometry.plane_transform(origin=plane_origin,
normal=plane_normal)
base_transform = np.linalg.inv(base_transform)
# alter translation Z inside loop
translation = np.eye(4)
# store results
transforms = []
face_index = []
segments = []
# loop through user specified heights
for height in heights:
# offset the origin by the height
new_origin = plane_origin + (plane_normal * height)
# offset the dot products by height and index by faces
new_dots = (vertex_dots - height)[mesh.faces]
# run the intersection with the cached dot products
lines, index = mesh_plane(mesh=mesh,
plane_origin=new_origin,
plane_normal=plane_normal,
return_faces=True,
cached_dots=new_dots)
# get the transforms to 3D space and back
translation[2, 3] = height
to_3D = np.dot(base_transform, translation)
to_2D = np.linalg.inv(to_3D)
transforms.append(to_3D)
# transform points to 2D frame
lines_2D = transformations.transform_points(
lines.reshape((-1, 3)),
to_2D)
# if we didn't screw up the transform all
# of the Z values should be zero
assert np.allclose(lines_2D[:, 2], 0.0)
# reshape back in to lines and discard Z
lines_2D = lines_2D[:, :2].reshape((-1, 2, 2))
# store (n, 2, 2) float lines
segments.append(lines_2D)
# store (n,) int indexes of mesh.faces
face_index.append(index)
# (n, 4, 4) transforms from 2D to 3D
transforms = np.array(transforms, dtype=np.float64)
return segments, transforms, face_index
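# Typical call pattern for mesh_multiplane (illustrative; the heights and
# normal below are arbitrary examples):
#
#     sections, to_3D, face_index = mesh_multiplane(
#         mesh,
#         plane_origin=mesh.bounds[0],
#         plane_normal=[0, 0, 1],
#         heights=np.linspace(0.0, mesh.extents[2], 10))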
def plane_lines(plane_origin,
plane_normal,
endpoints,
line_segments=True):
"""
Calculate plane-line intersections
Parameters
---------
plane_origin : (3,) float
Point on plane
plane_normal : (3,) float
Plane normal vector
endpoints : (2, n, 3) float
Points defining lines to be tested
line_segments : bool
If True, only returns intersections as valid if
vertices from endpoints are on different sides
of the plane.
Returns
---------
intersections : (m, 3) float
Cartesian intersection points
valid : (n,) bool
Indicate whether a valid intersection exists
for each input line segment
"""
endpoints = np.asanyarray(endpoints)
plane_origin = np.asanyarray(plane_origin).reshape(3)
line_dir = util.unitize(endpoints[1] - endpoints[0])
plane_normal = util.unitize(np.asanyarray(plane_normal).reshape(3))
t = np.dot(plane_normal, (plane_origin - endpoints[0]).T)
b = np.dot(plane_normal, line_dir.T)
# If the plane normal and line direction are perpendicular, it means
# the vector is 'on plane', and there isn't a valid intersection.
# We discard on-plane vectors by checking that the dot product is nonzero
valid = np.abs(b) > tol.zero
if line_segments:
test = np.dot(plane_normal,
np.transpose(plane_origin - endpoints[1]))
different_sides = np.sign(t) != np.sign(test)
nonzero = np.logical_or(np.abs(t) > tol.zero,
np.abs(test) > tol.zero)
valid = np.logical_and(valid, different_sides)
valid = np.logical_and(valid, nonzero)
d = np.divide(t[valid], b[valid])
intersection = endpoints[0][valid]
intersection = intersection + np.reshape(d, (-1, 1)) * line_dir[valid]
return intersection, valid
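# The math used in plane_lines above: for a line p(s) = e0 + s * d and a
# plane n . (x - o) = 0, the intersection parameter is
# s = n . (o - e0) / (n . d), i.e. t / b in the code. A near-zero b means
# the line is parallel to (or lying in) the plane, so it is marked invalid.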
def planes_lines(plane_origins,
plane_normals,
line_origins,
line_directions,
return_distance=False,
return_denom=False):
"""
Given one line per plane find the intersection points.
Parameters
-----------
plane_origins : (n,3) float
Point on each plane
plane_normals : (n,3) float
Normal vector of each plane
line_origins : (n,3) float
Point at origin of each line
line_directions : (n,3) float
Direction vector of each line
return_distance : bool
Return distance from origin to point also
return_denom : bool
Return denominator, so you can check for small values
Returns
----------
on_plane : (n,3) float
Points on specified planes
valid : (n,) bool
Did plane intersect line or not
distance : (n,) float
[OPTIONAL] Distance from point
denom : (n,) float
[OPTIONAL] Denominator
"""
# check input types
plane_origins = np.asanyarray(plane_origins, dtype=np.float64)
plane_normals = np.asanyarray(plane_normals, dtype=np.float64)
line_origins = np.asanyarray(line_origins, dtype=np.float64)
line_directions = np.asanyarray(line_directions, dtype=np.float64)
# vector from line to plane
origin_vectors = plane_origins - line_origins
projection_ori = util.diagonal_dot(origin_vectors, plane_normals)
projection_dir = util.diagonal_dot(line_directions, plane_normals)
valid = np.abs(projection_dir) > 1e-5
distance = np.divide(projection_ori[valid],
projection_dir[valid])
on_plane = line_directions[valid] * distance.reshape((-1, 1))
on_plane += line_origins[valid]
result = [on_plane, valid]
if return_distance:
result.append(distance)
if return_denom:
result.append(projection_dir)
return result
def slice_faces_plane(vertices,
faces,
plane_normal,
plane_origin,
cached_dots=None):
"""
Slice a mesh (given as a set of faces and vertices) with a plane, returning a
new mesh (again as a set of faces and vertices) that is the
portion of the original mesh to the positive normal side of the plane.
Parameters
---------
vertices : (n, 3) float
Vertices of source mesh to slice
faces : (n, 3) int
Faces of source mesh to slice
plane_normal : (3,) float
Normal vector of plane to intersect with mesh
plane_origin : (3,) float
Point on plane to intersect with mesh
cached_dots : (n, 3) float
If an external function has stored dot
products pass them here to avoid recomputing
Returns
----------
new_vertices : (n, 3) float
Vertices of sliced mesh
new_faces : (n, 3) int
Faces of sliced mesh
"""
if len(vertices) == 0:
return vertices, faces
if cached_dots is not None:
dots = cached_dots
else:
# dot product of each vertex with the plane normal indexed by face
# so for each face the dot product of each vertex is a row
# shape is the same as faces (n,3)
dots = np.einsum('i,ij->j', plane_normal,
(vertices - plane_origin).T)[faces]
# Find vertex orientations w.r.t. faces for all triangles:
# -1 -> vertex "inside" plane (positive normal direction)
# 0 -> vertex on plane
# 1 -> vertex "outside" plane (negative normal direction)
signs = np.zeros(faces.shape, dtype=np.int8)
signs[dots < -tol.merge] = 1
signs[dots > tol.merge] = -1
signs[np.logical_and(dots >= -tol.merge, dots <= tol.merge)] = 0
# Find all triangles that intersect this plane
# onedge <- indices of all triangles intersecting the plane
# inside <- indices of all triangles "inside" the plane (positive normal)
signs_sum = signs.sum(axis=1, dtype=np.int8)
signs_asum = np.abs(signs).sum(axis=1, dtype=np.int8)
# Cases:
# (0,0,0), (-1,0,0), (-1,-1,0), (-1,-1,-1) <- inside
# (1,0,0), (1,1,0), (1,1,1) <- outside
# (1,0,-1), (1,-1,-1), (1,1,-1) <- onedge
onedge = np.logical_and(signs_asum >= 2,
np.abs(signs_sum) <= 1)
inside = (signs_sum == -signs_asum)
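# A face is "onedge" when it has off-plane vertices on both sides of the
# plane: at least two nonzero signs (signs_asum >= 2) that do not all
# agree (abs(signs_sum) <= 1). An "inside" face has no vertex on the
# discarded (+1) side, so it is kept unchanged.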
# Automatically include all faces that are "inside"
new_faces = faces[inside]
# Separate faces on the edge into two cases: those which will become
# quads (two vertices inside plane) and those which will become triangles
# (one vertex inside plane)
triangles = vertices[faces]
cut_triangles = triangles[onedge]
cut_faces_quad = faces[np.logical_and(onedge, signs_sum < 0)]
cut_faces_tri = faces[np.logical_and(onedge, signs_sum >= 0)]
cut_signs_quad = signs[np.logical_and(onedge, signs_sum < 0)]
cut_signs_tri = signs[np.logical_and(onedge, signs_sum >= 0)]
# If no faces to cut, the surface is not in contact with this plane.
# Thus, return a mesh with only the inside faces
if len(cut_faces_quad) + len(cut_faces_tri) == 0:
if len(new_faces) == 0:
# if no new faces at all return empty arrays
empty = (np.zeros((0, 3), dtype=np.float64),
np.zeros((0, 3), dtype=np.int64))
return empty
# find the unique indices in the new faces
# using an integer-only unique function
unique, inverse = grouping.unique_bincount(new_faces.reshape(-1),
minlength=len(vertices),
return_inverse=True)
# use the unique indices for our final vertices and faces
final_vert = vertices[unique]
final_face = inverse.reshape((-1, 3))
return final_vert, final_face
# Extract the intersections of each triangle's edges with the plane
o = cut_triangles # origins
d = np.roll(o, -1, axis=1) - o # directions
num = (plane_origin - o).dot(plane_normal) # compute num/denom
denom = np.dot(d, plane_normal)
denom[denom == 0.0] = 1e-12 # prevent division by zero
dist = np.divide(num, denom)
# intersection points for each segment
int_points = np.einsum('ij,ijk->ijk', dist, d) + o
# Initialize the array of new vertices with the current vertices
new_vertices = vertices
# Handle the case where a new quad is formed by the intersection
# First, extract the intersection points belonging to a new quad
quad_int_points = int_points[(signs_sum < 0)[onedge], :, :]
num_quads = len(quad_int_points)
if num_quads > 0:
# Extract the vertex on the outside of the plane, then get the vertices
# (in CCW order of the inside vertices)
quad_int_inds = np.where(cut_signs_quad == 1)[1]
quad_int_verts = cut_faces_quad[
np.stack((range(num_quads), range(num_quads)), axis=1),
np.stack(((quad_int_inds + 1) % 3, (quad_int_inds + 2) % 3), axis=1)]
# Fill out new quad faces with the intersection points as vertices
new_quad_faces = np.append(
quad_int_verts,
np.arange(len(new_vertices),
len(new_vertices) +
2 * num_quads).reshape(num_quads, 2), axis=1)
# Extract correct intersection points from int_points and order them in
# the same way as they were added to faces
new_quad_vertices = quad_int_points[
np.stack((range(num_quads), range(num_quads)), axis=1),
np.stack((((quad_int_inds + 2) % 3).T, quad_int_inds.T),
axis=1), :].reshape(2 * num_quads, 3)
# Add new vertices to existing vertices, triangulate quads, and add the
# resulting triangles to the new faces
new_vertices = np.append(new_vertices, new_quad_vertices, axis=0)
new_tri_faces_from_quads = geometry.triangulate_quads(new_quad_faces)
new_faces = np.append(new_faces, new_tri_faces_from_quads, axis=0)
# Handle the case where a new triangle is formed by the intersection
# First, extract the intersection points belonging to a new triangle
tri_int_points = int_points[(signs_sum >= 0)[onedge], :, :]
num_tris = len(tri_int_points)
if num_tris > 0:
# Extract the single vertex for each triangle inside the plane and get the
# inside vertices (CCW order)
tri_int_inds = np.where(cut_signs_tri == -1)[1]
tri_int_verts = cut_faces_tri[range(
num_tris), tri_int_inds].reshape(num_tris, 1)
# Fill out new triangles with the intersection points as vertices
new_tri_faces = np.append(
tri_int_verts,
np.arange(len(new_vertices),
len(new_vertices) +
2 * num_tris).reshape(num_tris, 2),
axis=1)
# Extract correct intersection points and order them in the same way as
# the vertices were added to the faces
new_tri_vertices = tri_int_points[
np.stack((range(num_tris), range(num_tris)), axis=1),
np.stack((tri_int_inds.T, ((tri_int_inds + 2) % 3).T),
axis=1),
:].reshape(2 * num_tris, 3)
# Append new vertices and new faces
new_vertices = np.append(new_vertices, new_tri_vertices, axis=0)
new_faces = np.append(new_faces, new_tri_faces, axis=0)
# find the unique indices in the new faces
# using an integer-only unique function
unique, inverse = grouping.unique_bincount(new_faces.reshape(-1),
minlength=len(new_vertices),
return_inverse=True)
# use the unique indexes for our final vertex and faces
final_vert = new_vertices[unique]
final_face = inverse.reshape((-1, 3))
return final_vert, final_face
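# Hedged usage sketch (illustrative, not part of the original module): slices
# a single triangle that straddles the plane z=0, keeping the portion on the
# +Z side. Exercises the "quad" case handled above (one vertex cut away).
def _example_slice_faces_plane():
    verts = np.array([[0.0, 0.0, -1.0],
                      [1.0, 0.0, 1.0],
                      [0.0, 1.0, 1.0]], dtype=np.float64)
    tris = np.array([[0, 1, 2]], dtype=np.int64)
    new_vertices, new_faces = slice_faces_plane(
        vertices=verts,
        faces=tris,
        plane_normal=np.array([0.0, 0.0, 1.0]),
        plane_origin=np.zeros(3))
    # the vertex below the plane is replaced by two intersection points,
    # so the result is a quad triangulated into two faces
    return new_vertices, new_faces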
def slice_mesh_plane(mesh,
plane_normal,
plane_origin,
cap=False,
cached_dots=None,
**kwargs):
"""
Slice a mesh with a plane, returning a new mesh that is the
portion of the original mesh to the positive normal side of the plane
Parameters
---------
mesh : Trimesh object
Source mesh to slice
plane_normal : (3,) float
Normal vector of plane to intersect with mesh
plane_origin : (3,) float
Point on plane to intersect with mesh
cap : bool
If True, cap the result with a triangulated polygon
cached_dots : (n, 3) float
If an external function has stored dot
products pass them here to avoid recomputing
Returns
----------
new_mesh : Trimesh object
Sliced mesh
"""
# check input for none
if mesh is None:
return None
# avoid circular import
from .base import Trimesh
from .creation import triangulate_polygon
# check input plane
plane_normal = np.asanyarray(plane_normal,
dtype=np.float64)
plane_origin = np.asanyarray(plane_origin,
dtype=np.float64)
# check to make sure origins and normals have acceptable shape
shape_ok = ((plane_origin.shape == (3,) or
util.is_shape(plane_origin, (-1, 3))) and
(plane_normal.shape == (3,) or
util.is_shape(plane_normal, (-1, 3))) and
plane_origin.shape == plane_normal.shape)
if not shape_ok:
raise ValueError('plane origins and normals must be (n, 3)!')
# start with copy of original mesh, faces, and vertices
sliced_mesh = mesh.copy()
vertices = mesh.vertices.copy()
faces = mesh.faces.copy()
# slice away specified planes
for origin, normal in zip(plane_origin.reshape((-1, 3)),
plane_normal.reshape((-1, 3))):
# calculate dots here if not passed in to save time
# in case of cap
if cached_dots is None:
# dot product of each vertex with the plane normal indexed by face
# so for each face the dot product of each vertex is a row
# shape is the same as faces (n,3)
dots = np.einsum('i,ij->j', normal,
(vertices - origin).T)[faces]
else:
dots = cached_dots
# save the new vertices and faces
vertices, faces = slice_faces_plane(vertices=vertices,
faces=faces,
plane_normal=normal,
plane_origin=origin,
cached_dots=dots)
# check if cap arg specified
if cap:
# check if mesh is watertight (can't cap if not)
if not sliced_mesh.is_watertight:
raise ValueError('Input mesh must be watertight to cap slice')
path = sliced_mesh.section(plane_normal=normal,
plane_origin=origin,
cached_dots=dots)
# transform Path3D onto XY plane for triangulation
on_plane, to_3D = path.to_planar()
# triangulate each closed region of 2D cap
# without adding any new vertices
v, f = [], []
for polygon in on_plane.polygons_full:
t = triangulate_polygon(
polygon, triangle_args='p', allow_boundary_steiner=False)
v.append(t[0])
f.append(t[1])
# append regions and reindex
vf, ff = util.append_faces(v, f)
# make vertices 3D and transform back to mesh frame
vf = np.column_stack((vf, np.zeros(len(vf))))
vf = transformations.transform_points(vf, to_3D)
# add cap vertices and faces and reindex
vertices, faces = util.append_faces([vertices, vf], [faces, ff])
# Update mesh with cap (processing needed to merge vertices)
sliced_mesh = Trimesh(vertices=vertices, faces=faces)
vertices, faces = sliced_mesh.vertices.copy(), sliced_mesh.faces.copy()
# return the sliced mesh
return Trimesh(vertices=vertices, faces=faces, process=False)
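# Hedged usage sketch (illustrative, not part of the original module): since
# slice_mesh_plane accepts stacked (n, 3) plane origins and normals, a mesh
# can be clipped to the positive octant in a single call. Assumes `mesh` is a
# trimesh.Trimesh instance (for example trimesh.creation.box()).
def _example_slice_mesh_plane(mesh):
    normals = np.eye(3)         # +X, +Y and +Z plane normals
    origins = np.zeros((3, 3))  # all three planes pass through the origin
    return slice_mesh_plane(mesh,
                            plane_normal=normals,
                            plane_origin=origins)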
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import eventlet
from oslo.config import cfg
import six
from stevedore import enabled
from climate.db import api as db_api
from climate.db import exceptions as db_ex
from climate import exceptions as common_ex
from climate import manager
from climate.manager import exceptions
from climate.notification import api as notification_api
from climate.openstack.common.gettextutils import _
from climate.openstack.common import log as logging
from climate.utils import service as service_utils
from climate.utils import trusts
manager_opts = [
cfg.ListOpt('plugins',
default=['dummy.vm.plugin'],
help='All plugins to use (one for every resource type to '
                     'support).'),
cfg.IntOpt('notify_hours_before_lease_end',
default=48,
               help='Number of hours before the lease end date at which a '
                    'notification that the lease is about to expire will be '
                    'sent. Set this to 0 to disable the notification.')
]
CONF = cfg.CONF
CONF.register_opts(manager_opts, 'manager')
LOG = logging.getLogger(__name__)
LEASE_DATE_FORMAT = "%Y-%m-%d %H:%M"
class ManagerService(service_utils.RPCServer):
"""Service class for the climate-manager service.
Responsible for working with Climate DB, scheduling logic, running events,
working with plugins, etc.
"""
def __init__(self):
target = manager.get_target()
super(ManagerService, self).__init__(target)
self.plugins = self._get_plugins()
self.resource_actions = self._setup_actions()
def start(self):
super(ManagerService, self).start()
self.tg.add_timer(10, self._event)
def _get_plugins(self):
"""Return dict of resource-plugin class pairs."""
config_plugins = CONF.manager.plugins
plugins = {}
extension_manager = enabled.EnabledExtensionManager(
check_func=lambda ext: ext.name in config_plugins,
namespace='climate.resource.plugins',
invoke_on_load=False
)
for ext in extension_manager.extensions:
try:
plugin_obj = ext.plugin()
except Exception as e:
LOG.warning("Could not load {0} plugin "
"for resource type {1} '{2}'".format(
ext.name, ext.plugin.resource_type, e))
else:
if plugin_obj.resource_type in plugins:
msg = ("You have provided several plugins for "
"one resource type in configuration file. "
"Please set one plugin per resource type.")
raise exceptions.PluginConfigurationError(error=msg)
plugins[plugin_obj.resource_type] = plugin_obj
return plugins
def _setup_actions(self):
"""Setup actions for each resource type supported.
BasePlugin interface provides only on_start and on_end behaviour now.
If there are some configs needed by plugin, they should be returned
from get_plugin_opts method. These flags are registered in
[resource_type] group of configuration file.
"""
actions = {}
        for resource_type, plugin in six.iteritems(self.plugins):
CONF.register_opts(plugin.get_plugin_opts(), group=resource_type)
actions[resource_type] = {}
actions[resource_type]['on_start'] = plugin.on_start
actions[resource_type]['on_end'] = plugin.on_end
plugin.setup(None)
return actions
@service_utils.with_empty_context
def _event(self):
"""Tries to commit event.
If there is an event in Climate DB to be done, do it and change its
status to 'DONE'.
"""
LOG.debug('Trying to get event from DB.')
event = db_api.event_get_first_sorted_by_filters(
sort_key='time',
sort_dir='asc',
filters={'status': 'UNDONE'}
)
if not event:
return
if event['time'] < datetime.datetime.utcnow():
db_api.event_update(event['id'], {'status': 'IN_PROGRESS'})
event_type = event['event_type']
event_fn = getattr(self, event_type, None)
if event_fn is None:
raise exceptions.EventError(error='Event type %s is not '
'supported' % event_type)
try:
eventlet.spawn_n(service_utils.with_empty_context(event_fn),
event['lease_id'], event['id'])
lease = db_api.lease_get(event['lease_id'])
with trusts.create_ctx_from_trust(lease['trust_id']) as ctx:
self._send_notification(lease,
ctx,
events=['event.%s' % event_type])
except Exception:
db_api.event_update(event['id'], {'status': 'ERROR'})
            LOG.exception(_('Error occurred while handling the event.'))
def _date_from_string(self, date_string, date_format=LEASE_DATE_FORMAT):
try:
date = datetime.datetime.strptime(date_string, date_format)
except ValueError:
raise exceptions.InvalidDate(date=date_string,
date_format=date_format)
return date
def get_lease(self, lease_id):
return db_api.lease_get(lease_id)
def list_leases(self, project_id=None):
return db_api.lease_list(project_id)
def create_lease(self, lease_values):
"""Create a lease with reservations.
Return either the model of created lease or None if any error.
"""
try:
trust_id = lease_values.pop('trust_id')
except KeyError:
raise exceptions.MissingTrustId()
# Remove and keep reservation values
reservations = lease_values.pop("reservations", [])
# Create the lease without the reservations
start_date = lease_values['start_date']
end_date = lease_values['end_date']
now = datetime.datetime.utcnow()
now = datetime.datetime(now.year,
now.month,
now.day,
now.hour,
now.minute)
if start_date == 'now':
start_date = now
else:
start_date = self._date_from_string(start_date)
end_date = self._date_from_string(end_date)
if start_date < now:
raise common_ex.NotAuthorized(
                'Start date must be later than the current date')
with trusts.create_ctx_from_trust(trust_id) as ctx:
lease_values['user_id'] = ctx.user_id
lease_values['project_id'] = ctx.project_id
lease_values['start_date'] = start_date
lease_values['end_date'] = end_date
if not lease_values.get('events'):
lease_values['events'] = []
lease_values['events'].append({'event_type': 'start_lease',
'time': start_date,
'status': 'UNDONE'})
lease_values['events'].append({'event_type': 'end_lease',
'time': end_date,
'status': 'UNDONE'})
before_end_date = lease_values.get('before_end_notification', None)
if before_end_date:
                # validate the incoming before_end_notification value
try:
before_end_date = self._date_from_string(
before_end_date)
self._check_date_within_lease_limits(before_end_date,
lease_values)
except common_ex.ClimateException as e:
LOG.error("Invalid before_end_date param. %s" % e.message)
raise e
elif CONF.manager.notify_hours_before_lease_end > 0:
delta = datetime.timedelta(
hours=CONF.manager.notify_hours_before_lease_end)
before_end_date = lease_values['end_date'] - delta
if before_end_date:
event = {'event_type': 'before_end_lease',
'status': 'UNDONE'}
lease_values['events'].append(event)
self._update_before_end_event_date(event, before_end_date,
lease_values)
try:
if trust_id:
lease_values.update({'trust_id': trust_id})
lease = db_api.lease_create(lease_values)
lease_id = lease['id']
except db_ex.ClimateDBDuplicateEntry:
                LOG.exception('Cannot create a lease - duplicate lease name')
raise exceptions.LeaseNameAlreadyExists(
name=lease_values['name'])
except db_ex.ClimateDBException:
LOG.exception('Cannot create a lease')
raise
else:
try:
for reservation in reservations:
reservation['lease_id'] = lease['id']
reservation['start_date'] = lease['start_date']
reservation['end_date'] = lease['end_date']
resource_type = reservation['resource_type']
if resource_type in self.plugins:
self.plugins[resource_type].create_reservation(
reservation)
else:
raise exceptions.UnsupportedResourceType(
resource_type)
except (exceptions.UnsupportedResourceType,
common_ex.ClimateException):
LOG.exception("Failed to create reservation for a lease. "
"Rollback the lease and associated "
"reservations")
db_api.lease_destroy(lease_id)
raise
else:
lease = db_api.lease_get(lease['id'])
self._send_notification(lease, ctx, events=['create'])
return lease
def update_lease(self, lease_id, values):
if not values:
return db_api.lease_get(lease_id)
if len(values) == 1 and 'name' in values:
db_api.lease_update(lease_id, values)
return db_api.lease_get(lease_id)
lease = db_api.lease_get(lease_id)
start_date = values.get(
'start_date',
datetime.datetime.strftime(lease['start_date'], LEASE_DATE_FORMAT))
end_date = values.get(
'end_date',
datetime.datetime.strftime(lease['end_date'], LEASE_DATE_FORMAT))
before_end_date = values.get('before_end_notification', None)
now = datetime.datetime.utcnow()
now = datetime.datetime(now.year,
now.month,
now.day,
now.hour,
now.minute)
if start_date == 'now':
start_date = now
else:
start_date = self._date_from_string(start_date)
end_date = self._date_from_string(end_date)
values['start_date'] = start_date
values['end_date'] = end_date
if (lease['start_date'] < now and
values['start_date'] != lease['start_date']):
raise common_ex.NotAuthorized(
'Cannot modify the start date of already started leases')
if (lease['start_date'] > now and
values['start_date'] < now):
raise common_ex.NotAuthorized(
                'Start date must be later than the current date')
if lease['end_date'] < now:
raise common_ex.NotAuthorized(
'Terminated leases can only be renamed')
if (values['end_date'] < now or
values['end_date'] < values['start_date']):
raise common_ex.NotAuthorized(
                'End date must be later than the current date and the start date')
with trusts.create_ctx_from_trust(lease['trust_id']):
if before_end_date:
try:
before_end_date = self._date_from_string(before_end_date)
self._check_date_within_lease_limits(before_end_date,
values)
except common_ex.ClimateException as e:
LOG.error("Invalid before_end_date param. %s" % e.message)
raise e
# TODO(frossigneux) rollback if an exception is raised
for reservation in (
db_api.reservation_get_all_by_lease_id(lease_id)):
reservation['start_date'] = values['start_date']
reservation['end_date'] = values['end_date']
resource_type = reservation['resource_type']
self.plugins[resource_type].update_reservation(
reservation['id'],
reservation)
event = db_api.event_get_first_sorted_by_filters(
'lease_id',
'asc',
{
'lease_id': lease_id,
'event_type': 'start_lease'
}
)
if not event:
raise common_ex.ClimateException(
'Start lease event not found')
db_api.event_update(event['id'], {'time': values['start_date']})
event = db_api.event_get_first_sorted_by_filters(
'lease_id',
'asc',
{
'lease_id': lease_id,
'event_type': 'end_lease'
}
)
if not event:
raise common_ex.ClimateException(
'End lease event not found')
db_api.event_update(event['id'], {'time': values['end_date']})
notifications = ['update']
self._update_before_end_event(lease, values, notifications,
before_end_date)
db_api.lease_update(lease_id, values)
lease = db_api.lease_get(lease_id)
with trusts.create_ctx_from_trust(lease['trust_id']) as ctx:
self._send_notification(lease, ctx, events=notifications)
return lease
def delete_lease(self, lease_id):
lease = self.get_lease(lease_id)
if (datetime.datetime.utcnow() < lease['start_date'] or
datetime.datetime.utcnow() > lease['end_date']):
with trusts.create_ctx_from_trust(lease['trust_id']) as ctx:
for reservation in lease['reservations']:
plugin = self.plugins[reservation['resource_type']]
try:
plugin.on_end(reservation['resource_id'])
except (db_ex.ClimateDBException, RuntimeError):
LOG.exception("Failed to delete a reservation "
"for a lease.")
raise
db_api.lease_destroy(lease_id)
self._send_notification(lease, ctx, events=['delete'])
else:
raise common_ex.NotAuthorized(
'Already started lease cannot be deleted')
def start_lease(self, lease_id, event_id):
lease = self.get_lease(lease_id)
with trusts.create_ctx_from_trust(lease['trust_id']):
self._basic_action(lease_id, event_id, 'on_start', 'active')
def end_lease(self, lease_id, event_id):
lease = self.get_lease(lease_id)
with trusts.create_ctx_from_trust(lease['trust_id']):
self._basic_action(lease_id, event_id, 'on_end', 'deleted')
def before_end_lease(self, lease_id, event_id):
db_api.event_update(event_id, {'status': 'DONE'})
def _basic_action(self, lease_id, event_id, action_time,
reservation_status=None):
"""Commits basic lease actions such as starting and ending."""
lease = self.get_lease(lease_id)
event_status = 'DONE'
for reservation in lease['reservations']:
resource_type = reservation['resource_type']
try:
self.resource_actions[resource_type][action_time](
reservation['resource_id']
)
except common_ex.ClimateException:
LOG.exception("Failed to execute action %(action)s "
"for lease %(lease)s"
% {
'action': action_time,
'lease': lease_id,
})
event_status = 'ERROR'
db_api.reservation_update(reservation['id'],
{'status': 'error'})
else:
if reservation_status is not None:
db_api.reservation_update(reservation['id'],
{'status': reservation_status})
db_api.event_update(event_id, {'status': event_status})
    def _send_notification(self, lease, ctx, events=()):
payload = notification_api.format_lease_payload(lease)
for event in events:
notification_api.send_lease_notification(ctx, payload,
'lease.%s' % event)
def _check_date_within_lease_limits(self, date, lease):
if not lease['start_date'] < date < lease['end_date']:
raise common_ex.NotAuthorized(
'Datetime is out of lease limits')
def _update_before_end_event_date(self, event, before_end_date, lease):
event['time'] = before_end_date
if event['time'] < lease['start_date']:
LOG.warning("New start_date greater than before_end_date. "
"Setting before_end_date to %s for lease %s"
% (lease['start_date'], lease.get('id',
lease.get('name'))))
event['time'] = lease['start_date']
def _update_before_end_event(self, old_lease, new_lease,
notifications, before_end_date=None):
event = db_api.event_get_first_sorted_by_filters(
'lease_id',
'asc',
{
'lease_id': old_lease['id'],
'event_type': 'before_end_lease'
}
)
if event:
# NOTE(casanch1) do nothing if the event does not exist.
# This is for backward compatibility
update_values = {}
if not before_end_date:
# before_end_date needs to be calculated based on
# previous delta
prev_before_end_delta = old_lease['end_date'] - event['time']
before_end_date = new_lease['end_date'] - prev_before_end_delta
self._update_before_end_event_date(update_values, before_end_date,
new_lease)
if event['status'] == 'DONE':
update_values['status'] = 'UNDONE'
notifications.append('event.before_end_lease.stop')
db_api.event_update(event['id'], update_values)
def __getattr__(self, name):
"""RPC Dispatcher for plugins methods."""
fn = None
try:
resource_type, method = name.rsplit(':', 1)
except ValueError:
            # NOTE(sbauza): the dispatcher needs to know which plugin to use;
            # raise AttributeError if no resource type is encoded in the name
raise AttributeError(name)
try:
try:
fn = getattr(self.plugins[resource_type], method)
except KeyError:
LOG.error("Plugin with resource type %s not found",
resource_type)
raise exceptions.UnsupportedResourceType(resource_type)
except AttributeError:
LOG.error("Plugin %s doesn't include method %s",
self.plugins[resource_type], method)
if fn is not None:
return fn
raise AttributeError(name)
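# Hedged sketch (illustrative only, not shipped with this module): a
# hypothetical minimal resource plugin showing the interface ManagerService
# relies on above: resource_type, get_plugin_opts, setup, the on_start/on_end
# actions and the reservation hooks. Real plugins are discovered through the
# 'climate.resource.plugins' stevedore namespace, and their methods become
# reachable over RPC via __getattr__ using '<resource_type>:<method>' names.
class _ExampleDummyPlugin(object):
    resource_type = 'dummy:example'

    def get_plugin_opts(self):
        # extra oslo.config options registered under the [dummy:example] group
        return []

    def setup(self, conf):
        pass

    def on_start(self, resource_id):
        LOG.info('Starting resource %s', resource_id)

    def on_end(self, resource_id):
        LOG.info('Releasing resource %s', resource_id)

    def create_reservation(self, values):
        # values include lease_id, start_date, end_date and resource_type
        pass

    def update_reservation(self, reservation_id, values):
        pass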
|
|
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from OpenGL.arrays import vbo
import sys
sys.path.append("S3DGLPy")
from Primitives3D import *
from PolyMesh import *
from Cameras3D import *
from struct import *
from sys import exit, argv
import numpy as np
import scipy.io as sio
import os
import math
import time
from time import sleep
import matplotlib.pyplot as plt
from ICP import *
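#Hedged sketch (illustrative only, NOT the ICP.py implementations imported
#above): based on how they are called in this viewer, the helpers are assumed
#to behave roughly like these underscore-prefixed versions, with points stored
#as 3 x N column matrices.
def _sketch_getCentroid(X):
    #3x1 centroid of a 3xN matrix of column points
    return np.mean(X, axis=1, keepdims=True)

def _sketch_getCorrespondences(X, Y, Cx, Cy, Rx):
    #For every transformed X column, the index of the nearest (Y - Cy) column
    XP = Rx.dot(X - Cx)
    YP = Y - Cy
    D = np.sum(XP**2, 0)[:, None] + np.sum(YP**2, 0)[None, :] - 2*XP.T.dot(YP)
    return np.argmin(D, axis=1)

def _sketch_getProcrustesAlignment(X, Y, idx):
    #Rigid (Kabsch/SVD) alignment of X onto its corresponding Y points
    Cx = _sketch_getCentroid(X)
    Cy = _sketch_getCentroid(Y[:, idx])
    H = (X - Cx).dot((Y[:, idx] - Cy).T)
    U, _, Vt = np.linalg.svd(H)
    R = Vt.T.dot(U.T)
    if np.linalg.det(R) < 0:
        #Guard against reflections
        Vt[-1, :] *= -1
        R = Vt.T.dot(U.T)
    return (Cx, Cy, R)

def _sketch_doICP(X, Y, MaxIters):
    #Alternate nearest-neighbor correspondences and Procrustes alignment
    Cx, Cy, Rx = _sketch_getCentroid(X), _sketch_getCentroid(Y), np.eye(3)
    CxList, CyList, RxList = [Cx], [Cy], [Rx]
    for _ in range(MaxIters):
        idx = _sketch_getCorrespondences(X, Y, Cx, Cy, Rx)
        (Cx, Cy, Rx) = _sketch_getProcrustesAlignment(X, Y, idx)
        CxList.append(Cx)
        CyList.append(Cy)
        RxList.append(Rx)
    return (CxList, CyList, RxList)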
def saveImageGL(mvcanvas, filename, w, h):
view = glGetIntegerv(GL_VIEWPORT)
pixels = glReadPixels(0, 0, view[2], view[3], GL_RGB,
GL_UNSIGNED_BYTE)
I = np.fromstring(pixels, dtype=np.dtype('<b'))
I = np.reshape(I, (h, w, 3))
for k in range(3):
I[:, :, k] = np.flipud(I[:, :, k])
plt.imshow(I/255.0)
plt.axis('off')
plt.savefig(filename, dpi = 150, bbox_inches='tight')
plt.clf()
class ICPViewerCanvas(object):
def __init__(self, xmesh, ymesh, MaxIters = 200, outputPrefix = ""):
#GLUT Variables
self.GLUTwindow_height = 800
self.GLUTwindow_width = 800
self.GLUTmouse = [0, 0]
self.GLUTButton = [0, 0, 0, 0, 0]
self.GLUTModifiers = 0
self.keys = {}
self.bbox = BBox3D()
self.xmesh = xmesh
self.ymesh = ymesh
self.displayMeshEdges = False
self.displayMeshFaces = True
self.displayMeshPoints = True
self.displayCorrespondences = True
self.currCx = np.array([[0, 0, 0]]).T #Current X Centroid
self.currCy = np.array([[0, 0, 0]]).T #Current Y Centroid
self.currRx = np.eye(3) #Current rotation
self.CxList = []
self.CyList = []
self.RxList = []
self.corridx = np.zeros([]) #Current correspondences
self.corridxbuff = None #Correspondence vertex buffer
self.MaxIters = MaxIters
self.outputPrefix = outputPrefix
#Animation variables
self.animating = False
self.frameIdx = 0
self.nearDist = 0.01
self.farDist = 1000.0
def GLUTResize(self, w, h):
glViewport(0, 0, w, h)
        self.GLUTwindow_height = h
        self.GLUTwindow_width = w
#Update camera parameters based on new size
self.camera = MousePolarCamera(w, h)
self.camera.centerOnBBox(self.bbox, math.pi/2, math.pi/2)
def handleMouseStuff(self, x, y):
y = self.GLUTwindow_height - y
self.GLUTmouse[0] = x
self.GLUTmouse[1] = y
def GLUTMouse(self, button, state, x, y):
buttonMap = {GLUT_LEFT_BUTTON:0, GLUT_MIDDLE_BUTTON:1, GLUT_RIGHT_BUTTON:2, 3:3, 4:4}
if state == GLUT_DOWN:
self.GLUTButton[buttonMap[button]] = 1
else:
self.GLUTButton[buttonMap[button]] = 0
self.handleMouseStuff(x, y)
glutPostRedisplay()
def GLUTMotion(self, x, y):
lastX = self.GLUTmouse[0]
lastY = self.GLUTmouse[1]
self.handleMouseStuff(x, y)
dX = self.GLUTmouse[0] - lastX
dY = self.GLUTmouse[1] - lastY
if self.GLUTButton[1] == 1:
self.camera.translate(dX, dY)
else:
zooming = False
if 'z' in self.keys:
#Want to zoom in as the mouse goes up
if self.keys['z']:
self.camera.zoom(-dY)
zooming = True
elif 'Z' in self.keys:
#Want to zoom in as the mouse goes up
if self.keys['Z']:
self.camera.zoom(-dY)
zooming = True
if not zooming:
self.camera.orbitLeftRight(dX)
self.camera.orbitUpDown(dY)
glutPostRedisplay()
def GLUTKeyboard(self, key, x, y):
self.handleMouseStuff(x, y)
self.keys[key] = True
glutPostRedisplay()
def GLUTKeyboardUp(self, key, x, y):
self.handleMouseStuff(x, y)
self.keys[key] = False
if key in ['x', 'X']:
self.viewXMesh()
elif key in ['y', 'Y']:
self.viewYMesh()
elif key in ['p', 'P']:
self.displayMeshPoints = not self.displayMeshPoints
elif key in ['e', 'E']:
self.displayMeshEdges = not self.displayMeshEdges
elif key in ['f', 'F']:
self.displayMeshFaces = not self.displayMeshFaces
elif key in ['c', 'C']:
self.displayCorrespondences = not self.displayCorrespondences
glutPostRedisplay()
def displayCorrespondencesCheckbox(self, evt):
self.displayCorrespondences = evt.Checked()
glutPostRedisplay()
def getBBoxs(self):
#Make Y bounding box
Vy = self.ymesh.getVerticesCols() - self.currCy
ybbox = BBox3D()
ybbox.fromPoints(Vy.T)
#Make X bounding box
Vx = self.xmesh.getVerticesCols() - self.currCx
Vx = self.currRx.dot(Vx)
xbbox = BBox3D()
xbbox.fromPoints(Vx.T)
bboxall = BBox3D()
bboxall.fromPoints(np.concatenate((Vx, Vy), 1).T)
self.farDist = bboxall.getDiagLength()*20
self.nearDist = self.farDist/10000.0
return (xbbox, ybbox)
#Move the camera to look at the Y mesh (default)
def viewYMesh(self):
(xbbox, ybbox) = self.getBBoxs()
self.camera.centerOnBBox(ybbox, theta = -math.pi/2, phi = math.pi/2)
glutPostRedisplay()
#Move the camera to look at the X mesh, taking into consideration
#current transformation
def viewXMesh(self):
(xbbox, ybbox) = self.getBBoxs()
self.camera.centerOnBBox(xbbox, theta = -math.pi/2, phi = math.pi/2)
glutPostRedisplay()
def GLUTSpecial(self, key, x, y):
self.handleMouseStuff(x, y)
self.keys[key] = True
glutPostRedisplay()
def GLUTSpecialUp(self, key, x, y):
self.handleMouseStuff(x, y)
self.keys[key] = False
glutPostRedisplay()
def updateCorrBuffer(self):
X = self.xmesh.VPos.T - self.currCx
X = self.currRx.dot(X)
Y = self.ymesh.VPos.T - self.currCy
idx = self.corridx
N = idx.size
C = np.zeros((N*2, 3))
C[0::2, :] = X.T
C[1::2, :] = Y.T[idx, :]
self.corridxbuff = vbo.VBO(np.array(C, dtype=np.float32))
#Call the students' centroid centering code and update the display
def centerOnCentroids(self):
self.currCx = getCentroid(self.xmesh.getVerticesCols())
self.currCy = getCentroid(self.ymesh.getVerticesCols())
if self.corridxbuff: #If correspondences have already been found
self.updateCorrBuffer()
self.viewYMesh()
def findCorrespondences(self):
X = self.xmesh.getVerticesCols()
Y = self.ymesh.getVerticesCols()
self.corridx = getCorrespondences(X, Y, self.currCx, self.currCy, self.currRx)
self.updateCorrBuffer()
glutPostRedisplay()
def doProcrustes(self):
if not self.corridxbuff:
            # wx is not imported in this GLUT viewer, so report on the console
            print "Must compute correspondences before doing procrustes!"
return
X = self.xmesh.getVerticesCols()
Y = self.ymesh.getVerticesCols()
(self.currCx, self.currCy, self.currRx) = getProcrustesAlignment(X, Y, self.corridx)
self.updateCorrBuffer()
glutPostRedisplay()
def doICP(self):
X = self.xmesh.getVerticesCols()
Y = self.ymesh.getVerticesCols()
(self.CxList, self.CyList, self.RxList) = doICP(X, Y, self.MaxIters)
self.currRx = self.RxList[-1]
self.corridxbuff = None
self.viewYMesh()
def doAnimation(self):
if len(self.RxList) == 0:
            # wx is not imported in this GLUT viewer, so report on the console
            print "Must compute ICP before playing animation!"
return
self.currRx = self.RxList[0]
self.animating = True
self.frameIdx = 0
glutPostRedisplay()
def drawPoints(self, mesh):
glEnableClientState(GL_VERTEX_ARRAY)
mesh.VPosVBO.bind()
glVertexPointerf(mesh.VPosVBO)
glDisable(GL_LIGHTING)
glPointSize(POINT_SIZE)
glDrawArrays(GL_POINTS, 0, mesh.VPos.shape[0])
mesh.VPosVBO.unbind()
glDisableClientState(GL_VERTEX_ARRAY)
def drawLines(self, buff, NLines):
glEnableClientState(GL_VERTEX_ARRAY)
buff.bind()
glVertexPointerf(buff)
glDisable(GL_LIGHTING)
glPointSize(POINT_SIZE)
glDrawArrays(GL_LINES, 0, NLines*2)
buff.unbind()
glDisableClientState(GL_VERTEX_ARRAY)
def setupPerspectiveMatrix(self, nearDist = -1, farDist = -1):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
if nearDist == -1:
farDist = self.camera.eye - self.bbox.getCenter()
farDist = np.sqrt(farDist.dot(farDist)) + self.bbox.getDiagLength()
nearDist = farDist/50.0
gluPerspective(180.0*self.camera.yfov/M_PI, float(self.GLUTwindow_width)/self.GLUTwindow_height, nearDist, farDist)
def repaint(self):
if np.isnan(self.camera.eye[0]):
#TODO: Patch for a strange bug that I can't quite track down
#where camera eye is initially NaNs (likely a race condition)
self.viewYMesh()
self.setupPerspectiveMatrix(self.nearDist, self.farDist)
glClearColor(0.0, 0.0, 0.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glEnable(GL_LIGHTING)
glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, [0.8, 0.8, 0.8, 1.0])
glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, [0.2, 0.2, 0.2, 1.0])
glMaterialfv(GL_FRONT_AND_BACK, GL_SHININESS, 64)
#glLightfv(GL_LIGHT1, GL_POSITION, np.array([0, 0, 1, 1]))
self.camera.gotoCameraFrame()
P = np.zeros(4)
P[0:3] = self.camera.eye
glLightfv(GL_LIGHT0, GL_POSITION, P)
#Draw the Y mesh
TYC = np.eye(4)
TYC[0:3, 3] = -self.currCy.flatten()
glPushMatrix()
glMultMatrixd((TYC.T).flatten())
self.ymesh.renderGL(self.displayMeshEdges, False, self.displayMeshFaces, False, False, True, False)
if self.displayMeshPoints:
glColor3f(1.0, 0, 0)
self.drawPoints(self.ymesh)
glPopMatrix()
#Draw the X mesh transformed
Rx = np.eye(4)
Rx[0:3, 0:3] = self.currRx
#Translation to move X to its centroid
TXC = np.eye(4)
TXC[0:3, 3] = -self.currCx.flatten()
T = Rx.dot(TXC)
glPushMatrix()
#Note: OpenGL is column major
glMultMatrixd((T.T).flatten())
self.xmesh.renderGL(self.displayMeshEdges, False, self.displayMeshFaces, False, False, True, False)
if self.displayMeshPoints:
glColor3f(0, 0, 1.0)
self.drawPoints(self.xmesh)
glPopMatrix()
if self.displayCorrespondences and self.corridxbuff:
self.drawLines(self.corridxbuff, self.xmesh.VPos.shape[0])
if self.animating:
if not(self.outputPrefix == ""):
                #Output screenshots
saveImageGL(self, "%s%i.png"%(self.outputPrefix, self.frameIdx), self.GLUTwindow_width, self.GLUTwindow_height)
self.frameIdx += 1
if self.frameIdx == len(self.RxList):
self.animating = False
else:
self.currCx = self.CxList[self.frameIdx]
self.currCy = self.CyList[self.frameIdx]
self.currRx = self.RxList[self.frameIdx]
glutPostRedisplay()
glutSwapBuffers()
def dmenu(self, item):
self.menudict[item]()
return 0
def initGL(self):
glutInit('')
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH)
glutInitWindowSize(self.GLUTwindow_width, self.GLUTwindow_height)
glutInitWindowPosition(50, 50)
glutCreateWindow('ICP Viewer')
glutReshapeFunc(self.GLUTResize)
glutDisplayFunc(self.repaint)
glutKeyboardFunc(self.GLUTKeyboard)
glutKeyboardUpFunc(self.GLUTKeyboardUp)
glutSpecialFunc(self.GLUTSpecial)
glutSpecialUpFunc(self.GLUTSpecialUp)
glutMouseFunc(self.GLUTMouse)
glutMotionFunc(self.GLUTMotion)
glLightModelfv(GL_LIGHT_MODEL_AMBIENT, [0.2, 0.2, 0.2, 1.0])
glLightModeli(GL_LIGHT_MODEL_LOCAL_VIEWER, GL_TRUE)
glLightfv(GL_LIGHT0, GL_DIFFUSE, [1.0, 1.0, 1.0, 1.0])
glEnable(GL_LIGHT0)
glLightfv(GL_LIGHT1, GL_DIFFUSE, [0.5, 0.5, 0.5, 1.0])
glEnable(GL_LIGHT1)
glEnable(GL_NORMALIZE)
glEnable(GL_LIGHTING)
glEnable(GL_DEPTH_TEST)
#Make menus
(VOID, CENTER_ON_CENTROIDS, FIND_CORRESPONDENCES, DO_PROCRUSTES, DO_ICP, ANIMATE_ICP) = (0, 1, 2, 3, 4, 5)
self.menudict = {CENTER_ON_CENTROIDS:self.centerOnCentroids, FIND_CORRESPONDENCES:self.findCorrespondences, DO_PROCRUSTES:self.doProcrustes, DO_ICP:self.doICP, ANIMATE_ICP:self.doAnimation}
stepByStepMenu = glutCreateMenu(self.dmenu)
glutAddMenuEntry("Center On Centroids", CENTER_ON_CENTROIDS)
glutAddMenuEntry("Find Correspondences", FIND_CORRESPONDENCES)
glutAddMenuEntry("Do Procrustes", DO_PROCRUSTES)
icpMenu = glutCreateMenu(self.dmenu)
glutAddMenuEntry("Compute ICP", DO_ICP)
glutAddMenuEntry("Animate ICP", ANIMATE_ICP)
globalMenu = glutCreateMenu(self.dmenu)
glutAddSubMenu("ICP Step By Step", stepByStepMenu)
glutAddSubMenu("ICP Algorithm Full", icpMenu)
glutAttachMenu(GLUT_RIGHT_BUTTON)
glutMainLoop()
if __name__ == '__main__':
if len(sys.argv) < 3:
print "Usage: python ICPViewerGLUT.py <mesh to align file> <target mesh file> [Maximum Number of Iterations] [Output File Prefix]"
sys.exit(0)
(xmeshfile, ymeshfile) = (sys.argv[1], sys.argv[2])
MaxIters = 200
if len(argv) > 3:
MaxIters = int(argv[3])
outputPrefix = ""
if len(argv) > 4:
outputPrefix = argv[4]
xmesh = PolyMesh()
print "Loading %s..."%xmeshfile
(xmesh.VPos, xmesh.VColors, xmesh.ITris) = loadOffFileExternal(xmeshfile)
xmesh.performDisplayUpdate(True)
ymesh = PolyMesh()
print "Loading %s..."%ymeshfile
(ymesh.VPos, ymesh.VColors, ymesh.ITris) = loadOffFileExternal(ymeshfile)
ymesh.performDisplayUpdate(True)
viewer = ICPViewerCanvas(xmesh, ymesh, MaxIters, outputPrefix)
viewer.initGL()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.api import base
from openstack_dashboard.api import cinder
from openstack_dashboard.api import keystone
from openstack_dashboard.api import nova
from openstack_dashboard.usage import quotas
INDEX_URL = "horizon:admin:projects:index"
ADD_USER_URL = "horizon:admin:projects:create_user"
PROJECT_GROUP_ENABLED = keystone.VERSIONS.active >= 3
PROJECT_USER_MEMBER_SLUG = "update_members"
PROJECT_GROUP_MEMBER_SLUG = "update_group_members"
class UpdateProjectQuotaAction(workflows.Action):
ifcb_label = _("Injected File Content Bytes")
metadata_items = forms.IntegerField(min_value=-1,
label=_("Metadata Items"))
cores = forms.IntegerField(min_value=-1, label=_("VCPUs"))
instances = forms.IntegerField(min_value=-1, label=_("Instances"))
injected_files = forms.IntegerField(min_value=-1,
label=_("Injected Files"))
injected_file_content_bytes = forms.IntegerField(min_value=-1,
label=ifcb_label)
volumes = forms.IntegerField(min_value=-1, label=_("Volumes"))
snapshots = forms.IntegerField(min_value=-1, label=_("Volume Snapshots"))
gigabytes = forms.IntegerField(
min_value=-1, label=_("Total Size of Volumes and Snapshots (GB)"))
ram = forms.IntegerField(min_value=-1, label=_("RAM (MB)"))
floating_ips = forms.IntegerField(min_value=-1, label=_("Floating IPs"))
fixed_ips = forms.IntegerField(min_value=-1, label=_("Fixed IPs"))
security_groups = forms.IntegerField(min_value=-1,
label=_("Security Groups"))
security_group_rules = forms.IntegerField(min_value=-1,
label=_("Security Group Rules"))
# Neutron
security_group = forms.IntegerField(min_value=-1,
label=_("Security Groups"))
security_group_rule = forms.IntegerField(min_value=-1,
label=_("Security Group Rules"))
floatingip = forms.IntegerField(min_value=-1, label=_("Floating IPs"))
network = forms.IntegerField(min_value=-1, label=_("Networks"))
port = forms.IntegerField(min_value=-1, label=_("Ports"))
router = forms.IntegerField(min_value=-1, label=_("Routers"))
subnet = forms.IntegerField(min_value=-1, label=_("Subnets"))
def __init__(self, request, *args, **kwargs):
super(UpdateProjectQuotaAction, self).__init__(request,
*args,
**kwargs)
disabled_quotas = quotas.get_disabled_quotas(request)
for field in disabled_quotas:
if field in self.fields:
self.fields[field].required = False
self.fields[field].widget = forms.HiddenInput()
class Meta:
name = _("Quota")
slug = 'update_quotas'
help_text = _("From here you can set quotas "
"(max limits) for the project.")
class UpdateProjectQuota(workflows.Step):
action_class = UpdateProjectQuotaAction
depends_on = ("project_id",)
contributes = quotas.QUOTA_FIELDS
class CreateProjectInfoAction(workflows.Action):
# Hide the domain_id and domain_name by default
domain_id = forms.CharField(label=_("Domain ID"),
required=False,
widget=forms.HiddenInput())
domain_name = forms.CharField(label=_("Domain Name"),
required=False,
widget=forms.HiddenInput())
name = forms.CharField(label=_("Name"),
max_length=64)
description = forms.CharField(widget=forms.widgets.Textarea(),
label=_("Description"),
required=False)
enabled = forms.BooleanField(label=_("Enabled"),
required=False,
initial=True)
def __init__(self, request, *args, **kwargs):
super(CreateProjectInfoAction, self).__init__(request,
*args,
**kwargs)
# For keystone V3, display the two fields in read-only
if keystone.VERSIONS.active >= 3:
readonlyInput = forms.TextInput(attrs={'readonly': 'readonly'})
self.fields["domain_id"].widget = readonlyInput
self.fields["domain_name"].widget = readonlyInput
class Meta:
name = _("Project Info")
help_text = _("From here you can create a new "
"project to organize users.")
class CreateProjectInfo(workflows.Step):
action_class = CreateProjectInfoAction
contributes = ("domain_id",
"domain_name",
"project_id",
"name",
"description",
"enabled")
class UpdateProjectMembersAction(workflows.MembershipAction):
def __init__(self, request, *args, **kwargs):
super(UpdateProjectMembersAction, self).__init__(request,
*args,
**kwargs)
err_msg = _('Unable to retrieve user list. Please try again later.')
# Use the domain_id from the project
domain_id = self.initial.get("domain_id", None)
project_id = ''
if 'project_id' in self.initial:
project_id = self.initial['project_id']
# Get the default role
try:
default_role = api.keystone.get_default_role(self.request)
# Default role is necessary to add members to a project
if default_role is None:
default = getattr(settings,
"OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
msg = _('Could not find default role "%s" in Keystone') % \
default
raise exceptions.NotFound(msg)
except Exception:
exceptions.handle(self.request,
err_msg,
redirect=reverse(INDEX_URL))
default_role_name = self.get_default_role_field_name()
self.fields[default_role_name] = forms.CharField(required=False)
self.fields[default_role_name].initial = default_role.id
# Get list of available users
all_users = []
try:
all_users = api.keystone.user_list(request,
domain=domain_id)
except Exception:
exceptions.handle(request, err_msg)
users_list = [(user.id, user.name) for user in all_users]
# Get list of roles
role_list = []
try:
role_list = api.keystone.role_list(request)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for role in role_list:
field_name = self.get_member_field_name(role.id)
label = role.name
self.fields[field_name] = forms.MultipleChoiceField(required=False,
label=label)
self.fields[field_name].choices = users_list
self.fields[field_name].initial = []
# Figure out users & roles
if project_id:
try:
project_members = api.keystone.user_list(request,
project=project_id)
except Exception:
exceptions.handle(request, err_msg)
for user in project_members:
try:
roles = api.keystone.roles_for_user(self.request,
user.id,
project_id)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for role in roles:
field_name = self.get_member_field_name(role.id)
self.fields[field_name].initial.append(user.id)
class Meta:
name = _("Project Members")
slug = PROJECT_USER_MEMBER_SLUG
class UpdateProjectMembers(workflows.UpdateMembersStep):
action_class = UpdateProjectMembersAction
available_list_title = _("All Users")
members_list_title = _("Project Members")
no_available_text = _("No users found.")
no_members_text = _("No users.")
def contribute(self, data, context):
if data:
try:
roles = api.keystone.role_list(self.workflow.request)
except Exception:
exceptions.handle(self.workflow.request,
_('Unable to retrieve user list.'))
post = self.workflow.request.POST
for role in roles:
field = self.get_member_field_name(role.id)
context[field] = post.getlist(field)
return context
class UpdateProjectGroupsAction(workflows.MembershipAction):
def __init__(self, request, *args, **kwargs):
super(UpdateProjectGroupsAction, self).__init__(request,
*args,
**kwargs)
err_msg = _('Unable to retrieve group list. Please try again later.')
# Use the domain_id from the project
domain_id = self.initial.get("domain_id", None)
project_id = ''
if 'project_id' in self.initial:
project_id = self.initial['project_id']
# Get the default role
try:
default_role = api.keystone.get_default_role(self.request)
# Default role is necessary to add members to a project
if default_role is None:
default = getattr(settings,
"OPENSTACK_KEYSTONE_DEFAULT_ROLE", None)
msg = _('Could not find default role "%s" in Keystone') % \
default
raise exceptions.NotFound(msg)
except Exception:
exceptions.handle(self.request,
err_msg,
redirect=reverse(INDEX_URL))
default_role_name = self.get_default_role_field_name()
self.fields[default_role_name] = forms.CharField(required=False)
self.fields[default_role_name].initial = default_role.id
# Get list of available groups
all_groups = []
try:
all_groups = api.keystone.group_list(request,
domain=domain_id)
except Exception:
exceptions.handle(request, err_msg)
groups_list = [(group.id, group.name) for group in all_groups]
# Get list of roles
role_list = []
try:
role_list = api.keystone.role_list(request)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for role in role_list:
field_name = self.get_member_field_name(role.id)
label = role.name
self.fields[field_name] = forms.MultipleChoiceField(required=False,
label=label)
self.fields[field_name].choices = groups_list
self.fields[field_name].initial = []
# Figure out groups & roles
if project_id:
for group in all_groups:
try:
roles = api.keystone.roles_for_group(self.request,
group=group.id,
project=project_id)
except Exception:
exceptions.handle(request,
err_msg,
redirect=reverse(INDEX_URL))
for role in roles:
field_name = self.get_member_field_name(role.id)
self.fields[field_name].initial.append(group.id)
class Meta:
name = _("Project Groups")
slug = PROJECT_GROUP_MEMBER_SLUG
class UpdateProjectGroups(workflows.UpdateMembersStep):
action_class = UpdateProjectGroupsAction
available_list_title = _("All Groups")
members_list_title = _("Project Groups")
no_available_text = _("No groups found.")
no_members_text = _("No groups.")
def contribute(self, data, context):
if data:
try:
roles = api.keystone.role_list(self.workflow.request)
except Exception:
exceptions.handle(self.workflow.request,
_('Unable to retrieve role list.'))
post = self.workflow.request.POST
for role in roles:
field = self.get_member_field_name(role.id)
context[field] = post.getlist(field)
return context
class CreateProject(workflows.Workflow):
slug = "create_project"
name = _("Create Project")
finalize_button_name = _("Create Project")
success_message = _('Created new project "%s".')
failure_message = _('Unable to create project "%s".')
success_url = "horizon:admin:projects:index"
default_steps = (CreateProjectInfo,
UpdateProjectMembers,
UpdateProjectQuota)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
if PROJECT_GROUP_ENABLED:
self.default_steps = (CreateProjectInfo,
UpdateProjectMembers,
UpdateProjectGroups,
UpdateProjectQuota)
super(CreateProject, self).__init__(request=request,
context_seed=context_seed,
entry_point=entry_point,
*args,
**kwargs)
def format_status_message(self, message):
return message % self.context.get('name', 'unknown project')
def handle(self, request, data):
# create the project
domain_id = data['domain_id']
try:
desc = data['description']
self.object = api.keystone.tenant_create(request,
name=data['name'],
description=desc,
enabled=data['enabled'],
domain=domain_id)
except Exception:
exceptions.handle(request, ignore=True)
return False
project_id = self.object.id
# update project members
users_to_add = 0
try:
available_roles = api.keystone.role_list(request)
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
# count how many users are to be added
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
users_to_add += len(role_list)
# add new users to project
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
users_added = 0
for user in role_list:
api.keystone.add_tenant_user_role(request,
project=project_id,
user=user,
role=role.id)
users_added += 1
users_to_add -= users_added
except Exception:
if PROJECT_GROUP_ENABLED:
group_msg = _(", add project groups")
else:
group_msg = ""
exceptions.handle(request, _('Failed to add %(users_to_add)s '
'project members%(group_msg)s and '
'set project quotas.')
% {'users_to_add': users_to_add,
'group_msg': group_msg})
if PROJECT_GROUP_ENABLED:
# update project groups
groups_to_add = 0
try:
available_roles = api.keystone.role_list(request)
member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG)
# count how many groups are to be added
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
groups_to_add += len(role_list)
# add new groups to project
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
role_list = data[field_name]
groups_added = 0
for group in role_list:
api.keystone.add_group_role(request,
role=role.id,
group=group,
project=project_id)
groups_added += 1
groups_to_add -= groups_added
except Exception:
exceptions.handle(request, _('Failed to add %s project groups '
'and update project quotas.'
% groups_to_add))
# Update the project quota.
nova_data = dict(
[(key, data[key]) for key in quotas.NOVA_QUOTA_FIELDS])
try:
nova.tenant_quota_update(request, project_id, **nova_data)
if base.is_service_enabled(request, 'volume'):
cinder_data = dict([(key, data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
cinder.tenant_quota_update(request,
project_id,
**cinder_data)
if api.base.is_service_enabled(request, 'network') and \
api.neutron.is_quotas_extension_supported(request):
neutron_data = dict([(key, data[key]) for key in
quotas.NEUTRON_QUOTA_FIELDS])
api.neutron.tenant_quota_update(request,
project_id,
**neutron_data)
except Exception:
exceptions.handle(request, _('Unable to set project quotas.'))
return True
class UpdateProjectInfoAction(CreateProjectInfoAction):
enabled = forms.BooleanField(required=False, label=_("Enabled"))
class Meta:
name = _("Project Info")
slug = 'update_info'
help_text = _("From here you can edit the project details.")
class UpdateProjectInfo(workflows.Step):
action_class = UpdateProjectInfoAction
depends_on = ("project_id",)
contributes = ("domain_id",
"domain_name",
"name",
"description",
"enabled")
class UpdateProject(workflows.Workflow):
slug = "update_project"
name = _("Edit Project")
finalize_button_name = _("Save")
success_message = _('Modified project "%s".')
failure_message = _('Unable to modify project "%s".')
success_url = "horizon:admin:projects:index"
default_steps = (UpdateProjectInfo,
UpdateProjectMembers,
UpdateProjectQuota)
def __init__(self, request=None, context_seed=None, entry_point=None,
*args, **kwargs):
if PROJECT_GROUP_ENABLED:
self.default_steps = (UpdateProjectInfo,
UpdateProjectMembers,
UpdateProjectGroups,
UpdateProjectQuota)
super(UpdateProject, self).__init__(request=request,
context_seed=context_seed,
entry_point=entry_point,
*args,
**kwargs)
def format_status_message(self, message):
return message % self.context.get('name', 'unknown project')
def handle(self, request, data):
# FIXME(gabriel): This should be refactored to use Python's built-in
# sets and do this all in a single "roles to add" and "roles to remove"
# pass instead of the multi-pass thing happening now.
project_id = data['project_id']
domain_id = ''
# update project info
try:
project = api.keystone.tenant_update(
request,
project_id,
name=data['name'],
description=data['description'],
enabled=data['enabled'])
# Use the domain_id from the project if available
domain_id = getattr(project, "domain_id", None)
except Exception:
exceptions.handle(request, ignore=True)
return False
# update project members
users_to_modify = 0
# Project-user member step
member_step = self.get_step(PROJECT_USER_MEMBER_SLUG)
try:
# Get our role options
available_roles = api.keystone.role_list(request)
# Get the users currently associated with this project so we
# can diff against it.
project_members = api.keystone.user_list(request,
project=project_id)
users_to_modify = len(project_members)
for user in project_members:
# Check if there have been any changes in the roles of
# Existing project members.
current_roles = api.keystone.roles_for_user(self.request,
user.id,
project_id)
current_role_ids = [role.id for role in current_roles]
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Check if the user is in the list of users with this role.
if user.id in data[field_name]:
# Add it if necessary
if role.id not in current_role_ids:
# user role has changed
api.keystone.add_tenant_user_role(
request,
project=project_id,
user=user.id,
role=role.id)
else:
# User role is unchanged, so remove it from the
# remaining roles list to avoid removing it later.
index = current_role_ids.index(role.id)
current_role_ids.pop(index)
# Prevent admins from doing stupid things to themselves.
is_current_user = user.id == request.user.id
is_current_project = project_id == request.user.tenant_id
admin_roles = [role for role in current_roles
if role.name.lower() == 'admin']
if len(admin_roles):
removing_admin = any([role.id in current_role_ids
for role in admin_roles])
else:
removing_admin = False
if is_current_user and is_current_project and removing_admin:
# Cannot remove "admin" role on current(admin) project
msg = _('You cannot revoke your administrative privileges '
'from the project you are currently logged into. '
'Please switch to another project with '
'administrative privileges or remove the '
'administrative role manually via the CLI.')
messages.warning(request, msg)
# Otherwise go through and revoke any removed roles.
else:
for id_to_delete in current_role_ids:
api.keystone.remove_tenant_user_role(
request,
project=project_id,
user=user.id,
role=id_to_delete)
users_to_modify -= 1
# Grant new roles on the project.
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Count how many users may be added for exception handling.
users_to_modify += len(data[field_name])
for role in available_roles:
users_added = 0
field_name = member_step.get_member_field_name(role.id)
for user_id in data[field_name]:
if not filter(lambda x: user_id == x.id, project_members):
api.keystone.add_tenant_user_role(request,
project=project_id,
user=user_id,
role=role.id)
users_added += 1
users_to_modify -= users_added
except Exception:
if PROJECT_GROUP_ENABLED:
group_msg = _(", update project groups")
else:
group_msg = ""
exceptions.handle(request, _('Failed to modify %(users_to_modify)s'
' project members%(group_msg)s and '
'update project quotas.')
% {'users_to_modify': users_to_modify,
'group_msg': group_msg})
return True
if PROJECT_GROUP_ENABLED:
# update project groups
groups_to_modify = 0
member_step = self.get_step(PROJECT_GROUP_MEMBER_SLUG)
try:
# Get the groups currently associated with this project so we
# can diff against it.
project_groups = api.keystone.group_list(request,
domain=domain_id,
project=project_id)
groups_to_modify = len(project_groups)
for group in project_groups:
# Check if there have been any changes in the roles of
# Existing project members.
current_roles = api.keystone.roles_for_group(
self.request,
group=group.id,
project=project_id)
current_role_ids = [role.id for role in current_roles]
for role in available_roles:
# Check if the group is in the list of groups with
# this role.
field_name = member_step.get_member_field_name(role.id)
if group.id in data[field_name]:
# Add it if necessary
if role.id not in current_role_ids:
# group role has changed
api.keystone.add_group_role(
request,
role=role.id,
group=group.id,
project=project_id)
else:
# Group role is unchanged, so remove it from
# the remaining roles list to avoid removing it
# later.
index = current_role_ids.index(role.id)
current_role_ids.pop(index)
# Revoke any removed roles.
for id_to_delete in current_role_ids:
api.keystone.remove_group_role(request,
role=id_to_delete,
group=group.id,
project=project_id)
groups_to_modify -= 1
# Grant new roles on the project.
for role in available_roles:
field_name = member_step.get_member_field_name(role.id)
# Count how many groups may be added for error handling.
groups_to_modify += len(data[field_name])
for role in available_roles:
groups_added = 0
field_name = member_step.get_member_field_name(role.id)
for group_id in data[field_name]:
if not filter(lambda x: group_id == x.id,
project_groups):
api.keystone.add_group_role(request,
role=role.id,
group=group_id,
project=project_id)
groups_added += 1
groups_to_modify -= groups_added
except Exception:
exceptions.handle(request, _('Failed to modify %s project '
'members, update project groups '
'and update project quotas.'
% groups_to_modify))
return True
# update the project quota
nova_data = dict(
[(key, data[key]) for key in quotas.NOVA_QUOTA_FIELDS])
try:
nova.tenant_quota_update(request,
project_id,
**nova_data)
if base.is_service_enabled(request, 'volume'):
cinder_data = dict([(key, data[key]) for key in
quotas.CINDER_QUOTA_FIELDS])
cinder.tenant_quota_update(request,
project_id,
**cinder_data)
if api.base.is_service_enabled(request, 'network') and \
api.neutron.is_quotas_extension_supported(request):
neutron_data = dict([(key, data[key]) for key in
quotas.NEUTRON_QUOTA_FIELDS])
api.neutron.tenant_quota_update(request,
project_id,
**neutron_data)
return True
except Exception:
exceptions.handle(request, _('Modified project information and '
'members, but unable to modify '
'project quotas.'))
return True
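# Hedged sketch (illustrative only, not used by the workflows above): the
# FIXME in UpdateProject.handle suggests computing role changes with set
# arithmetic in a single pass. One possible shape for such a helper:
def _diff_role_ids(current_role_ids, desired_role_ids):
    """Return (roles_to_add, roles_to_remove) as sets of role ids."""
    current = set(current_role_ids)
    desired = set(desired_role_ids)
    return desired - current, current - desired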
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.prefetch_queue."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.slim.python.slim.data import prefetch_queue
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner_impl
class PrefetchQueueTest(test.TestCase):
def testOneThread(self):
with self.test_session() as sess:
batch_size = 10
image_size = 32
num_batches = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
image = random_ops.random_normal(
[image_size, image_size, 3], dtype=dtypes.float32, name='images')
label = random_ops.random_uniform(
[1], 0, 10, dtype=dtypes.int32, name='labels')
batches = input_lib.batch(
[counter, image, label], batch_size=batch_size, num_threads=1)
batches = prefetch_queue.prefetch_queue(batches).dequeue()
variables.global_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
for i in range(num_batches):
results = sess.run(batches)
self.assertAllEqual(results[0],
np.arange(i * batch_size, (i + 1) * batch_size))
self.assertEquals(results[1].shape,
(batch_size, image_size, image_size, 3))
self.assertEquals(results[2].shape, (batch_size, 1))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batches)
for thread in threads:
thread.join()
def testMultiThread(self):
with self.test_session() as sess:
batch_size = 10
image_size = 32
num_batches = 5
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
image = random_ops.random_normal(
[image_size, image_size, 3], dtype=dtypes.float32, name='images')
label = random_ops.random_uniform(
[1], 0, 10, dtype=dtypes.int32, name='labels')
batches = input_lib.batch(
[counter, image, label], batch_size=batch_size, num_threads=4)
batches = prefetch_queue.prefetch_queue(batches).dequeue()
variables.global_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
value_counter = []
for _ in range(num_batches):
results = sess.run(batches)
value_counter.append(results[0])
self.assertEqual(results[1].shape,
(batch_size, image_size, image_size, 3))
self.assertEqual(results[2].shape, (batch_size, 1))
self.assertAllEqual(
np.sort(np.concatenate(value_counter)),
np.arange(0, num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batches)
for thread in threads:
thread.join()
def testMultipleDequeue(self):
with self.test_session() as sess:
batch_size = 10
image_size = 32
num_batches = 4
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(num_batches * batch_size)
image = random_ops.random_normal(
[image_size, image_size, 3], dtype=dtypes.float32, name='images')
label = random_ops.random_uniform(
[1], 0, 10, dtype=dtypes.int32, name='labels')
batches = input_lib.batch(
[counter, image, label], batch_size=batch_size, num_threads=4)
batcher = prefetch_queue.prefetch_queue(batches)
batches_list = [batcher.dequeue() for _ in range(2)]
variables.global_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
value_counter = []
for _ in range(int(num_batches / 2)):
for batches in batches_list:
results = sess.run(batches)
value_counter.append(results[0])
self.assertEquals(results[1].shape,
(batch_size, image_size, image_size, 3))
self.assertEquals(results[2].shape, (batch_size, 1))
self.assertAllEqual(
np.sort(np.concatenate(value_counter)),
np.arange(0, num_batches * batch_size))
# Reached the limit.
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batches)
for thread in threads:
thread.join()
def testDynamicPad_failure(self):
with ops.Graph().as_default():
variable_tensor = array_ops.placeholder(dtypes.int32, shape=[None, 3])
with self.assertRaisesRegexp(ValueError, 'shapes must be fully defined'):
prefetch_queue.prefetch_queue([variable_tensor])
def testDynamicPad(self):
with self.test_session() as sess:
# Create 3 tensors of variable but compatible shapes.
var_shape = [None, 2]
p1 = constant_op.constant([[1, 2], [3, 4]])
p1.set_shape(var_shape)
p2 = constant_op.constant([[5, 6], [7, 8], [9, 10]])
p2.set_shape(var_shape)
p3 = constant_op.constant([[11, 12]])
p3.set_shape(var_shape)
batch = [p1, p2, p3]
batch_size = len(batch)
zero64 = constant_op.constant(0, dtype=dtypes.int64)
examples = variables.Variable(zero64)
counter = examples.count_up_to(batch_size)
# Create a PaddingFIFOQueue to enqueue these tensors.
q = data_flow_ops.PaddingFIFOQueue(
capacity=10, dtypes=[dtypes.int32], shapes=[var_shape])
for tensor in [p1, p2, p3]:
q.enqueue([tensor]).run()
# Dequeue from the queue and batch them using batch().
batches = input_lib.batch([q.dequeue(), counter], batch_size=batch_size,
num_threads=1, dynamic_pad=True)
self.assertEqual([batch_size, None, 2], batches[0].shape.as_list())
# Finally, assemble them into prefetch_queue with dynamic_pad.
batcher = prefetch_queue.prefetch_queue(batches, dynamic_pad=True)
batches = batcher.dequeue()
self.assertEqual([batch_size, None, 2], batches[0].shape.as_list())
variables.global_variables_initializer().run()
threads = queue_runner_impl.start_queue_runners()
values, _ = sess.run(batches)
# We enqueued 3 tensors of [None, 2] shapes, so using dynamic_pad
# they should be padded to the fixed size [3, 3, 2], where 3
# is the maximum length of the batch.
self.assertTrue(np.array_equal(
np.array([[[1, 2], [3, 4], [0, 0]],
[[5, 6], [7, 8], [9, 10]],
[[11, 12], [0, 0], [0, 0]]]),
values))
with self.assertRaises(errors_impl.OutOfRangeError):
sess.run(batches)
for thread in threads:
thread.join()
def testDictConstruction(self):
with ops.Graph().as_default():
batches = {
'first': constant_op.constant([1]),
'second': constant_op.constant([2.0, 2.1])
}
prefetcher = prefetch_queue.prefetch_queue(batches)
dequeued = prefetcher.dequeue()
self.assertTrue(isinstance(dequeued, dict))
self.assertEqual(2, len(dequeued))
self.assertEqual(dtypes.int32, dequeued['first'].dtype)
self.assertEqual(dtypes.float32, dequeued['second'].dtype)
if __name__ == '__main__':
test.main()
|
|
import os
from six.moves.configparser import ConfigParser, NoSectionError
from six.moves import urllib
from conans.errors import ConanException
from conans.model.env_info import unquote
from conans.paths import conan_expand_user, DEFAULT_PROFILE_NAME
from conans.util.env_reader import get_env
from conans.util.files import load
MIN_SERVER_COMPATIBLE_VERSION = '0.12.0'
default_settings_yml = """
# Only for cross building, 'os_build/arch_build' is the system that runs Conan
os_build: [Windows, WindowsStore, Linux, Macos, FreeBSD, SunOS]
arch_build: [x86, x86_64]
# Only for building cross compilation tools, 'os_target/arch_target' is the system for
# which the tools generate code
os_target: [Windows, Linux, Macos, Android, iOS, watchOS, tvOS, FreeBSD, SunOS, Arduino]
arch_target: [x86, x86_64, ppc64le, ppc64, armv6, armv7, armv7hf, armv8, sparc, sparcv9, mips, mips64, avr, armv7s, armv7k]
# Rest of the settings are "host" settings:
# - For native building/cross building: Where the library/program will run.
# - For building cross compilation tools: Where the cross compiler will run.
os:
Windows:
subsystem: [None, cygwin, msys, msys2, wsl]
WindowsStore:
version: ["8.1", "10.0"]
Linux:
Macos:
Android:
api_level: ANY
iOS:
version: ["7.0", "7.1", "8.0", "8.1", "8.2", "8.3", "9.0", "9.1", "9.2", "9.3", "10.0", "10.1", "10.2", "10.3", "11.0"]
watchOS:
version: ["4.0"]
tvOS:
version: ["11.0"]
FreeBSD:
SunOS:
Arduino:
board: ANY
arch: [x86, x86_64, ppc64le, ppc64, armv6, armv7, armv7hf, armv8, sparc, sparcv9, mips, mips64, avr, armv7s, armv7k]
compiler:
sun-cc:
version: ["5.10", "5.11", "5.12", "5.13", "5.14"]
threads: [None, posix]
libcxx: [libCstd, libstdcxx, libstlport, libstdc++]
gcc:
version: ["4.1", "4.4", "4.5", "4.6", "4.7", "4.8", "4.9",
"5", "5.1", "5.2", "5.3", "5.4",
"6", "6.1", "6.2", "6.3", "6.4",
"7", "7.1", "7.2"]
libcxx: [libstdc++, libstdc++11]
threads: [None, posix, win32] # Windows MinGW
exception: [None, dwarf2, sjlj, seh] # Windows MinGW
Visual Studio:
runtime: [MD, MT, MTd, MDd]
version: ["8", "9", "10", "11", "12", "14", "15"]
toolset: [None, v90, v100, v110, v110_xp, v120, v120_xp, v140, v140_xp, v140_clang_c2, LLVM-vs2014, LLVM-vs2014_xp, v141, v141_xp, v141_clang_c2]
clang:
version: ["3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9", "4.0", "5.0"]
libcxx: [libstdc++, libstdc++11, libc++]
apple-clang:
version: ["5.0", "5.1", "6.0", "6.1", "7.0", "7.3", "8.0", "8.1", "9.0"]
libcxx: [libstdc++, libc++]
build_type: [None, Debug, Release]
"""
default_client_conf = """
[log]
run_to_output = True # environment CONAN_LOG_RUN_TO_OUTPUT
run_to_file = False # environment CONAN_LOG_RUN_TO_FILE
level = 50 # environment CONAN_LOGGING_LEVEL
# trace_file = # environment CONAN_TRACE_FILE
print_run_commands = False # environment CONAN_PRINT_RUN_COMMANDS
[general]
default_profile = %s
compression_level = 9 # environment CONAN_COMPRESSION_LEVEL
sysrequires_sudo = True # environment CONAN_SYSREQUIRES_SUDO
# verbose_traceback = False # environment CONAN_VERBOSE_TRACEBACK
# bash_path = "" # environment CONAN_BASH_PATH (only windows)
# recipe_linter = False # environment CONAN_RECIPE_LINTER
# read_only_cache = True # environment CONAN_READ_ONLY_CACHE
# pylintrc = path/to/pylintrc_file # environment CONAN_PYLINTRC
# cache_no_locks = True
# user_home_short = your_path # environment CONAN_USER_HOME_SHORT
# conan_make_program = make # environment CONAN_MAKE_PROGRAM (overrides the make program used in AutoToolsBuildEnvironment.make)
# cmake_generator # environment CONAN_CMAKE_GENERATOR
# http://www.vtk.org/Wiki/CMake_Cross_Compiling
# cmake_toolchain_file # environment CONAN_CMAKE_TOOLCHAIN_FILE
# cmake_system_name # environment CONAN_CMAKE_SYSTEM_NAME
# cmake_system_version # environment CONAN_CMAKE_SYSTEM_VERSION
# cmake_system_processor # environment CONAN_CMAKE_SYSTEM_PROCESSOR
# cmake_find_root_path # environment CONAN_CMAKE_FIND_ROOT_PATH
# cmake_find_root_path_mode_program # environment CONAN_CMAKE_FIND_ROOT_PATH_MODE_PROGRAM
# cmake_find_root_path_mode_library # environment CONAN_CMAKE_FIND_ROOT_PATH_MODE_LIBRARY
# cmake_find_root_path_mode_include # environment CONAN_CMAKE_FIND_ROOT_PATH_MODE_INCLUDE
# cpu_count = 1 # environment CONAN_CPU_COUNT
[storage]
# This is the default path, but you can write your own. It must be an absolute path or a
# path beginning with "~" (if the environment var CONAN_USER_HOME is specified, this directory, even
# with "~/", will be relative to the conan user home, not to the system user home)
path = ~/.conan/data
[proxies]
# An empty [proxies] section makes conan fall back to the system proxies.
# If you don't want any proxy at all, remove the [proxies] section.
# As documented in http://docs.python-requests.org/en/latest/user/advanced/#proxies
# http = http://user:[email protected]:3128/
# http = http://10.10.1.10:3128
# https = http://10.10.1.10:1080
# Default settings now declared in the default profile
""" % DEFAULT_PROFILE_NAME
class ConanClientConfigParser(ConfigParser, object):
def __init__(self, filename):
ConfigParser.__init__(self)
self.read(filename)
self.filename = filename
# So keys are not converted to lowercase, we override the default optionxform
optionxform = str
@property
def env_vars(self):
ret = {"CONAN_LOG_RUN_TO_OUTPUT": self._env_c("log.run_to_output", "CONAN_LOG_RUN_TO_OUTPUT", "True"),
"CONAN_LOG_RUN_TO_FILE": self._env_c("log.run_to_file", "CONAN_LOG_RUN_TO_FILE", "False"),
"CONAN_LOGGING_LEVEL": self._env_c("log.level", "CONAN_LOGGING_LEVEL", "50"),
"CONAN_TRACE_FILE": self._env_c("log.trace_file", "CONAN_TRACE_FILE", None),
"CONAN_PRINT_RUN_COMMANDS": self._env_c("log.print_run_commands", "CONAN_PRINT_RUN_COMMANDS", "False"),
"CONAN_COMPRESSION_LEVEL": self._env_c("general.compression_level", "CONAN_COMPRESSION_LEVEL", "9"),
"CONAN_PYLINTRC": self._env_c("general.pylintrc", "CONAN_PYLINTRC", None),
"CONAN_PYLINT_WERR": self._env_c("general.pylint_werr", "CONAN_PYLINT_WERR", None),
"CONAN_SYSREQUIRES_SUDO": self._env_c("general.sysrequires_sudo", "CONAN_SYSREQUIRES_SUDO", "False"),
"CONAN_RECIPE_LINTER": self._env_c("general.recipe_linter", "CONAN_RECIPE_LINTER", "True"),
"CONAN_CPU_COUNT": self._env_c("general.cpu_count", "CONAN_CPU_COUNT", None),
"CONAN_READ_ONLY_CACHE": self._env_c("general.read_only_cache", "CONAN_READ_ONLY_CACHE", None),
"CONAN_USER_HOME_SHORT": self._env_c("general.user_home_short", "CONAN_USER_HOME_SHORT", None),
"CONAN_VERBOSE_TRACEBACK": self._env_c("general.verbose_traceback", "CONAN_VERBOSE_TRACEBACK", None),
# http://www.vtk.org/Wiki/CMake_Cross_Compiling
"CONAN_CMAKE_GENERATOR": self._env_c("general.cmake_generator", "CONAN_CMAKE_GENERATOR", None),
"CONAN_CMAKE_TOOLCHAIN_FILE": self._env_c("general.cmake_toolchain_file", "CONAN_CMAKE_TOOLCHAIN_FILE", None),
"CONAN_CMAKE_SYSTEM_NAME": self._env_c("general.cmake_system_name", "CONAN_CMAKE_SYSTEM_NAME", None),
"CONAN_CMAKE_SYSTEM_VERSION": self._env_c("general.cmake_system_version", "CONAN_CMAKE_SYSTEM_VERSION", None),
"CONAN_CMAKE_SYSTEM_PROCESSOR": self._env_c("general.cmake_system_processor",
"CONAN_CMAKE_SYSTEM_PROCESSOR",
None),
"CONAN_CMAKE_FIND_ROOT_PATH": self._env_c("general.cmake_find_root_path",
"CONAN_CMAKE_FIND_ROOT_PATH",
None),
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_PROGRAM": self._env_c("general.cmake_find_root_path_mode_program",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_PROGRAM",
None),
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_LIBRARY": self._env_c("general.cmake_find_root_path_mode_library",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_LIBRARY",
None),
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_INCLUDE": self._env_c("general.cmake_find_root_path_mode_include",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_INCLUDE",
None),
"CONAN_BASH_PATH": self._env_c("general.bash_path", "CONAN_BASH_PATH", None),
"CONAN_MAKE_PROGRAM": self._env_c("general.conan_make_program", "CONAN_MAKE_PROGRAM", None),
}
# Filter None values
return {name: value for name, value in ret.items() if value is not None}
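    # Hedged note: _env_c resolves a value with the precedence
    # environment variable > conan.conf entry > hard-coded default,
    # so a variable the user already exported always wins.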
def _env_c(self, var_name, env_var_name, default_value):
env = os.environ.get(env_var_name, None)
if env is not None:
return env
try:
return unquote(self.get_item(var_name))
except ConanException:
return default_value
def get_item(self, item):
if not item:
return load(self.filename)
tokens = item.split(".", 1)
section_name = tokens[0]
try:
section = self.items(section_name)
except NoSectionError:
raise ConanException("'%s' is not a section of conan.conf" % section_name)
if len(tokens) == 1:
result = []
for item in section:
result.append(" = ".join(item))
return "\n".join(result)
else:
key = tokens[1]
try:
value = dict(section)[key]
if " #" in value: # Comments
value = value[:value.find(" #")].strip()
except KeyError:
raise ConanException("'%s' doesn't exist in [%s]" % (key, section_name))
return value
def set_item(self, key, value):
tokens = key.split(".", 1)
section_name = tokens[0]
if not self.has_section(section_name):
self.add_section(section_name)
if len(tokens) == 1: # defining full section
raise ConanException("You can't set a full section, please specify a key=value")
key = tokens[1]
super(ConanClientConfigParser, self).set(section_name, key, value)
with open(self.filename, "w") as f:
self.write(f)
def rm_item(self, item):
tokens = item.split(".", 1)
section_name = tokens[0]
if not self.has_section(section_name):
raise ConanException("'%s' is not a section of conan.conf" % section_name)
if len(tokens) == 1:
self.remove_section(tokens[0])
else:
key = tokens[1]
if not self.has_option(section_name, key):
raise ConanException("'%s' doesn't exist in [%s]" % (key, section_name))
self.remove_option(section_name, key)
with open(self.filename, "w") as f:
self.write(f)
def get_conf(self, varname):
"""Gets the section from config file or raises an exception"""
try:
return self.items(varname)
except NoSectionError:
raise ConanException("Invalid configuration, missing %s" % varname)
@property
def default_profile(self):
try:
return self.get_item("general.default_profile")
except ConanException:
return DEFAULT_PROFILE_NAME
@property
def cache_no_locks(self):
try:
return self.get_item("general.cache_no_locks")
except ConanException:
return False
@property
def storage(self):
return dict(self.get_conf("storage"))
@property
def storage_path(self):
# Try with CONAN_STORAGE_PATH
result = get_env('CONAN_STORAGE_PATH', None)
# Try with conan.conf "path"
if not result:
try:
env_conan_user_home = os.getenv("CONAN_USER_HOME")
# if env var is declared, any specified path will be relative to CONAN_USER_HOME
# even with the ~/
if env_conan_user_home:
storage = self.storage["path"]
if storage[:2] == "~/":
storage = storage[2:]
result = os.path.join(env_conan_user_home, storage)
else:
result = self.storage["path"]
except KeyError:
pass
# expand the result and check if absolute
if result:
result = conan_expand_user(result)
if not os.path.isabs(result):
raise ConanException("Conan storage path has to be an absolute path")
return result
@property
def proxies(self):
""" optional field, might not exist
"""
try:
proxies = self.get_conf("proxies")
            # If there is a [proxies] section but it is empty, fall back to the system proxies
            if not proxies:
                # There is no strong evidence that the following line is needed: if proxies
                # are configured at system level, requests should pick them up by itself,
                # so returning them here ought to be redundant. Moreover, system-level
                # proxy exclusion rules are not honoured in this path; the only workaround
                # is to remove the [proxies] section, so that this method returns None and
                # the proxies dict passed to requests stays empty. The line is kept because
                # removing it might break setups that happen to rely on it. See #1777.
return urllib.request.getproxies()
result = {k: (None if v == "None" else v) for k, v in proxies}
return result
except:
return None
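# Hedged usage sketch (not part of the original module): reading and updating
# conan.conf through ConanClientConfigParser. The path below is illustrative only.
def _example_client_conf_usage(conf_path="~/.conan/conan.conf"):
    conf = ConanClientConfigParser(conan_expand_user(conf_path))
    # "section.key" addressing, as used by `conan config get/set`:
    log_level = conf.get_item("log.level")   # e.g. "50", inline comment stripped
    conf.set_item("general.cpu_count", "4")  # persists the change back to the file
    return log_level, conf.storage_path, conf.env_vars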
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import unittest
import numpy as np
import pandas as pd
from pyspark import pandas as ps
from pyspark.testing.pandasutils import PandasOnSparkTestCase
from pyspark.testing.sqlutils import SQLTestUtils
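# A hedged sketch (not part of the original suite) of the parity pattern used by
# these tests: build a pandas Series, mirror it with ps.from_pandas, and require
# that a .dt accessor agrees on both sides.
def _example_dt_parity_check():
    pser = pd.Series(pd.date_range("2012-01-01", periods=3, freq="D"))
    psser = ps.from_pandas(pser)
    # to_pandas() materializes the pandas-on-Spark result for the comparison.
    assert (psser.dt.year.to_pandas() == pser.dt.year).all()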
class SeriesDateTimeTest(PandasOnSparkTestCase, SQLTestUtils):
@property
def pdf1(self):
date1 = pd.Series(pd.date_range("2012-1-1 12:45:31", periods=3, freq="M"))
date2 = pd.Series(pd.date_range("2013-3-11 21:45:00", periods=3, freq="W"))
return pd.DataFrame(dict(start_date=date1, end_date=date2))
@property
def pd_start_date(self):
return self.pdf1["start_date"]
@property
def ks_start_date(self):
return ps.from_pandas(self.pd_start_date)
def check_func(self, func):
self.assert_eq(func(self.ks_start_date), func(self.pd_start_date))
def test_timestamp_subtraction(self):
pdf = self.pdf1
psdf = ps.from_pandas(pdf)
# Those fail in certain OSs presumably due to different
# timezone behaviours inherited from C library.
actual = (psdf["end_date"] - psdf["start_date"] - 1).to_pandas()
expected = (pdf["end_date"] - pdf["start_date"]) // np.timedelta64(1, "s") - 1
# self.assert_eq(actual, expected)
actual = (psdf["end_date"] - pd.Timestamp("2012-1-1 12:45:31") - 1).to_pandas()
expected = (pdf["end_date"] - pd.Timestamp("2012-1-1 12:45:31")) // np.timedelta64(
1, "s"
) - 1
# self.assert_eq(actual, expected)
actual = (pd.Timestamp("2013-3-11 21:45:00") - psdf["start_date"] - 1).to_pandas()
expected = (pd.Timestamp("2013-3-11 21:45:00") - pdf["start_date"]) // np.timedelta64(
1, "s"
) - 1
# self.assert_eq(actual, expected)
psdf = ps.DataFrame(
{"a": pd.date_range("2016-12-31", "2017-01-08", freq="D"), "b": pd.Series(range(9))}
)
expected_error_message = "Datetime subtraction can only be applied to datetime series."
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"] - psdf["b"]
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"] - 1
with self.assertRaisesRegex(TypeError, expected_error_message):
1 - psdf["a"]
def test_arithmetic_op_exceptions(self):
psser = self.ks_start_date
py_datetime = self.pd_start_date.dt.to_pydatetime()
datetime_index = ps.Index(self.pd_start_date)
for other in [1, 0.1, psser, datetime_index, py_datetime]:
expected_err_msg = "Addition can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser + other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other + psser)
expected_err_msg = "Multiplication can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser * other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other * psser)
expected_err_msg = "True division can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser / other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other / psser)
expected_err_msg = "Floor division can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser // other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other // psser)
expected_err_msg = "Modulo can not be applied to datetimes."
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser % other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other % psser)
expected_err_msg = "Datetime subtraction can only be applied to datetime series."
for other in [1, 0.1]:
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser - other)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: other - psser)
self.assertRaisesRegex(TypeError, expected_err_msg, lambda: psser - other)
self.assertRaises(NotImplementedError, lambda: py_datetime - psser)
def test_date_subtraction(self):
pdf = self.pdf1
psdf = ps.from_pandas(pdf)
self.assert_eq(
psdf["end_date"].dt.date - psdf["start_date"].dt.date,
(pdf["end_date"].dt.date - pdf["start_date"].dt.date).dt.days,
)
self.assert_eq(
psdf["end_date"].dt.date - datetime.date(2012, 1, 1),
(pdf["end_date"].dt.date - datetime.date(2012, 1, 1)).dt.days,
)
self.assert_eq(
datetime.date(2013, 3, 11) - psdf["start_date"].dt.date,
(datetime.date(2013, 3, 11) - pdf["start_date"].dt.date).dt.days,
)
psdf = ps.DataFrame(
{"a": pd.date_range("2016-12-31", "2017-01-08", freq="D"), "b": pd.Series(range(9))}
)
expected_error_message = "Date subtraction can only be applied to date series."
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"].dt.date - psdf["b"]
with self.assertRaisesRegex(TypeError, expected_error_message):
psdf["a"].dt.date - 1
with self.assertRaisesRegex(TypeError, expected_error_message):
1 - psdf["a"].dt.date
@unittest.skip(
"It fails in certain OSs presumably due to different "
"timezone behaviours inherited from C library."
)
def test_div(self):
pdf = self.pdf1
psdf = ps.from_pandas(pdf)
for u in "D", "s", "ms":
duration = np.timedelta64(1, u)
self.assert_eq(
(psdf["end_date"] - psdf["start_date"]) / duration,
(pdf["end_date"] - pdf["start_date"]) / duration,
)
@unittest.skip("It is currently failed probably for the same reason in 'test_subtraction'")
def test_date(self):
self.check_func(lambda x: x.dt.date)
def test_time(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.time)
def test_timetz(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.timetz)
def test_year(self):
self.check_func(lambda x: x.dt.year)
def test_month(self):
self.check_func(lambda x: x.dt.month)
def test_day(self):
self.check_func(lambda x: x.dt.day)
def test_hour(self):
self.check_func(lambda x: x.dt.hour)
def test_minute(self):
self.check_func(lambda x: x.dt.minute)
def test_second(self):
self.check_func(lambda x: x.dt.second)
def test_microsecond(self):
self.check_func(lambda x: x.dt.microsecond)
def test_nanosecond(self):
with self.assertRaises(NotImplementedError):
self.check_func(lambda x: x.dt.nanosecond)
def test_week(self):
self.check_func(lambda x: x.dt.week)
def test_weekofyear(self):
self.check_func(lambda x: x.dt.weekofyear)
def test_dayofweek(self):
self.check_func(lambda x: x.dt.dayofweek)
def test_weekday(self):
self.check_func(lambda x: x.dt.weekday)
def test_dayofyear(self):
self.check_func(lambda x: x.dt.dayofyear)
def test_quarter(self):
        self.check_func(lambda x: x.dt.quarter)
def test_is_month_start(self):
self.check_func(lambda x: x.dt.is_month_start)
def test_is_month_end(self):
self.check_func(lambda x: x.dt.is_month_end)
def test_is_quarter_start(self):
self.check_func(lambda x: x.dt.is_quarter_start)
def test_is_quarter_end(self):
self.check_func(lambda x: x.dt.is_quarter_end)
def test_is_year_start(self):
self.check_func(lambda x: x.dt.is_year_start)
def test_is_year_end(self):
self.check_func(lambda x: x.dt.is_year_end)
def test_is_leap_year(self):
self.check_func(lambda x: x.dt.is_leap_year)
def test_daysinmonth(self):
self.check_func(lambda x: x.dt.daysinmonth)
def test_days_in_month(self):
self.check_func(lambda x: x.dt.days_in_month)
@unittest.expectedFailure
def test_tz_localize(self):
self.check_func(lambda x: x.dt.tz_localize("America/New_York"))
@unittest.expectedFailure
def test_tz_convert(self):
self.check_func(lambda x: x.dt.tz_convert("America/New_York"))
def test_normalize(self):
self.check_func(lambda x: x.dt.normalize())
def test_strftime(self):
self.check_func(lambda x: x.dt.strftime("%Y-%m-%d"))
def test_round(self):
self.check_func(lambda x: x.dt.round(freq="min"))
self.check_func(lambda x: x.dt.round(freq="H"))
def test_floor(self):
self.check_func(lambda x: x.dt.floor(freq="min"))
self.check_func(lambda x: x.dt.floor(freq="H"))
def test_ceil(self):
        self.check_func(lambda x: x.dt.ceil(freq="min"))
        self.check_func(lambda x: x.dt.ceil(freq="H"))
@unittest.skip("Unsupported locale setting")
def test_month_name(self):
self.check_func(lambda x: x.dt.month_name())
self.check_func(lambda x: x.dt.month_name(locale="en_US.UTF-8"))
@unittest.skip("Unsupported locale setting")
def test_day_name(self):
self.check_func(lambda x: x.dt.day_name())
self.check_func(lambda x: x.dt.day_name(locale="en_US.UTF-8"))
def test_unsupported_type(self):
self.assertRaisesRegex(
ValueError, "Cannot call DatetimeMethods on type LongType", lambda: ps.Series([0]).dt
)
if __name__ == "__main__":
from pyspark.pandas.tests.test_series_datetime import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
import types
BaseModuleStr="ShareYourSystem.Specials.Predicters.Predicter"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Classer"
SYS.setSubModule(globals())
SYS.addDo('Predisenser','Predisense','Predisensing','Predisensed')
#</DefineAugmentation>
#<ImportSpecificModules>
import scipy.stats
import numpy as np
#</ImportSpecificModules>
#<DefineLocals>
def getKrenelFloatsArray(
_LevelFloatsTuple=None,
_TimeFloatsTuple=None,
_RunTimeFloat=100.,
_StepTimeFloat=0.1,
):
#get the bins
BinsInt=_RunTimeFloat/_StepTimeFloat
#init
KrenelFloatsArray=_LevelFloatsTuple[0]*np.ones(
BinsInt,
dtype=type(_LevelFloatsTuple[0])
)
#Debug
'''
print('getKrenelFloatsArray')
print('_TimeFloatsTuple[0]/_StepTimeFloat:_TimeFloatsTuple[1]/_StepTimeFloat')
print(_TimeFloatsTuple[0]/_StepTimeFloat,_TimeFloatsTuple[1]/_StepTimeFloat)
print('_LevelFloatsTuple[1] is '+str(_LevelFloatsTuple[1]))
print('')
'''
#put the second level
KrenelFloatsArray[
int(_TimeFloatsTuple[0]/_StepTimeFloat):int(_TimeFloatsTuple[1]/_StepTimeFloat)
]=_LevelFloatsTuple[1]
#return
return KrenelFloatsArray
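#Hedged usage note (not in the original module): with the defaults above,
#getKrenelFloatsArray([0.,1.],[25.,75.]) returns a 1000-bin trace that is 0
#everywhere except a plateau at 1 between t=25 and t=75 (a square "crenel").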
def getFourierFloatsArray(
_RunTimeFloat=100.,
_StepTimeFloat=0.1,
):
#get the bins
BinsInt=_RunTimeFloat/_StepTimeFloat
#compute
FourierFloatsArray=np.array(
map(
lambda __TimeFloat:
sum(
map(
lambda __FrequencyFloat,__PhaseFloat:
np.cos(2.*np.pi*0.001*__TimeFloat*__FrequencyFloat+__PhaseFloat),
[200.],
[np.pi/2.]
)
),
np.arange(0.,_RunTimeFloat,_StepTimeFloat)
)
)
#Debug
'''
print('getFourierFloatsArray l 86')
print('FourierFloatsArray is ')
print(FourierFloatsArray)
print('')
'''
#return
return FourierFloatsArray
def getTickFloatsArray(_LimList,_SampleFloat):
#Debug
'''
print('getTickFloatsArray l 64')
print('_LimList is')
print(_LimList)
print('_SampleFloat is ')
print(_SampleFloat)
print('')
'''
#return
return np.array(list(np.arange(
_LimList[0],
_LimList[1],
(_LimList[1]-_LimList[0])/float(_SampleFloat)
))+[_LimList[1]])
SYS.getTickFloatsArray=getTickFloatsArray
#</DefineLocals>
#<DefineClass>
@DecorationClass()
class PredisenserClass(BaseClass):
def default_init(self,
_PredisensingRunTimeFloat=100.,
_PredisensingStepTimeFloat=0.1,
_PredisensingClampFloat=0.1,
_PredisensingMonitorIntsList=None,
_PredisensedTimeTraceFloatsArray=None,
_PredisensedCommandTraceFloatsArray=None,
_PredisensedInputCurrentTraceFloatsArray=None,
_PredisensedInitialSensorFloatsArray=None,
_PredisensedSensorTraceFloatsArray=None,
_PredisenseCommandColorTuplesList=None,
_PredisenseSensorColorTuplesList=None,
**_KwargVariablesDict
):
""" """
#Call the parent init method
BaseClass.__init__(self,**_KwargVariablesDict)
def do_predisense(self):
#/#################/#
# External care : Prepare time and the command
#
#arange
self.PredisensedTimeTraceFloatsArray=np.arange(
0.,
self.PredisensingRunTimeFloat,
self.PredisensingStepTimeFloat
)
#array
self.PredisensedCommandTraceFloatsArray=np.array(
map(
lambda __IndexInt:
1.*getKrenelFloatsArray(
[
0.,
self.PredisensingClampFloat
],
[
self.PredisensingRunTimeFloat/4.,
3.*self.PredisensingRunTimeFloat/4.
#1.5*self.PredisensingRunTimeFloat/4.,
],
self.PredisensingRunTimeFloat,
self.PredisensingStepTimeFloat
)*self.PredisensingClampFloat*getFourierFloatsArray(
self.PredisensingRunTimeFloat,
self.PredisensingStepTimeFloat
),
xrange(self.PredictingSensorsInt)
)
)
#set
self.PredisensedInputCurrentTraceFloatsArray=self.PredisensedCommandTraceFloatsArray*self.PredictingConstantTimeFloat
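		#Hedged summary of the block above (not in the original code): the command
		#is nonzero only between T/4 and 3T/4, where it is the clamp-scaled cosine
		#drive from getFourierFloatsArray; the input current is that command
		#multiplied by the predicting time constant.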
#/#################/#
# Prepare the initial conditions
#
#random sensors
PredisensedInitialSensorFloatsArray=0.1*self.PredisensingClampFloat*scipy.stats.uniform.rvs(
size=self.PredictingSensorsInt
)
#/#################/#
# Shape the size of the sensors run
#
#init sensors
self.PredisensedSensorTraceFloatsArray=np.zeros(
(
self.PredictingSensorsInt,
len(self.PredisensedTimeTraceFloatsArray)
)
)
self.PredisensedSensorTraceFloatsArray[:,0]=PredisensedInitialSensorFloatsArray
#/#################/#
# integrativ Loop
#
#for loop
for __IndexInt in xrange(1,len(self.PredisensedTimeTraceFloatsArray)):
#/#################/#
# Sensor part
#
#debug
'''
self.debug(
[
'shape(self.PredisensedCommandTraceFloatsArray) is '+str(
np.shape(self.PredisensedCommandTraceFloatsArray)
),
'shape(self.PredisensedSensorTraceFloatsArray) is '+str(
np.shape(self.PredisensedSensorTraceFloatsArray)
),
('self.',self,[
'PredictedSensorJacobianFloatsArray'
])
]
)
'''
#Current
PredisensedSensorCurrentFloatsArray=np.dot(
self.PredictedSensorJacobianFloatsArray,
self.PredisensedSensorTraceFloatsArray[:,__IndexInt-1]
)+self.PredisensedCommandTraceFloatsArray[:,__IndexInt-1]
#/#################/#
# Euler part
#
#sensor
self.PredisensedSensorTraceFloatsArray[
:,
__IndexInt
]=self.PredisensedSensorTraceFloatsArray[
:,
__IndexInt-1
]+PredisensedSensorCurrentFloatsArray*self.PredisensingStepTimeFloat
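			#In equation form (hedged paraphrase of the two steps above):
			#    x[t] = x[t-1] + (J . x[t-1] + c[t-1]) * dt
			#i.e. a forward-Euler step of dx/dt = J.x + c, with J the predicted
			#sensor Jacobian and c the command trace.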
#set
LocalDict=locals()
def mimic_pyplot(self):
#debug
'''
self.debug(
[
('self.',self,['PredisensingMonitorIntsList'])
]
)
'''
#/#################/#
# Build the colors
#
self.PredisenseCommandColorTuplesList=SYS.getColorTuplesList(
'black','red',len(self.PredisensingMonitorIntsList)+3,_PlotBool=False
)[3:]
self.PredisenseSensorColorTuplesList=SYS.getColorTuplesList(
'black','blue',len(self.PredisensingMonitorIntsList)+3,_PlotBool=False
)[3:]
#debug
'''
self.debug(
[
'We have built the colors',
('self.',self,[
'PredisenseCommandColorTuplesList'
'PredisenseSensorColorTuplesList'])
]
)
'''
#/#################/#
# Build the input-unit traces axes
#
#init
self.mapSet(
{
'-Charts':[
('ManagingBeforeSetVariable',
{
'FiguringShapeIntsTuple':(5,15),
'#copy:PyplotingDrawVariable':
[
(
'#axes',
[
('set_xticks',{
'#liarg:#map@get':[
">>SYS.set(SYS,'TimeTicksArray',SYS.getTickFloatsArray([0.,self.PredisensingRunTimeFloat],4)).TimeTicksArray"
]
}),
('set_xticklabels',{
'#liarg:#map@get':[
">>map(lambda __Float:'$%.0f$'%__Float,SYS.TimeTicksArray)"
]
}),
('set_xlim',{
'#liarg:#map@get':[0.,'>>self.PredisensingRunTimeFloat']
})
]
)
]
}),
('|Sensor',{
'-Draws':{
'#map@set':map(
lambda __IntsTuple:
(
'|'+str(__IntsTuple[0]),
{
'PyplotingDrawVariable':
[
('plot',
{
'#liarg:#map@get':[
'PredisensedTimeTraceFloatsArray',
'>>self.PredisensedInputCurrentTraceFloatsArray.__getitem__('+str(__IntsTuple[1])+')'
],
'#kwarg':{
'label':'$\\tau_{D}c_{'+str(__IntsTuple[1])+'}$',
'linestyle':'-',
'color':self.PredisenseCommandColorTuplesList[__IntsTuple[0]]
}
}),
('plot',
{
'#liarg:#map@get':[
'PredisensedTimeTraceFloatsArray',
'>>self.PredisensedSensorTraceFloatsArray['+str(__IntsTuple[1])+',:]'
],
'#kwarg':{
'color':self.PredisenseSensorColorTuplesList[__IntsTuple[0]],
'label':'$x_{'+str(__IntsTuple[1])+'}$',
'linewidth':3,
'linestyle':'-'
}
})
]
}
),
enumerate(self.PredisensingMonitorIntsList)
)
},
'PyplotingDrawVariable.extend':
[[
(
'#axes',
[
('set_ylabel','$\\tau_{D}c(t),\ x(t)$'),
('set_ylim',{'#liarg:#map@get':[
"".join([
">>SYS.set(SYS,'SensorLimFloatsArray',",
"[min(-0.1,self.PredisensedSensorTraceFloatsArray.min()),1.5*self.PredisensingClampFloat*self.PredictingConstantTimeFloat]",
').SensorLimFloatsArray'
])]
}),
('set_yticks',{
'#liarg:#map@get':[
"".join([
">>SYS.set(SYS,'SensorTickFloatsArray',",
"map(lambda __Float:float('%.2f'%__Float),",
"SYS.getTickFloatsArray(",
"SYS.SensorLimFloatsArray,3",
"))).SensorTickFloatsArray"
])
]
}),
('set_yticklabels',{
'#liarg:#map@get':[
"".join([
">>SYS.set(SYS,'SensorTickStrsArray',",
"map(lambda __Float:'$'+str(__Float)+'$',",
"SYS.SensorTickFloatsArray)).SensorTickStrsArray"
])
]
}),
('tick_params',{
'#kwarg':{
'length':10,
'width':5,
'which':'major'
}
}),
('tick_params',{
'#kwarg':{
'length':5,
'width':2,
'which':'minor'
}
}),
('xaxis.set_ticks_position',
{
'#liarg':['bottom']
}
),
('yaxis.set_ticks_position',
{
'#liarg':['left']
}
),
('legend',{
'#liarg':[],
'#kwarg':{
'fontsize':10,
'shadow':True,
'fancybox':True,
'ncol':max(1,len(self.PredisensingMonitorIntsList)/2),
'loc':2,
'bbox_to_anchor':(1.05, 1)
}
})
]
)
]]
})
]
}
)
#debug
self.debug(
'Ok we have setted the plots'
)
#call the base method
BaseClass.pyplot(self)
#</DefineClass>
#<DefinePrint>
PredisenserClass.PrintingClassSkipKeyStrsList.extend(
[
'PredisensingRunTimeFloat',
'PredisensingStepTimeFloat',
'PredisensingClampFloat',
'PredisensingMonitorIntsList',
'PredisensedTimeTraceFloatsArray',
'PredisensedInputCurrentTraceFloatsArray',
'PredisensedCommandTraceFloatsArray',
'PredisensedInitialSensorFloatsArray',
'PredisensedSensorTraceFloatsArray',
'PredisenseCommandColorTuplesList',
'PredisenseSensorColorTuplesList'
]
)
#</DefinePrint>
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2013 Jacek Markowski, [email protected]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import shutil
import platform
from csv import reader
from PySide import QtGui, QtCore
class Shared(object):
def __init__(self):
self.new_lines()
def delete_file(self, file_delete, path):
''' Delete file dialog'''
reply = QtGui.QMessageBox.question(self, 'Delete?',
"Are you sure to delete %s?"%file_delete, QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
if file_delete != 'default':
os.remove(path+file_delete)
def rm_lines(self, item):
''' Removes new lines from string'''
rem = item.replace('\n', '')
rem = rem.replace('\r', '')
rem = rem.replace(' ', '')
return rem
def new_lines(self):
''' Sets newline style for os'''
system = platform.system()
if system == 'Windows':
new_line = '\r\n'
elif system == 'Linux':
new_line = '\n'
else:
new_line = '\n'
self.nl = new_line
def default_directories(self):
''' Creates or recreates default directories'''
paths = ('export', 'tmp', 'profiles', 'leagues')
for i in paths:
if os.path.isdir(os.path.join(os.getcwd(),i,'')):
pass
else:
os.mkdir(os.path.join(os.getcwd(),i,''))
profiles = ('bets', 'export', 'filters', 'links',
'ranges', 'selector', 'simulation', 'teams','auto_save')
for i in profiles:
if os.path.isdir(os.path.join(os.getcwd(), 'profiles', i, '')):
pass
else:
os.mkdir(os.path.join(os.getcwd(), 'profiles', i, ''))
leagues = ('current', 'own', 'old')
for i in leagues:
if os.path.isdir(os.path.join(os.getcwd(), 'leagues', i, '')):
pass
else:
os.mkdir(os.path.join(os.getcwd(), 'leagues', i, ''))
tmp = ('leagues', 'simulations')
for i in tmp:
if os.path.isdir(os.path.join(os.getcwd(), 'tmp', i, '')):
pass
else:
os.mkdir(os.path.join(os.getcwd(), 'tmp', i, ''))
def odds_rescale(self,val,odds_level):
''' Rescaling odds from [-1,1]'''
# OldRange = (OldMax - OldMin)
# NewRange = (NewMax - NewMin)
# NewValue = (((OldValue - OldMin) * NewRange) / OldRange) + NewMin
old_range = 2
new_range = 14
odd = ((((val + 1) * new_range) / old_range) + 1)*odds_level/100.0
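        # Hedged worked example (not in the original code): with odds_level=100,
        # val=-1 -> 1.0, val=0 -> 8.0, val=1 -> 15.0; the clamp below then keeps
        # every rescaled odd at 1 or above.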
odd = round(odd,2)
if odd < 1:
odd = 1
return odd
def find_broken_leagues(self):
leagues = os.listdir('leagues')
paths = []
for i in leagues:
paths.append(i)
if os.path.isdir(os.path.join('tmp','leagues','')+i):
pass
else:
os.mkdir(os.path.join('tmp','leagues','')+i)
with open(os.path.join('tmp','leagues','log.txt'),'w') as log:
errors = 0
for path in paths:
files =[]
leagues = os.listdir(os.path.join('leagues',path))
for i in leagues:
with open(os.path.join('leagues',path,i),'r') as f:
for a in reader(f):
if len(a[3])> 4 or len(a[4])> 4:
errors += 1
line = path+self.nl+i+'>>>'+str(a)+self.nl
QtGui.QApplication.processEvents()
log.write(line)
file_path = os.path.join(path,i)
if not file_path in files[:]:
files.append(file_path)
for i in files:
src = os.path.join('leagues',i)
dst = os.path.join('tmp','leagues',i)
shutil.copy(src, dst)
self.fix_broken_leagues(src)
def fix_broken_leagues(self,path):
''' Removes too long lines from csv file'''
with open(path,'r') as csv_file:
tmp_file_open = reader(csv_file)
tmp_file = list(tmp_file_open)
match_list = []
for t in xrange(0, len(tmp_file)):
if len(tmp_file[t][3]) > 4 or len(tmp_file[t][4]) > 4:
print tmp_file[t][3]
print tmp_file[t][4]
else:
match_list.append(tmp_file[t])
with open(path,'w') as fix_file:
for i in xrange(0, len(match_list)):
line = str(match_list[i])
line = line.replace('[','')
line = line.replace(']','')
line = line.replace("'",'')
line = line.replace(' ','')
#print 'write', line
fix_file.write(line+self.nl)
def filters_save(self):
''' Save match filter'''
        # stats differences
check_points = self.gui.check_points.isChecked()
check_points_ha = self.gui.check_points_ha.isChecked()
check_form = self.gui.check_form.isChecked()
check_form_ha = self.gui.check_form_ha.isChecked()
combo_points = self.gui.combo_points.currentText()
combo_points_ha = self.gui.combo_points_ha.currentText()
combo_form = self.gui.combo_form.currentText()
combo_form_ha = self.gui.combo_form_ha.currentText()
spin_points = self.gui.spin_points.value()
spin_points_ha = self.gui.spin_points_ha.value()
spin_form = self.gui.spin_form.value()
spin_form_ha = self.gui.spin_form_ha.value()
# series home
combo_h_wins = self.gui.combo_h_wins.currentText()
combo_h_winshome = self.gui.combo_h_winshome.currentText()
combo_h_draws = self.gui.combo_h_draws.currentText()
combo_h_drawshome = self.gui.combo_h_drawshome.currentText()
combo_h_loses = self.gui.combo_h_loses.currentText()
combo_h_loseshome = self.gui.combo_h_loseshome.currentText()
combo_h_nowins = self.gui.combo_h_nowins.currentText()
combo_h_nowinshome = self.gui.combo_h_nowinshome.currentText()
combo_h_nodraws = self.gui.combo_h_nodraws.currentText()
combo_h_nodrawshome = self.gui.combo_h_nodrawshome.currentText()
combo_h_noloses = self.gui.combo_h_noloses.currentText()
combo_h_noloseshome = self.gui.combo_h_noloseshome.currentText()
#bts,under,over
combo_h_bts = self.gui.combo_h_bts.currentText()
combo_h_btshome = self.gui.combo_h_btshome.currentText()
combo_h_over = self.gui.combo_h_over.currentText()
combo_h_overhome = self.gui.combo_h_overhome.currentText()
combo_h_under = self.gui.combo_h_under.currentText()
combo_h_underhome = self.gui.combo_h_underhome.currentText()
# series away
combo_a_wins = self.gui.combo_a_wins.currentText()
combo_a_winsaway = self.gui.combo_a_winsaway.currentText()
combo_a_draws = self.gui.combo_a_draws.currentText()
combo_a_drawsaway = self.gui.combo_a_drawsaway.currentText()
combo_a_loses = self.gui.combo_a_loses.currentText()
combo_a_losesaway = self.gui.combo_a_losesaway.currentText()
combo_a_nowins = self.gui.combo_a_nowins.currentText()
combo_a_nowinsaway = self.gui.combo_a_nowinsaway.currentText()
combo_a_nodraws = self.gui.combo_a_nodraws.currentText()
combo_a_nodrawsaway = self.gui.combo_a_nodrawsaway.currentText()
combo_a_noloses = self.gui.combo_a_noloses.currentText()
combo_a_nolosesaway = self.gui.combo_a_nolosesaway.currentText()
#bts,under,over
combo_a_bts = self.gui.combo_a_bts.currentText()
combo_a_btsaway = self.gui.combo_a_btsaway.currentText()
combo_a_over = self.gui.combo_a_over.currentText()
combo_a_overaway = self.gui.combo_a_overaway.currentText()
combo_a_under = self.gui.combo_a_under.currentText()
combo_a_underaway = self.gui.combo_a_underaway.currentText()
#
spin_h_wins = self.gui.spin_h_wins.value()
spin_h_winshome = self.gui.spin_h_winshome.value()
spin_h_draws = self.gui.spin_h_draws.value()
spin_h_drawshome = self.gui.spin_h_drawshome.value()
spin_h_loses = self.gui.spin_h_loses.value()
spin_h_loseshome = self.gui.spin_h_loseshome.value()
spin_h_nowins = self.gui.spin_h_nowins.value()
spin_h_nowinshome = self.gui.spin_h_nowinshome.value()
spin_h_nodraws = self.gui.spin_h_nodraws.value()
spin_h_nodrawshome = self.gui.spin_h_nodrawshome.value()
spin_h_noloses = self.gui.spin_h_noloses.value()
spin_h_noloseshome = self.gui.spin_h_noloseshome.value()
spin_h_bts = self.gui.spin_h_bts.value()
spin_h_btshome = self.gui.spin_h_btshome.value()
spin_h_over = self.gui.spin_h_over.value()
spin_h_overhome = self.gui.spin_h_overhome.value()
spin_h_under = self.gui.spin_h_under.value()
spin_h_underhome = self.gui.spin_h_underhome.value()
spin_a_wins = self.gui.spin_a_wins.value()
spin_a_winsaway = self.gui.spin_a_winsaway.value()
spin_a_draws = self.gui.spin_a_draws.value()
spin_a_drawsaway = self.gui.spin_a_drawsaway.value()
spin_a_loses = self.gui.spin_a_loses.value()
spin_a_losesaway = self.gui.spin_a_losesaway.value()
spin_a_nowins = self.gui.spin_a_nowins.value()
spin_a_nowinsaway = self.gui.spin_a_nowinsaway.value()
spin_a_nodraws = self.gui.spin_a_nodraws.value()
spin_a_nodrawsaway = self.gui.spin_a_nodrawsaway.value()
spin_a_noloses = self.gui.spin_a_noloses.value()
spin_a_nolosesaway = self.gui.spin_a_nolosesaway.value()
spin_a_bts = self.gui.spin_a_bts.value()
spin_a_btsaway = self.gui.spin_a_btsaway.value()
spin_a_over = self.gui.spin_a_over.value()
spin_a_overaway = self.gui.spin_a_overaway.value()
spin_a_under = self.gui.spin_a_under.value()
spin_a_underaway = self.gui.spin_a_underaway.value()
# odds
spin_odd_1_min = self.gui.spin_odd_1_min.value()
spin_odd_x_min = self.gui.spin_odd_x_min.value()
spin_odd_2_min = self.gui.spin_odd_2_min.value()
spin_odd_1x_min = self.gui.spin_odd_1x_min.value()
spin_odd_x2_min = self.gui.spin_odd_x2_min.value()
spin_odd_1_max = self.gui.spin_odd_1_max.value()
spin_odd_x_max = self.gui.spin_odd_x_max.value()
spin_odd_2_max = self.gui.spin_odd_2_max.value()
spin_odd_1x_max = self.gui.spin_odd_1x_max.value()
spin_odd_x2_max = self.gui.spin_odd_x2_max.value()
val =[
check_points,
check_points_ha,
check_form,
check_form_ha,
combo_points,
combo_points_ha,
combo_form,
combo_form_ha,
combo_h_wins,
combo_h_winshome,
combo_h_draws,
combo_h_drawshome,
combo_h_loses,
combo_h_loseshome,
combo_h_nowins,
combo_h_nowinshome,
combo_h_nodraws,
combo_h_nodrawshome,
combo_h_noloses,
combo_h_noloseshome,
combo_h_bts,
combo_h_btshome,
combo_h_over,
combo_h_overhome,
combo_h_under,
combo_h_underhome,
combo_a_wins,
combo_a_winsaway,
combo_a_draws,
combo_a_drawsaway,
combo_a_loses,
combo_a_losesaway,
combo_a_nowins,
combo_a_nowinsaway,
combo_a_nodraws,
combo_a_nodrawsaway,
combo_a_noloses,
combo_a_nolosesaway,
combo_a_bts,
combo_a_btsaway,
combo_a_over,
combo_a_overaway,
combo_a_under,
combo_a_underaway,
spin_points,
spin_points_ha,
spin_form,
spin_form_ha,
spin_h_wins,
spin_h_winshome,
spin_h_draws,
spin_h_drawshome,
spin_h_loses,
spin_h_loseshome,
spin_h_nowins,
spin_h_nowinshome,
spin_h_nodraws,
spin_h_nodrawshome,
spin_h_noloses,
spin_h_noloseshome,
spin_h_bts,
spin_h_btshome,
spin_h_over,
spin_h_overhome,
spin_h_under,
spin_h_underhome,
spin_a_wins,
spin_a_winsaway,
spin_a_draws,
spin_a_drawsaway,
spin_a_loses,
spin_a_losesaway,
spin_a_nowins,
spin_a_nowinsaway,
spin_a_nodraws,
spin_a_nodrawsaway,
spin_a_noloses,
spin_a_nolosesaway,
spin_a_bts,
spin_a_btsaway,
spin_a_over,
spin_a_overaway,
spin_a_under,
spin_a_underaway,
spin_odd_1_min,
spin_odd_x_min,
spin_odd_2_min,
spin_odd_1x_min,
spin_odd_x2_min,
spin_odd_1_max,
spin_odd_x_max,
spin_odd_2_max,
spin_odd_1x_max,
spin_odd_x2_max
]
file_name = self.gui.line_filters.text()
if self.app == 'simulator':
path = os.path.join('profiles','filters','')
elif self.app == 'selector':
path = os.path.join('profiles','selector','')
with open(path+file_name,'w') as save:
for i in val:
save.write(str(i)+self.nl)
self.filters_tree()
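    # Note (added, hedged): the saved profile is plain text with one widget value
    # per line, in the fixed order of the `val` list above -- 4 checkbox states,
    # then 40 combo texts, then the numeric spin values. filters_load() below
    # relies on exactly this ordering (indices 0-3, 4-43, 44+).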
def filters_load(self,file_name=None):
''' Load match filter'''
val =[
self.gui.check_points,
self.gui.check_points_ha,
self.gui.check_form,
self.gui.check_form_ha,
self.gui.combo_points,
self.gui.combo_points_ha,
self.gui.combo_form,
self.gui.combo_form_ha,
self.gui.combo_h_wins,
self.gui.combo_h_winshome,
self.gui.combo_h_draws,
self.gui.combo_h_drawshome,
self.gui.combo_h_loses,
self.gui.combo_h_loseshome,
self.gui.combo_h_nowins,
self.gui.combo_h_nowinshome,
self.gui.combo_h_nodraws,
self.gui.combo_h_nodrawshome,
self.gui.combo_h_noloses,
self.gui.combo_h_noloseshome,
self.gui.combo_h_bts,
self.gui.combo_h_btshome,
self.gui.combo_h_over,
self.gui.combo_h_overhome,
self.gui.combo_h_under,
self.gui.combo_h_underhome,
self.gui.combo_a_wins,
self.gui.combo_a_winsaway,
self.gui.combo_a_draws,
self.gui.combo_a_drawsaway,
self.gui.combo_a_loses,
self.gui.combo_a_losesaway,
self.gui.combo_a_nowins,
self.gui.combo_a_nowinsaway,
self.gui.combo_a_nodraws,
self.gui.combo_a_nodrawsaway,
self.gui.combo_a_noloses,
self.gui.combo_a_nolosesaway,
self.gui.combo_a_bts,
self.gui.combo_a_btsaway,
self.gui.combo_a_over,
self.gui.combo_a_overaway,
self.gui.combo_a_under,
self.gui.combo_a_underaway,
self.gui.spin_points,
self.gui.spin_points_ha,
self.gui.spin_form,
self.gui.spin_form_ha,
self.gui.spin_h_wins,
self.gui.spin_h_winshome,
self.gui.spin_h_draws,
self.gui.spin_h_drawshome,
self.gui.spin_h_loses,
self.gui.spin_h_loseshome,
self.gui.spin_h_nowins,
self.gui.spin_h_nowinshome,
self.gui.spin_h_nodraws,
self.gui.spin_h_nodrawshome,
self.gui.spin_h_noloses,
self.gui.spin_h_noloseshome,
self.gui.spin_h_bts,
self.gui.spin_h_btshome,
self.gui.spin_h_over,
self.gui.spin_h_overhome,
self.gui.spin_h_under,
self.gui.spin_h_underhome,
self.gui.spin_a_wins,
self.gui.spin_a_winsaway,
self.gui.spin_a_draws,
self.gui.spin_a_drawsaway,
self.gui.spin_a_loses,
self.gui.spin_a_losesaway,
self.gui.spin_a_nowins,
self.gui.spin_a_nowinsaway,
self.gui.spin_a_nodraws,
self.gui.spin_a_nodrawsaway,
self.gui.spin_a_noloses,
self.gui.spin_a_nolosesaway,
self.gui.spin_a_bts,
self.gui.spin_a_btsaway,
self.gui.spin_a_over,
self.gui.spin_a_overaway,
self.gui.spin_a_under,
self.gui.spin_a_underaway,
self.gui.spin_odd_1_min,
self.gui.spin_odd_x_min,
self.gui.spin_odd_2_min,
self.gui.spin_odd_1x_min,
self.gui.spin_odd_x2_min,
self.gui.spin_odd_1_max,
self.gui.spin_odd_x_max,
self.gui.spin_odd_2_max,
self.gui.spin_odd_1x_max,
self.gui.spin_odd_x2_max
]
if file_name == None:
item = self.gui.tree_filters_profile.currentItem()
file_name = str(item.text(0))
print file_name, '<'
if self.app == 'simulator':
path = os.path.join('profiles','filters','')
elif self.app == 'selector':
path = os.path.join('profiles','selector','')
print path,'<'
with open(path+file_name,'r') as f:
load = list(f)
for i in range(0,len(val)):
if i <=3: #checkbutton
state = self.rm_lines(load[i])
if state == 'True':
state = 2
else:
state = 0
state =QtCore.Qt.CheckState(state)
val[i].setCheckState(state)
if i >3 and i <= 43: #combobox
item =self.rm_lines(load[i])
index = val[i].findText(item)
val[i].setCurrentIndex(index)
if i > 43:
item =self.rm_lines(load[i])
item =float(item)
val[i].setValue(item)
if file_name == None:
item = self.gui.tree_filters_profile.currentItem()
file_name = str(item.text(0))
with open(path+file_name,'r') as f:
val = list(f)
filter_vars =(
'check_points',
'check_points_ha',
'check_form',
'check_form_ha',
'combo_points',
'combo_points_ha',
'combo_form',
'combo_form_ha',
'combo_h_wins',
'combo_h_winshome',
'combo_h_draws',
'combo_h_drawshome',
'combo_h_loses',
'combo_h_loseshome',
'combo_h_nowins',
'combo_h_nowinshome',
'combo_h_nodraws',
'combo_h_nodrawshome',
'combo_h_noloses',
'combo_h_noloseshome',
'combo_h_bts',
'combo_h_btshome',
'combo_h_over',
'combo_h_overhome',
'combo_h_under',
'combo_h_underhome',
'combo_a_wins',
'combo_a_winsaway',
'combo_a_draws',
'combo_a_drawsaway',
'combo_a_loses',
'combo_a_losesaway',
'combo_a_nowins',
'combo_a_nowinsaway',
'combo_a_nodraws',
'combo_a_nodrawsaway',
'combo_a_noloses',
'combo_a_nolosesaway',
'combo_a_bts',
'combo_a_btsaway',
'combo_a_over',
'combo_a_overaway',
'combo_a_under',
'combo_a_underaway',
'spin_points',
'spin_points_ha',
'spin_form',
'spin_form_ha',
'spin_h_wins',
'spin_h_winshome',
'spin_h_draws',
'spin_h_drawshome',
'spin_h_loses',
'spin_h_loseshome',
'spin_h_nowins',
'spin_h_nowinshome',
'spin_h_nodraws',
'spin_h_nodrawshome',
'spin_h_noloses',
'spin_h_noloseshome',
'spin_h_bts',
'spin_h_btshome',
'spin_h_over',
'spin_h_overhome',
'spin_h_under',
'spin_h_underhome',
'spin_a_wins',
'spin_a_winsaway',
'spin_a_draws',
'spin_a_drawsaway',
'spin_a_loses',
'spin_a_losesaway',
'spin_a_nowins',
'spin_a_nowinsaway',
'spin_a_nodraws',
'spin_a_nodrawsaway',
'spin_a_noloses',
'spin_a_nolosesaway',
'spin_a_bts',
'spin_a_btsaway',
'spin_a_over',
'spin_a_overaway',
'spin_a_under',
'spin_a_underaway',
'spin_odd_1_min',
'spin_odd_x_min',
'spin_odd_2_min',
'spin_odd_1x_min',
'spin_odd_x2_min',
'spin_odd_1_max',
'spin_odd_x_max',
'spin_odd_2_max',
'spin_odd_1x_max',
'spin_odd_x2_max')
for i in xrange(0,len(val)):
vars(self)[filter_vars[i]] = val[i]
def simulation_match_filters(self):
''' Match filters check'''
        self.filter_status = 'yes' # 'yes' means the match passes the filters and is added to the filtered list
############
## Points
############
points = [
(self.check_points,self.t1_points,self.t2_points,self.combo_points,
self.spin_points,'T-points'),
(self.check_points_ha,self.t1_points_h,self.t2_points_a,
self.combo_points_ha,self.spin_points_ha,'T-pointsH/A'),
(self.check_form,self.t1_form,self.t2_form,self.combo_form,
self.spin_form,'T-form'),
(self.check_form_ha,self.t1_form_h,self.t2_form_a,self.combo_form_ha,
self.spin_form_ha,'T-formH/A')]
#print "checkpoint : " + self.check_points + 'aa'
#print "spin points: " + self.spin_points
for i in points:
checkbutton_state = self.rm_lines(i[0])
if checkbutton_state == 'True':
sum_points= i[1]+i[2]
if sum_points == 0:
diff = 0
else:
diff = i[1]/(float(sum_points))*100 #50/ sum points
compare_vals = self.rm_lines(i[3])
value = self.rm_lines(i[4])
line = str(diff)+compare_vals+value # example 10<= 50
#print "line" + line
if eval(line):
pass
else:
self.filter_status = 'no'
# filter reports count
self.sim_stats[i[5]] = self.sim_stats[i[5]] + 1
print "filter status points/form : " + self.filter_status
############
## Series
############
T12 = (
(str(self.t1_wins)+self.combo_h_wins+self.spin_h_wins,'T1-wins'),
(str(self.t1_winshome)+self.combo_h_winshome+self.spin_h_winshome,
'T1-wins_home'),
(str(self.t1_draws)+self.combo_h_draws+self.spin_h_draws,
'T1-draws'),
(str(self.t1_drawshome)+self.combo_h_drawshome+self.spin_h_drawshome,
'T1-draws_home'),
(str(self.t1_loses)+self.combo_h_loses+self.spin_h_loses,
'T1-loses'),
(str(self.t1_loseshome)+self.combo_h_loseshome+self.spin_h_loseshome,
'T1-loses_home'),
(str(self.t1_nowins)+self.combo_h_nowins+self.spin_h_nowins,
'T1-nowins'),
(str(self.t1_nowinshome)+self.combo_h_nowinshome+self.spin_h_nowinshome,
'T1-nowins_home'),
(str(self.t1_nodraws)+self.combo_h_nodraws+self.spin_h_nodraws,
'T1-nodraws'),
(str(self.t1_nodrawshome)+self.combo_h_nodrawshome+self.spin_h_nodrawshome,
'T1-nodraws_home'),
(str(self.t1_noloses)+self.combo_h_noloses+self.spin_h_noloses,
'T1-noloses'),
(str(self.t1_noloseshome)+self.combo_h_noloseshome+self.spin_h_noloseshome,
'T1-noloses_home'),
(str(self.t1_bts)+self.combo_h_bts+self.spin_h_bts,
'T1-bts'),
(str(self.t1_btshome)+self.combo_h_btshome+self.spin_h_btshome,
'T1-bts_home'),
(str(self.t1_over)+self.combo_h_over+self.spin_h_over,
'T1-over'),
(str(self.t1_overhome)+self.combo_h_overhome+self.spin_h_overhome,
'T1-over_home'),
(str(self.t1_under)+self.combo_h_under+self.spin_h_under,
'T1-under'),
(str(self.t1_underhome)+self.combo_h_underhome+self.spin_h_underhome,
'T1-under_home'),
(str(self.t2_wins)+self.combo_a_wins+self.spin_a_wins,'T2-wins'),
(str(self.t2_winsaway)+self.combo_a_winsaway+self.spin_a_winsaway,
'T2-wins_away'),
(str(self.t2_draws)+self.combo_a_draws+self.spin_a_draws,
'T2-draws'),
(str(self.t2_drawsaway)+self.combo_a_drawsaway+self.spin_a_drawsaway,
'T2-draws_away'),
(str(self.t2_loses)+self.combo_a_loses+self.spin_a_loses,
'T2-loses'),
(str(self.t2_losesaway)+self.combo_a_losesaway+self.spin_a_losesaway,
'T2-loses_away'),
(str(self.t2_nowins)+self.combo_a_nowins+self.spin_a_nowins,
'T2-nowins'),
(str(self.t2_nowinsaway)+self.combo_a_nowinsaway+self.spin_a_nowinsaway,
'T2-nowin_saway'),
(str(self.t2_nodraws)+self.combo_a_nodraws+self.spin_a_nodraws,
'T2-nodraws'),
(str(self.t2_nodrawsaway)+self.combo_a_nodrawsaway+self.spin_a_nodrawsaway,
'T2-nodraws_away'),
(str(self.t2_noloses)+self.combo_a_noloses+self.spin_a_noloses,
'T2-noloses'),
(str(self.t2_nolosesaway)+self.combo_a_nolosesaway+self.spin_a_nolosesaway,
'T2-noloses_away'),
(str(self.t2_bts)+self.combo_a_bts+self.spin_a_bts,
'T2-bts'),
(str(self.t2_btsaway)+self.combo_a_btsaway+self.spin_a_btsaway,
'T2-bts_away'),
(str(self.t2_over)+self.combo_a_over+self.spin_a_over,
'T2-over'),
(str(self.t2_overaway)+self.combo_a_overaway+self.spin_a_overaway,
'T2-over_away'),
(str(self.t2_under)+self.combo_a_under+self.spin_a_under,
'T2-under'),
(str(self.t2_underaway)+self.combo_a_underaway+self.spin_a_underaway,
'T2-under_away'))
# HomeAway-series
for i in T12:
if self.filter_status == 'yes':
line=i[0]
                # Strip stray newline chars (e.g. a file written on Linux but read on Windows)
line = self.rm_lines(line)
if eval(line):
pass
else:
self.filter_status = 'no'
# for fiters report
self.sim_stats[i[1]] = self.sim_stats[i[1]] + 1
if self.app == 'selector':
######
# Odds - in match selector only !!!!
######
odds = [
(self.spin_odd_1_max,self.odd_1,self.spin_odd_1_min),
(self.spin_odd_x_max,self.odd_x,self.spin_odd_x_min),
(self.spin_odd_2_max,self.odd_2,self.spin_odd_2_min),
(self.spin_odd_1x_max,self.odd_1x,self.spin_odd_1x_min),
(self.spin_odd_x2_max,self.odd_x2,self.spin_odd_x2_min)]
for i in odds:
if self.filter_status == 'yes':
if float(i[0])>=float(i[1])>=float(i[2]):
pass
else:
self.filter_status = 'no'
#print float(i[0]),float(i[1]),float(i[2])
print "filter status series: " + self.filter_status
|
|
#! /usr/bin/python
# hhaim
import sys
import os
python_ver = 'python%s' % sys.version_info[0]
yaml_path = os.path.join('external_libs', 'pyyaml-3.11', python_ver)
if yaml_path not in sys.path:
sys.path.append(yaml_path)
import yaml
import dpdk_nic_bind
import re
import argparse
import copy
import shlex
import traceback
from collections import defaultdict, OrderedDict
from distutils.util import strtobool
import subprocess
import platform
import stat
# The exit code is important; it should be one of:
# -1 : don't continue
# 0 : no errors - no need to load mlx share object
# 32 : no errors - mlx share object should be loaded
# 64 : no errors - napatech 3GD should be running
MLX_EXIT_CODE = 32
NTACC_EXIT_CODE = 64
class VFIOBindErr(Exception): pass
PATH_ARR = os.getenv('PATH', '').split(':')
for path in ['/usr/local/sbin', '/usr/sbin', '/sbin']:
if path not in PATH_ARR:
PATH_ARR.append(path)
os.environ['PATH'] = ':'.join(PATH_ARR)
def if_list_remove_sub_if(if_list):
return list(map(lambda x:x.split('/')[0],if_list))
class ConfigCreator(object):
mandatory_interface_fields = ['Slot_str', 'Device_str', 'NUMA']
_2hex_re = '[\da-fA-F]{2}'
mac_re = re.compile('^({0}:){{5}}{0}$'.format(_2hex_re))
    MAC_LCORE_NUM = 63 # highest usable lcore id; the current bitmask is 64 bit
# cpu_topology - dict: physical processor -> physical core -> logical processing unit (thread)
# interfaces - array of dicts per interface, should include "mandatory_interface_fields" values
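    # Illustrative (hypothetical) shapes of those two arguments:
    #   cpu_topology = {0: {0: [0, 8], 1: [1, 9]}, 1: {0: [2, 10], 1: [3, 11]}}
    #   interfaces   = [{'Slot_str': '0000:03:00.0', 'Device_str': '...', 'NUMA': 0}, ...]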
def __init__(self, cpu_topology, interfaces, include_lcores = [], exclude_lcores = [], only_first_thread = False, zmq_rpc_port = None, zmq_pub_port = None, prefix = None, ignore_numa = False):
self.cpu_topology = copy.deepcopy(cpu_topology)
self.interfaces = copy.deepcopy(interfaces)
del cpu_topology
del interfaces
assert isinstance(self.cpu_topology, dict), 'Type of cpu_topology should be dict, got: %s' % type(self.cpu_topology)
assert len(self.cpu_topology.keys()) > 0, 'cpu_topology should contain at least one processor'
        assert isinstance(self.interfaces, list), 'Type of interfaces should be list, got: %s' % type(self.interfaces)
assert len(self.interfaces) % 2 == 0, 'Should be even number of interfaces, got: %s' % len(self.interfaces)
assert len(self.interfaces) >= 2, 'Should be at least two interfaces, got: %s' % len(self.interfaces)
assert isinstance(include_lcores, list), 'include_lcores should be list, got: %s' % type(include_lcores)
assert isinstance(exclude_lcores, list), 'exclude_lcores should be list, got: %s' % type(exclude_lcores)
if only_first_thread:
for cores in self.cpu_topology.values():
for core in cores.keys():
cores[core] = cores[core][:1]
include_lcores = [int(x) for x in include_lcores]
exclude_lcores = [int(x) for x in exclude_lcores]
self.has_zero_lcore = False
self.lcores_per_numa = {}
total_lcores = 0
for numa, cores in self.cpu_topology.items():
self.lcores_per_numa[numa] = {'main': [], 'siblings': [], 'all': []}
for core, lcores in cores.items():
total_lcores += len(lcores)
for lcore in list(lcores):
if include_lcores and lcore not in include_lcores:
cores[core].remove(lcore)
if exclude_lcores and lcore in exclude_lcores:
cores[core].remove(lcore)
if lcore > self.MAC_LCORE_NUM:
cores[core].remove(lcore)
if 0 in lcores:
self.has_zero_lcore = True
lcores.remove(0)
self.lcores_per_numa[numa]['siblings'].extend(lcores)
else:
self.lcores_per_numa[numa]['main'].extend(lcores[:1])
self.lcores_per_numa[numa]['siblings'].extend(lcores[1:])
self.lcores_per_numa[numa]['all'].extend(lcores)
for interface in self.interfaces:
for mandatory_interface_field in ConfigCreator.mandatory_interface_fields:
if mandatory_interface_field not in interface:
raise DpdkSetup("Expected '%s' field in interface dictionary, got: %s" % (mandatory_interface_field, interface))
Device_str = self._verify_devices_same_type(self.interfaces)
if '40Gb' in Device_str:
self.speed = 40
else:
self.speed = 10
minimum_required_lcores = len(self.interfaces) // 2 + 2
if total_lcores < minimum_required_lcores:
raise DpdkSetup('Your system should have at least %s cores for %s interfaces, and it has: %s.' %
(minimum_required_lcores, len(self.interfaces), total_lcores))
interfaces_per_numa = defaultdict(int)
for i in range(0, len(self.interfaces), 2):
numa = self.interfaces[i]['NUMA']
if numa != self.interfaces[i+1]['NUMA'] and not ignore_numa:
raise DpdkSetup('NUMA of each pair of interfaces should be the same. Got NUMA %s for client interface %s, NUMA %s for server interface %s' %
(numa, self.interfaces[i]['Slot_str'], self.interfaces[i+1]['NUMA'], self.interfaces[i+1]['Slot_str']))
interfaces_per_numa[numa] += 2
self.interfaces_per_numa = interfaces_per_numa
self.prefix = prefix
self.zmq_pub_port = zmq_pub_port
self.zmq_rpc_port = zmq_rpc_port
self.ignore_numa = ignore_numa
@staticmethod
def verify_mac(mac_string):
if not ConfigCreator.mac_re.match(mac_string):
raise DpdkSetup('MAC address should be in format of 12:34:56:78:9a:bc, got: %s' % mac_string)
return mac_string.lower()
@staticmethod
def _exit_if_bad_ip(ip):
if not ConfigCreator._verify_ip(ip):
raise DpdkSetup("Got bad IP %s" % ip)
@staticmethod
def _verify_ip(ip):
a = ip.split('.')
if len(a) != 4:
return False
for x in a:
if not x.isdigit():
return False
i = int(x)
if i < 0 or i > 255:
return False
return True
@staticmethod
def _verify_devices_same_type(interfaces_list):
Device_str = interfaces_list[0]['Device_str']
for interface in interfaces_list:
if Device_str != interface['Device_str']:
raise DpdkSetup('Interfaces should be of same type, got:\n\t* %s\n\t* %s' % (Device_str, interface['Device_str']))
return Device_str
def create_config(self, filename = None, print_config = False):
config_str = '### Config file generated by dpdk_setup_ports.py ###\n\n'
config_str += '- port_limit: %s\n' % len(self.interfaces)
config_str += ' version: 2\n'
config_str += " interfaces: ['%s']\n" % "', '".join([interface['Slot_str'] for interface in self.interfaces])
if self.speed > 10:
config_str += ' port_bandwidth_gb: %s\n' % self.speed
if self.prefix:
config_str += ' prefix: %s\n' % self.prefix
if self.zmq_pub_port:
config_str += ' zmq_pub_port: %s\n' % self.zmq_pub_port
if self.zmq_rpc_port:
config_str += ' zmq_rpc_port: %s\n' % self.zmq_rpc_port
config_str += ' port_info:\n'
for index, interface in enumerate(self.interfaces):
if 'ip' in interface:
self._exit_if_bad_ip(interface['ip'])
self._exit_if_bad_ip(interface['def_gw'])
config_str += ' '*6 + '- ip: %s\n' % interface['ip']
config_str += ' '*8 + 'default_gw: %s\n' % interface['def_gw']
else:
config_str += ' '*6 + '- dest_mac: %s' % self.verify_mac(interface['dest_mac'])
if interface.get('loopback_dest'):
                    config_str += " # MAC OF LOOPBACK TO ITS DUAL INTERFACE\n"
else:
config_str += '\n'
config_str += ' '*8 + 'src_mac: %s\n' % self.verify_mac(interface['src_mac'])
if index % 2:
config_str += '\n' # dual if barrier
if not self.ignore_numa:
config_str += ' platform:\n'
if len(self.interfaces_per_numa.keys()) == 1 and -1 in self.interfaces_per_numa: # VM, use any cores
lcores_pool = sorted([lcore for lcores in self.lcores_per_numa.values() for lcore in lcores['all']])
config_str += ' '*6 + 'master_thread_id: %s\n' % (0 if self.has_zero_lcore else lcores_pool.pop(0))
config_str += ' '*6 + 'latency_thread_id: %s\n' % lcores_pool.pop(0)
lcores_per_dual_if = int(len(lcores_pool) * 2 / len(self.interfaces))
config_str += ' '*6 + 'dual_if:\n'
for i in range(0, len(self.interfaces), 2):
lcores_for_this_dual_if = list(map(str, sorted(lcores_pool[:lcores_per_dual_if])))
lcores_pool = lcores_pool[lcores_per_dual_if:]
if not lcores_for_this_dual_if:
raise DpdkSetup('lcores_for_this_dual_if is empty (internal bug, please report with details of setup)')
config_str += ' '*8 + '- socket: 0\n'
config_str += ' '*10 + 'threads: [%s]\n\n' % ','.join(lcores_for_this_dual_if)
else:
# we will take common minimum among all NUMAs, to satisfy all
lcores_per_dual_if = 99
extra_lcores = 1 if self.has_zero_lcore else 2
# worst case 3 iterations, to ensure master and "rx" have cores left
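                # Worked example (hypothetical numbers): one interface pair on a NUMA
                # with 8 usable lcores and has_zero_lcore False (extra_lcores = 2):
                # the per-NUMA clamp allows min(99, 2*8/2) = 8 lcores per dual-if, and
                # the condition 8*2/2 + 2 > 8 shrinks that to 6, leaving 2 lcores free
                # for the master and "rx" (latency) threads.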
while (lcores_per_dual_if * sum(self.interfaces_per_numa.values()) / 2) + extra_lcores > sum([len(lcores['all']) for lcores in self.lcores_per_numa.values()]):
lcores_per_dual_if -= 1
for numa, lcores_dict in self.lcores_per_numa.items():
if not self.interfaces_per_numa[numa]:
continue
lcores_per_dual_if = min(lcores_per_dual_if, int(2 * len(lcores_dict['all']) / self.interfaces_per_numa[numa]))
lcores_pool = copy.deepcopy(self.lcores_per_numa)
# first, allocate lcores for dual_if section
dual_if_section = ' '*6 + 'dual_if:\n'
for i in range(0, len(self.interfaces), 2):
numa = self.interfaces[i]['NUMA']
dual_if_section += ' '*8 + '- socket: %s\n' % numa
lcores_for_this_dual_if = lcores_pool[numa]['all'][:lcores_per_dual_if]
lcores_pool[numa]['all'] = lcores_pool[numa]['all'][lcores_per_dual_if:]
for lcore in lcores_for_this_dual_if:
if lcore in lcores_pool[numa]['main']:
lcores_pool[numa]['main'].remove(lcore)
elif lcore in lcores_pool[numa]['siblings']:
lcores_pool[numa]['siblings'].remove(lcore)
else:
raise DpdkSetup('lcore not in main nor in siblings list (internal bug, please report with details of setup)')
if not lcores_for_this_dual_if:
                        raise DpdkSetup('Not enough cores at NUMA %s. This NUMA has %s processing units and %s interfaces.' % (numa, len(self.lcores_per_numa[numa]['all']), self.interfaces_per_numa[numa]))
dual_if_section += ' '*10 + 'threads: [%s]\n\n' % ','.join(list(map(str, sorted(lcores_for_this_dual_if))))
# take the cores left to master and rx
mains_left = [lcore for lcores in lcores_pool.values() for lcore in lcores['main']]
siblings_left = [lcore for lcores in lcores_pool.values() for lcore in lcores['siblings']]
if mains_left:
rx_core = mains_left.pop(0)
else:
rx_core = siblings_left.pop(0)
if self.has_zero_lcore:
master_core = 0
elif mains_left:
master_core = mains_left.pop(0)
else:
master_core = siblings_left.pop(0)
config_str += ' '*6 + 'master_thread_id: %s\n' % master_core
config_str += ' '*6 + 'latency_thread_id: %s\n' % rx_core
# add the dual_if section
config_str += dual_if_section
# verify config is correct YAML format
try:
yaml.safe_load(config_str)
except Exception as e:
raise DpdkSetup('Could not create correct yaml config.\nGenerated YAML:\n%s\nEncountered error:\n%s' % (config_str, e))
if print_config:
print(config_str)
if filename:
if os.path.exists(filename):
                if not dpdk_nic_bind.confirm('File %s already exists, overwrite? (y/N)' % filename):
print('Skipping.')
return config_str
with open(filename, 'w') as f:
f.write(config_str)
print('Saved to %s.' % filename)
return config_str
# Load igb_uio only if a precompiled module for this kernel is available; returns True on success.
def load_igb_uio():
loaded_mods = dpdk_nic_bind.get_loaded_modules()
if 'igb_uio' in loaded_mods:
return True
if 'uio' not in loaded_mods:
ret = os.system('modprobe uio')
if ret:
return False
km = './ko/%s/igb_uio.ko' % dpdk_nic_bind.kernel_ver
if os.path.exists(km):
return os.system('insmod %s' % km) == 0
# try to compile igb_uio if it's missing
def compile_and_load_igb_uio():
loaded_mods = dpdk_nic_bind.get_loaded_modules()
if 'igb_uio' in loaded_mods:
return
if 'uio' not in loaded_mods:
ret = os.system('modprobe uio')
if ret:
print('Failed inserting uio module, please check if it is installed')
sys.exit(-1)
km = './ko/%s/igb_uio.ko' % dpdk_nic_bind.kernel_ver
if not os.path.exists(km):
print("ERROR: We don't have precompiled igb_uio.ko module for your kernel version")
print('Will try compiling automatically - make sure you have file-system read/write permission')
try:
subprocess.check_output('make', cwd = './ko/src', stderr = subprocess.STDOUT)
subprocess.check_output(['make', 'install'], cwd = './ko/src', stderr = subprocess.STDOUT)
print('\nSuccess.')
except Exception as e:
print('\n ERROR: Automatic compilation failed: (%s)' % e)
print('Make sure you have file-system read/write permission')
print('You can try compiling yourself, using the following commands:')
print(' $cd ko/src')
print(' $make')
print(' $make install')
print(' $cd -')
print('Then, try to run TRex again.')
print('Note: you might need additional Linux packages for that:')
print(' * yum based (Fedora, CentOS, RedHat):')
print(' sudo yum install kernel-devel-`uname -r`')
print(' sudo yum group install "Development tools"')
print(' * apt based (Ubuntu):')
print(' sudo apt install linux-headers-`uname -r`')
print(' sudo apt install build-essential')
sys.exit(-1)
ret = os.system('insmod %s' % km)
if ret:
print('Failed inserting igb_uio module')
sys.exit(-1)
class map_driver(object):
args=None;
cfg_file='/etc/trex_cfg.yaml'
parent_args = None
class DpdkSetup(Exception):
pass
class CIfMap:
def __init__(self, cfg_file):
self.m_cfg_file =cfg_file;
self.m_cfg_dict={};
self.m_devices={};
self.m_is_mellanox_mode=False;
def dump_error (self,err):
s="""%s
From this TRex version a configuration file must exist in the /etc/ folder.
The name of the configuration file should be /etc/trex_cfg.yaml
The minimum configuration file should include something like this
- version : 2 # version 2 of the configuration file
  interfaces : ["03:00.0","03:00.1","13:00.1","13:00.0"] # list of the interfaces to bind; run ./dpdk_nic_bind.py --status to see the list
  port_limit : 2 # number of ports to use; valid values are 2,4,6,8,10,12
Example of already bound devices:
$ ./dpdk_nic_bind.py --status
Network devices using DPDK-compatible driver
============================================
0000:03:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
0000:03:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
0000:13:00.0 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
0000:13:00.1 '82599ES 10-Gigabit SFI/SFP+ Network Connection' drv=igb_uio unused=
Network devices using kernel driver
===================================
0000:02:00.0 '82545EM Gigabit Ethernet Controller (Copper)' if=eth2 drv=e1000 unused=igb_uio *Active*
Other network devices
=====================
""" % (err);
return s;
def raise_error (self,err):
s= self.dump_error (err)
raise DpdkSetup(s)
def set_only_mellanox_nics(self):
self.m_is_mellanox_mode=True;
def get_only_mellanox_nics(self):
return self.m_is_mellanox_mode
def read_pci (self,pci_id,reg_id):
out=subprocess.check_output(['setpci', '-s',pci_id, '%s.w' %(reg_id)])
out=out.decode(errors='replace');
return (out.strip());
def write_pci (self,pci_id,reg_id,val):
out=subprocess.check_output(['setpci','-s',pci_id, '%s.w=%s' %(reg_id,val)])
out=out.decode(errors='replace');
return (out.strip());
def tune_mlx5_device (self,pci_id):
        # set PCIe Max Read Request to 4K instead of 512 ... need to add it to the startup script
val=self.read_pci (pci_id,68)
if val[0]=='0':
#hypervisor does not give the right to write to this register
return;
if val[0]!='5':
val='5'+val[1:]
self.write_pci (pci_id,68,val)
assert(self.read_pci (pci_id,68)==val);
def get_mtu_mlx5 (self,dev_id):
if len(dev_id)>0:
try:
out=subprocess.check_output(['ifconfig', dev_id])
except Exception as e:
                raise DpdkSetup(' "ifconfig %s" utility does not work, try to install it using "$yum install net-tools -y" on a CentOS system' %(dev_id) )
out=out.decode(errors='replace');
obj=re.search(r'MTU:(\d+)',out,flags=re.MULTILINE|re.DOTALL);
if obj:
return int(obj.group(1));
else:
obj=re.search(r'mtu (\d+)',out,flags=re.MULTILINE|re.DOTALL);
if obj:
return int(obj.group(1));
else:
return -1
def set_mtu_mlx5 (self,dev_id,new_mtu):
if len(dev_id)>0:
out=subprocess.check_output(['ifconfig', dev_id,'mtu',str(new_mtu)])
out=out.decode(errors='replace');
def set_max_mtu_mlx5_device(self,dev_id):
mtu=9*1024+22
dev_mtu=self.get_mtu_mlx5 (dev_id);
if (dev_mtu>0) and (dev_mtu!=mtu):
self.set_mtu_mlx5(dev_id,mtu);
if self.get_mtu_mlx5(dev_id) != mtu:
print("Could not set MTU to %d" % mtu)
sys.exit(-1);
def disable_flow_control_mlx5_device (self,dev_id):
if len(dev_id)>0:
my_stderr = open("/dev/null","wb")
cmd ='ethtool -A '+dev_id + ' rx off tx off '
subprocess.call(cmd, stdout=my_stderr,stderr=my_stderr, shell=True)
my_stderr.close();
def check_ofed_version (self):
ofed_info='/usr/bin/ofed_info'
ofed_ver_re = re.compile('.*[-](\d)[.](\d)[-].*')
ofed_ver= 42
ofed_ver_show= '4.2'
if not os.path.isfile(ofed_info):
print("OFED %s is not installed on this setup" % ofed_info)
sys.exit(-1);
try:
out = subprocess.check_output([ofed_info])
except Exception as e:
print("OFED %s can't run " % (ofed_info))
sys.exit(-1);
lines=out.splitlines();
if len(lines)>1:
m= ofed_ver_re.match(str(lines[0]))
if m:
ver=int(m.group(1))*10+int(m.group(2))
if ver < ofed_ver:
print("installed OFED version is '%s' should be at least '%s' and up" % (lines[0],ofed_ver_show))
sys.exit(-1);
else:
print("not found valid OFED version '%s' " % (lines[0]))
sys.exit(-1);
def verify_ofed_os(self):
        err_msg = 'Warning: Mellanox NICs were tested only with RedHat/CentOS 7.4\n'
err_msg += 'Correct usage with other Linux distributions is not guaranteed.'
try:
dist = platform.dist()
if dist[0] not in ('redhat', 'centos') or not dist[1].startswith('7.4'):
print(err_msg)
except Exception as e:
print('Error while determining OS type: %s' % e)
def load_config_file (self):
fcfg=self.m_cfg_file
if not os.path.isfile(fcfg) :
self.raise_error ("There is no valid configuration file %s\n" % fcfg)
try:
stream = open(fcfg, 'r')
self.m_cfg_dict= yaml.safe_load(stream)
except Exception as e:
print(e);
raise e
stream.close();
cfg_dict = self.m_cfg_dict[0]
if 'version' not in cfg_dict:
raise DpdkSetup("Configuration file %s is old, it should include version field\n" % fcfg )
if int(cfg_dict['version'])<2 :
raise DpdkSetup("Configuration file %s is old, expected version 2, got: %s\n" % (fcfg, cfg_dict['version']))
if 'interfaces' not in self.m_cfg_dict[0]:
raise DpdkSetup("Configuration file %s is old, it should include interfaces field with even number of elements" % fcfg)
if_list= if_list_remove_sub_if(self.m_cfg_dict[0]['interfaces']);
l=len(if_list);
if l > 16:
raise DpdkSetup("Configuration file %s should include interfaces field with maximum 16 elements, got: %s." % (fcfg,l))
if l % 2:
raise DpdkSetup("Configuration file %s should include even number of interfaces, got: %s" % (fcfg,l))
if 'port_limit' in cfg_dict and cfg_dict['port_limit'] > len(if_list):
raise DpdkSetup('Error: port_limit should not be higher than number of interfaces in config file: %s\n' % fcfg)
def do_bind_all(self, drv, pci, force = False):
assert type(pci) is list
cmd = '{ptn} dpdk_nic_bind.py --bind={drv} {pci} {frc}'.format(
ptn = sys.executable,
drv = drv,
pci = ' '.join(pci),
frc = '--force' if force else '')
print(cmd)
return os.system(cmd)
# pros: no need to compile .ko per Kernel version
# cons: need special config/hw (not always works)
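    # Typically this path requires booting with the IOMMU enabled, e.g. kernel
    # parameters like 'intel_iommu=on iommu=pt' (exact flags depend on the platform).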
def try_bind_to_vfio_pci(self, to_bind_list):
krnl_params_file = '/proc/cmdline'
if not os.path.exists(krnl_params_file):
raise VFIOBindErr('Could not find file with Kernel boot parameters: %s' % krnl_params_file)
with open(krnl_params_file) as f:
krnl_params = f.read()
if 'iommu=' not in krnl_params:
raise VFIOBindErr('vfio-pci is not an option here')
if 'vfio_pci' not in dpdk_nic_bind.get_loaded_modules():
ret = os.system('modprobe vfio_pci')
if ret:
raise VFIOBindErr('Could not load vfio_pci')
ret = self.do_bind_all('vfio-pci', to_bind_list)
if ret:
raise VFIOBindErr('Binding to vfio_pci failed')
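    # Normalize a PCI address: a short form like '03:00.0' becomes '0000:03:00.0',
    # while an already-full form such as '0000:03:00.0' is returned unchanged.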
def pci_name_to_full_name (self,pci_name):
c='[0-9A-Fa-f]';
sp='[:]'
s_short=c+c+sp+c+c+'[.]'+c;
s_full=c+c+c+c+sp+s_short
re_full = re.compile(s_full)
re_short = re.compile(s_short)
if re_short.match(pci_name):
return '0000:'+pci_name
if re_full.match(pci_name):
return pci_name
err=" %s is not a valid pci address \n" %pci_name;
raise DpdkSetup(err)
def run_dpdk_lspci (self):
dpdk_nic_bind.get_nic_details()
self.m_devices= dpdk_nic_bind.devices
def preprocess_astf_file_is_needed(self):
""" check if we are in astf mode, in case we are convert the profile to json in tmp"""
is_astf_mode = map_driver.parent_args and map_driver.parent_args.astf
if is_astf_mode:
input_file = map_driver.parent_args.file
if not input_file:
return
extension = os.path.splitext(input_file)[1]
if extension != '.py':
                raise DpdkSetup('ERROR: when running in --astf mode, you need to use the new Python profile format (.py), not YAML')
instance_name = ""
            if map_driver.parent_args.prefix != '':
instance_name = "-" + map_driver.parent_args.prefix
elif 'prefix' in self.m_cfg_dict[0]:
instance_name = '-' + self.m_cfg_dict[0]['prefix']
json_file = "/tmp/astf{instance}.json".format(instance=instance_name)
msg="converting astf profile {file} to json {out}".format(file = input_file, out=json_file)
print(msg);
tunable='';
if map_driver.parent_args.tunable:
tunable="-t "+map_driver.parent_args.tunable+" "
cmd = './astf-sim -f {file} {tun} --json > {json_file}'.format(file=input_file, tun=tunable, json_file=json_file)
print(cmd)
ret = os.system(cmd)
os.chmod(json_file, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
if ret:
                raise DpdkSetup('ERROR: could not convert the astf profile to JSON; try to debug it using the command above.')
def do_run (self,only_check_all_mlx=False):
""" return the number of mellanox drivers"""
self.run_dpdk_lspci ()
self.load_config_file()
self.preprocess_astf_file_is_needed()
if (map_driver.parent_args is None or
map_driver.parent_args.dump_interfaces is None or
(map_driver.parent_args.dump_interfaces == [] and
map_driver.parent_args.cfg)):
if_list=if_list_remove_sub_if(self.m_cfg_dict[0]['interfaces'])
else:
if_list = map_driver.parent_args.dump_interfaces
if not if_list:
for dev in self.m_devices.values():
if dev.get('Driver_str') in dpdk_nic_bind.dpdk_drivers + dpdk_nic_bind.dpdk_and_kernel:
if_list.append(dev['Slot'])
if_list = list(map(self.pci_name_to_full_name, if_list))
# check how many mellanox cards we have
Mellanox_cnt=0;
for key in if_list:
if key not in self.m_devices:
err=" %s does not exist " %key;
raise DpdkSetup(err)
if 'Vendor_str' not in self.m_devices[key]:
err=" %s does not have Vendor_str " %key;
raise DpdkSetup(err)
if 'Mellanox' in self.m_devices[key]['Vendor_str']:
Mellanox_cnt += 1
if not (map_driver.parent_args and map_driver.parent_args.dump_interfaces):
if (Mellanox_cnt > 0) and (Mellanox_cnt != len(if_list)):
                err=" All NICs should be from the same vendor; you have at least one Mellanox NIC, but not all of them are Mellanox ";
raise DpdkSetup(err)
if Mellanox_cnt > 0:
self.set_only_mellanox_nics()
if self.get_only_mellanox_nics():
if not map_driver.parent_args.no_ofed_check:
self.verify_ofed_os()
self.check_ofed_version()
for key in if_list:
if 'Virtual' not in self.m_devices[key]['Device_str']:
pci_id = self.m_devices[key]['Slot_str']
self.tune_mlx5_device(pci_id)
if 'Interface' in self.m_devices[key]:
dev_id=self.m_devices[key]['Interface']
self.disable_flow_control_mlx5_device (dev_id)
self.set_max_mtu_mlx5_device(dev_id)
if only_check_all_mlx:
if Mellanox_cnt > 0:
sys.exit(MLX_EXIT_CODE);
else:
sys.exit(0);
if if_list and map_driver.args.parent and self.m_cfg_dict[0].get('enable_zmq_pub', True):
publisher_port = self.m_cfg_dict[0].get('zmq_pub_port', 4500)
pid = dpdk_nic_bind.get_tcp_port_usage(publisher_port)
if pid:
cmdline = dpdk_nic_bind.read_pid_cmdline(pid)
print('ZMQ port is used by following process:\npid: %s, cmd: %s' % (pid, cmdline))
if not dpdk_nic_bind.confirm('Ignore and proceed (y/N):'):
sys.exit(-1)
if map_driver.parent_args and map_driver.parent_args.stl and not map_driver.parent_args.no_scapy_server:
try:
master_core = self.m_cfg_dict[0]['platform']['master_thread_id']
except:
master_core = 0
ret = os.system('%s scapy_daemon_server restart -c %s' % (sys.executable, master_core))
if ret:
print("Could not start scapy_daemon_server, which is needed by GUI to create packets.\nIf you don't need it, use --no-scapy-server flag.")
sys.exit(-1)
Napatech_cnt=0;
to_bind_list = []
for key in if_list:
if key not in self.m_devices:
err=" %s does not exist " %key;
raise DpdkSetup(err)
if 'Napatech' in self.m_devices[key]['Vendor_str']:
                # These adapters don't need binding
Napatech_cnt += 1
continue
if self.m_devices[key].get('Driver_str') not in (dpdk_nic_bind.dpdk_drivers + dpdk_nic_bind.dpdk_and_kernel):
to_bind_list.append(key)
if Napatech_cnt:
# This is currently a hack needed until the DPDK NTACC PMD can do proper
# cleanup.
os.system("ipcs | grep 2117a > /dev/null && ipcrm shm `ipcs | grep 2117a | cut -d' ' -f2` > /dev/null")
if to_bind_list:
if Mellanox_cnt:
ret = self.do_bind_all('mlx5_core', to_bind_list)
if ret:
raise DpdkSetup('Unable to bind interfaces to driver mlx5_core.')
return MLX_EXIT_CODE
else:
# if igb_uio is ready, use it as safer choice, afterwards try vfio-pci
if load_igb_uio():
print('Trying to bind to igb_uio ...')
ret = self.do_bind_all('igb_uio', to_bind_list)
if ret:
raise DpdkSetup('Unable to bind interfaces to driver igb_uio.') # module present, loaded, but unable to bind
return
try:
print('Trying to bind to vfio-pci ...')
self.try_bind_to_vfio_pci(to_bind_list)
return
except VFIOBindErr as e:
pass
#print(e)
print('Trying to compile and bind to igb_uio ...')
compile_and_load_igb_uio()
ret = self.do_bind_all('igb_uio', to_bind_list)
if ret:
raise DpdkSetup('Unable to bind interfaces to driver igb_uio.')
elif Mellanox_cnt:
return MLX_EXIT_CODE
elif Napatech_cnt:
return NTACC_EXIT_CODE
def do_return_to_linux(self):
if not self.m_devices:
self.run_dpdk_lspci()
dpdk_interfaces = []
check_drivers = set()
for device in self.m_devices.values():
if device.get('Driver_str') in dpdk_nic_bind.dpdk_drivers:
dpdk_interfaces.append(device['Slot'])
check_drivers.add(device['Driver_str'])
if not dpdk_interfaces:
print('No DPDK bound interfaces.')
return
any_driver_used = False
for driver in check_drivers:
if dpdk_nic_bind.is_module_used(driver):
any_driver_used = True
if any_driver_used:
pid = dpdk_nic_bind.get_pid_using_pci(dpdk_interfaces)
if pid:
cmdline = dpdk_nic_bind.read_pid_cmdline(pid)
                print('DPDK interfaces are in use. Unbinding them might cause the following process to hang:\npid: %s, cmd: %s' % (pid, cmdline))
if not dpdk_nic_bind.confirm('Confirm (y/N):'):
sys.exit(-1)
# DPDK => Linux
drivers_table = {
'net_ixgbe': 'ixgbe',
'net_ixgbe_vf': 'ixgbevf',
'net_e1000_igb': 'igb',
'net_i40e': 'i40e',
'net_i40e_vf': 'i40evf',
'net_e1000_em': 'e1000',
'net_vmxnet3': 'vmxnet3',
'net_virtio': 'virtio-pci',
'net_enic': 'enic',
}
nics_info = dpdk_nic_bind.get_info_from_trex(dpdk_interfaces)
if not nics_info:
raise DpdkSetup('Could not determine interfaces information. Try to run manually: sudo ./t-rex-64 --dump-interfaces')
for pci, info in nics_info.items():
if pci not in self.m_devices:
raise DpdkSetup('Internal error: PCI %s is not found among devices' % pci)
dev = self.m_devices[pci]
if info['TRex_Driver'] not in drivers_table:
raise DpdkSetup("Got unknown driver '%s', description: %s" % (info['TRex_Driver'], dev['Device_str']))
linux_driver = drivers_table[info['TRex_Driver']]
if linux_driver not in dpdk_nic_bind.get_loaded_modules():
print("No Linux driver installed, or wrong module name: %s" % linux_driver)
else:
print('Returning to Linux %s' % pci)
dpdk_nic_bind.bind_one(pci, linux_driver, False)
def _get_cpu_topology(self):
cpu_topology_file = '/proc/cpuinfo'
# physical processor -> physical core -> logical processing units (threads)
cpu_topology = OrderedDict()
if not os.path.exists(cpu_topology_file):
raise DpdkSetup('File with CPU topology (%s) does not exist.' % cpu_topology_file)
with open(cpu_topology_file) as f:
for lcore in f.read().split('\n\n'):
if not lcore:
continue
lcore_dict = OrderedDict()
for line in lcore.split('\n'):
key, val = line.split(':', 1)
lcore_dict[key.strip()] = val.strip()
if 'processor' not in lcore_dict:
continue
numa = int(lcore_dict.get('physical id', -1))
if numa not in cpu_topology:
cpu_topology[numa] = OrderedDict()
core = int(lcore_dict.get('core id', lcore_dict['processor']))
if core not in cpu_topology[numa]:
cpu_topology[numa][core] = []
cpu_topology[numa][core].append(int(lcore_dict['processor']))
if not cpu_topology:
raise DpdkSetup('Could not determine CPU topology from %s' % cpu_topology_file)
return cpu_topology
# input: list of different descriptions of interfaces: index, pci, name etc.
# Binds to dpdk wanted interfaces, not bound to any driver.
# output: list of maps of devices in dpdk_* format (self.m_devices.values())
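    # Illustrative (hypothetical) input mixing the accepted forms:
    #   input_interfaces = ['0', '0000:03:00.0', 'eth1']   # index, PCI address, Linux name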
def _get_wanted_interfaces(self, input_interfaces, get_macs = True):
if type(input_interfaces) is not list:
raise DpdkSetup('type of input interfaces should be list')
if not len(input_interfaces):
raise DpdkSetup('Please specify interfaces to use in the config')
if len(input_interfaces) % 2:
raise DpdkSetup('Please specify even number of interfaces')
wanted_interfaces = []
sorted_pci = sorted(self.m_devices.keys())
for interface in input_interfaces:
dev = None
try:
interface = int(interface)
if interface < 0 or interface >= len(sorted_pci):
                    raise DpdkSetup('Interface index should be in range 0:%s' % (len(sorted_pci) - 1))
dev = self.m_devices[sorted_pci[interface]]
except ValueError:
for d in self.m_devices.values():
if interface in (d['Interface'], d['Slot'], d['Slot_str']):
dev = d
break
if not dev:
raise DpdkSetup('Could not find information about this interface: %s' % interface)
if dev in wanted_interfaces:
raise DpdkSetup('Interface %s is specified twice' % interface)
dev['Interface_argv'] = interface
wanted_interfaces.append(dev)
if get_macs:
unbound = []
dpdk_bound = []
for interface in wanted_interfaces:
if 'Driver_str' not in interface:
unbound.append(interface['Slot'])
elif interface.get('Driver_str') in dpdk_nic_bind.dpdk_drivers:
dpdk_bound.append(interface['Slot'])
if unbound or dpdk_bound:
for pci, info in dpdk_nic_bind.get_info_from_trex(unbound + dpdk_bound).items():
if pci not in self.m_devices:
raise DpdkSetup('Internal error: PCI %s is not found among devices' % pci)
self.m_devices[pci].update(info)
return wanted_interfaces
def do_create(self):
ips = map_driver.args.ips
def_gws = map_driver.args.def_gws
dest_macs = map_driver.args.dest_macs
if map_driver.args.force_macs:
ip_config = False
if ips:
raise DpdkSetup("If using --force-macs, should not specify ips")
if def_gws:
raise DpdkSetup("If using --force-macs, should not specify default gateways")
elif ips:
ip_config = True
if not def_gws:
raise DpdkSetup("If specifying ips, must specify also def-gws")
if dest_macs:
                raise DpdkSetup("If specifying ips, should not specify dest-macs")
if len(ips) != len(def_gws) or len(ips) != len(map_driver.args.create_interfaces):
raise DpdkSetup("Number of given IPs should equal number of given def-gws and number of interfaces")
else:
if dest_macs:
ip_config = False
else:
ip_config = True
# gather info about NICS from dpdk_nic_bind.py
if not self.m_devices:
self.run_dpdk_lspci()
wanted_interfaces = self._get_wanted_interfaces(map_driver.args.create_interfaces, get_macs = not ip_config)
for i, interface in enumerate(wanted_interfaces):
dual_index = i + 1 - (i % 2) * 2
if ip_config:
if isinstance(ips, list) and len(ips) > i:
interface['ip'] = ips[i]
else:
interface['ip'] = '.'.join([str(i+1) for _ in range(4)])
if isinstance(def_gws, list) and len(def_gws) > i:
interface['def_gw'] = def_gws[i]
else:
interface['def_gw'] = '.'.join([str(dual_index+1) for _ in range(4)])
else:
dual_if = wanted_interfaces[dual_index]
if 'MAC' not in interface:
raise DpdkSetup('Could not determine MAC of interface: %s. Please verify with -t flag.' % interface['Interface_argv'])
if 'MAC' not in dual_if:
raise DpdkSetup('Could not determine MAC of interface: %s. Please verify with -t flag.' % dual_if['Interface_argv'])
interface['src_mac'] = interface['MAC']
if isinstance(dest_macs, list) and len(dest_macs) > i:
interface['dest_mac'] = dest_macs[i]
else:
interface['dest_mac'] = dual_if['MAC']
interface['loopback_dest'] = True
config = ConfigCreator(self._get_cpu_topology(), wanted_interfaces, include_lcores = map_driver.args.create_include, exclude_lcores = map_driver.args.create_exclude,
only_first_thread = map_driver.args.no_ht, ignore_numa = map_driver.args.ignore_numa,
prefix = map_driver.args.prefix, zmq_rpc_port = map_driver.args.zmq_rpc_port, zmq_pub_port = map_driver.args.zmq_pub_port)
if map_driver.args.output_config:
config.create_config(filename = map_driver.args.output_config)
else:
print('### Dumping config to screen, use -o flag to save to file')
config.create_config(print_config = True)
def do_interactive_create(self):
ignore_numa = False
cpu_topology = self._get_cpu_topology()
total_lcores = sum([len(lcores) for cores in cpu_topology.values() for lcores in cores.values()])
if total_lcores < 1:
raise DpdkSetup('Script could not determine number of cores of the system, exiting.')
elif total_lcores < 2:
if dpdk_nic_bind.confirm("You only have 1 core and can't run TRex at all. Ignore and continue? (y/N): "):
ignore_numa = True
else:
sys.exit(1)
elif total_lcores < 3:
if dpdk_nic_bind.confirm("You only have 2 cores and will be able to run only stateful without latency checks.\nIgnore and continue? (y/N): "):
ignore_numa = True
else:
sys.exit(1)
if map_driver.args.force_macs:
ip_based = False
elif dpdk_nic_bind.confirm("By default, IP based configuration file will be created. Do you want to use MAC based config? (y/N)"):
ip_based = False
else:
ip_based = True
ip_addr_digit = 1
if not self.m_devices:
self.run_dpdk_lspci()
dpdk_nic_bind.show_table(get_macs = not ip_based)
        print('Please choose an even number of interfaces from the list above, either by ID, PCI or Linux IF')
print('Stateful will use order of interfaces: Client1 Server1 Client2 Server2 etc. for flows.')
print('Stateless can be in any order.')
numa = None
for dev in self.m_devices.values():
if numa is None:
numa = dev['NUMA']
elif numa != dev['NUMA']:
print('For performance, try to choose each pair of interfaces to be on the same NUMA.')
break
while True:
try:
input = dpdk_nic_bind.read_line('Enter list of interfaces separated by space (for example: 1 3) : ')
create_interfaces = input.replace(',', ' ').replace(';', ' ').split()
wanted_interfaces = self._get_wanted_interfaces(create_interfaces)
ConfigCreator._verify_devices_same_type(wanted_interfaces)
except Exception as e:
print(e)
continue
break
print('')
for interface in wanted_interfaces:
if interface['Active']:
                print('Interface %s is active. Using it with TRex might close ssh connections etc.' % interface['Interface_argv'])
if not dpdk_nic_bind.confirm('Ignore and continue? (y/N): '):
sys.exit(-1)
for i, interface in enumerate(wanted_interfaces):
if not ip_based:
if 'MAC' not in interface:
raise DpdkSetup('Could not determine MAC of interface: %s. Please verify with -t flag.' % interface['Interface_argv'])
interface['src_mac'] = interface['MAC']
dual_index = i + 1 - (i % 2) * 2
dual_int = wanted_interfaces[dual_index]
if not ignore_numa and interface['NUMA'] != dual_int['NUMA']:
print('NUMA is different at pair of interfaces: %s and %s. It will reduce performance.' % (interface['Interface_argv'], dual_int['Interface_argv']))
if dpdk_nic_bind.confirm('Ignore and continue? (y/N): '):
ignore_numa = True
print('')
else:
return
if ip_based:
if ip_addr_digit % 2 == 0:
dual_ip_digit = ip_addr_digit - 1
else:
dual_ip_digit = ip_addr_digit + 1
ip = '.'.join([str(ip_addr_digit) for _ in range(4)])
def_gw= '.'.join([str(dual_ip_digit) for _ in range(4)])
ip_addr_digit += 1
                print("For interface %s, assuming loopback to its dual interface %s." % (interface['Interface_argv'], dual_int['Interface_argv']))
                if dpdk_nic_bind.confirm("Putting IP %s, default gw %s. Change it? (y/N): " % (ip, def_gw)):
while True:
ip = dpdk_nic_bind.read_line('Please enter IP address for interface %s: ' % interface['Interface_argv'])
if not ConfigCreator._verify_ip(ip):
print ("Bad IP address format")
else:
break
while True:
def_gw = dpdk_nic_bind.read_line('Please enter default gateway for interface %s: ' % interface['Interface_argv'])
if not ConfigCreator._verify_ip(def_gw):
print ("Bad IP address format")
else:
break
wanted_interfaces[i]['ip'] = ip
wanted_interfaces[i]['def_gw'] = def_gw
else:
dest_mac = dual_int['MAC']
loopback_dest = True
                print("For interface %s, assuming loopback to its dual interface %s." % (interface['Interface_argv'], dual_int['Interface_argv']))
if dpdk_nic_bind.confirm("Destination MAC is %s. Change it to MAC of DUT? (y/N)." % dest_mac):
while True:
input_mac = dpdk_nic_bind.read_line('Please enter new destination MAC of interface %s: ' % interface['Interface_argv'])
try:
if input_mac:
ConfigCreator.verify_mac(input_mac) # verify format
dest_mac = input_mac
loopback_dest = False
else:
print('Leaving the loopback MAC.')
except Exception as e:
print(e)
continue
break
wanted_interfaces[i]['dest_mac'] = dest_mac
wanted_interfaces[i]['loopback_dest'] = loopback_dest
config = ConfigCreator(cpu_topology, wanted_interfaces, include_lcores = map_driver.args.create_include, exclude_lcores = map_driver.args.create_exclude,
only_first_thread = map_driver.args.no_ht, ignore_numa = map_driver.args.ignore_numa or ignore_numa,
prefix = map_driver.args.prefix, zmq_rpc_port = map_driver.args.zmq_rpc_port, zmq_pub_port = map_driver.args.zmq_pub_port)
if dpdk_nic_bind.confirm('Print preview of generated config? (Y/n)', default = True):
config.create_config(print_config = True)
if dpdk_nic_bind.confirm('Save the config to file? (Y/n)', default = True):
print('Default filename is /etc/trex_cfg.yaml')
filename = dpdk_nic_bind.read_line('Press ENTER to confirm or enter new file: ')
if not filename:
filename = '/etc/trex_cfg.yaml'
config.create_config(filename = filename)
def parse_parent_cfg (parent_cfg):
parent_parser = argparse.ArgumentParser(add_help = False)
parent_parser.add_argument('-?', '-h', '--help', dest = 'help', action = 'store_true')
parent_parser.add_argument('--cfg', default='')
parent_parser.add_argument('--prefix', default='')
parent_parser.add_argument('--dump-interfaces', nargs='*', default=None)
parent_parser.add_argument('--no-ofed-check', action = 'store_true')
parent_parser.add_argument('--no-scapy-server', action = 'store_true')
parent_parser.add_argument('--no-watchdog', action = 'store_true')
parent_parser.add_argument('--astf', action = 'store_true')
parent_parser.add_argument('-f', dest = 'file')
parent_parser.add_argument('-t', dest = 'tunable',default=None)
parent_parser.add_argument('-i', action = 'store_true', dest = 'stl', default = False)
map_driver.parent_args, _ = parent_parser.parse_known_args(shlex.split(parent_cfg))
if map_driver.parent_args.help:
sys.exit(0)
def process_options ():
parser = argparse.ArgumentParser(usage="""
Examples:
---------
To return to Linux the DPDK bound interfaces (for ifconfig etc.)
  sudo ./dpdk_setup_ports.py -l
To create TRex config file using interactive mode
  sudo ./dpdk_setup_ports.py -i
To create a default config file (example)
sudo ./dpdk_setup_ports.py -c 02:00.0 02:00.1 -o /etc/trex_cfg.yaml
To show interfaces status
  sudo ./dpdk_setup_ports.py -s
To see more detailed info on interfaces (table):
  sudo ./dpdk_setup_ports.py -t
""",
    description=" Bind/unbind DPDK interfaces and create TRex configuration files ",
epilog=" written by hhaim");
parser.add_argument("-l", "--linux", action='store_true',
help=""" Return all DPDK interfaces to Linux driver """,
)
parser.add_argument("--cfg",
help=""" configuration file name """,
)
parser.add_argument("--parent",
help=argparse.SUPPRESS
)
parser.add_argument('--dump-pci-description', help=argparse.SUPPRESS, dest='dump_pci_desc', action='store_true')
parser.add_argument("-i", "--interactive", action='store_true',
help=""" Create TRex config in interactive mode """,
)
parser.add_argument("-c", "--create", nargs='*', default=None, dest='create_interfaces', metavar='<interface>',
help="""Try to create a configuration file by specifying needed interfaces by PCI address or Linux names: eth1 etc.""",
)
parser.add_argument("--ci", "--cores-include", nargs='*', default=[], dest='create_include', metavar='<cores>',
                        help="""White list of cores to use. Make sure there are enough for each NUMA.""",
)
parser.add_argument("--ce", "--cores-exclude", nargs='*', default=[], dest='create_exclude', metavar='<cores>',
help="""Black list of cores to exclude. Make sure there will be enough for each NUMA.""",
)
parser.add_argument("--no-ht", default=False, dest='no_ht', action='store_true',
help="""Use only one thread of each Core in created config yaml (No Hyper-Threading).""",
)
parser.add_argument("--dest-macs", nargs='*', default=[], action='store',
help="""Destination MACs to be used in created yaml file. Without them, will assume loopback (0<->1, 2<->3 etc.)""",
)
parser.add_argument("--force-macs", default=False, action='store_true',
help="""Use MACs in created config file.""",
)
parser.add_argument("--ips", nargs='*', default=[], action='store',
help="""IP addresses to be used in created yaml file. Without them, will assume loopback (0<->1, 2<->3 etc.)""",
)
parser.add_argument("--def-gws", nargs='*', default=[], action='store',
help="""Default gateways to be used in created yaml file. Without them, will assume loopback (0<->1, 2<->3 etc.)""",
)
parser.add_argument("-o", default=None, action='store', metavar='PATH', dest = 'output_config',
help="""Output the config to this file.""",
)
parser.add_argument("--prefix", default=None, action='store',
help="""Advanced option: prefix to be used in TRex config in case of parallel instances.""",
)
parser.add_argument("--zmq-pub-port", default=None, action='store',
help="""Advanced option: ZMQ Publisher port to be used in TRex config in case of parallel instances.""",
)
parser.add_argument("--zmq-rpc-port", default=None, action='store',
help="""Advanced option: ZMQ RPC port to be used in TRex config in case of parallel instances.""",
)
parser.add_argument("--ignore-numa", default=False, action='store_true',
help="""Advanced option: Ignore NUMAs for config creation. Use this option only if you have to, as it will reduce performance.""",
)
parser.add_argument("-s", "--show", action='store_true',
help=""" show the status """,
)
parser.add_argument("-t", "--table", action='store_true',
help=""" show table with NICs info """,
)
parser.add_argument('--version', action='version',
version="0.2" )
map_driver.args = parser.parse_args();
if map_driver.args.parent :
parse_parent_cfg (map_driver.args.parent)
if map_driver.parent_args.cfg:
map_driver.cfg_file = map_driver.parent_args.cfg;
if map_driver.parent_args.prefix:
map_driver.prefix = map_driver.parent_args.prefix
if map_driver.args.cfg :
map_driver.cfg_file = map_driver.args.cfg;
def main ():
try:
if os.getuid() != 0:
raise DpdkSetup('Please run this program as root/with sudo')
process_options ()
if map_driver.args.show:
dpdk_nic_bind.show_status()
return
if map_driver.args.table:
dpdk_nic_bind.show_table()
return
if map_driver.args.dump_pci_desc:
dpdk_nic_bind.dump_pci_description()
return
obj =CIfMap(map_driver.cfg_file);
if map_driver.args.create_interfaces is not None:
obj.do_create();
elif map_driver.args.interactive:
obj.do_interactive_create();
elif map_driver.args.linux:
obj.do_return_to_linux();
elif not (map_driver.parent_args and map_driver.parent_args.dump_interfaces is not None):
ret = obj.do_run()
print('The ports are bound/configured.')
sys.exit(ret)
print('')
except DpdkSetup as e:
print(e)
sys.exit(-1)
except Exception:
traceback.print_exc()
sys.exit(-1)
except KeyboardInterrupt:
print('Ctrl+C')
sys.exit(-1)
if __name__ == '__main__':
main()
|
|
"""
Finite difference weights
=========================
This module implements an algorithm for the efficient generation of finite
difference weights for ordinary derivatives of functions, for derivative
orders from 0 (interpolation) up to arbitrary order.
The core algorithm is provided in the finite difference weight generating
function (``finite_diff_weights``), and two convenience functions are provided
for:
- estimating a derivative (or interpolating) directly from a series of
  points (``apply_finite_diff``).
- differentiating by using finite difference approximations
(``differentiate_finite``).
"""
from sympy import Derivative, S
from sympy.core.compatibility import iterable, range
from sympy.core.decorators import deprecated
def finite_diff_weights(order, x_list, x0=S.One):
"""
Calculates the finite difference weights for an arbitrarily spaced
one-dimensional grid (``x_list``) for derivatives at ``x0`` of order
0, 1, ..., up to ``order`` using a recursive formula. Order of accuracy
is at least ``len(x_list) - order``, if ``x_list`` is defined correctly.
Parameters
==========
order: int
Up to what derivative order weights should be calculated.
0 corresponds to interpolation.
x_list: sequence
Sequence of (unique) values for the independent variable.
It is useful (but not necessary) to order ``x_list`` from
nearest to furthest from ``x0``; see examples below.
x0: Number or Symbol
Root or value of the independent variable for which the finite
difference weights should be generated. Default is ``S.One``.
Returns
=======
list
A list of sublists, each corresponding to coefficients for
increasing derivative order, and each containing lists of
coefficients for increasing subsets of x_list.
Examples
========
>>> from sympy import S
>>> from sympy.calculus import finite_diff_weights
>>> res = finite_diff_weights(1, [-S(1)/2, S(1)/2, S(3)/2, S(5)/2], 0)
>>> res
[[[1, 0, 0, 0],
[1/2, 1/2, 0, 0],
[3/8, 3/4, -1/8, 0],
[5/16, 15/16, -5/16, 1/16]],
[[0, 0, 0, 0],
[-1, 1, 0, 0],
[-1, 1, 0, 0],
[-23/24, 7/8, 1/8, -1/24]]]
>>> res[0][-1] # FD weights for 0th derivative, using full x_list
[5/16, 15/16, -5/16, 1/16]
>>> res[1][-1] # FD weights for 1st derivative
[-23/24, 7/8, 1/8, -1/24]
>>> res[1][-2] # FD weights for 1st derivative, using x_list[:-1]
[-1, 1, 0, 0]
>>> res[1][-1][0] # FD weight for 1st deriv. for x_list[0]
-23/24
>>> res[1][-1][1] # FD weight for 1st deriv. for x_list[1], etc.
7/8
Each sublist contains the most accurate formula at the end.
Note, that in the above example ``res[1][1]`` is the same as ``res[1][2]``.
Since res[1][2] has an order of accuracy of
``len(x_list[:3]) - order = 3 - 1 = 2``, the same is true for ``res[1][1]``!
>>> from sympy import S
>>> from sympy.calculus import finite_diff_weights
>>> res = finite_diff_weights(1, [S(0), S(1), -S(1), S(2), -S(2)], 0)[1]
>>> res
[[0, 0, 0, 0, 0],
[-1, 1, 0, 0, 0],
[0, 1/2, -1/2, 0, 0],
[-1/2, 1, -1/3, -1/6, 0],
[0, 2/3, -2/3, -1/12, 1/12]]
>>> res[0] # no approximation possible, using x_list[0] only
[0, 0, 0, 0, 0]
>>> res[1] # classic forward step approximation
[-1, 1, 0, 0, 0]
>>> res[2] # classic centered approximation
[0, 1/2, -1/2, 0, 0]
>>> res[3:] # higher order approximations
[[-1/2, 1, -1/3, -1/6, 0], [0, 2/3, -2/3, -1/12, 1/12]]
Let us compare this to a differently defined ``x_list``. Pay attention to
``foo[i][k]`` corresponding to the gridpoint defined by ``x_list[k]``.
>>> from sympy import S
>>> from sympy.calculus import finite_diff_weights
>>> foo = finite_diff_weights(1, [-S(2), -S(1), S(0), S(1), S(2)], 0)[1]
>>> foo
[[0, 0, 0, 0, 0],
[-1, 1, 0, 0, 0],
[1/2, -2, 3/2, 0, 0],
[1/6, -1, 1/2, 1/3, 0],
[1/12, -2/3, 0, 2/3, -1/12]]
>>> foo[1] # not the same and of lower accuracy as res[1]!
[-1, 1, 0, 0, 0]
>>> foo[2] # classic double backward step approximation
[1/2, -2, 3/2, 0, 0]
>>> foo[4] # the same as res[4]
[1/12, -2/3, 0, 2/3, -1/12]
Note that, unless you plan on using approximations based on subsets of
``x_list``, the order of gridpoints does not matter.
The capability to generate weights at arbitrary points can be
used e.g. to minimize Runge's phenomenon by using Chebyshev nodes:
>>> from sympy import cos, symbols, pi, simplify
>>> from sympy.calculus import finite_diff_weights
>>> N, (h, x) = 4, symbols('h x')
>>> x_list = [x+h*cos(i*pi/(N)) for i in range(N,-1,-1)] # chebyshev nodes
>>> print(x_list)
[-h + x, -sqrt(2)*h/2 + x, x, sqrt(2)*h/2 + x, h + x]
>>> mycoeffs = finite_diff_weights(1, x_list, 0)[1][4]
>>> [simplify(c) for c in mycoeffs] #doctest: +NORMALIZE_WHITESPACE
[(h**3/2 + h**2*x - 3*h*x**2 - 4*x**3)/h**4,
(-sqrt(2)*h**3 - 4*h**2*x + 3*sqrt(2)*h*x**2 + 8*x**3)/h**4,
6*x/h**2 - 8*x**3/h**4,
(sqrt(2)*h**3 - 4*h**2*x - 3*sqrt(2)*h*x**2 + 8*x**3)/h**4,
(-h**3/2 + h**2*x + 3*h*x**2 - 4*x**3)/h**4]
Notes
=====
    If weights for a finite difference approximation of the 3rd order
    derivative are wanted, weights for the 0th, 1st and 2nd order are
    calculated "for free", as are formulae using subsets of ``x_list``.
This is something one can take advantage of to save computational cost.
Be aware that one should define ``x_list`` from nearest to furthest from
``x0``. If not, subsets of ``x_list`` will yield poorer approximations,
    which might not grant an order of accuracy of ``len(x_list) - order``.
See also
========
sympy.calculus.finite_diff.apply_finite_diff
References
==========
.. [1] Generation of Finite Difference Formulas on Arbitrarily Spaced
Grids, Bengt Fornberg; Mathematics of computation; 51; 184;
(1988); 699-706; doi:10.1090/S0025-5718-1988-0935077-0
"""
# The notation below closely corresponds to the one used in the paper.
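    # Reading aid (paraphrasing the code, not the paper verbatim): delta[m][n][nu]
    # holds the weight of the sample at x_list[nu] in the m-th derivative
    # approximation built from the first n+1 grid points.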
order = S(order)
if not order.is_number:
raise ValueError("Cannot handle symbolic order.")
if order < 0:
raise ValueError("Negative derivative order illegal.")
if int(order) != order:
raise ValueError("Non-integer order illegal")
M = order
N = len(x_list) - 1
delta = [[[0 for nu in range(N+1)] for n in range(N+1)] for
m in range(M+1)]
delta[0][0][0] = S.One
c1 = S.One
for n in range(1, N+1):
c2 = S.One
for nu in range(0, n):
c3 = x_list[n]-x_list[nu]
c2 = c2 * c3
if n <= M:
delta[n][n-1][nu] = 0
for m in range(0, min(n, M)+1):
delta[m][n][nu] = (x_list[n]-x0)*delta[m][n-1][nu] -\
m*delta[m-1][n-1][nu]
delta[m][n][nu] /= c3
for m in range(0, min(n, M)+1):
delta[m][n][n] = c1/c2*(m*delta[m-1][n-1][n-1] -
(x_list[n-1]-x0)*delta[m][n-1][n-1])
c1 = c2
return delta
def apply_finite_diff(order, x_list, y_list, x0=S.Zero):
"""
Calculates the finite difference approximation of
the derivative of requested order at ``x0`` from points
provided in ``x_list`` and ``y_list``.
Parameters
==========
order: int
order of derivative to approximate. 0 corresponds to interpolation.
x_list: sequence
Sequence of (unique) values for the independent variable.
y_list: sequence
The function value at corresponding values for the independent
variable in x_list.
x0: Number or Symbol
At what value of the independent variable the derivative should be
evaluated. Defaults to 0.
Returns
=======
sympy.core.add.Add or sympy.core.numbers.Number
The finite difference expression approximating the requested
derivative order at ``x0``.
Examples
========
>>> from sympy.calculus import apply_finite_diff
>>> cube = lambda arg: (1.0*arg)**3
>>> xlist = range(-3,3+1)
>>> apply_finite_diff(2, xlist, map(cube, xlist), 2) - 12 # doctest: +SKIP
-3.55271367880050e-15
    We see that the example above only contains rounding errors.
apply_finite_diff can also be used on more abstract objects:
>>> from sympy import IndexedBase, Idx
>>> from sympy.calculus import apply_finite_diff
>>> x, y = map(IndexedBase, 'xy')
>>> i = Idx('i')
>>> x_list, y_list = zip(*[(x[i+j], y[i+j]) for j in range(-1,2)])
>>> apply_finite_diff(1, x_list, y_list, x[i])
((x[i + 1] - x[i])/(-x[i - 1] + x[i]) - 1)*y[i]/(x[i + 1] - x[i]) - \
(x[i + 1] - x[i])*y[i - 1]/((x[i + 1] - x[i - 1])*(-x[i - 1] + x[i])) + \
(-x[i - 1] + x[i])*y[i + 1]/((x[i + 1] - x[i - 1])*(x[i + 1] - x[i]))
Notes
=====
Order = 0 corresponds to interpolation.
    Only supply as many points as you think make sense
    around x0 when extracting the derivative (the function
    needs to be well behaved within that region). Also beware
    of Runge's phenomenon.
See also
========
sympy.calculus.finite_diff.finite_diff_weights
References
==========
Fortran 90 implementation with Python interface for numerics: finitediff_
.. _finitediff: https://github.com/bjodah/finitediff
"""
# In the original paper the following holds for the notation:
# M = order
# N = len(x_list) - 1
N = len(x_list) - 1
if len(x_list) != len(y_list):
raise ValueError("x_list and y_list not equal in length.")
delta = finite_diff_weights(order, x_list, x0)
derivative = 0
for nu in range(0, len(x_list)):
derivative += delta[order][N][nu]*y_list[nu]
return derivative
def _as_finite_diff(derivative, points=1, x0=None, wrt=None):
"""
Returns an approximation of a derivative of a function in
the form of a finite difference formula. The expression is a
weighted sum of the function at a number of discrete values of
(one of) the independent variable(s).
Parameters
==========
derivative: a Derivative instance
points: sequence or coefficient, optional
If sequence: discrete values (length >= order+1) of the
independent variable used for generating the finite
difference weights.
If it is a coefficient, it will be used as the step-size
for generating an equidistant sequence of length order+1
centered around ``x0``. default: 1 (step-size 1)
x0: number or Symbol, optional
the value of the independent variable (``wrt``) at which the
derivative is to be approximated. Default: same as ``wrt``.
wrt: Symbol, optional
"with respect to" the variable for which the (partial)
derivative is to be approximated for. If not provided it
is required that the Derivative is ordinary. Default: ``None``.
Examples
========
>>> from sympy import symbols, Function, exp, sqrt, Symbol, as_finite_diff
>>> from sympy.utilities.exceptions import SymPyDeprecationWarning
>>> import warnings
>>> warnings.simplefilter("ignore", SymPyDeprecationWarning)
>>> x, h = symbols('x h')
>>> f = Function('f')
>>> as_finite_diff(f(x).diff(x))
-f(x - 1/2) + f(x + 1/2)
The default step size and number of points are 1 and ``order + 1``
respectively. We can change the step size by passing a symbol
as a parameter:
>>> as_finite_diff(f(x).diff(x), h)
-f(-h/2 + x)/h + f(h/2 + x)/h
We can also specify the discretized values to be used in a sequence:
>>> as_finite_diff(f(x).diff(x), [x, x+h, x+2*h])
-3*f(x)/(2*h) + 2*f(h + x)/h - f(2*h + x)/(2*h)
The algorithm is not restricted to use equidistant spacing, nor
do we need to make the approximation around ``x0``, but we can get
an expression estimating the derivative at an offset:
>>> e, sq2 = exp(1), sqrt(2)
>>> xl = [x-h, x+h, x+e*h]
>>> as_finite_diff(f(x).diff(x, 1), xl, x+h*sq2)
2*h*((h + sqrt(2)*h)/(2*h) - (-sqrt(2)*h + h)/(2*h))*f(E*h + x)/\
((-h + E*h)*(h + E*h)) + (-(-sqrt(2)*h + h)/(2*h) - \
(-sqrt(2)*h + E*h)/(2*h))*f(-h + x)/(h + E*h) + \
(-(h + sqrt(2)*h)/(2*h) + (-sqrt(2)*h + E*h)/(2*h))*f(h + x)/(-h + E*h)
Partial derivatives are also supported:
>>> y = Symbol('y')
>>> d2fdxdy=f(x,y).diff(x,y)
>>> as_finite_diff(d2fdxdy, wrt=x)
-Derivative(f(x - 1/2, y), y) + Derivative(f(x + 1/2, y), y)
See also
========
sympy.calculus.finite_diff.apply_finite_diff
sympy.calculus.finite_diff.finite_diff_weights
"""
if derivative.is_Derivative:
pass
elif derivative.is_Atom:
return derivative
else:
return derivative.fromiter(
[_as_finite_diff(ar, points, x0, wrt) for ar
in derivative.args], **derivative.assumptions0)
if wrt is None:
old = None
for v in derivative.variables:
if old is v:
continue
derivative = _as_finite_diff(derivative, points, x0, v)
old = v
return derivative
order = derivative.variables.count(wrt)
if x0 is None:
x0 = wrt
if not iterable(points):
if getattr(points, 'is_Function', False) and wrt in points.args:
points = points.subs(wrt, x0)
# points is simply the step-size, let's make it a
# equidistant sequence centered around x0
if order % 2 == 0:
# even order => odd number of points, grid point included
points = [x0 + points*i for i
in range(-order//2, order//2 + 1)]
else:
# odd order => even number of points, half-way wrt grid point
points = [x0 + points*S(i)/2 for i
in range(-order, order + 1, 2)]
others = [wrt, 0]
for v in set(derivative.variables):
if v == wrt:
continue
others += [v, derivative.variables.count(v)]
if len(points) < order+1:
raise ValueError("Too few points for order %d" % order)
return apply_finite_diff(order, points, [
Derivative(derivative.expr.subs({wrt: x}), *others) for
x in points], x0)
as_finite_diff = deprecated(
useinstead="Derivative.as_finite_difference",
deprecated_since_version="1.1", issue=11410)(_as_finite_diff)
as_finite_diff.__doc__ = """
Deprecated function. Use Derivative.as_finite_difference instead.
"""
def differentiate_finite(expr, *symbols,
# points=1, x0=None, wrt=None, evaluate=True, #Py2:
**kwargs):
r""" Differentiate expr and replace Derivatives with finite differences.
Parameters
==========
expr : expression
\*symbols : differentiate with respect to symbols
points: sequence or coefficient, optional
see ``Derivative.as_finite_difference``
x0: number or Symbol, optional
see ``Derivative.as_finite_difference``
wrt: Symbol, optional
see ``Derivative.as_finite_difference``
evaluate : bool
kwarg passed on to ``diff``, whether or not to
        evaluate the Derivative as an intermediate step (default: ``False``).
Examples
========
>>> from sympy import cos, sin, Function, differentiate_finite
>>> from sympy.abc import x, y, h
>>> f, g = Function('f'), Function('g')
>>> differentiate_finite(f(x)*g(x), x, points=[x-h, x+h])
-f(-h + x)*g(-h + x)/(2*h) + f(h + x)*g(h + x)/(2*h)
Note that the above form preserves the product rule in discrete form.
If we want we can pass ``evaluate=True`` to get another form (which is
usually not what we want):
>>> differentiate_finite(f(x)*g(x), x, points=[x-h, x+h], evaluate=True).simplify()
-((f(-h + x) - f(h + x))*g(x) + (g(-h + x) - g(h + x))*f(x))/(2*h)
``differentiate_finite`` works on any expression:
>>> differentiate_finite(f(x) + sin(x), x, 2)
-2*f(x) + f(x - 1) + f(x + 1) - 2*sin(x) + sin(x - 1) + sin(x + 1)
>>> differentiate_finite(f(x) + sin(x), x, 2, evaluate=True)
-2*f(x) + f(x - 1) + f(x + 1) - sin(x)
>>> differentiate_finite(f(x, y), x, y)
f(x - 1/2, y - 1/2) - f(x - 1/2, y + 1/2) - f(x + 1/2, y - 1/2) + f(x + 1/2, y + 1/2)
"""
# Key-word only arguments only available in Python 3
points = kwargs.pop('points', 1)
x0 = kwargs.pop('x0', None)
wrt = kwargs.pop('wrt', None)
evaluate = kwargs.pop('evaluate', False)
if kwargs:
raise ValueError("Unknown kwargs: %s" % kwargs)
Dexpr = expr.diff(*symbols, evaluate=evaluate)
return Dexpr.replace(
lambda arg: arg.is_Derivative,
lambda arg: arg.as_finite_difference(points=points, x0=x0, wrt=wrt))
|
|
import os, sys
import pytest
from _pytest.monkeypatch import monkeypatch as MonkeyPatch
def pytest_funcarg__mp(request):
cwd = os.getcwd()
sys_path = list(sys.path)
def cleanup():
sys.path[:] = sys_path
os.chdir(cwd)
request.addfinalizer(cleanup)
return MonkeyPatch()
def test_setattr():
class A:
x = 1
monkeypatch = MonkeyPatch()
pytest.raises(AttributeError, "monkeypatch.setattr(A, 'notexists', 2)")
monkeypatch.setattr(A, 'y', 2, raising=False)
assert A.y == 2
monkeypatch.undo()
assert not hasattr(A, 'y')
monkeypatch = MonkeyPatch()
monkeypatch.setattr(A, 'x', 2)
assert A.x == 2
monkeypatch.setattr(A, 'x', 3)
assert A.x == 3
monkeypatch.undo()
assert A.x == 1
A.x = 5
monkeypatch.undo() # double-undo makes no modification
assert A.x == 5
def test_delattr():
class A:
x = 1
monkeypatch = MonkeyPatch()
monkeypatch.delattr(A, 'x')
assert not hasattr(A, 'x')
monkeypatch.undo()
assert A.x == 1
monkeypatch = MonkeyPatch()
monkeypatch.delattr(A, 'x')
pytest.raises(AttributeError, "monkeypatch.delattr(A, 'y')")
monkeypatch.delattr(A, 'y', raising=False)
monkeypatch.setattr(A, 'x', 5, raising=False)
assert A.x == 5
monkeypatch.undo()
assert A.x == 1
def test_setitem():
d = {'x': 1}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, 'x', 2)
monkeypatch.setitem(d, 'y', 1700)
monkeypatch.setitem(d, 'y', 1700)
assert d['x'] == 2
assert d['y'] == 1700
monkeypatch.setitem(d, 'x', 3)
assert d['x'] == 3
monkeypatch.undo()
assert d['x'] == 1
assert 'y' not in d
d['x'] = 5
monkeypatch.undo()
assert d['x'] == 5
def test_setitem_deleted_meanwhile():
d = {}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, 'x', 2)
del d['x']
monkeypatch.undo()
assert not d
@pytest.mark.parametrize("before", [True, False])
def test_setenv_deleted_meanwhile(before):
key = "qwpeoip123"
if before:
os.environ[key] = "world"
monkeypatch = MonkeyPatch()
monkeypatch.setenv(key, 'hello')
del os.environ[key]
monkeypatch.undo()
if before:
assert os.environ[key] == "world"
del os.environ[key]
else:
assert key not in os.environ
def test_delitem():
d = {'x': 1}
monkeypatch = MonkeyPatch()
monkeypatch.delitem(d, 'x')
assert 'x' not in d
monkeypatch.delitem(d, 'y', raising=False)
pytest.raises(KeyError, "monkeypatch.delitem(d, 'y')")
assert not d
monkeypatch.setitem(d, 'y', 1700)
assert d['y'] == 1700
d['hello'] = 'world'
monkeypatch.setitem(d, 'x', 1500)
assert d['x'] == 1500
monkeypatch.undo()
assert d == {'hello': 'world', 'x': 1}
def test_setenv():
monkeypatch = MonkeyPatch()
monkeypatch.setenv('XYZ123', 2)
import os
assert os.environ['XYZ123'] == "2"
monkeypatch.undo()
assert 'XYZ123' not in os.environ
def test_delenv():
name = 'xyz1234'
assert name not in os.environ
monkeypatch = MonkeyPatch()
pytest.raises(KeyError, "monkeypatch.delenv(%r, raising=True)" % name)
monkeypatch.delenv(name, raising=False)
monkeypatch.undo()
os.environ[name] = "1"
try:
monkeypatch = MonkeyPatch()
monkeypatch.delenv(name)
assert name not in os.environ
monkeypatch.setenv(name, "3")
assert os.environ[name] == "3"
monkeypatch.undo()
assert os.environ[name] == "1"
finally:
if name in os.environ:
del os.environ[name]
def test_setenv_prepend():
import os
monkeypatch = MonkeyPatch()
monkeypatch.setenv('XYZ123', 2, prepend="-")
assert os.environ['XYZ123'] == "2"
monkeypatch.setenv('XYZ123', 3, prepend="-")
assert os.environ['XYZ123'] == "3-2"
monkeypatch.undo()
assert 'XYZ123' not in os.environ
def test_monkeypatch_plugin(testdir):
reprec = testdir.inline_runsource("""
def test_method(monkeypatch):
assert monkeypatch.__class__.__name__ == "monkeypatch"
""")
res = reprec.countoutcomes()
assert tuple(res) == (1, 0, 0), res
def test_syspath_prepend(mp):
old = list(sys.path)
mp.syspath_prepend('world')
mp.syspath_prepend('hello')
assert sys.path[0] == "hello"
assert sys.path[1] == "world"
mp.undo()
assert sys.path == old
mp.undo()
assert sys.path == old
def test_syspath_prepend_double_undo(mp):
mp.syspath_prepend('hello world')
mp.undo()
sys.path.append('more hello world')
mp.undo()
assert sys.path[-1] == 'more hello world'
def test_chdir_with_path_local(mp, tmpdir):
mp.chdir(tmpdir)
assert os.getcwd() == tmpdir.strpath
def test_chdir_with_str(mp, tmpdir):
mp.chdir(tmpdir.strpath)
assert os.getcwd() == tmpdir.strpath
def test_chdir_undo(mp, tmpdir):
cwd = os.getcwd()
mp.chdir(tmpdir)
mp.undo()
assert os.getcwd() == cwd
def test_chdir_double_undo(mp, tmpdir):
mp.chdir(tmpdir.strpath)
mp.undo()
tmpdir.chdir()
mp.undo()
assert os.getcwd() == tmpdir.strpath
def test_issue185_time_breaks(testdir):
testdir.makepyfile("""
import time
def test_m(monkeypatch):
def f():
raise Exception
monkeypatch.setattr(time, "time", f)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines("""
*1 passed*
""")
class SampleNew(object):
@staticmethod
def hello():
return True
class SampleNewInherit(SampleNew):
pass
class SampleOld:
#oldstyle on python2
@staticmethod
def hello():
return True
class SampleOldInherit(SampleOld):
pass
@pytest.mark.parametrize('Sample', [
SampleNew, SampleNewInherit,
SampleOld, SampleOldInherit,
], ids=['new', 'new-inherit', 'old', 'old-inherit'])
def test_issue156_undo_staticmethod(Sample):
monkeypatch = MonkeyPatch()
monkeypatch.setattr(Sample, 'hello', None)
assert Sample.hello is None
monkeypatch.undo()
assert Sample.hello()
|
|
#!/usr/bin/env python3
import os.path
import sys
import configparser
import argparse
import subprocess
import validate
import mysql.connector
from mysql.connector import errorcode
from tabulate import tabulate
import json
import requests
import safe_json_decoder as decoder
import thomas_queries
import thomas_utils
import thomas_create
def getargs(argv):
parser = argparse.ArgumentParser(description="Show, refresh or update and close tickets from SAFE.")
parser.add_argument("-s", "--show", dest="show", help="Show all current open tickets in our DB", action='store_true')
parser.add_argument("-f", "--file", dest="jsonfile", default=None, help="Parse json tickets from a file (parser test)")
parser.add_argument("-r", "--refresh", dest="refresh", help="Refresh open tickets in DB from SAFE and display them", action='store_true')
parser.add_argument("-c", "--close", dest="close", default=None, help="Carry out and close this ticket ID")
parser.add_argument("--reject", dest="reject", default=None, help="Reject this ticket ID")
parser.add_argument("--debug", help="Show what would be submitted without committing the change", action='store_true')
# Show the usage if no arguments are supplied
if len(argv) < 1:
parser.print_usage()
exit(1)
# return the arguments
# contains only the attributes for the main parser and the subparser that was used
return parser.parse_args(argv)
# end getargs
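# Example invocations (illustrative only; the actual script filename may
# differ in this repository):
#   python safe_tickets.py --show
#   python safe_tickets.py --refresh
#   python safe_tickets.py --close 12345 --debug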
def parsejsonfile(filename):
    with open(filename, 'r') as f:
        jdata = f.read()
    ticketlist = decoder.JSONtoTickets(jdata)
for t in ticketlist:
print(str(t.Ticket))
print("Number of tickets included: " + str(len(ticketlist)))
# Connect to SAFE, get open tickets as JSON
def getopentickets(config):
request = requests.get(config['safe']['host'] + "?mode=json", auth = (config['safe']['user'], config['safe']['password']))
if request.status_code == 200:
try:
data = request.json()
return data
except json.decoder.JSONDecodeError as err:
print("Received invalid json, contents: " + str(request.content))
exit(1)
else:
print("Request not successful, code " + str(request.status_code))
# end getopentickets
def gettickets(config):
# get SAFE tickets
jsontickets = getopentickets(config)
# parse SAFE tickets
ticketlist = decoder.JSONDataToTickets(jsontickets)
return ticketlist
# end gettickets
# Update and complete a budget (project) ticket
def updatebudget(ticket_id, projectname):
parameters = {'qtid':ticket_id, 'new_username':projectname, 'mode':'completed'}
return parameters
def updateaddtobudget(ticket_id):
parameters = {'qtid':ticket_id, 'mode':'completed'}
return parameters
# Update and complete a New User ticket
def updatenewuser(ticket_id, username):
parameters = {'qtid':ticket_id, 'new_username':username, 'mode':'completed'}
return parameters
# Reject the ticket because it would cause an error
def rejecterror(ticket_id):
parameters = {'qtid':ticket_id, 'mode':'error'}
return parameters
# Reject the ticket for any other reason
def rejectother(ticket_id):
parameters = {'qtid':ticket_id, 'mode':'refused'}
return parameters
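# Illustrative examples (hypothetical ticket id and username) of the parameter
# dicts built above, which updateticket() below posts back to SAFE:
#   updatenewuser("12345", "mmmxxxx") -> {'qtid': '12345',
#                                         'new_username': 'mmmxxxx',
#                                         'mode': 'completed'}
#   rejecterror("12345")              -> {'qtid': '12345', 'mode': 'error'}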
# Update and close a ticket.
# parameters is a dictionary of values: {'qtid':id,'new_username':'Test', 'mode':'completed'}
def updateticket(config, args, parameters):
if args.debug:
print("Post request would be to " + config['safe']['host'] + " with params = " + str(parameters))
else:
request = requests.post(config['safe']['host'], auth = (config['safe']['user'], config['safe']['password']), params = parameters)
if "<title>SysAdminServlet Success</title>" in request.text:
print("Ticket " + parameters['qtid'] + " closed.")
# end updateticket
# Deal with a New User ticket
# (thomas_utils and thomas_create commands that add and create users contain debugging).
def newuser(cursor, config, args, ticketid):
# get the ticket (the ID is unique so there is only one)
cursor.execute(thomas_queries.getsafeticket(), {'id':ticketid})
result = cursor.fetchall()
# this dict will be needed when we create the user
user_dict = {'username': result[0]['account_name'],
'given_name': result[0]['firstname'],
'surname': result[0]['lastname'],
'email': result[0]['email'],
'ssh_key': result[0]['publickey'],
'status': "active"}
# check that we don't already have a username for them
if "to_be_allocated_" in user_dict['username']:
# check if they are a UCL user: UCL email
if "ucl.ac.uk" in user_dict['email']:
# UCL: get username from AD
user_dict['username'] = thomas_utils.AD_username_from_email(config, user_dict['email'])
print("UCL username found from AD: " + user_dict['username'])
else:
# not UCL, get next mmm username
user_dict['username'] = thomas_utils.getunusedmmm(cursor)
print("Not UCL email, username is " + user_dict['username'])
# we have a non-placeholder username
else:
print("Using ticket-provided username: " + user_dict['username'])
# check we are on the correct machine
cluster = thomas_utils.getnodename()
if result[0]['machine'].casefold() in cluster:
# Add new user to database: need the user_dict dictionary we created.
# Surname may be empty.
args.surname = result[0]['lastname']
thomas_utils.addusertodb(args, user_dict, cursor)
# make sure the point of contact gets copied in on account creation
args.cc_email = result[0]['poc_email']
args.username = user_dict['username']
args.ssh_key = user_dict['ssh_key']
args.email = user_dict['email']
args.noemail = ''
# thomas_create.createaccount takes a Namespace rather than a dict
#user_namespace = argparse.Namespace(cc_email = result[0]['poc_email'], username = user_dict['username'], ssh_key = user_dict['ssh_key'], email = user_dict['email'], noemail = '', debug = args.debug)
thomas_create.createaccount(args, cluster)
else:
print("SAFE ticket was for " + result[0]['machine'].casefold() + "and you are on " + cluster + ", exiting.")
exit(1)
# update SAFE and close the ticket
updateticket(config, args, updatenewuser(ticketid, user_dict['username']))
# end newuser
# Deal with a New Budget ticket
# (thomas_utils.addproject contains debugging)
def newbudget(cursor, config, args, ticketid):
# get the ticket (the ID is unique so there is only one)
cursor.execute(thomas_queries.getsafeticket(), {'id':ticketid})
result = cursor.fetchall()
# this dict will be needed when we create the budget
budget_dict = {'project_ID': result[0]['project'],
'inst_ID': ''}
# need to work out what institute it is for
# use the first part of the project_ID up to any underscore as institute
budget_dict['inst_ID'] = budget_dict['project_ID'].partition("_")[0]
# add new project to database
thomas_utils.addproject(args, budget_dict, cursor)
# update SAFE and close the ticket
    updateticket(config, args, updatebudget(ticketid, budget_dict['project_ID']))
# end newbudget
# Deal with an Add to budget ticket
# (thomas_utils.addprojectuser contains debugging)
def addtobudget(cursor, config, args, ticketid):
# get the ticket (the ID is unique so there is only one)
cursor.execute(thomas_queries.getsafeticket(), {'id':ticketid})
result = cursor.fetchall()
# this dict will be needed when we create the projectuser
projectuser_dict = {'username': result[0]['account_name'],
'project_ID': result[0]['project'],
'poc_id': '',
'poc_firstname': result[0]['poc_firstname'],
'poc_lastname': result[0]['poc_lastname'],
'poc_email': result[0]['poc_email'],
'status': 'active'}
# budget exists: get the point of contact
projectuser_dict['poc_id'] = thomas_utils.findpocID(cursor, projectuser_dict)
thomas_utils.addprojectuser(args, projectuser_dict, cursor)
# update SAFE and close the ticket
updateticket(config, args, updateaddtobudget(ticketid))
# end addtobudget
# Match a New User ticket with an Add to budget ticket for the same user
def matchbudgetticket(cursor, ticketid):
# get the username from the New User ticket
cursor.execute(thomas_queries.getsafeticket(), {'id':ticketid})
result = cursor.fetchall()
user = result[0]['account_name']
# get the matching add to budget tickets
cursor.execute(thomas_queries.getusersbudgettickets(), {'account_name':user})
result = cursor.fetchall()
rowcount = cursor.rowcount
# There were no matches! We did that ticket already (or we need to refresh).
if rowcount == 0:
print("No pending Add to budget tickets for " + user)
print("You may wish to use --refresh to refresh tickets.")
return None
# May be multiple matches - we just want the first one as they can be done in any order.
# Return ticket id so we know which one we are completing.
return {'project': result[0]['project'], 'ticket_ID': result[0]['id']}
# end matchbudgetticket
# Turn a list of tickets into a list of dicts for use in SQL queries
def ticketstodicts(ticketlist):
ticket_dicts = []
for t in ticketlist:
t_dict = {
"id": t.Ticket.Id,
"type": t.Ticket.Type,
"status": t.Ticket.Status,
"account_name": t.Ticket.Account.Name,
"machine": t.Ticket.Machine,
"project": t.Ticket.ProjectGroup.Code,
"firstname": t.Ticket.Account.Person.FirstName,
"lastname": t.Ticket.Account.Person.LastName,
"email": t.Ticket.Account.Person.Email,
"publickey": t.Ticket.Account.Person.NormalisedPublicKey,
"poc_firstname": t.Ticket.Approver.FirstName,
"poc_lastname": t.Ticket.Approver.LastName,
"poc_email": t.Ticket.Approver.Email,
"startdate": t.Ticket.StartDate,
"enddate": t.Ticket.EndDate
}
ticket_dicts.append(t_dict)
return ticket_dicts
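# (The keys above double as the named parameters passed to
#  thomas_queries.refreshsafetickets() when the database is refreshed in main().)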
# Put main in a function so it is importable.
def main(argv):
try:
args = getargs(argv)
# make a dictionary from args to make string substitutions doable by key name
args_dict = vars(args)
except ValueError as err:
print(err)
exit(1)
try:
config = configparser.ConfigParser()
config.read_file(open(os.path.expanduser('~/.thomas.cnf')))
#except FileNotFoundError as err:
except OSError as err:
print(err)
# Read tickets from a file
if args.jsonfile is not None:
parsejsonfile(args.jsonfile)
# Show tickets live from SAFE
if args.show:
# get SAFE tickets
ticketlist = gettickets(config)
# print SAFE tickets
for t in ticketlist:
#print(str(t.Ticket))
values = [t.Ticket.Id, t.Ticket.Type, t.Ticket.Status, t.Ticket.Account.Name, t.Ticket.Machine, t.Ticket.ProjectGroup.Code, t.Ticket.Account.Person.FirstName, t.Ticket.Account.Person.LastName, t.Ticket.Account.Person.Email, t.Ticket.Account.Person.NormalisedPublicKey, t.Ticket.Approver.FirstName, t.Ticket.Approver.LastName, t.Ticket.Approver.Email, t.Ticket.StartDate, t.Ticket.EndDate]
print(values)
print("Number of pending tickets: " + str(len(ticketlist)))
# these options require a database connection
if args.refresh or args.close is not None or args.reject is not None:
try:
conn = mysql.connector.connect(option_files=os.path.expanduser('~/.thomas.cnf'), option_groups='thomas_update', database='thomas')
cursor = conn.cursor(dictionary=True)
# Refresh the database tickets
if args.refresh:
# get SAFE tickets as list of dicts
ticketdicts = ticketstodicts(gettickets(config))
# refresh tickets in database
for t in ticketdicts:
cursor.execute(thomas_queries.refreshsafetickets(), t)
thomas_utils.debugcursor(cursor, args.debug)
# show database tickets (not inc ssh key)
print("Refreshed tickets:")
cursor.execute(thomas_queries.showpendingtickets())
thomas_utils.tableprint_dict(cursor.fetchall())
# Update and close SAFE tickets
if args.close is not None:
# for readability below
ticket = args.close
# get the type of ticket - ticket id is unique so there is only one
# (Either make a temporary dict or pass in (ticket,) with the comma which is ugly).
cursor.execute(thomas_queries.safetickettype(), {'id':ticket})
result = cursor.fetchall()
# make sure we got a result, or exit
if cursor.rowcount < 1:
print("No tickets with id " + ticket + " found, exiting.")
exit(1)
tickettype = result[0]['type']
# store all the ticket info
# new user
if tickettype == "New User":
newuser(cursor, config, args, ticket)
# Each new user ticket should have a matching Add to budget ticket.
# Find it if it exists and complete it too.
match = matchbudgetticket(cursor, ticket)
if match is not None:
print("Matching 'Add to budget' ticket " + str(match['ticket_ID']) + " found for this new user, carrying out.")
addtobudget(cursor, config, args, match['ticket_ID'])
# new budget
elif tickettype == "New Budget":
newbudget(cursor, config, args, ticket)
# add to budget
elif tickettype == "Add to budget":
addtobudget(cursor, config, args, ticket)
else:
print("Ticket " + ticket + " type unrecognised: " + tickettype)
exit(1)
# Reject SAFE tickets - there are two types of rejection so ask
if args.reject is not None:
ticket = args.reject
answer = thomas_utils.select_from_list("Reason to reject ticket: would it cause an error, or is it being rejected for any other reason?", ("other", "error"), default_ans="other")
if answer == "error":
updateticket(config, args, rejecterror(ticket))
else:
updateticket(config, args, rejectother(ticket))
# commit the change to the database unless we are debugging
if not args.debug:
conn.commit()
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Access denied: Something is wrong with your user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cursor.close()
conn.close()
# end main
# When not imported, use the normal global arguments
if __name__ == "__main__":
main(sys.argv[1:])
|
|
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import re
import mock
from oslo.vmware import exceptions as vexc
from nova import exception
from nova.i18n import _
from nova.openstack.common import units
from nova import test
from nova.tests.virt.vmwareapi import fake
from nova.virt.vmwareapi import ds_util
class DsUtilTestCase(test.NoDBTestCase):
def setUp(self):
super(DsUtilTestCase, self).setUp()
self.session = fake.FakeSession()
self.flags(api_retry_count=1, group='vmware')
fake.reset()
def tearDown(self):
super(DsUtilTestCase, self).tearDown()
fake.reset()
def test_file_delete(self):
def fake_call_method(module, method, *args, **kwargs):
self.assertEqual('DeleteDatastoreFile_Task', method)
name = kwargs.get('name')
self.assertEqual('[ds] fake/path', name)
datacenter = kwargs.get('datacenter')
self.assertEqual('fake-dc-ref', datacenter)
return 'fake_delete_task'
with contextlib.nested(
mock.patch.object(self.session, '_wait_for_task'),
mock.patch.object(self.session, '_call_method',
fake_call_method)
) as (_wait_for_task, _call_method):
ds_path = ds_util.DatastorePath('ds', 'fake/path')
ds_util.file_delete(self.session,
ds_path, 'fake-dc-ref')
_wait_for_task.assert_has_calls([
mock.call('fake_delete_task')])
def test_file_move(self):
def fake_call_method(module, method, *args, **kwargs):
self.assertEqual('MoveDatastoreFile_Task', method)
sourceName = kwargs.get('sourceName')
self.assertEqual('[ds] tmp/src', sourceName)
destinationName = kwargs.get('destinationName')
self.assertEqual('[ds] base/dst', destinationName)
sourceDatacenter = kwargs.get('sourceDatacenter')
self.assertEqual('fake-dc-ref', sourceDatacenter)
destinationDatacenter = kwargs.get('destinationDatacenter')
self.assertEqual('fake-dc-ref', destinationDatacenter)
return 'fake_move_task'
with contextlib.nested(
mock.patch.object(self.session, '_wait_for_task'),
mock.patch.object(self.session, '_call_method',
fake_call_method)
) as (_wait_for_task, _call_method):
src_ds_path = ds_util.DatastorePath('ds', 'tmp/src')
dst_ds_path = ds_util.DatastorePath('ds', 'base/dst')
ds_util.file_move(self.session,
'fake-dc-ref', src_ds_path, dst_ds_path)
_wait_for_task.assert_has_calls([
mock.call('fake_move_task')])
def test_mkdir(self):
def fake_call_method(module, method, *args, **kwargs):
self.assertEqual('MakeDirectory', method)
name = kwargs.get('name')
self.assertEqual('[ds] fake/path', name)
datacenter = kwargs.get('datacenter')
self.assertEqual('fake-dc-ref', datacenter)
createParentDirectories = kwargs.get('createParentDirectories')
self.assertTrue(createParentDirectories)
with mock.patch.object(self.session, '_call_method',
fake_call_method):
ds_path = ds_util.DatastorePath('ds', 'fake/path')
ds_util.mkdir(self.session, ds_path, 'fake-dc-ref')
def test_file_exists(self):
def fake_call_method(module, method, *args, **kwargs):
if method == 'SearchDatastore_Task':
ds_browser = args[0]
self.assertEqual('fake-browser', ds_browser)
datastorePath = kwargs.get('datastorePath')
self.assertEqual('[ds] fake/path', datastorePath)
return 'fake_exists_task'
# Should never get here
self.fail()
def fake_wait_for_task(task_ref):
if task_ref == 'fake_exists_task':
result_file = fake.DataObject()
result_file.path = 'fake-file'
result = fake.DataObject()
result.file = [result_file]
result.path = '[ds] fake/path'
task_info = fake.DataObject()
task_info.result = result
return task_info
# Should never get here
self.fail()
with contextlib.nested(
mock.patch.object(self.session, '_call_method',
fake_call_method),
mock.patch.object(self.session, '_wait_for_task',
fake_wait_for_task)):
ds_path = ds_util.DatastorePath('ds', 'fake/path')
file_exists = ds_util.file_exists(self.session,
'fake-browser', ds_path, 'fake-file')
self.assertTrue(file_exists)
def test_file_exists_fails(self):
def fake_call_method(module, method, *args, **kwargs):
if method == 'SearchDatastore_Task':
return 'fake_exists_task'
# Should never get here
self.fail()
def fake_wait_for_task(task_ref):
if task_ref == 'fake_exists_task':
raise vexc.FileNotFoundException()
# Should never get here
self.fail()
with contextlib.nested(
mock.patch.object(self.session, '_call_method',
fake_call_method),
mock.patch.object(self.session, '_wait_for_task',
fake_wait_for_task)):
ds_path = ds_util.DatastorePath('ds', 'fake/path')
file_exists = ds_util.file_exists(self.session,
'fake-browser', ds_path, 'fake-file')
self.assertFalse(file_exists)
def test_get_datastore(self):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore())
fake_objects.add_object(fake.Datastore("fake-ds-2", 2048, 1000,
False, "normal"))
fake_objects.add_object(fake.Datastore("fake-ds-3", 4096, 2000,
True, "inMaintenance"))
result = ds_util.get_datastore(
fake.FakeObjectRetrievalSession(fake_objects))
self.assertEqual("fake-ds", result.name)
self.assertEqual(units.Ti, result.capacity)
self.assertEqual(500 * units.Gi, result.freespace)
def test_get_datastore_with_regex(self):
# Test with a regex that matches with a datastore
datastore_valid_regex = re.compile("^openstack.*\d$")
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("openstack-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds1"))
result = ds_util.get_datastore(
fake.FakeObjectRetrievalSession(fake_objects), None, None,
datastore_valid_regex)
self.assertEqual("openstack-ds0", result.name)
def test_get_datastore_with_token(self):
regex = re.compile("^ds.*\d$")
fake0 = fake.FakeRetrieveResult()
fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi))
fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi))
setattr(fake0, 'token', 'token-0')
fake1 = fake.FakeRetrieveResult()
fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi))
fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi))
result = ds_util.get_datastore(
fake.FakeObjectRetrievalSession(fake0, fake1), None, None, regex)
self.assertEqual("ds2", result.name)
def test_get_datastore_with_list(self):
# Test with a regex containing whitelist of datastores
datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)")
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("openstack-ds0"))
fake_objects.add_object(fake.Datastore("openstack-ds1"))
fake_objects.add_object(fake.Datastore("openstack-ds2"))
result = ds_util.get_datastore(
fake.FakeObjectRetrievalSession(fake_objects), None, None,
datastore_valid_regex)
self.assertNotEqual("openstack-ds1", result.name)
def test_get_datastore_with_regex_error(self):
# Test with a regex that has no match
# Checks if code raises DatastoreNotFound with a specific message
datastore_invalid_regex = re.compile("unknown-ds")
exp_message = (_("Datastore regex %s did not match any datastores")
% datastore_invalid_regex.pattern)
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.Datastore("fake-ds0"))
fake_objects.add_object(fake.Datastore("fake-ds1"))
# assertRaisesRegExp would have been a good choice instead of
# try/catch block, but it's available only from Py 2.7.
try:
ds_util.get_datastore(
fake.FakeObjectRetrievalSession(fake_objects), None, None,
datastore_invalid_regex)
except exception.DatastoreNotFound as e:
self.assertEqual(exp_message, e.args[0])
else:
self.fail("DatastoreNotFound Exception was not raised with "
"message: %s" % exp_message)
def test_get_datastore_without_datastore(self):
self.assertRaises(exception.DatastoreNotFound,
ds_util.get_datastore,
fake.FakeObjectRetrievalSession(None), host="fake-host")
self.assertRaises(exception.DatastoreNotFound,
ds_util.get_datastore,
fake.FakeObjectRetrievalSession(None), cluster="fake-cluster")
def test_get_datastore_no_host_in_cluster(self):
self.assertRaises(exception.DatastoreNotFound,
ds_util.get_datastore,
fake.FakeObjectRetrievalSession(""), 'fake_cluster')
def test_get_datastore_inaccessible_ds(self):
data_store = fake.Datastore()
data_store.set("summary.accessible", False)
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(data_store)
self.assertRaises(exception.DatastoreNotFound,
ds_util.get_datastore,
fake.FakeObjectRetrievalSession(fake_objects))
def test_get_datastore_ds_in_maintenance(self):
data_store = fake.Datastore()
data_store.set("summary.maintenanceMode", "inMaintenance")
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(data_store)
self.assertRaises(exception.DatastoreNotFound,
ds_util.get_datastore,
fake.FakeObjectRetrievalSession(fake_objects))
def _test_is_datastore_valid(self, accessible=True,
maintenance_mode="normal",
type="VMFS",
datastore_regex=None):
propdict = {}
propdict["summary.accessible"] = accessible
propdict["summary.maintenanceMode"] = maintenance_mode
propdict["summary.type"] = type
propdict["summary.name"] = "ds-1"
return ds_util._is_datastore_valid(propdict, datastore_regex)
def test_is_datastore_valid(self):
for ds_type in ds_util.ALLOWED_DATASTORE_TYPES:
self.assertTrue(self._test_is_datastore_valid(True,
"normal",
ds_type))
def test_is_datastore_valid_inaccessible_ds(self):
self.assertFalse(self._test_is_datastore_valid(False,
"normal",
"VMFS"))
def test_is_datastore_valid_ds_in_maintenance(self):
self.assertFalse(self._test_is_datastore_valid(True,
"inMaintenance",
"VMFS"))
def test_is_datastore_valid_ds_type_invalid(self):
self.assertFalse(self._test_is_datastore_valid(True,
"normal",
"vfat"))
def test_is_datastore_valid_not_matching_regex(self):
datastore_regex = re.compile("ds-2")
self.assertFalse(self._test_is_datastore_valid(True,
"normal",
"VMFS",
datastore_regex))
def test_is_datastore_valid_matching_regex(self):
datastore_regex = re.compile("ds-1")
self.assertTrue(self._test_is_datastore_valid(True,
"normal",
"VMFS",
datastore_regex))
class DatastoreTestCase(test.NoDBTestCase):
def test_ds(self):
ds = ds_util.Datastore(
"fake_ref", "ds_name", 2 * units.Gi, 1 * units.Gi)
self.assertEqual('ds_name', ds.name)
self.assertEqual('fake_ref', ds.ref)
self.assertEqual(2 * units.Gi, ds.capacity)
self.assertEqual(1 * units.Gi, ds.freespace)
def test_ds_invalid_space(self):
self.assertRaises(ValueError, ds_util.Datastore,
"fake_ref", "ds_name", 1 * units.Gi, 2 * units.Gi)
self.assertRaises(ValueError, ds_util.Datastore,
"fake_ref", "ds_name", None, 2 * units.Gi)
def test_ds_no_capacity_no_freespace(self):
ds = ds_util.Datastore("fake_ref", "ds_name")
self.assertIsNone(ds.capacity)
self.assertIsNone(ds.freespace)
def test_ds_invalid(self):
self.assertRaises(ValueError, ds_util.Datastore, None, "ds_name")
self.assertRaises(ValueError, ds_util.Datastore, "fake_ref", None)
def test_build_path(self):
ds = ds_util.Datastore("fake_ref", "ds_name")
ds_path = ds.build_path("some_dir", "foo.vmdk")
self.assertEqual('[ds_name] some_dir/foo.vmdk', str(ds_path))
class DatastorePathTestCase(test.NoDBTestCase):
def test_ds_path(self):
p = ds_util.DatastorePath('dsname', 'a/b/c', 'file.iso')
self.assertEqual('[dsname] a/b/c/file.iso', str(p))
self.assertEqual('a/b/c/file.iso', p.rel_path)
self.assertEqual('a/b/c', p.parent.rel_path)
self.assertEqual('[dsname] a/b/c', str(p.parent))
self.assertEqual('dsname', p.datastore)
self.assertEqual('file.iso', p.basename)
self.assertEqual('a/b/c', p.dirname)
def test_ds_path_no_ds_name(self):
bad_args = [
('', ['a/b/c', 'file.iso']),
(None, ['a/b/c', 'file.iso'])]
for t in bad_args:
self.assertRaises(
ValueError, ds_util.DatastorePath,
t[0], *t[1])
def test_ds_path_invalid_path_components(self):
bad_args = [
('dsname', [None]),
('dsname', ['', None]),
('dsname', ['a', None]),
('dsname', ['a', None, 'b']),
('dsname', [None, '']),
('dsname', [None, 'b'])]
for t in bad_args:
self.assertRaises(
ValueError, ds_util.DatastorePath,
t[0], *t[1])
def test_ds_path_no_subdir(self):
args = [
('dsname', ['', 'x.vmdk']),
('dsname', ['x.vmdk'])]
canonical_p = ds_util.DatastorePath('dsname', 'x.vmdk')
self.assertEqual('[dsname] x.vmdk', str(canonical_p))
self.assertEqual('', canonical_p.dirname)
self.assertEqual('x.vmdk', canonical_p.basename)
self.assertEqual('x.vmdk', canonical_p.rel_path)
for t in args:
p = ds_util.DatastorePath(t[0], *t[1])
self.assertEqual(str(canonical_p), str(p))
def test_ds_path_ds_only(self):
args = [
('dsname', []),
('dsname', ['']),
('dsname', ['', ''])]
canonical_p = ds_util.DatastorePath('dsname')
self.assertEqual('[dsname]', str(canonical_p))
self.assertEqual('', canonical_p.rel_path)
self.assertEqual('', canonical_p.basename)
self.assertEqual('', canonical_p.dirname)
for t in args:
p = ds_util.DatastorePath(t[0], *t[1])
self.assertEqual(str(canonical_p), str(p))
self.assertEqual(canonical_p.rel_path, p.rel_path)
def test_ds_path_equivalence(self):
args = [
('dsname', ['a/b/c/', 'x.vmdk']),
('dsname', ['a/', 'b/c/', 'x.vmdk']),
('dsname', ['a', 'b', 'c', 'x.vmdk']),
('dsname', ['a/b/c', 'x.vmdk'])]
canonical_p = ds_util.DatastorePath('dsname', 'a/b/c', 'x.vmdk')
for t in args:
p = ds_util.DatastorePath(t[0], *t[1])
self.assertEqual(str(canonical_p), str(p))
self.assertEqual(canonical_p.datastore, p.datastore)
self.assertEqual(canonical_p.rel_path, p.rel_path)
self.assertEqual(str(canonical_p.parent), str(p.parent))
def test_ds_path_non_equivalence(self):
args = [
# leading slash
('dsname', ['/a', 'b', 'c', 'x.vmdk']),
('dsname', ['/a/b/c/', 'x.vmdk']),
('dsname', ['a/b/c', '/x.vmdk']),
# leading space
('dsname', ['a/b/c/', ' x.vmdk']),
('dsname', ['a/', ' b/c/', 'x.vmdk']),
('dsname', [' a', 'b', 'c', 'x.vmdk']),
# trailing space
('dsname', ['/a/b/c/', 'x.vmdk ']),
('dsname', ['a/b/c/ ', 'x.vmdk'])]
canonical_p = ds_util.DatastorePath('dsname', 'a/b/c', 'x.vmdk')
for t in args:
p = ds_util.DatastorePath(t[0], *t[1])
self.assertNotEqual(str(canonical_p), str(p))
def test_equal(self):
a = ds_util.DatastorePath('ds_name', 'a')
b = ds_util.DatastorePath('ds_name', 'a')
self.assertEqual(a, b)
def test_join(self):
p = ds_util.DatastorePath('ds_name', 'a')
ds_path = p.join('b')
self.assertEqual('[ds_name] a/b', str(ds_path))
p = ds_util.DatastorePath('ds_name', 'a')
ds_path = p.join()
self.assertEqual('[ds_name] a', str(ds_path))
bad_args = [
[None],
['', None],
['a', None],
['a', None, 'b']]
for arg in bad_args:
self.assertRaises(ValueError, p.join, *arg)
def test_ds_path_parse(self):
p = ds_util.DatastorePath.parse('[dsname]')
self.assertEqual('dsname', p.datastore)
self.assertEqual('', p.rel_path)
p = ds_util.DatastorePath.parse('[dsname] folder')
self.assertEqual('dsname', p.datastore)
self.assertEqual('folder', p.rel_path)
p = ds_util.DatastorePath.parse('[dsname] folder/file')
self.assertEqual('dsname', p.datastore)
self.assertEqual('folder/file', p.rel_path)
for p in [None, '']:
self.assertRaises(ValueError, ds_util.DatastorePath.parse, p)
for p in ['bad path', '/a/b/c', 'a/b/c']:
self.assertRaises(IndexError, ds_util.DatastorePath.parse, p)
|
|
"""
Tests of role and membership calculations.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.test import TestCase
from ..constants import role_kinds
from ..models import Classroom
from ..models import Facility
from ..models import FacilityUser
from ..models import KolibriAnonymousUser
from ..models import LearnerGroup
from .helpers import create_dummy_facility_data
from .helpers import create_superuser
def flatten(lst):
if lst == []:
return lst
if isinstance(lst[0], list):
return flatten(lst[0]) + flatten(lst[1:])
return lst[:1] + flatten(lst[1:])
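# Example (illustrative): flatten([[1, 2], [3, [4]]]) == [1, 2, 3, 4].
# The test cases below use this to collapse the nested learner/group lists
# returned by create_dummy_facility_data into flat member lists.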
class RolesWithinFacilityTestCase(TestCase):
def setUp(self):
self.data = create_dummy_facility_data()
def test_admin_has_admin_role_for_own_facility(self):
admin = self.data["facility_admin"]
facility = self.data["facility"]
self.assertTrue(admin.has_role_for(role_kinds.ADMIN, facility))
self.assertIn(role_kinds.ADMIN, admin.get_roles_for(facility))
def test_coach_has_coach_role_for_own_classroom(self):
coach0 = self.data["classroom_coaches"][0]
classroom0 = self.data["classrooms"][0]
self.assertTrue(coach0.has_role_for(role_kinds.COACH, classroom0))
self.assertIn(role_kinds.COACH, coach0.get_roles_for(classroom0))
def test_coach_has_no_coach_role_for_other_classroom(self):
coach0 = self.data["classroom_coaches"][0]
classroom1 = self.data["classrooms"][1]
self.assertFalse(coach0.has_role_for(role_kinds.COACH, classroom1))
self.assertNotIn(role_kinds.COACH, coach0.get_roles_for(classroom1))
def test_coach_has_coach_role_for_learner_from_own_classroom(self):
coach0 = self.data["classroom_coaches"][0]
learner0 = self.data["learners_one_group"][0][0]
self.assertTrue(coach0.has_role_for(role_kinds.COACH, learner0))
self.assertIn(role_kinds.COACH, coach0.get_roles_for(learner0))
def test_coach_has_no_coach_role_for_learner_from_other_classroom(self):
coach0 = self.data["classroom_coaches"][0]
learner1 = self.data["learners_one_group"][1][0]
self.assertFalse(coach0.has_role_for(role_kinds.COACH, learner1))
self.assertNotIn(role_kinds.COACH, coach0.get_roles_for(learner1))
class ImplicitMembershipTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create(name="My Facility")
self.admin = FacilityUser.objects.create(username="admin", facility=self.facility)
self.facility.add_admin(self.admin)
self.learner = FacilityUser.objects.create(username="learner", facility=self.facility)
def test_has_admin_role_for_learner(self):
self.assertTrue(self.admin.has_role_for(role_kinds.ADMIN, self.learner))
def test_only_has_admin_role_for_learner(self):
self.assertEqual(self.admin.get_roles_for(self.learner), set([role_kinds.ADMIN]))
def test_admin_can_read_learner_object(self):
self.assertTrue(self.admin.can_read(self.learner))
def test_learner_is_in_list_of_readable_objects(self):
self.assertIn(self.learner, self.admin.filter_readable(FacilityUser.objects.all()))
class ExplicitMembershipTestCase(TestCase):
def setUp(self):
self.facility = Facility.objects.create(name="My Facility")
self.admin = FacilityUser.objects.create(username="admin", facility=self.facility)
self.classroom = Classroom.objects.create(name="Class", parent=self.facility)
self.classroom.add_admin(self.admin)
self.learner = FacilityUser.objects.create(username="learner", facility=self.facility)
self.group = LearnerGroup.objects.create(name="Group", parent=self.classroom)
self.group.add_member(self.learner)
def test_has_admin_role_for_learner(self):
self.assertTrue(self.admin.has_role_for(role_kinds.ADMIN, self.learner))
def test_only_has_admin_role_for_learner(self):
self.assertEqual(self.admin.get_roles_for(self.learner), set([role_kinds.ADMIN]))
def test_admin_can_read_learner_object(self):
self.assertTrue(self.admin.can_read(self.learner))
def test_learner_is_in_list_of_readable_objects(self):
self.assertIn(self.learner, self.admin.filter_readable(FacilityUser.objects.all()))
class RolesAcrossFacilitiesTestCase(TestCase):
def setUp(self):
self.data1 = create_dummy_facility_data()
self.data2 = create_dummy_facility_data()
def test_no_roles_between_users_across_facilities(self):
users1 = self.data1["all_users"]
users2 = self.data2["all_users"]
for user1 in users1:
for user2 in users2:
if not user1.is_superuser:
self.assertEqual(len(user1.get_roles_for(user2)), 0)
def test_no_roles_for_collections_across_facilities(self):
users1 = self.data1["classroom_coaches"] + [self.data1["facility_admin"]] + list(self.data1["facility"].get_members())
collections2 = [self.data2["facility"]] + self.data2["classrooms"] + flatten(self.data2["learnergroups"])
for user1 in users1:
for collection2 in collections2:
if not user1.is_superuser:
self.assertEqual(len(user1.get_roles_for(collection2)), 0)
class MembershipWithinFacilityTestCase(TestCase):
def setUp(self):
self.data = create_dummy_facility_data()
self.anon_user = KolibriAnonymousUser()
def test_facility_membership(self):
actual_members = flatten(self.data["learners_one_group"] + [self.data["learner_all_groups"]] +
self.data["unattached_users"] + [self.data["facility_admin"]] +
[self.data["facility_coach"]] + self.data["classroom_admins"] +
self.data["classroom_coaches"] + [self.data["superuser"]])
returned_members = self.data["facility"].get_members()
self.assertSetEqual(set(actual_members), set(returned_members))
for user in actual_members:
self.assertTrue(user.is_member_of(self.data["facility"]))
self.assertFalse(self.anon_user.is_member_of(self.data["facility"]))
def test_classroom_membership(self):
for i, classroom in enumerate(self.data["classrooms"]):
actual_members = flatten(self.data["learners_one_group"][i] + [self.data["learner_all_groups"]])
returned_members = classroom.get_members()
self.assertSetEqual(set(actual_members), set(returned_members))
# ensure that `is_member` is True for all users in the classroom
for user in actual_members:
self.assertTrue(user.is_member_of(classroom))
# ensure that `is_member` is False for all users not in the classroom
for user in set(self.data["all_users"]) - set(actual_members):
self.assertFalse(user.is_member_of(classroom))
self.assertFalse(self.anon_user.is_member_of(classroom))
def test_learnergroup_membership(self):
for i, classroom_users in enumerate(self.data["learners_one_group"]):
for j, learnergroup_users in enumerate(classroom_users):
learnergroup = self.data["learnergroups"][i][j]
actual_members = [self.data["learners_one_group"][i][j]] + [self.data["learner_all_groups"]]
returned_members = learnergroup.get_members()
self.assertSetEqual(set(actual_members), set(returned_members))
# ensure that `is_member` is True for all users in the learnergroup
for user in actual_members:
self.assertTrue(user.is_member_of(learnergroup))
# ensure that `is_member` is False for all users not in the learnergroup
for user in set(self.data["all_users"]) - set(actual_members):
self.assertFalse(user.is_member_of(learnergroup))
class MembershipAcrossFacilitiesTestCase(TestCase):
def setUp(self):
self.data1 = create_dummy_facility_data()
self.data2 = create_dummy_facility_data()
def test_users_are_not_members_of_other_facility(self):
for user in self.data1["all_users"]:
self.assertFalse(user.is_member_of(self.data2["facility"]))
def test_users_are_not_members_of_other_facility_classroom(self):
for user in self.data1["all_users"]:
self.assertFalse(user.is_member_of(self.data2["classrooms"][0]))
def test_users_are_not_members_of_other_facility_learnergroup(self):
for user in self.data1["all_users"]:
self.assertFalse(user.is_member_of(self.data2["learnergroups"][0][0]))
class SuperuserRolesTestCase(TestCase):
def setUp(self):
self.data = create_dummy_facility_data()
self.superuser = self.data["superuser"]
self.superuser2 = create_superuser(self.data["facility"], username="superuser2")
def test_superuser_has_admin_role_for_everyone(self):
for user in self.data["all_users"]:
self.assertTrue(self.superuser.has_role_for(role_kinds.ADMIN, user))
def test_superuser_has_admin_role_for_all_collections(self):
for coll in self.data["all_collections"]:
self.assertTrue(self.superuser.has_role_for(role_kinds.ADMIN, coll))
def test_superuser_has_admin_role_for_itself(self):
self.assertTrue(self.superuser.has_role_for(role_kinds.ADMIN, self.superuser))
def test_superuser_has_admin_role_for_other_superuser(self):
self.assertTrue(self.superuser.has_role_for(role_kinds.ADMIN, self.superuser2))
class AnonymousUserRolesTestCase(TestCase):
def setUp(self):
self.data = create_dummy_facility_data()
self.anon_user = KolibriAnonymousUser()
def test_anon_user_has_no_admin_role_for_anyone(self):
for user in self.data["all_users"]:
self.assertFalse(self.anon_user.has_role_for(role_kinds.ADMIN, user))
self.assertEqual(len(self.anon_user.get_roles_for(user)), 0)
def test_anon_user_has_no_admin_role_for_any_collection(self):
for coll in self.data["all_collections"]:
self.assertFalse(self.anon_user.has_role_for(role_kinds.ADMIN, coll))
self.assertEqual(len(self.anon_user.get_roles_for(coll)), 0)
def test_nobody_but_superuser_has_roles_for_anon_user(self):
for user in self.data["all_users"]:
if not user.is_superuser:
self.assertEqual(len(user.get_roles_for(self.anon_user)), 0)
|
|
"""Support for ISY994 binary sensors."""
from datetime import timedelta
import logging
from typing import Callable
from homeassistant.components.binary_sensor import DOMAIN, BinarySensorDevice
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import dt as dt_util
from . import ISY994_NODES, ISY994_PROGRAMS, ISYDevice
_LOGGER = logging.getLogger(__name__)
ISY_DEVICE_TYPES = {
"moisture": ["16.8", "16.13", "16.14"],
"opening": ["16.9", "16.6", "16.7", "16.2", "16.17", "16.20", "16.21"],
"motion": ["16.1", "16.4", "16.5", "16.3"],
}
def setup_platform(
hass, config: ConfigType, add_entities: Callable[[list], None], discovery_info=None
):
"""Set up the ISY994 binary sensor platform."""
devices = []
devices_by_nid = {}
child_nodes = []
for node in hass.data[ISY994_NODES][DOMAIN]:
if node.parent_node is None:
device = ISYBinarySensorDevice(node)
devices.append(device)
devices_by_nid[node.nid] = device
else:
# We'll process the child nodes last, to ensure all parent nodes
# have been processed
child_nodes.append(node)
for node in child_nodes:
try:
parent_device = devices_by_nid[node.parent_node.nid]
except KeyError:
_LOGGER.error(
"Node %s has a parent node %s, but no device "
"was created for the parent. Skipping.",
node.nid,
node.parent_nid,
)
else:
device_type = _detect_device_type(node)
subnode_id = int(node.nid[-1], 16)
if device_type in ("opening", "moisture"):
# These sensors use an optional "negative" subnode 2 to snag
# all state changes
if subnode_id == 2:
parent_device.add_negative_node(node)
elif subnode_id == 4:
# Subnode 4 is the heartbeat node, which we will represent
# as a separate binary_sensor
device = ISYBinarySensorHeartbeat(node, parent_device)
parent_device.add_heartbeat_device(device)
devices.append(device)
else:
# We don't yet have any special logic for other sensor types,
# so add the nodes as individual devices
device = ISYBinarySensorDevice(node)
devices.append(device)
for name, status, _ in hass.data[ISY994_PROGRAMS][DOMAIN]:
devices.append(ISYBinarySensorProgram(name, status))
add_entities(devices)
def _detect_device_type(node) -> str:
try:
device_type = node.type
except AttributeError:
# The type attribute didn't exist in the ISY's API response
return None
split_type = device_type.split(".")
for device_class, ids in ISY_DEVICE_TYPES.items():
if "{}.{}".format(split_type[0], split_type[1]) in ids:
return device_class
return None
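# Example (illustrative, hypothetical type string): a node whose type is
# "16.1.65.0" splits into ("16", "1", ...); "16.1" appears under "motion" in
# ISY_DEVICE_TYPES, so _detect_device_type() returns "motion". The trailing
# "65.0" portion is ignored by the lookup.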
def _is_val_unknown(val):
"""Determine if a number value represents UNKNOWN from PyISY."""
return val == -1 * float("inf")
class ISYBinarySensorDevice(ISYDevice, BinarySensorDevice):
"""Representation of an ISY994 binary sensor device.
    Often, a single device is represented by multiple nodes in the ISY,
    allowing for different nuances in how those devices report their on and
    off events. This class turns those multiple nodes into a single Home
    Assistant entity and handles both ways that ISY binary sensors can work.
"""
def __init__(self, node) -> None:
"""Initialize the ISY994 binary sensor device."""
super().__init__(node)
self._negative_node = None
self._heartbeat_device = None
self._device_class_from_type = _detect_device_type(self._node)
if _is_val_unknown(self._node.status._val):
self._computed_state = None
self._status_was_unknown = True
else:
self._computed_state = bool(self._node.status._val)
self._status_was_unknown = False
async def async_added_to_hass(self) -> None:
"""Subscribe to the node and subnode event emitters."""
await super().async_added_to_hass()
self._node.controlEvents.subscribe(self._positive_node_control_handler)
if self._negative_node is not None:
self._negative_node.controlEvents.subscribe(
self._negative_node_control_handler
)
def add_heartbeat_device(self, device) -> None:
"""Register a heartbeat device for this sensor.
The heartbeat node beats on its own, but we can gain a little
reliability by considering any node activity for this sensor
to be a heartbeat as well.
"""
self._heartbeat_device = device
def _heartbeat(self) -> None:
"""Send a heartbeat to our heartbeat device, if we have one."""
if self._heartbeat_device is not None:
self._heartbeat_device.heartbeat()
def add_negative_node(self, child) -> None:
"""Add a negative node to this binary sensor device.
The negative node is a node that can receive the 'off' events
for the sensor, depending on device configuration and type.
"""
self._negative_node = child
# pylint: disable=protected-access
if not _is_val_unknown(self._negative_node.status._val):
# If the negative node has a value, it means the negative node is
# in use for this device. Next we need to check to see if the
# negative and positive nodes disagree on the state (both ON or
# both OFF).
if self._negative_node.status._val == self._node.status._val:
# The states disagree, therefore we cannot determine the state
# of the sensor until we receive our first ON event.
self._computed_state = None
def _negative_node_control_handler(self, event: object) -> None:
"""Handle an "On" control event from the "negative" node."""
if event == "DON":
_LOGGER.debug(
"Sensor %s turning Off via the Negative node sending a DON command",
self.name,
)
self._computed_state = False
self.schedule_update_ha_state()
self._heartbeat()
def _positive_node_control_handler(self, event: object) -> None:
"""Handle On and Off control event coming from the primary node.
Depending on device configuration, sometimes only On events
will come to this node, with the negative node representing Off
events
"""
if event == "DON":
_LOGGER.debug(
"Sensor %s turning On via the Primary node sending a DON command",
self.name,
)
self._computed_state = True
self.schedule_update_ha_state()
self._heartbeat()
if event == "DOF":
_LOGGER.debug(
"Sensor %s turning Off via the Primary node sending a DOF command",
self.name,
)
self._computed_state = False
self.schedule_update_ha_state()
self._heartbeat()
def on_update(self, event: object) -> None:
"""Primary node status updates.
We MOSTLY ignore these updates, as we listen directly to the Control
events on all nodes for this device. However, there is one edge case:
If a leak sensor is unknown, due to a recent reboot of the ISY, the
status will get updated to dry upon the first heartbeat. This status
update is the only way that a leak sensor's status changes without
an accompanying Control event, so we need to watch for it.
"""
if self._status_was_unknown and self._computed_state is None:
self._computed_state = bool(int(self._node.status))
self._status_was_unknown = False
self.schedule_update_ha_state()
self._heartbeat()
@property
def value(self) -> object:
"""Get the current value of the device.
Insteon leak sensors set their primary node to On when the state is
DRY, not WET, so we invert the binary state if the user indicates
that it is a moisture sensor.
"""
if self._computed_state is None:
# Do this first so we don't invert None on moisture sensors
return None
if self.device_class == "moisture":
return not self._computed_state
return self._computed_state
@property
def is_on(self) -> bool:
"""Get whether the ISY994 binary sensor device is on.
Note: This method will return false if the current state is UNKNOWN
"""
return bool(self.value)
@property
def state(self):
"""Return the state of the binary sensor."""
if self._computed_state is None:
return None
return STATE_ON if self.is_on else STATE_OFF
@property
def device_class(self) -> str:
"""Return the class of this device.
This was discovered by parsing the device type code during init
"""
return self._device_class_from_type
class ISYBinarySensorHeartbeat(ISYDevice, BinarySensorDevice):
"""Representation of the battery state of an ISY994 sensor."""
def __init__(self, node, parent_device) -> None:
"""Initialize the ISY994 binary sensor device."""
super().__init__(node)
self._computed_state = None
self._parent_device = parent_device
self._heartbeat_timer = None
async def async_added_to_hass(self) -> None:
"""Subscribe to the node and subnode event emitters."""
await super().async_added_to_hass()
self._node.controlEvents.subscribe(self._heartbeat_node_control_handler)
# Start the timer on bootup, so we can change from UNKNOWN to ON
self._restart_timer()
def _heartbeat_node_control_handler(self, event: object) -> None:
"""Update the heartbeat timestamp when an On event is sent."""
if event == "DON":
self.heartbeat()
def heartbeat(self):
"""Mark the device as online, and restart the 25 hour timer.
This gets called when the heartbeat node beats, but also when the
parent sensor sends any events, as we can trust that to mean the device
is online. This mitigates the risk of false positives due to a single
missed heartbeat event.
"""
self._computed_state = False
self._restart_timer()
self.schedule_update_ha_state()
def _restart_timer(self):
"""Restart the 25 hour timer."""
try:
self._heartbeat_timer()
self._heartbeat_timer = None
except TypeError:
# No heartbeat timer is active
pass
@callback
def timer_elapsed(now) -> None:
"""Heartbeat missed; set state to indicate dead battery."""
self._computed_state = True
self._heartbeat_timer = None
self.schedule_update_ha_state()
point_in_time = dt_util.utcnow() + timedelta(hours=25)
_LOGGER.debug(
"Timer starting. Now: %s Then: %s", dt_util.utcnow(), point_in_time
)
self._heartbeat_timer = async_track_point_in_utc_time(
self.hass, timer_elapsed, point_in_time
)
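        # Note (explanatory comment): async_track_point_in_utc_time returns an
        # "unsubscribe" callable, so calling self._heartbeat_timer() at the top
        # of this method is what cancels a previously armed timer; the
        # TypeError branch simply means no timer was armed yet.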
def on_update(self, event: object) -> None:
"""Ignore node status updates.
We listen directly to the Control events for this device.
"""
pass
@property
def value(self) -> object:
"""Get the current value of this sensor."""
return self._computed_state
@property
def is_on(self) -> bool:
"""Get whether the ISY994 binary sensor device is on.
Note: This method will return false if the current state is UNKNOWN
"""
return bool(self.value)
@property
def state(self):
"""Return the state of the binary sensor."""
if self._computed_state is None:
return None
return STATE_ON if self.is_on else STATE_OFF
@property
def device_class(self) -> str:
"""Get the class of this device."""
return "battery"
@property
def device_state_attributes(self):
"""Get the state attributes for the device."""
attr = super().device_state_attributes
attr["parent_entity_id"] = self._parent_device.entity_id
return attr
class ISYBinarySensorProgram(ISYDevice, BinarySensorDevice):
"""Representation of an ISY994 binary sensor program.
This does not need all of the subnode logic in the device version of binary
sensors.
"""
def __init__(self, name, node) -> None:
"""Initialize the ISY994 binary sensor program."""
super().__init__(node)
self._name = name
@property
def is_on(self) -> bool:
"""Get whether the ISY994 binary sensor device is on."""
return bool(self.value)
|
|
"""
Form generation utilities for App Engine's new ``ndb.Model`` class.
The goal of ``model_form()`` is to provide a clean, explicit and predictable
way to create forms based on ``ndb.Model`` classes. No juggling or black
magic should be necessary to generate a form for models, and to add custom
non-model related fields: ``model_form()`` simply generates a form class
that can be used as it is, or that can be extended directly or even be used
to create other forms using ``model_form()``.
Example usage:
.. code-block:: python
from google.appengine.ext import ndb
from wtforms_appengine.ndb import model_form
# Define an example model and add a record.
class Contact(ndb.Model):
name = ndb.StringProperty(required=True)
city = ndb.StringProperty()
age = ndb.IntegerProperty(required=True)
is_admin = ndb.BooleanProperty(default=False)
    new_entity = Contact(id='test', name='Test Name', age=17)
new_entity.put()
# Generate a form based on the model.
ContactForm = model_form(Contact)
# Get a form populated with entity data.
    entity = Contact.get_by_id('test')
form = ContactForm(obj=entity)
Properties from the model can be excluded from the generated form, or it can
include just a set of properties. For example:
.. code-block:: python
# Generate a form based on the model, excluding 'city' and 'is_admin'.
ContactForm = model_form(Contact, exclude=('city', 'is_admin'))
# or...
# Generate a form based on the model, only including 'name' and 'age'.
ContactForm = model_form(Contact, only=('name', 'age'))
The form can be generated setting field arguments:
.. code-block:: python
ContactForm = model_form(Contact, only=('name', 'age'), field_args={
'name': {
'label': 'Full name',
'description': 'Your name',
},
'age': {
'label': 'Age',
'validators': [validators.NumberRange(min=14, max=99)],
}
})
The class returned by ``model_form()`` can be used as a base class for forms
mixing non-model fields and/or other model forms. For example:
.. code-block:: python
# Generate a form based on the model.
BaseContactForm = model_form(Contact)
# Generate a form based on other model.
ExtraContactForm = model_form(MyOtherModel)
class ContactForm(BaseContactForm):
# Add an extra, non-model related field.
subscribe_to_news = f.BooleanField()
# Add the other model form as a subform.
extra = f.FormField(ExtraContactForm)
The class returned by ``model_form()`` can also extend an existing form
class:
.. code-block:: python
class BaseContactForm(Form):
# Add an extra, non-model related field.
subscribe_to_news = f.BooleanField()
# Generate a form based on the model.
ContactForm = model_form(Contact, base_class=BaseContactForm)
"""
from argeweb.libs.wtforms import Form, validators, fields as f
from argeweb.libs.wtforms.compat import string_types
from .fields import (GeoPtPropertyField,
JsonPropertyField,
KeyPropertyField,
RepeatedKeyPropertyField,
StringListPropertyField,
IntegerListPropertyField)
def get_TextField(kwargs):
"""
Returns a ``TextField``, applying the ``ndb.StringProperty`` length limit
of 500 bytes.
"""
kwargs['validators'].append(validators.length(max=500))
return f.StringField(**kwargs)
def get_IntegerField(kwargs):
"""
Returns an ``IntegerField``, applying the ``ndb.IntegerProperty`` range
limits.
"""
v = validators.NumberRange(min=-0x8000000000000000, max=0x7fffffffffffffff)
kwargs['validators'].append(v)
return f.IntegerField(**kwargs)
class ModelConverterBase(object):
def __init__(self, converters=None):
"""
Constructs the converter, setting the converter callables.
:param converters:
A dictionary of converter callables for each property type. The
callable must accept the arguments (model, prop, kwargs).
"""
self.converters = {}
for name in dir(self):
if not name.startswith('convert_'):
continue
self.converters[name[8:]] = getattr(self, name)
def convert(self, model, prop, field_args):
"""
Returns a form field for a single model property.
:param model:
The ``ndb.Model`` class that contains the property.
:param prop:
The model property: a ``ndb.Property`` instance.
:param field_args:
Optional keyword arguments to construct the field.
"""
prop_type_name = type(prop).__name__
# check for generic property
if prop_type_name == "GenericProperty":
# try to get the concrete type from the field args, if provided
generic_type = field_args.get("type") if field_args else None
if generic_type:
prop_type_name = generic_type
# if no type is found, the generic property uses string set in
# convert_GenericProperty
kwargs = {
'label': (prop._verbose_name or
prop._code_name.replace('_', ' ').title()),
'default': prop._default,
'validators': [],
}
if field_args:
kwargs.update(field_args)
if prop._required and prop_type_name not in self.NO_AUTO_REQUIRED:
kwargs['validators'].append(validators.required())
try:
kwargs["label"] = model.Meta.label_name[prop._code_name]
except Exception:
pass
choices = kwargs.get('choices', None) or prop._choices
if choices:
# Use choices in a select field.
choices_list = []
for item in choices:
if item in prop._choices_text:
choices_list.append((item, prop._choices_text[item]))
else:
choices_list.append((item, item))
# kwargs['choices'] = [(v, v) for v in choices]
kwargs['choices'] = choices_list
if prop._repeated:
return f.SelectMultipleField(**kwargs)
else:
return f.SelectField(**kwargs)
else:
converter = self.converters.get(prop_type_name, None)
if converter is not None:
return converter(model, prop, kwargs)
else:
return self.fallback_converter(model, prop, kwargs)
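# Note: additional property types can be supported by subclassing a converter
# and defining new ``convert_<PropertyType>`` methods; ``__init__`` discovers
# them by name. Illustrative sketch only (``RatingProperty`` is hypothetical):
#
#     class MyConverter(ModelConverter):
#         def convert_RatingProperty(self, model, prop, kwargs):
#             kwargs['validators'].append(validators.NumberRange(min=0, max=5))
#             return f.IntegerField(**kwargs)
#
#     ContactForm = model_form(Contact, converter=MyConverter())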
class ModelConverter(ModelConverterBase):
"""
Converts properties from a ``ndb.Model`` class to form fields.
Default conversions between properties and fields:
+====================+====================+================+==================+
| Property subclass  | Field subclass     | datatype       | notes            |
+====================+====================+================+==================+
| StringProperty     | TextField          | unicode        | TextArea if      |
|                    |                    |                | multiline;       |
|                    |                    |                | repeated support |
+--------------------+--------------------+----------------+------------------+
| BooleanProperty    | BooleanField       | bool           |                  |
+--------------------+--------------------+----------------+------------------+
| IntegerProperty    | IntegerField       | int or long    | repeated support |
+--------------------+--------------------+----------------+------------------+
| FloatProperty      | FloatField         | float          |                  |
+--------------------+--------------------+----------------+------------------+
| DateTimeProperty   | DateTimeField      | datetime       | skipped if       |
|                    |                    |                | auto_now[_add]   |
+--------------------+--------------------+----------------+------------------+
| DateProperty       | DateField          | date           | skipped if       |
|                    |                    |                | auto_now[_add]   |
+--------------------+--------------------+----------------+------------------+
| TimeProperty       | DateTimeField      | time           | skipped if       |
|                    |                    |                | auto_now[_add]   |
+--------------------+--------------------+----------------+------------------+
| TextProperty       | TextAreaField      | unicode        |                  |
+--------------------+--------------------+----------------+------------------+
| GeoPtProperty      | GeoPtPropertyField | db.GeoPt       |                  |
+--------------------+--------------------+----------------+------------------+
| KeyProperty        | KeyPropertyField   | ndb.Key        |                  |
+--------------------+--------------------+----------------+------------------+
| BlobKeyProperty    | FileField          | ndb.BlobKey    |                  |
+--------------------+--------------------+----------------+------------------+
| UserProperty       | None               | users.User     | always skipped   |
+--------------------+--------------------+----------------+------------------+
| StructuredProperty | None               | ndb.Model      | always skipped   |
+--------------------+--------------------+----------------+------------------+
| LocalStructuredPro | None               | ndb.Model      | always skipped   |
+--------------------+--------------------+----------------+------------------+
| JsonProperty       | JsonPropertyField  | data structure |                  |
+--------------------+--------------------+----------------+------------------+
| PickleProperty     | None               | bytedata       | always skipped   |
+--------------------+--------------------+----------------+------------------+
| GenericProperty    | TextField          | generic        |                  |
+--------------------+--------------------+----------------+------------------+
| ComputedProperty   | None               |                | always skipped   |
+--------------------+--------------------+----------------+------------------+
| _ClassKeyProperty  | None               |                | always skipped   |
+--------------------+--------------------+----------------+------------------+
""" # noqa
# Don't automatically add a required validator for these properties
NO_AUTO_REQUIRED = frozenset([
'ListProperty',
'StringListProperty',
'BooleanProperty'])
def convert_StringProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.StringProperty``."""
if prop._repeated:
return StringListPropertyField(**kwargs)
if prop._required:
kwargs['validators'].append(validators.InputRequired())
# kwargs['validators'].append(validators.length(max=500))
return get_TextField(kwargs)
def convert_BooleanProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.BooleanProperty``."""
return f.BooleanField(**kwargs)
def convert_IntegerProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.IntegerProperty``."""
if prop._repeated:
return IntegerListPropertyField(**kwargs)
return get_IntegerField(kwargs)
def convert_FloatProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.FloatProperty``."""
if prop._code_name == 'sort':
return None
return f.FloatField(**kwargs)
def convert_DateTimeProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.DateTimeProperty``."""
if prop._auto_now or prop._auto_now_add:
return None
kwargs.setdefault('format', '%Y-%m-%d %H:%M:%S')
return f.DateTimeField(**kwargs)
def convert_DateProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.DateProperty``."""
if prop._auto_now or prop._auto_now_add:
return None
kwargs.setdefault('format', '%Y-%m-%d')
return f.DateField(**kwargs)
def convert_TimeProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.TimeProperty``."""
if prop._auto_now or prop._auto_now_add:
return None
kwargs.setdefault('format', '%H:%M:%S')
return f.DateTimeField(**kwargs)
def convert_UserProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.UserProperty``."""
return None
def convert_StructuredProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.StructuredProperty``."""
return None
def convert_LocalStructuredProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.LocalStructuredProperty``."""
return None
def convert_JsonProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.JsonProperty``."""
return JsonPropertyField(**kwargs)
def convert_PickleProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.PickleProperty``."""
return None
def convert_GenericProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.GenericProperty``."""
kwargs['validators'].append(validators.length(max=500))
return get_TextField(kwargs)
def convert_BlobKeyProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.BlobKeyProperty``."""
return f.FileField(**kwargs)
def convert_TextProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.TextProperty``."""
return f.TextAreaField(**kwargs)
def convert_ComputedProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ComputedProperty``."""
return None
def convert_GeoPtProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.GeoPtProperty``."""
return GeoPtPropertyField(**kwargs)
def convert_KeyProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.KeyProperty``."""
if ('reference_class' not in kwargs or
'query' not in kwargs):
try:
reference_class = prop._kind
except AttributeError:
reference_class = prop._reference_class
if isinstance(reference_class, string_types):
# This assumes that the referenced module is already imported.
try:
reference_class = model._kind_map[reference_class]
except KeyError:
# If it's not imported, just bail, as we can't
# edit this field safely.
return None
kwargs['reference_class'] = reference_class
kwargs.setdefault('allow_blank', not prop._required)
if prop._repeated:
return RepeatedKeyPropertyField(**kwargs)
else:
return KeyPropertyField(**kwargs)
def convert__ClassKeyProperty(self, model, prop, kwargs):
"""Returns a form field for a ``ndb.ComputedProperty``."""
return None
def model_fields(model, only=None, exclude=None, field_args=None,
converter=None):
"""
Extracts and returns a dictionary of form fields for a given
``ndb.Model`` class.
:param model:
The ``ndb.Model`` class to extract fields from.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to a keyword arguments
used to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
"""
converter = converter or ModelConverter()
field_args = field_args or {}
# Get the field names we want to include or exclude, starting with the
# full list of model properties.
props = model._properties
field_names = [x[0] for x in
sorted(props.items(), key=lambda x: x[1]._creation_counter)]
if only:
field_names = list(f for f in only if f in field_names)
elif exclude:
field_names = list(f for f in field_names if f not in exclude)
# Create all fields.
field_dict = {}
for name in field_names:
field = converter.convert(model, props[name], field_args.get(name))
if field is not None:
field_dict[name] = field
return field_dict
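# Illustrative sketch of what ``model_fields`` returns for the ``Contact``
# model from the module docstring: a name -> unbound field mapping that
# ``model_form`` below attaches to a dynamically created Form subclass.
#
#     fields = model_fields(Contact, only=('name', 'age'))
#     # {'name': <UnboundField(StringField, ...)>,
#     #  'age': <UnboundField(IntegerField, ...)>}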
def model_form(model, base_class=Form, only=None, exclude=None,
field_args=None, converter=None):
"""
Creates and returns a dynamic ``wtforms.Form`` class for a given
``ndb.Model`` class. The form class can be used as it is or serve as a base
for extended form classes, which can then mix non-model related fields,
subforms with other model forms, among other possibilities.
:param model:
The ``ndb.Model`` class to generate a form for.
:param base_class:
Base form class to extend from. Must be a ``wtforms.Form`` subclass.
:param only:
An optional iterable with the property names that should be included in
the form. Only these properties will have fields.
:param exclude:
An optional iterable with the property names that should be excluded
from the form. All other properties will have fields.
:param field_args:
An optional dictionary of field names mapping to keyword arguments
used to construct each field object.
:param converter:
A converter to generate the fields based on the model properties. If
not set, ``ModelConverter`` is used.
"""
# Extract the fields from the model.
field_dict = model_fields(model, only, exclude, field_args, converter)
# Return a dynamically created form class, extending from base_class and
# including the created fields as properties.
return type(model._get_kind() + 'Form', (base_class,), field_dict)
|
|
#!/usr/bin/python
#coding=utf-8
#Python 3.6
import os
import sys
import hashlib
import zlib
import re
import tempfile
import struct
import shutil
import time,datetime
from config import *
from subprocess import *
from multiprocessing import Lock
from distutils.spawn import find_executable
def g_init(param):
global args
args = param
if args.path:
args.path = args.path.replace('\\','/')
if args.input:
args.input = args.input.replace('\\','/')
if args.output:
args.output = args.output.replace('\\','/')
pass
def get_args():
return args
def command_available(command):
return bool(find_executable(command))
def is_pvrtool_valid():
return command_available("PVRTexToolCLI")
def is_texturepacker_valid():
command = r"TexturePacker"
is_valid = command_available(command)
if is_valid:
p = Popen(command,stdin=PIPE,stdout=PIPE, shell=True,stderr=PIPE)
p.communicate(input=b'agree')
# p = Popen("TexturePacker --version",stdin=PIPE,stdout=PIPE, shell=True,stderr=PIPE)
# out, err = p.communicate()
# re.search('TexturePacker ')
pass
return is_valid
def is_ignore_path(relpath, ignores):
if relpath and ignores:
for path in ignores:
if relpath.startswith(path):
return True
return False
def is_ignore_path2(relpath, ignores):
if relpath and ignores:
for path in ignores:
if relpath.endswith(path):
return True
return False
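# Note: is_ignore_path treats the entries in ``ignores`` as relative-path
# prefixes (directories), while is_ignore_path2 treats them as suffixes, e.g.
# is_ignore_path2("res/ui/btn.png", [".png"]) is True.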
def copytree(src, dst, ignores=None, symlinks=False):
names = os.listdir(src)
if not os.path.isdir(dst):
os.makedirs(dst)
errors = []
for name in names:
srcname = os.path.join(src, name).replace("\\", "/")
if is_ignore_path2(srcname, ignores):
continue
dstname = os.path.join(dst, name).replace("\\", "/")
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, ignores, symlinks)
else:
if os.path.isdir(dstname):
os.rmdir(dstname)
elif os.path.isfile(dstname):
os.remove(dstname)
shutil.copy2(srcname, dstname)
except (IOError, os.error) as why:
errors.append((srcname, dstname, str(why)))
except OSError as err:
errors.extend(err.args[0])
if errors:
raise Exception(errors)
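# Illustrative use of copytree (paths are made up): copy an asset tree while
# skipping Photoshop sources, with ``ignores`` entries matched as path
# suffixes:
#
#     copytree("art/src", "art/out", ignores=[".psd"])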
loglock = Lock()
def log(s):
if loglock.acquire():
print(s)
loglock.release()
pass
def timestamp_to_time(timestamp):
time_struct = time.localtime(timestamp)
return time.strftime('%Y-%m-%d %H:%M:%S', time_struct)
pass
def get_file_relpath(path, basepath):
return path[len(basepath)+1:].replace("\\", "/")
def get_image_ext(image_file):
for ext in pvr_file_ext:
if image_file.endswith(ext):
return ext
return os.path.splitext(image_file)[1]
def get_file_modifytime(image_file):
# image_file = unicode(image_file, 'utf8')
t = os.path.getmtime(image_file)
return t
def get_file_md5(filename):
if not os.path.isfile(filename):
return
myhash = hashlib.md5()
f = open(filename,'rb')
while True:
# Read in chunks so large files do not have to fit in memory at once.
b = f.read(65536)
if not b:
break
myhash.update(b)
f.close()
return myhash.hexdigest()
def get_all_dirfiles(path, extentions=None, ignores=None):
tempfiles = []
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for name in files:
fileName, fileSuffix = os.path.splitext(name)
if (extentions == None or (fileSuffix in extentions)) \
and (ignores == None or not (fileSuffix in ignores)):
fullPath = path + root[len(path):]
fullName = fullPath + '/' + name
if not os.path.exists(fullName):
continue
tempfiles.append(fullName)
else:
continue
break
elif os.path.exists(path):
fileName, fileSuffix = os.path.splitext(path)
if extentions == None or (fileSuffix in extentions):
tempfiles.append(path)
pass
return tempfiles
def get_all_files(path, extentions=None, ignores=None):
tempfiles = []
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for name in files:
fileName, fileSuffix = os.path.splitext(name)
fullName = os.path.join(root, name)
fullName = fullName.replace('\\','/')
relpath = get_file_relpath(fullName, path)
if (extentions == None or (len(fileSuffix) > 0 and fileSuffix in extentions)) \
and (ignores == None or not is_ignore_path(relpath, ignores)) \
and (ignores == None or not (fileSuffix in ignores)):
if not os.path.exists(fullName):
continue
tempfiles.append(fullName)
else:
continue
break
elif os.path.exists(path):
fileName, fileSuffix = os.path.splitext(path)
if extentions == None or (fileSuffix in extentions):
tempfiles.append(path)
pass
return tempfiles
def convert_pvr_to_png(image_file, image_ext):
print("convert_pvr_to_png > ", image_file)
pvr_path = image_file.replace(image_ext, "")
if os.system("TexturePacker {pvr_pathname} --sheet {pvr_path}.png --data {pvr_path}_temp.plist --texture-format png --border-padding 0 --shape-padding 0 --disable-rotation --allow-free-size --no-trim".format(pvr_pathname = image_file, pvr_path = pvr_path)) == 0:
os.remove(pvr_path + "_temp.plist")
return True
return False
def serialize_luafile(data):
return "return " + serialize_lua(data)
def serialize_lua(data):
lua = ""
vtype = type(data)
if vtype == int or vtype == float:
lua = lua + str(data)
pass
elif vtype == bool:
if data:
lua = lua + "true"
else:
lua = lua + "false"
pass
elif vtype == str:
lua = lua + '"' + data + '"'
pass
elif vtype == list:
lua = lua + "{"
temp = []
for value in data:
temp.append(serialize_lua(value))
pass
lua = lua + ",\n".join(temp)
lua = lua + "}"
pass
elif vtype == dict:
lua = lua + "{"
temp = []
for key, value in sorted(data.items(), key=lambda d:dict_sorts.index(d[0]) if d[0] in dict_sorts else 999):
temp.append("[" + serialize_lua(key) + "]=" + serialize_lua(value))
lua = lua + ",\n".join(temp)
lua = lua + "}"
pass
else:
return ""
return lua
pass
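# Illustrative example of the Lua serialization (assumes neither key appears
# in ``dict_sorts`` from config.py, so insertion order is preserved):
#
#     serialize_luafile({"name": "hero", "ids": [1, 2]})
#     # -> 'return {["name"]="hero",\n["ids"]={1,\n2}}'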
|
|
import pysal
from pysal.common import *
import pysal.weights
import numpy as np
from scipy import sparse, float32
import scipy.spatial
import os
import operator
import scipy
__all__ = ['lat2W', 'block_weights', 'comb', 'order', 'higher_order',
'shimbel', 'remap_ids', 'full2W', 'full', 'WSP2W',
'insert_diagonal', 'get_ids', 'get_points_array_from_shapefile',
'min_threshold_distance', 'lat2SW', 'w_local_cluster',
'higher_order_sp', 'hexLat2W', 'regime_weights']
def hexLat2W(nrows=5, ncols=5):
"""
Create a W object for a hexagonal lattice.
Parameters
----------
nrows : int
number of rows
ncols : int
number of columns
Returns
-------
w : W
instance of spatial weights class W
Notes
-----
Observations are row ordered: first k observations are in row 0, next k in row 1, and so on.
Construction is based on shifting every other column of a regular lattice
down 1/2 of a cell.
Examples
--------
>>> import pysal as ps
>>> w = ps.lat2W()
>>> w.neighbors[1]
[0, 6, 2]
>>> w.neighbors[21]
[16, 20, 22]
>>> wh = ps.hexLat2W()
>>> wh.neighbors[1]
[0, 6, 2, 5, 7]
>>> wh.neighbors[21]
[16, 20, 22]
>>>
"""
if nrows == 1 or ncols == 1:
print "Hexagon lattice requires at least 2 rows and columns"
print "Returning a linear contiguity structure"
return lat2W(nrows, ncols)
n = nrows * ncols
rid = [i // ncols for i in xrange(n)]
cid = [i % ncols for i in xrange(n)]
r1 = nrows - 1
c1 = ncols - 1
w = lat2W(nrows, ncols).neighbors
for i in xrange(n):
odd = cid[i] % 2
if odd:
if rid[i] < r1: # odd col index above last row
# new sw neighbor
if cid[i] > 0:
j = i + ncols - 1
w[i] = w.get(i, []) + [j]
# new se neighbor
if cid[i] < c1:
j = i + ncols + 1
w[i] = w.get(i, []) + [j]
else: # even col
# nw
jnw = [i - ncols - 1]
# ne
jne = [i - ncols + 1]
if rid[i] > 0:
if cid[i] == 0:
w[i] = w.get(i, []) + jne
elif cid[i] == c1:
w[i] = w.get(i, []) + jnw
else:
w[i] = w.get(i, []) + jne
w[i] = w.get(i, []) + jnw
return pysal.weights.W(w)
def lat2W(nrows=5, ncols=5, rook=True, id_type='int'):
"""
Create a W object for a regular lattice.
Parameters
----------
nrows : int
number of rows
ncols : int
number of columns
rook : boolean
type of contiguity. Default is rook. For queen, rook =False
id_type : string
string defining the type of IDs to use in the final W object;
options are 'int' (0, 1, 2 ...; default), 'float' (0.0,
1.0, 2.0, ...) and 'string' ('id0', 'id1', 'id2', ...)
Returns
-------
w : W
instance of spatial weights class W
Notes
-----
Observations are row ordered: first k observations are in row 0, next k in row 1, and so on.
Examples
--------
>>> from pysal import lat2W
>>> w9 = lat2W(3,3)
>>> "%.3f"%w9.pct_nonzero
'29.630'
>>> w9[0]
{1: 1.0, 3: 1.0}
>>> w9[3]
{0: 1.0, 4: 1.0, 6: 1.0}
>>>
"""
n = nrows * ncols
r1 = nrows - 1
c1 = ncols - 1
rid = [i // ncols for i in xrange(n)] #must be floor!
cid = [i % ncols for i in xrange(n)]
w = {}
r = below = 0
for i in xrange(n - 1):
if rid[i] < r1:
below = rid[i] + 1
r = below * ncols + cid[i]
w[i] = w.get(i, []) + [r]
w[r] = w.get(r, []) + [i]
if cid[i] < c1:
right = cid[i] + 1
c = rid[i] * ncols + right
w[i] = w.get(i, []) + [c]
w[c] = w.get(c, []) + [i]
if not rook:
# southeast bishop
if cid[i] < c1 and rid[i] < r1:
r = (rid[i] + 1) * ncols + 1 + cid[i]
w[i] = w.get(i, []) + [r]
w[r] = w.get(r, []) + [i]
# southwest bishop
if cid[i] > 0 and rid[i] < r1:
r = (rid[i] + 1) * ncols - 1 + cid[i]
w[i] = w.get(i, []) + [r]
w[r] = w.get(r, []) + [i]
neighbors = {}
weights = {}
for key in w:
weights[key] = [1.] * len(w[key])
ids = range(n)
if id_type == 'string':
ids = ['id' + str(i) for i in ids]
elif id_type == 'float':
ids = [i * 1. for i in ids]
if id_type == 'string' or id_type == 'float':
id_dict = dict(zip(range(n), ids))
alt_w = {}
alt_weights = {}
for i in w:
values = [id_dict[j] for j in w[i]]
key = id_dict[i]
alt_w[key] = values
alt_weights[key] = weights[i]
w = alt_w
weights = alt_weights
return pysal.weights.W(w, weights, ids=ids, id_order=ids[:])
def regime_weights(regimes):
"""
Construct spatial weights for regime neighbors.
Block contiguity structures are relevant when defining neighbor relations
based on membership in a regime. For example, all counties belonging to
the same state could be defined as neighbors, in an analysis of all
counties in the US.
Parameters
----------
regimes : array, list
ids of which regime an observation belongs to
Returns
-------
W : spatial weights instance
Examples
--------
>>> from pysal import regime_weights
>>> import numpy as np
>>> regimes = np.ones(25)
>>> regimes[range(10,20)] = 2
>>> regimes[range(21,25)] = 3
>>> regimes
array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 2., 2.,
2., 2., 2., 2., 2., 2., 2., 1., 3., 3., 3., 3.])
>>> w = regime_weights(regimes)
PendingDeprecationWarning: regime_weights will be renamed to block_weights in PySAL 2.0
>>> w.weights[0]
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
>>> w.neighbors[0]
[1, 2, 3, 4, 5, 6, 7, 8, 9, 20]
>>> regimes = ['n','n','s','s','e','e','w','w','e']
>>> n = len(regimes)
>>> w = regime_weights(regimes)
PendingDeprecationWarning: regime_weights will be renamed to block_weights in PySAL 2.0
>>> w.neighbors
{0: [1], 1: [0], 2: [3], 3: [2], 4: [5, 8], 5: [4, 8], 6: [7], 7: [6], 8: [4, 5]}
Notes
-----
regime_weights will be deprecated in PySAL 2.0 and renamed to block_weights.
"""
msg = "PendingDepricationWarning: regime_weights will be "
msg += "renamed to block_weights in PySAL 2.0"
print msg
return block_weights(regimes)
def block_weights(regimes, ids=None, sparse=False):
"""
Construct spatial weights for regime neighbors.
Block contiguity structures are relevant when defining neighbor relations
based on membership in a regime. For example, all counties belonging to
the same state could be defined as neighbors, in an analysis of all
counties in the US.
Parameters
----------
regimes : list, array
ids of which regime an observation belongs to
ids : list, array
Ordered sequence of IDs for the observations
sparse : boolean
If True return WSP instance
If False return W instance
Returns
-------
W : spatial weights instance
Examples
--------
>>> from pysal import block_weights
>>> import numpy as np
>>> regimes = np.ones(25)
>>> regimes[range(10,20)] = 2
>>> regimes[range(21,25)] = 3
>>> regimes
array([ 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 2., 2.,
2., 2., 2., 2., 2., 2., 2., 1., 3., 3., 3., 3.])
>>> w = block_weights(regimes)
>>> w.weights[0]
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
>>> w.neighbors[0]
[1, 2, 3, 4, 5, 6, 7, 8, 9, 20]
>>> regimes = ['n','n','s','s','e','e','w','w','e']
>>> n = len(regimes)
>>> w = block_weights(regimes)
>>> w.neighbors
{0: [1], 1: [0], 2: [3], 3: [2], 4: [5, 8], 5: [4, 8], 6: [7], 7: [6], 8: [4, 5]}
"""
rids = np.unique(regimes)
neighbors = {}
NPNZ = np.nonzero
regimes = np.array(regimes)
for rid in rids:
members = NPNZ(regimes == rid)[0]
for member in members:
neighbors[member] = members[NPNZ(members != member)[0]].tolist()
w = pysal.weights.W(neighbors)
if ids is not None:
w.remap_ids(ids)
if sparse:
w = pysal.weights.WSP(w.sparse, id_order=ids)
return w
def comb(items, n=None):
"""
Combinations of size n taken from items
Parameters
----------
items : list
items to be drawn from
n : integer
size of combinations to take from items
Returns
-------
implicit : generator
combinations of size n taken from items
Examples
--------
>>> x = range(4)
>>> for c in comb(x, 2):
... print c
...
[0, 1]
[0, 2]
[0, 3]
[1, 2]
[1, 3]
[2, 3]
"""
if n is None:
n = len(items)
for i in range(len(items)):
v = items[i:i + 1]
if n == 1:
yield v
else:
rest = items[i + 1:]
for c in comb(rest, n - 1):
yield v + c
def order(w, kmax=3):
"""
Determine the non-redundant order of contiguity up to a specific
order.
Parameters
----------
w : W
spatial weights object
kmax : int
maximum order of contiguity
Returns
-------
info : dictionary
observation id is the key, value is a list of contiguity
orders with a negative 1 in the ith position
Notes
-----
Implements the algorithm in Anselin and Smirnov (1996) [Anselin1996b]_
Examples
--------
>>> from pysal import rook_from_shapefile as rfs
>>> w = rfs(pysal.examples.get_path('10740.shp'))
WARNING: there is one disconnected observation (no neighbors)
Island id: [163]
>>> w3 = order(w, kmax = 3)
>>> w3[1][0:5]
[1, -1, 1, 2, 1]
"""
#ids = w.neighbors.keys()
ids = w.id_order
info = {}
for id_ in ids:
s = [0] * w.n
s[ids.index(id_)] = -1
for j in w.neighbors[id_]:
s[ids.index(j)] = 1
k = 1
while k < kmax:
knext = k + 1
if s.count(k):
# get neighbors of order k
js = [ids[j] for j, val in enumerate(s) if val == k]
# get first order neighbors for order k neighbors
for j in js:
next_neighbors = w.neighbors[j]
for neighbor in next_neighbors:
nid = ids.index(neighbor)
if s[nid] == 0:
s[nid] = knext
k = knext
info[id_] = s
return info
def higher_order(w, k=2):
"""
Contiguity weights object of order k.
Parameters
----------
w : W
spatial weights object
k : int
order of contiguity
Returns
-------
implicit : W
spatial weights object
Notes
-----
Proper higher order neighbors are returned such that i and j are k-order
neighbors iff the shortest path from i-j is of length k.
Examples
--------
>>> from pysal import lat2W, higher_order
>>> w10 = lat2W(10, 10)
>>> w10_2 = higher_order(w10, 2)
>>> w10_2[0]
{2: 1.0, 11: 1.0, 20: 1.0}
>>> w5 = lat2W()
>>> w5[0]
{1: 1.0, 5: 1.0}
>>> w5[1]
{0: 1.0, 2: 1.0, 6: 1.0}
>>> w5_2 = higher_order(w5,2)
>>> w5_2[0]
{10: 1.0, 2: 1.0, 6: 1.0}
"""
return higher_order_sp(w, k)
def higher_order_sp(w, k=2, shortest_path=True, diagonal=False):
"""
Contiguity weights for either a sparse W or pysal.weights.W for order k.
Parameters
----------
w : W
sparse_matrix, spatial weights object or scipy.sparse.csr.csr_instance
k : int
Order of contiguity
shortest_path : boolean
True: i,j are k-order neighbors if the
shortest path from i to j has length k
False: i,j are k-order neighbors if there
is a path from i to j of length k
diagonal : boolean
True: keep k-order (i,j) joins when i==j
False: remove k-order (i,j) joins when i==j
Returns
-------
wk : W
WSP, type matches type of w argument
Notes
-----
Lower order contiguities are removed.
Examples
--------
>>> import pysal
>>> w25 = pysal.lat2W(5,5)
>>> w25.n
25
>>> w25[0]
{1: 1.0, 5: 1.0}
>>> w25_2 = pysal.weights.util.higher_order_sp(w25, 2)
>>> w25_2[0]
{10: 1.0, 2: 1.0, 6: 1.0}
>>> w25_2 = pysal.weights.util.higher_order_sp(w25, 2, diagonal=True)
>>> w25_2[0]
{0: 1.0, 10: 1.0, 2: 1.0, 6: 1.0}
>>> w25_3 = pysal.weights.util.higher_order_sp(w25, 3)
>>> w25_3[0]
{15: 1.0, 3: 1.0, 11: 1.0, 7: 1.0}
>>> w25_3 = pysal.weights.util.higher_order_sp(w25, 3, shortest_path=False)
>>> w25_3[0]
{1: 1.0, 3: 1.0, 5: 1.0, 7: 1.0, 11: 1.0, 15: 1.0}
"""
tw = type(w)
id_order = None
if tw == pysal.weights.weights.W:
id_order = w.id_order
w = w.sparse
elif tw != scipy.sparse.csr.csr_matrix:
print "Unsupported sparse argument."
return None
wk = w**k
rk, ck = wk.nonzero()
sk = set(zip(rk, ck))
if shortest_path:
for j in range(1, k):
wj = w**j
rj, cj = wj.nonzero()
sj = set(zip(rj, cj))
sk.difference_update(sj)
if not diagonal:
sk = set([(i,j) for i,j in sk if i!=j])
if id_order:
d = dict([(i,[]) for i in id_order])
for pair in sk:
k, v = pair
k = id_order[k]
v = id_order[v]
d[k].append(v)
return pysal.W(neighbors=d)
else:
d = {}
for pair in sk:
k, v = pair
if k in d:
d[k].append(v)
else:
d[k] = [v]
return pysal.weights.WSP(pysal.W(neighbors=d).sparse)
def w_local_cluster(w):
"""
Local clustering coefficients for each unit as a node in a graph. [ws]_
Parameters
----------
w : W
spatial weights object
Returns
-------
c : array
(w.n,1)
local clustering coefficients
Notes
-----
The local clustering coefficient :math:`c_i` quantifies how close the
neighbors of observation :math:`i` are to being a clique:
.. math::
c_i = | \{w_{j,k}\} |/ (k_i(k_i - 1)): j,k \in N_i
where :math:`N_i` is the set of neighbors to :math:`i`, :math:`k_i =
|N_i|` and :math:`\{w_{j,k}\}` is the set of non-zero elements of the
weights between pairs in :math:`N_i`. [Watts1998]_
Examples
--------
>>> w = pysal.lat2W(3,3, rook=False)
>>> w_local_cluster(w)
array([[ 1. ],
[ 0.6 ],
[ 1. ],
[ 0.6 ],
[ 0.42857143],
[ 0.6 ],
[ 1. ],
[ 0.6 ],
[ 1. ]])
"""
c = np.zeros((w.n, 1), float)
w.transformation = 'b'
for i, id in enumerate(w.id_order):
ki = max(w.cardinalities[id], 1) # deal with islands
Ni = w.neighbors[id]
wi = pysal.w_subset(w, Ni).full()[0]
c[i] = wi.sum() / (ki * (ki - 1))
return c
def shimbel(w):
"""
Find the Shimbel matrix for first order contiguity matrix.
Parameters
----------
w : W
spatial weights object
Returns
-------
info : dict
keyed by observation id; each value is a list storing the
shortest order of contiguity between that observation and each
of the other observations.
Examples
--------
>>> from pysal import lat2W, shimbel
>>> w5 = lat2W()
>>> w5_shimbel = shimbel(w5)
>>> w5_shimbel[0][24]
8
>>> w5_shimbel[0][0:4]
[-1, 1, 2, 3]
>>>
"""
info = {}
ids = w.id_order
for id in ids:
s = [0] * w.n
s[ids.index(id)] = -1
for j in w.neighbors[id]:
s[ids.index(j)] = 1
k = 1
flag = s.count(0)
while flag:
p = -1
knext = k + 1
for j in range(s.count(k)):
neighbor = s.index(k, p + 1)
p = neighbor
next_neighbors = w.neighbors[ids[p]]
for neighbor in next_neighbors:
nid = ids.index(neighbor)
if s[nid] == 0:
s[nid] = knext
k = knext
flag = s.count(0)
info[id] = s
return info
def full(w):
"""
Generate a full numpy array.
Parameters
----------
w : W
spatial weights object
Returns
-------
(fullw, keys) : tuple
first element being the full numpy array and second element
keys being the ids associated with each row in the array.
Examples
--------
>>> from pysal import W, full
>>> neighbors = {'first':['second'],'second':['first','third'],'third':['second']}
>>> weights = {'first':[1],'second':[1,1],'third':[1]}
>>> w = W(neighbors, weights)
>>> wf, ids = full(w)
>>> wf
array([[ 0., 1., 0.],
[ 1., 0., 1.],
[ 0., 1., 0.]])
>>> ids
['first', 'second', 'third']
"""
wfull = np.zeros([w.n, w.n], dtype=float)
keys = w.neighbors.keys()
if w.id_order:
keys = w.id_order
for i, key in enumerate(keys):
n_i = w.neighbors[key]
w_i = w.weights[key]
for j, wij in zip(n_i, w_i):
c = keys.index(j)
wfull[i, c] = wij
return (wfull, keys)
def full2W(m, ids=None):
'''
Create a PySAL W object from a full array.
Parameters
----------
m : array
nxn array with the full weights matrix
ids : list
User ids assumed to be aligned with m
Returns
-------
w : W
PySAL weights object
Examples
--------
>>> import pysal as ps
>>> import numpy as np
Create an array of zeros
>>> a = np.zeros((4, 4))
For loop to fill it with random numbers
>>> for i in range(len(a)):
... for j in range(len(a[i])):
... if i!=j:
... a[i, j] = np.random.random(1)
Create W object
>>> w = ps.weights.util.full2W(a)
>>> w.full()[0] == a
array([[ True, True, True, True],
[ True, True, True, True],
[ True, True, True, True],
[ True, True, True, True]], dtype=bool)
Create list of user ids
>>> ids = ['myID0', 'myID1', 'myID2', 'myID3']
>>> w = ps.weights.util.full2W(a, ids=ids)
>>> w.full()[0] == a
array([[ True, True, True, True],
[ True, True, True, True],
[ True, True, True, True],
[ True, True, True, True]], dtype=bool)
'''
if m.shape[0] != m.shape[1]:
raise ValueError('Your array is not square')
neighbors, weights = {}, {}
for i in xrange(m.shape[0]):
# for i, row in enumerate(m):
row = m[i]
if ids:
i = ids[i]
ngh = list(row.nonzero()[0])
weights[i] = list(row[ngh])
ngh = list(ngh)
if ids:
ngh = [ids[j] for j in ngh]
neighbors[i] = ngh
return pysal.W(neighbors, weights, id_order=ids)
def WSP2W(wsp, silent_island_warning=False):
"""
Convert a pysal WSP object (thin weights matrix) to a pysal W object.
Parameters
----------
wsp : WSP
PySAL sparse weights object
silent_island_warning : boolean
Switch to turn off (default on) print statements
for every observation with islands
Returns
-------
w : W
PySAL weights object
Examples
--------
>>> import pysal
Build a 10x10 scipy.sparse matrix for a rectangular 2x5 region of cells
(rook contiguity), then construct a PySAL sparse weights object (wsp).
>>> sp = pysal.weights.lat2SW(2, 5)
>>> wsp = pysal.weights.WSP(sp)
>>> wsp.n
10
>>> print wsp.sparse[0].todense()
[[0 1 0 0 0 1 0 0 0 0]]
Convert this sparse weights object to a standard PySAL weights object.
>>> w = pysal.weights.WSP2W(wsp)
>>> w.n
10
>>> print w.full()[0][0]
[ 0. 1. 0. 0. 0. 1. 0. 0. 0. 0.]
"""
wsp.sparse
indices = wsp.sparse.indices
data = wsp.sparse.data
indptr = wsp.sparse.indptr
id_order = wsp.id_order
if id_order:
# replace indices with user IDs
indices = [id_order[i] for i in indices]
else:
id_order = range(wsp.n)
neighbors, weights = {}, {}
start = indptr[0]
for i in xrange(wsp.n):
oid = id_order[i]
end = indptr[i + 1]
neighbors[oid] = indices[start:end]
weights[oid] = data[start:end]
start = end
ids = copy.copy(wsp.id_order)
w = pysal.W(neighbors, weights, ids,
silent_island_warning=silent_island_warning)
w._sparse = copy.deepcopy(wsp.sparse)
w._cache['sparse'] = w._sparse
return w
def insert_diagonal(w, diagonal=1.0, wsp=False):
"""
Returns a new weights object with values inserted along the main diagonal.
Parameters
----------
w : W
Spatial weights object
diagonal : float, int or array
Defines the value(s) to which the weights matrix diagonal should
be set. If a constant is passed then each element along the
diagonal will get this value (default is 1.0). An array of length
w.n can be passed to set explicit values to each element along
the diagonal (assumed to be in the same order as w.id_order).
wsp : boolean
If True return a thin weights object of the type WSP, if False
return the standard W object.
Returns
-------
w : W
Spatial weights object
Examples
--------
>>> import pysal
>>> import numpy as np
Build a basic rook weights matrix, which has zeros on the diagonal, then
insert ones along the diagonal.
>>> w = pysal.lat2W(5, 5, id_type='string')
>>> w_const = pysal.weights.insert_diagonal(w)
>>> w['id0']
{'id5': 1.0, 'id1': 1.0}
>>> w_const['id0']
{'id5': 1.0, 'id0': 1.0, 'id1': 1.0}
Insert different values along the main diagonal.
>>> diag = np.arange(100, 125)
>>> w_var = pysal.weights.insert_diagonal(w, diag)
>>> w_var['id0']
{'id5': 1.0, 'id0': 100.0, 'id1': 1.0}
"""
w_new = copy.deepcopy(w.sparse)
w_new = w_new.tolil()
if issubclass(type(diagonal), np.ndarray):
if w.n != diagonal.shape[0]:
raise Exception("shape of w and diagonal do not match")
w_new.setdiag(diagonal)
elif operator.isNumberType(diagonal):
w_new.setdiag([diagonal] * w.n)
else:
raise Exception("Invalid value passed to diagonal")
w_out = pysal.weights.WSP(w_new, copy.copy(w.id_order))
if wsp:
return w_out
else:
return WSP2W(w_out)
def remap_ids(w, old2new, id_order=[]):
"""
Remaps the IDs in a spatial weights object.
Parameters
----------
w : W
Spatial weights object
old2new : dictionary
Dictionary where the keys are the IDs in w (i.e. "old IDs") and
the values are the IDs to replace them (i.e. "new IDs")
id_order : list
An ordered list of new IDs, which defines the order of observations when
iterating over W. If not set then the id_order in w will be
used.
Returns
-------
implicit : W
Spatial weights object with new IDs
Examples
--------
>>> from pysal import lat2W, remap_ids
>>> w = lat2W(3,2)
>>> w.id_order
[0, 1, 2, 3, 4, 5]
>>> w.neighbors[0]
[2, 1]
>>> old_to_new = {0:'a', 1:'b', 2:'c', 3:'d', 4:'e', 5:'f'}
>>> w_new = remap_ids(w, old_to_new)
>>> w_new.id_order
['a', 'b', 'c', 'd', 'e', 'f']
>>> w_new.neighbors['a']
['c', 'b']
"""
if not isinstance(w, pysal.weights.W):
raise Exception("w must be a spatial weights object")
new_neigh = {}
new_weights = {}
for key, value in w.neighbors.iteritems():
new_values = [old2new[i] for i in value]
new_key = old2new[key]
new_neigh[new_key] = new_values
new_weights[new_key] = copy.copy(w.weights[key])
if id_order:
return pysal.weights.W(new_neigh, new_weights, id_order)
else:
if w.id_order:
id_order = [old2new[i] for i in w.id_order]
return pysal.weights.W(new_neigh, new_weights, id_order)
else:
return pysal.weights.W(new_neigh, new_weights)
def get_ids(shapefile, idVariable):
"""
Gets the IDs from the DBF file that moves with a given shape file.
Parameters
----------
shapefile : string
name of a shape file including suffix
idVariable : string
name of a column in the shapefile's DBF to use for ids
Returns
-------
ids : list
a list of IDs
Examples
--------
>>> from pysal.weights.util import get_ids
>>> polyids = get_ids(pysal.examples.get_path("columbus.shp"), "POLYID")
>>> polyids[:5]
[1, 2, 3, 4, 5]
"""
try:
dbname = os.path.splitext(shapefile)[0] + '.dbf'
db = pysal.open(dbname)
var = db.by_col[idVariable]
db.close()
return var
except IOError:
msg = 'The shapefile "%s" appears to be missing its DBF file. The DBF file "%s" could not be found.' % (
shapefile, dbname)
raise IOError(msg)
except AttributeError:
msg = 'The variable "%s" was not found in the DBF file. The DBF contains the following variables: %s.' % (
idVariable, ','.join(db.header))
raise KeyError(msg)
def get_points_array_from_shapefile(shapefile):
"""
Gets a data array of x and y coordinates from a given shapefile.
Parameters
----------
shapefile : string
name of a shape file including suffix
Returns
-------
points : array
(n, 2)
a data array of x and y coordinates
Notes
-----
If the given shape file includes polygons,
this function returns x and y coordinates of the polygons' centroids
Examples
--------
Point shapefile
>>> from pysal.weights.util import get_points_array_from_shapefile
>>> xy = get_points_array_from_shapefile(pysal.examples.get_path('juvenile.shp'))
>>> xy[:3]
array([[ 94., 93.],
[ 80., 95.],
[ 79., 90.]])
Polygon shapefile
>>> xy = get_points_array_from_shapefile(pysal.examples.get_path('columbus.shp'))
>>> xy[:3]
array([[ 8.82721847, 14.36907602],
[ 8.33265837, 14.03162401],
[ 9.01226541, 13.81971908]])
"""
f = pysal.open(shapefile)
if f.type.__name__ == 'Polygon':
data = np.array([shape.centroid for shape in f])
elif f.type.__name__ == 'Point':
data = np.array([shape for shape in f])
f.close()
return data
def min_threshold_distance(data, p=2):
"""
Get the maximum nearest neighbor distance.
Parameters
----------
data : array
(n,k) or KDTree where KDtree.data is array (n,k)
n observations on k attributes
p : float
Minkowski p-norm distance metric parameter:
1<=p<=infinity
2: Euclidean distance
1: Manhattan distance
Returns
-------
nnd : float
maximum nearest neighbor distance between the n observations
Examples
--------
>>> from pysal.weights.util import min_threshold_distance
>>> import numpy as np
>>> x, y = np.indices((5, 5))
>>> x.shape = (25, 1)
>>> y.shape = (25, 1)
>>> data = np.hstack([x, y])
>>> min_threshold_distance(data)
1.0
"""
if issubclass(type(data), scipy.spatial.KDTree):
kd = data
data = kd.data
else:
kd = KDTree(data)
nn = kd.query(data, k=2, p=p)
nnd = nn[0].max(axis=0)[1]
return nnd
def lat2SW(nrows=3, ncols=5, criterion="rook", row_st=False):
"""
Create a sparse W matrix for a regular lattice.
Parameters
----------
nrows : int
number of rows
ncols : int
number of columns
rook : {"rook", "queen", "bishop"}
type of contiguity. Default is rook.
row_st : boolean
If True, the created sparse W object is row-standardized so
every row sums up to one. Defaults to False.
Returns
-------
w : scipy.sparse.dia_matrix
instance of a scipy sparse matrix
Notes
-----
Observations are row ordered: first k observations are in row 0, next k in row 1, and so on.
This method directly creates the W matrix using the structure of the contiguity type.
Examples
--------
>>> from pysal import weights
>>> w9 = weights.lat2SW(3,3)
>>> w9[0,1]
1
>>> w9[3,6]
1
>>> w9r = weights.lat2SW(3,3, row_st=True)
>>> w9r[3,6]
0.33333333333333331
"""
n = nrows * ncols
diagonals = []
offsets = []
if criterion == "rook" or criterion == "queen":
d = np.ones((1, n))
for i in range(ncols - 1, n, ncols):
d[0, i] = 0
diagonals.append(d)
offsets.append(-1)
d = np.ones((1, n))
diagonals.append(d)
offsets.append(-ncols)
if criterion == "queen" or criterion == "bishop":
d = np.ones((1, n))
for i in range(0, n, ncols):
d[0, i] = 0
diagonals.append(d)
offsets.append(-(ncols - 1))
d = np.ones((1, n))
for i in range(ncols - 1, n, ncols):
d[0, i] = 0
diagonals.append(d)
offsets.append(-(ncols + 1))
data = np.concatenate(diagonals)
offsets = np.array(offsets)
m = sparse.dia_matrix((data, offsets), shape=(n, n), dtype=np.int8)
m = m + m.T
if row_st:
m = sparse.spdiags(1. / m.sum(1).T, 0, *m.shape) * m
return m
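# write_gal is a small helper that dumps rook contiguity for a k x k lattice
# in GAL format: a "0 n" header line, then for every observation a line with
# its id and neighbor count followed by a line listing the neighbor ids.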
def write_gal(file, k=10):
f = open(file, 'w')
n = k * k
f.write("0 %d" % n)
for i in xrange(n):
row = i / k
col = i % k
# rook neighbors on the k x k lattice; drop cells that fall off the grid
neighs = []
if col > 0:
neighs.append(i - 1)
if col < k - 1:
neighs.append(i + 1)
if row > 0:
neighs.append(i - k)
if row < k - 1:
neighs.append(i + k)
f.write("\n%d %d\n" % (i, len(neighs)))
f.write(" ".join(map(str, neighs)))
f.close()
if __name__ == "__main__":
from pysal import lat2W
assert (lat2W(5, 5).sparse.todense() == lat2SW(5, 5).todense()).all()
assert (lat2W(5, 3).sparse.todense() == lat2SW(5, 3).todense()).all()
assert (lat2W(5, 3, rook=False).sparse.todense() == lat2SW(5, 3,
'queen').todense()).all()
assert (lat2W(50, 50, rook=False).sparse.todense() == lat2SW(50,
50, 'queen').todense()).all()
|
|
from __future__ import division, print_function, absolute_import
import numpy as np
import itertools as itr
import amitgroup as ag
from pnet.layer import Layer
import pnet
import pnet.matrix
from amitgroup.plot import ImageGrid
def _extract_batch(im, settings, num_parts, num_orientations, part_shape,
parts, extract_func, dtype):
# TODO: Change the whole framework to this format
gpu_im = im.transpose((0, 3, 1, 2)).astype(dtype, copy=False)
res = extract_func(gpu_im)
imp = ag.util.pad(im, (0, 1, 1, 0), value=0)[:, :-1, :-1]
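# cum_im below is an integral image (2-D cumulative sum) of the per-pixel
# feature counts; the four-corner difference then gives, for each
# part-shaped window, how many features fire inside it, so windows with too
# little support can be marked as background (-1).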
cum_im = imp.sum(-1).cumsum(1).cumsum(2)
ps = part_shape
counts = (cum_im[:, ps[0]:, ps[1]:] -
cum_im[:, :-ps[0], ps[1]:] -
cum_im[:, ps[0]:, :-ps[1]] +
cum_im[:, :-ps[0], :-ps[1]])
th = settings['threshold']
res[counts < th] = -1
return res[..., np.newaxis]
@Layer.register('oriented-parts-layer')
class OrientedPartsLayer(Layer):
def __init__(self, n_parts=1, n_orientations=1, part_shape=(6, 6),
settings={}):
self._num_true_parts = n_parts
self._num_orientations = n_orientations
self._part_shape = part_shape
self._settings = dict(outer_frame=0,
n_init=1,
n_iter=8,
threshold=0.0,
samples_per_image=40,
max_samples=10000,
seed=0,
min_prob=0.005,
min_count=20,
std_thresh=0.05, # TODO: Temp
circular=False,
mem_gb=4,
)
for k, v in settings.items():
if k not in self._settings:
raise ValueError("Unknown settings: {}".format(k))
else:
self._settings[k] = v
self._train_info = {}
self._keypoints = None
self._parts = None
self._weights = None
@property
def num_parts(self):
return self._num_true_parts * self._num_orientations
@property
def part_shape(self):
return self._part_shape
@property
def pos_matrix(self):
return self.conv_pos_matrix(self._part_shape)
@property
def visualized_parts(self):
return self._visparts
@property
def parts(self):
return self._parts
def _extract(self, phi, data):
with ag.Timer('extract inside parts'):
im = phi(data)
# Guess an appropriate batch size.
output_shape = (im.shape[1] - self._part_shape[0] + 1,
im.shape[2] - self._part_shape[1] + 1)
# The empirical factor scales the theoretical output size up to the memory
# use that was measured in practice.
empirical_factor = 2.0
bytesize = (self.num_parts * np.prod(output_shape) *
np.dtype(self._dtype).itemsize * empirical_factor)
memory = self._settings['mem_gb'] * 1024**3
print('data shape', data.shape)
print('output shape', output_shape)
print('num parts', self.num_parts)
n_batches = int(bytesize * data.shape[0] / memory)
print('n_batches', n_batches)
with ag.Timer('extract more'):
sett = (self._settings,
self.num_parts,
self._num_orientations,
self._part_shape,
self._parts,
self._extract_func,
self._dtype)
if n_batches == 0:
feat = _extract_batch(im, *sett)
else:
im_batches = np.array_split(im, n_batches)
args = ((im_b,) + sett for im_b in im_batches)
res = pnet.parallel.starmap_unordered(_extract_batch, args)
feat = np.concatenate([batch for batch in res])
return (feat, self.num_parts, self._num_orientations)
@property
def trained(self):
return self._parts is not None
def _train(self, phi, data, y=None):
raw_patches, raw_originals = self._get_patches(phi, data)
assert len(raw_patches), "No patches found"
return self.train_from_samples(raw_patches, raw_originals)
def train_from_samples(self, raw_patches, raw_originals):
min_prob = self._settings['min_prob']
ORI = self._num_orientations
POL = self._settings.get('polarities', 1)
P = ORI * POL
def cycles(X):
return np.asarray([np.concatenate([X[i:], X[:i]])
for i in range(len(X))])
RR = np.arange(ORI)
PP = np.arange(POL)
II = [list(itr.product(PPi, RRi))
for PPi in cycles(PP)
for RRi in cycles(RR)]
lookup = dict(zip(itr.product(PP, RR), itr.count()))
n_init = self._settings.get('n_init', 1)
n_iter = self._settings.get('n_iter', 10)
seed = self._settings.get('em_seed', 0)
num_angle = ORI
d = np.prod(raw_patches.shape[2:])
permutation = np.empty((num_angle, num_angle * d), dtype=np.int_)
for a in range(num_angle):
if a == 0:
permutation[a] = np.arange(num_angle * d)
else:
permutation[a] = np.roll(permutation[a-1], d)
permutations = [[lookup[ii] for ii in rows] for rows in II]
permutations = np.asarray(permutations)
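# Roughly speaking, each row of `permutations` re-indexes the (polarity,
# orientation) components under one combined cyclic shift of polarities and
# orientations; PermutationMM uses these to tie together the parameters of
# rotated (and optionally polarity-flipped) copies of the same part.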
from pnet.permutation_mm import PermutationMM
mm = PermutationMM(n_components=self._num_true_parts,
permutations=permutations,
n_iter=n_iter,
n_init=n_init,
random_state=seed,
min_probability=min_prob)
Xflat = raw_patches.reshape(raw_patches.shape[:2] + (-1,))
mm.fit(Xflat)
comps = mm.predict(Xflat)
ml = self._num_true_parts
counts = np.bincount(comps[:, 0], minlength=ml)
visparts = np.asarray([
raw_originals[comps[:, 0] == k,
comps[comps[:, 0] == k][:, 1]].mean(0)
for k in range(ml)
])
ww = counts / counts.sum()
HH = np.sum(-ww * np.log(ww))
print('entropy', HH)
ok = counts >= self._settings['min_count']
# Reject some parts
#ok = counts >= self._settings['min_count']
II = np.argsort(counts)[::-1]
II = np.asarray([ii for ii in II if ok[ii]])
ag.info('Keeping', len(II), 'out of', ok.size, 'parts')
self._num_true_parts = len(II)
means = mm.means_[II]
weights = mm.weights_[II]
counts_final = counts[II]
# Visualize parts : we iterate only over 'ok' ones
self._visparts = visparts[II]
ag.info('Training counts:', counts_final)
# Store info
sh = (self._num_true_parts * P,) + raw_patches.shape[2:]
self._parts = means.reshape(sh)
self._parts_vis = self._parts.copy()
if self._settings['circular']:
sh = raw_patches.shape[2:4]
assert sh[0] == sh[1], 'Must use square parts with circular'
side = sh[0]
off = -(side - 1) / 2
# Remove edges and make circular
x, y = np.meshgrid(np.arange(side) + off, np.arange(side) + off)
mask = (x ** 2) + (y ** 2) <= (side / 2) ** 2
mask0 = mask[np.newaxis, ..., np.newaxis]
# We'll set them to any value (0.1). This could use better handling
# if so that the likelihoods aren't ruined.
self._parts = mask0 * self._parts + ~mask0 * 0.1
self._parts_vis[:, ~mask, :] = np.nan
self._train_info['counts_initial'] = counts
self._train_info['counts'] = counts_final
self._preprocess()
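# The block below is deliberately disabled by the ``if 0:`` guard; it is an
# unfinished data-preparation prototype kept only for reference.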
if 0:
def _get_data(self, data):
ORI = self._num_orientations
POL = self._settings.get('polarities', 1)
size = data.shape[1:3]
# Make it square, to accommodate all types of rotations
new_side = np.max(size)
new_size = [new_side + (new_side - data.shape[1]) % 2,
new_side + (new_side - data.shape[2]) % 2]
pad_shape = (-1, new_size[0], new_size[1])
data_padded = ag.util.pad_to_size(data, pad_shape)
angles = np.arange(0, 360, 360/ORI)
all_data = np.asarray([
transform.rotate(data_padded.transpose((1, 2, 0)),
angle,
resize=False,
mode='nearest')
for angle in angles]).transpose((3, 0, 1, 2))
# Add inverted polarity too
if POL == 2:
all_data = np.concatenate([all_data, 1 - all_data], axis=1)
sh_samples = all_data.shape[:2]
sh_image = all_data.shape[2:]
all_X = phi(all_data.reshape((-1,) + sh_image))
all_X = all_X.reshape(sh_samples + all_X.shape[1:])
X_shape = all_X.shape[2:4]
def _get_patches(self, phi, data):
samples_per_image = self._settings['samples_per_image']
fr = self._settings.get('outer_frame', 0)
the_patches = None
the_originals = None
ag.info("Extracting patches from")
ps = self._part_shape
ORI = self._num_orientations
POL = self._settings.get('polarities', 1)
assert POL in (1, 2), "Polarities must be 1 or 2"
from skimage import transform
size = data.shape[1:3]
# Make it square, to accommodate all types of rotations
new_side = np.max(size)
new_size = [new_side + (new_side - data.shape[1]) % 2,
new_side + (new_side - data.shape[2]) % 2]
pad = [(new_size[i]-size[i])//2 for i in range(2)]
angles = np.arange(0, 360, 360/ORI)
radians = angles*np.pi/180
# Set up matrices that will translate a position in the canonical
# image to the rotated images. This way, we're not rotating each
# patch on demand, which will be much slower.
offset = (np.asarray(new_size) - 1) / 2
matrices = [pnet.matrix.translation(offset[0], offset[1]) *
pnet.matrix.rotation(a) *
pnet.matrix.translation(-offset[0], -offset[1])
for a in radians]
# Add matrices for the polarity flips too, if applicable
matrices *= POL
E = phi(data[:1]).shape[-1]
c = 0
batch_size = 20
max_samples = self._settings.get('max_samples', 10000)
patches_sh = ((max_samples, self._num_orientations) +
self._part_shape + (E,))
the_patches = np.zeros(patches_sh)
orig_sh = (max_samples, self._num_orientations) + (3, 3)
the_originals = np.zeros(orig_sh)
#the_originals = []
consecutive_failures = 0
rs = np.random.RandomState(0)
for b in itr.count(0):
data_subset = data[b*batch_size:(b+1)*batch_size]
if data_subset.shape[0] == 0:
break
data_padded = ag.util.pad_to_size(data_subset,
(-1, new_size[0], new_size[1],) + data.shape[3:])
ims = data_padded.transpose(1, 2, 0, 3)
ims = ims.reshape(data_padded.shape[1:3] + (-1,))
all_data = np.asarray([
transform.rotate(ims,
angle,
resize=False,
mode='nearest')
for angle in angles])
sh = all_data.shape[:3] + (-1, data.shape[3])
all_data = all_data.reshape(sh)
all_data = all_data.transpose(3, 0, 1, 2, 4)
# Add inverted polarity too
if POL == 2:
all_data = np.concatenate([all_data, 1 - all_data], axis=1)
# Keep sampled patch centers away from the border so that patches stay
# inside the image even after rotating; the margin is fairly arbitrary.
avoid_edge = int(1 + np.max(ps)/2)
sh_samples = all_data.shape[:2]
sh_image = all_data.shape[2:]
all_X = phi(all_data.reshape((-1,) + sh_image))
all_X = all_X.reshape(sh_samples + all_X.shape[1:])
X_shape = all_X.shape[2:4]
# These indices represent the center of patches
range_x = range(pad[0]+avoid_edge, pad[0]+X_shape[0]-avoid_edge)
range_y = range(pad[1]+avoid_edge, pad[1]+X_shape[1]-avoid_edge)
center_adjusts = [ps[0] % 2,
ps[1] % 2]
offset = (np.asarray(X_shape) - center_adjusts) / 2
mmatrices = [pnet.matrix.translation(offset[0], offset[1]) *
pnet.matrix.rotation(a) *
pnet.matrix.translation(-offset[0], -offset[1])
for a in radians]
for n in range(len(all_data)):
# all_img = all_data[n]
X = all_X[n]
indices = list(itr.product(range_x, range_y))
rs.shuffle(indices)
i_iter = itr.cycle(iter(indices))
minus_ps = [-(ps[i]//2) for i in range(2)]
plus_ps = [minus_ps[i] + ps[i] for i in range(2)]
E = X.shape[-1]
th = self._settings['threshold']
# std_thresh = self._settings['std_thresh']
TRIES = 200
for sample in range(samples_per_image):
for tries in range(TRIES):
x, y = next(i_iter)
selection0 = [0,
slice(x+minus_ps[0], x+plus_ps[0]),
slice(y+minus_ps[1], y+plus_ps[1])]
# Return grayscale patch and edges patch
patch0 = X[selection0]
if fr == 0:
tot = patch0.sum()
# std = patch0.std()
else:
tot = patch0[fr:-fr, fr:-fr].sum()
# std = patch0[fr:-fr, fr:-fr].std()
# if std_thresh <= std:
if th <= tot:
XY = np.array([x, y, 1])[:, np.newaxis]
# Now, let's explore all orientations
patch = np.zeros((ORI * POL,) + ps + (E,))
vispatch = []
br = False
sels = []
for ori in range(ORI * POL):
p = np.dot(mmatrices[ori], XY)
A = np.asarray(phi.pos_matrix)
p2 = p
H = np.matrix([ps[0] / 2, ps[1] / 2, 0]).T
invA = np.linalg.inv(A)
lower = np.asarray(np.dot(invA, p2 - H))
upper = np.asarray(np.dot(invA, p2 + H))
ip2 = [int(round(float(p2[i]))) for i in range(2)]
def safe_round(x):
return int(float(x) + 1e-5)
selection_orig = [ori,
slice(safe_round(lower[0]),
safe_round(upper[0])),
slice(safe_round(lower[1]),
safe_round(upper[1]))]
slice1 = slice(ip2[0] + minus_ps[0],
ip2[0] + plus_ps[0])
slice2 = slice(ip2[1] + minus_ps[1],
ip2[1] + plus_ps[1])
selection = [ori, slice1, slice2]
try:
patch[ori] = X[selection]
except (IndexError, ValueError):
#print('Skipping', selection)
br = True
break
#orig = all_img[selection_orig]
#orig = data_padded[selection_orig]
orig = np.zeros((3, 3))
vispatch.append(orig)
sels.append(selection)
if br:
continue
def assert_equal(t, x, y):
assert x == y, '{}: {} != {}'.format(t, x, y)
# Randomly rotate this patch, so that we don't bias
# the unrotated (and possibly unblurred) image
vispatch = np.asarray(vispatch)
shift = rs.randint(ORI)
patch[:ORI] = np.roll(patch[:ORI], shift, axis=0)
vispatch[:ORI] = np.roll(vispatch[:ORI], shift, axis=0)
if POL == 2:
patch[ORI:] = np.roll(patch[ORI:], shift, axis=0)
vispatch[ORI:] = np.roll(vispatch[ORI:], shift,
axis=0)
# the_patches.append(patch)
#the_originals.append(vispatch)
the_patches[c] = patch
the_originals[c] = vispatch
c += 1
if c % 500 == 0:
ag.info('Fetching patches {}/{}'.format(c, max_samples))
if c >= max_samples:
return (np.asarray(the_patches),
np.asarray(the_originals))
consecutive_failures = 0
break
if tries == TRIES-1:
ag.info('WARNING: {} tries'.format(TRIES))
ag.info('cons', consecutive_failures)
consecutive_failures += 1
if consecutive_failures >= 10:
# Just give up.
raise ValueError('FATAL ERROR: Threshold is '
'probably too high (in {})'
.format(self.__class__.__name__))
return np.asarray(the_patches[:c]), np.asarray(the_originals[:c])
def _create_extract_func(self):
import theano.tensor as T
import theano
from theano.tensor.nnet import conv
# Create Theano convolution function
s_input = T.tensor4(name='input')
gpu_parts = self._parts.transpose((0, 3, 1, 2)).astype(s_input.dtype, copy=False)
gpu_parts = (gpu_parts)[:, :, ::-1, ::-1]
gpu_logits = np.log(gpu_parts / (1 - gpu_parts))
gpu_rest = np.log(1 - gpu_parts).sum(1).sum(1).sum(1)
s_logits = theano.shared(gpu_logits, name='logits')
s_rest = theano.shared(gpu_rest, name='rest')
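# For Bernoulli part models, log p(x | theta) decomposes as
#   sum_i x_i * log(theta_i / (1 - theta_i)) + sum_i log(1 - theta_i),
# so the data-dependent term is a convolution with the log-odds filters
# (s_logits) and the remainder is the per-part constant s_rest; the argmax
# over parts then picks the most likely part at every position.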
s_conv = (conv.conv2d(s_input, s_logits) +
s_rest.dimshuffle('x', 0, 'x', 'x'))
s_output = s_conv.argmax(1)
self._dtype = s_input.dtype
return theano.function([s_input], s_output)
def _preprocess(self):
self._extract_func = self._create_extract_func()
def _vzlog_output_(self, vz):
from pylab import cm
if self._train_info:
vz.log('Initial counts:', self._train_info['counts_initial'])
vz.log('Final counts:', self._train_info['counts'])
grid1 = ImageGrid(self._visparts, vmin=0, vmax=1)
grid1.save(vz.impath(), scale=4)
# Plot all the parts
if hasattr(self, '_parts_vis'):
vz.log('parts', self._parts.shape)
grid2 = ImageGrid(self._parts_vis.transpose((0, 3, 1, 2)),
vmin=0, vmax=1, cmap=cm.jet,
border_color=1)
grid2.save(vz.impath(), scale=3)
grid2 = ImageGrid(self._parts.transpose((0, 3, 1, 2)),
vmin=0, vmax=1, cmap=cm.jet,
border_color=1)
grid2.save(vz.impath(), scale=3)
def save_to_dict(self):
d = {}
d['num_true_parts'] = self._num_true_parts
d['num_orientations'] = self._num_orientations
d['part_shape'] = self._part_shape
d['settings'] = self._settings
d['parts'] = self._parts
d['visparts'] = self._visparts
d['weights'] = self._weights
return d
@classmethod
def load_from_dict(cls, d):
num_true_parts = d.get('num_true_parts')
obj = cls(num_true_parts, d['num_orientations'], d['part_shape'],
settings=d['settings'])
obj._parts = d['parts']
obj._visparts = d.get('visparts')
obj._weights = d.get('weights')
obj._preprocess()
return obj
def __repr__(self):
return ('OrientedPartsLayer(n_parts={n_parts}, '
'n_orientations={n_orientations}, '
'part_shape={part_shape})').format(
n_parts=self._num_true_parts,
n_orientations=self._num_orientations,
part_shape=self._part_shape)
|
|
# -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""
import sympy
sympy.init_printing(use_latex=False,pretty_print=False)
import pynamics
import numpy
import scipy
from pynamics.force import Force
from pynamics.spring import Spring
from pynamics.variable_types import Differentiable
import logging
logger = logging.getLogger('pynamics.system')
def static_vars(**kwargs):
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
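# static_vars attaches attributes to the decorated function object so it
# can keep state between calls without globals.  Illustrative use (the
# names here are only an example, not part of pynamics):
#
#   @static_vars(counter=0)
#   def tick():
#       tick.counter += 1
#       return tick.counter
#
# Below it is used to count integrator callbacks for progress logging.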
class System(object):
_z = 0
def __init__(self):
self.derivatives = {}
self.constants = []
self.constant_values = {}
self.forces = []
self.constraints = []
# self.momentum = []
# self.KE = sympy.Number(0)
self.bodies = []
self.particles = []
self.q = {}
self.replacements = {}
self.springs = []
self.t = sympy.Symbol('t')
self.ini = {}
self.frames = []
self.error_tolerance = 1e-16
def set_ini(self,name,val):
self.ini[name]=val
def add_q(self,q,ii):
if ii in self.q:
self.q[ii].append(q)
else:
self.q[ii] = [q]
# def get_dependent_solved(self):
# q_dep = []
# for constraint in self.constraints:
# # if constraint.solved:
# q_dep.extend(constraint.q_dep)
# return q_dep
def get_q(self,ii):
# q_dep = self.get_dependent_solved()
if ii in self.q:
# q_ind = [item for item in self.q[ii] if item not in q_dep]
q_ind = [item for item in self.q[ii]]
return q_ind
else:
return []
def get_state_variables(self):
state_var = self.get_q(0)+self.get_q(1)
return state_var
def set_newtonian(self,frame):
self.newtonian = frame
def add_frame(self,frame):
self.frames.append(frame)
def generatez(self,number):
z=sympy.Symbol('z'+str(self._z))
self.replacements[z]=number
self._z+=1
return z
def addforce_direct(self,f):
self.forces.append(f)
def addforce(self,force,velocity):
f=Force(force,velocity)
self.forces.append(f)
return f
def add_spring_force1(self,k,stretch,velocity):
force = -k*stretch
f=Force(force,velocity)
s = Spring(k,stretch,f)
self.forces.append(f)
self.springs.append(s)
return f,s
def add_spring_force2(self,k,stretch,v1,v2):
force = -k*stretch
f1=Force(force,v1)
f2=Force(force,v2)
s = Spring(k,stretch,f1,f2)
self.forces.append(f1)
self.forces.append(f2)
self.springs.append(s)
return f1,f2,s
def remove_spring(self,spring):
self.springs.remove(spring)
for f in spring.forces:
self.forces.remove(f)
# def addmomentum(self,momentum,velocity):
# self.momentum.append((momentum,velocity))
def get_KE(self):
KE = sympy.Number(0)
for item in self.particles+self.bodies:
KE+= item.KE
return KE
# def addKE(self,KE):
# self.KE+=KE
def set_derivative(self,expression,variable):
self.derivatives[expression]=variable
def add_constant(self,constant):
self.constants.append(constant)
def add_constant_value(self,constant,value):
self.constant_values[constant]=value
def getPEGravity(self,point):
PE = pynamics.ZERO
for body in self.bodies+self.particles:
if body.gravityvector is not None:
d = body.pCM - point
F = body.forcegravity
PE += F.dot(d)
return PE
def getPESprings(self):
PE = pynamics.ZERO
for item in self.springs:
k = item.k
stretch = item.s
PE+=.5*k*stretch.dot(stretch)
return PE
def addforcegravity(self,gravityvector):
for body in self.bodies:
body.addforcegravity(gravityvector)
for particle in self.particles:
particle.addforcegravity(gravityvector)
def getdynamics(self,q_speed = None):
logger.info('getting dynamic equations')
effectiveforces = []
for particle in self.particles:
effectiveforces.extend(particle.adddynamics())
for body in self.bodies:
effectiveforces.extend(body.adddynamics())
q_d = q_speed or self.get_q(1)
generalizedforce=self.generalize(self.forces,q_d)
generalizedeffectiveforce=self.generalize(effectiveforces,q_d)
return generalizedforce,generalizedeffectiveforce
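# generalize() projects each force (and each inertial "effective
# force") onto the partial velocity of its point of application with
# respect to each generalized speed, F_r = sum_i f_i . (dv_i/du_r),
# the usual construction of generalized forces in a Kane-style
# formulation.  The state_space_* methods below then assemble the
# equations of motion from these two vectors as Ax_b = ma - f = 0.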
def generalize(self,list1,q_d):
generalized=[]
for speed in q_d:
new = pynamics.ZERO
for item in list1:
expression = item.f
velocity = item.v
new+=expression.dot(velocity.diff_partial_local(speed))
generalized.append(new)
return generalized
# def solve_f_ma(self,f,ma,q_dd,inv_method = 'LU',constants = None):
# constants = constants or {}
# f = sympy.Matrix(f)
# ma = sympy.Matrix(ma)
# Ax_b = ma-f
# Ax_b = Ax_b.subs(constants)
# A = Ax_b.jacobian(q_dd)
# b = -Ax_b.subs(dict(list([(item,0) for item in q_dd])))
# var_dd = A.solve(b,method = inv_method)
# return var_dd
def state_space_pre_invert(self,f,ma,inv_method = 'LU',constants = None,q_acceleration = None, q_speed = None, q_position = None):
'''pre-invert A matrix'''
logger.info('solving a = f/m and creating function')
constants = constants or {}
remaining_constant_keys = list(set(self.constants) - set(constants.keys()))
q = q_position or self.get_q(0)
q_d = q_speed or self.get_q(1)
q_dd = q_acceleration or self.get_q(2)
q_state = q+q_d
# q_ind = q_ind or []
# q_dep = q_dep or []
# eq = eq or []
#
# logger.info('solving constraints')
# for constra
# if len(eq)>0:
# EQ = sympy.Matrix(eq)
# AA = EQ.jacobian(sympy.Matrix(q_ind))
# BB = EQ.jacobian(sympy.Matrix(q_dep))
# CC = EQ - AA*(sympy.Matrix(q_ind)) - BB*(sympy.Matrix(q_dep))
# CC = sympy.simplify(CC)
# assert(sum(CC)==0)
# dep2 = sympy.simplify(BB.solve(-(AA),method = inv_method))
# logger.info('solved constraints.')
f = sympy.Matrix(f)
ma = sympy.Matrix(ma)
Ax_b = ma-f
logger.info('substituting constants in Ma-f. ')
# if not not constants:
Ax_b = Ax_b.subs(constants)
# f = f.subs(constants)
# ma = ma.subs(constants)
logger.info('substituting constraints into Ma-f.')
# for constraint in self.constraints:
# if constraint.solved:
# subs1 = dict([(a,b) for a,b in zip(q_dep,dep2*sympy.Matrix(q_ind))])
# Ax_b = Ax_b.subs(constraint.subs)
# ma = ma.subs(constraint.subs)
# f = f.subs(constraint.subs)
# logger.info('simplifying Ax-b')
# Ax_b = sympy.simplify(Ax_b)
logger.info('finding A')
A = Ax_b.jacobian(q_dd)
# M = ma.jacobian(q_dd)
logger.info('simplifying A')
A = sympy.simplify(A)
logger.info('finding b')
b = -Ax_b.subs(dict(list([(item,0) for item in q_dd])))
# M = sympy.simplify(M)
m = len(q_dd)
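# When acceleration-level constraints J*q_dd = c are present, the
# dynamics A*q_dd = b are augmented with Lagrange multipliers lam into
# the saddle-point (KKT) system
#   [ A  J.T ] [ q_dd ]   [ b ]
#   [ J   0  ] [ lam  ] = [ c ]
# which is what A_full and b_full assemble below.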
if not self.constraints:
A_full = A
b_full = b
n=0
else:
eq_dd = []
for constraint in self.constraints:
eq_dd += constraint.eq
eq_dd = sympy.Matrix(eq_dd)
if not not constants:
eq_dd = eq_dd.subs(constants)
J = eq_dd.jacobian(q_dd)
c = -eq_dd.subs(dict(list([(item,0) for item in q_dd])))
n = len(eq_dd)
A_full = sympy.zeros(m+n)
A_full[:m,:m] = A
A_full[m:,:m] = J
A_full[:m,m:] = J.T
b_full = sympy.zeros(m+n,1)
b_full[:m,0]=b
b_full[m:,0]=c
logger.info('solving M')
# acc = M.solve(f,method = inv_method)
acc = A_full.solve(b_full,method = inv_method)
# # return var_dd
state_augmented = q_state+remaining_constant_keys+[self.t]
f_acc = sympy.lambdify(state_augmented,acc)
position_derivatives = sympy.Matrix([self.derivative(item) for item in q])
# for constraint in self.constraints:
# position_derivatives = position_derivatives.subs(constraint.subs)
position_derivatives = position_derivatives.subs(constants)
f_position_derivatives = sympy.lambdify(state_augmented,position_derivatives)
@static_vars(ii=0)
def func(arg0,arg1,*args):
if pynamics.integrator==0:
state = arg0
time = arg1
if pynamics.integrator==1:
time = arg0
state = arg1
if func.ii%1000==0:
logger.info('integration at time {0:07.2f}'.format(time))
func.ii+=1
try:
kwargs = args[0]
except IndexError:
kwargs = {}
constant_values = [kwargs['constants'][item] for item in remaining_constant_keys]
state_i_augmented = list(state)+constant_values+[time]
x1 = numpy.array(f_position_derivatives(*state_i_augmented),dtype=float).flatten()
x2 = numpy.array(f_acc(*(state_i_augmented))).flatten()
x3 = numpy.r_[x1,x2[:m]]
x4 = x3.flatten().tolist()
return x4
logger.info('done solving a = f/m and creating function')
return func
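# Note on the two variants: state_space_pre_invert solves the augmented
# linear system symbolically once (A_full.solve) and lambdifies the
# closed-form accelerations, which can be slow to derive but is cheap
# per integration step.  state_space_post_invert below lambdifies A_full
# and b_full separately and calls a numerical solve (scipy.linalg.solve)
# on every step, which usually scales better for larger systems.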
def state_space_post_invert(self,f,ma,eq_dd = None,constants = None,q_acceleration = None, q_speed = None, q_position = None,return_lambda = False,variable_functions = None):
'''invert A matrix each call'''
logger.info('solving a = f/m and creating function')
if eq_dd is not None:
raise(Exception('eq_dd is no longer being used, please use pynamics acceleration constraints instead'))
constants = constants or {}
variable_functions = variable_functions or {}
remaining_constant_keys = list(set(self.constants) - set(constants.keys()))
q = q_position or self.get_q(0)
q_d = q_speed or self.get_q(1)
q_dd = q_acceleration or self.get_q(2)
q_state = q+q_d
f = sympy.Matrix(f)
ma = sympy.Matrix(ma)
Ax_b = ma-f
if not not constants:
Ax_b = Ax_b.subs(constants)
A = Ax_b.jacobian(q_dd)
b = -Ax_b.subs(dict(list([(item,0) for item in q_dd])))
m = len(q_dd)
logger.info('substituting constraints into Ma-f.')
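# Same augmented (KKT) system as in state_space_pre_invert:
#   [ A  J.T ; J  0 ] * [ q_dd ; lam ] = [ b ; c ]
# except that here it is only assembled symbolically and solved
# numerically inside func().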
if not self.constraints:
A_full = A
b_full = b
n=0
else:
eq_dd = []
for constraint in self.constraints:
eq_dd += constraint.eq
eq_dd = sympy.Matrix(eq_dd)
if not not constants:
eq_dd = eq_dd.subs(constants)
J = eq_dd.jacobian(q_dd)
c = -eq_dd.subs(dict(list([(item,0) for item in q_dd])))
n = len(eq_dd)
A_full = sympy.zeros(m+n)
A_full[:m,:m] = A
A_full[m:,:m] = J
A_full[:m,m:] = J.T
b_full = sympy.zeros(m+n,1)
b_full[:m,0]=b
b_full[m:,0]=c
variables = list(variable_functions.keys())
state_full = q_state+remaining_constant_keys+[self.t]+variables
fA = sympy.lambdify(state_full,A_full)
fb = sympy.lambdify(state_full,b_full)
position_derivatives = sympy.Matrix([self.derivative(item) for item in q])
if not not constants:
position_derivatives = position_derivatives.subs(constants)
f_position_derivatives = sympy.lambdify(state_full,position_derivatives)
@static_vars(ii=0)
def func(arg0,arg1,*args):
if pynamics.integrator==0:
time = arg1
state = arg0
if pynamics.integrator==1:
time = arg0
state = arg1
if func.ii%1000==0:
logger.info('integration at time {0:07.2f}'.format(time))
func.ii+=1
try:
kwargs = args[0]
except IndexError:
kwargs = {}
constant_values = [kwargs['constants'][item] for item in remaining_constant_keys]
vi = [variable_functions[key](time) for key in variables]
state_i_full = list(state)+constant_values+[time]+vi
Ai = numpy.array(fA(*state_i_full),dtype=float)
bi = numpy.array(fb(*state_i_full),dtype=float)
x1 = numpy.array(f_position_derivatives(*state_i_full),dtype=float).flatten()
x2 = numpy.array(scipy.linalg.solve(Ai,bi)).flatten()
x3 = numpy.r_[x1,x2[:m]]
x4 = x3.flatten().tolist()
return x4
logger.info('done solving a = f/m and creating function')
if not return_lambda:
return func
else:
logger.info('calculating function for lambdas')
def lambdas(time,state,constants = None):
constants = constants or {}
constant_values = [constants[item] for item in remaining_constant_keys]
vi = [variable_functions[key](time) for key in variables]
state_i_full = list(state)+constant_values+[time]+vi
Ai = numpy.array(fA(*state_i_full),dtype=float)
bi = numpy.array(fb(*state_i_full),dtype=float)
x2 = numpy.array(scipy.linalg.solve(Ai,bi)).flatten()
x4 = x2[m:].flatten().tolist()
return x4
return func,lambdas
def state_space_post_invert2(self,f,ma,eq_dd,eq_d,eq,eq_active=None,constants = None,q_acceleration = None, q_speed = None, q_position = None):
'''invert A matrix each call'''
logger.info('solving a = f/m and creating function')
constants = constants or {}
remaining_constant_keys = list(set(self.constants) - set(constants.keys()))
q = q_position or self.get_q(0)
q_d = q_speed or self.get_q(1)
q_dd = q_acceleration or self.get_q(2)
q_state = q+q_d
if not not eq_dd:
eq_active = eq_active or [1]*len(eq_dd)
else:
eq_active = eq_active or []
eq_active = sympy.Matrix(eq_active)
eq = sympy.Matrix(eq or [])
eq_d = sympy.Matrix(eq_d or [])
eq_dd = sympy.Matrix(eq_dd or [])
f = sympy.Matrix(f)
ma = sympy.Matrix(ma)
Ax_b = ma-f
if not not constants:
Ax_b = Ax_b.subs(constants)
eq_active = eq_active.subs(constants)
eq = eq.subs(constants)
eq_d = eq_d.subs(constants)
eq_dd = eq_dd.subs(constants)
A = Ax_b.jacobian(q_dd)
b = -Ax_b.subs(dict(list([(item,0) for item in q_dd])))
m = len(q_dd)
if not eq_dd:
A_full = A
b_full = b
n=0
else:
J = eq_dd.jacobian(q_dd)
c = -eq_dd.subs(dict(list([(item,0) for item in q_dd])))
n = len(eq_dd)
A_full = sympy.zeros(m+n)
A_full[:m,:m] = A
A_full[m:,:m] = J
A_full[:m,m:] = J.T
b_full = sympy.zeros(m+n,1)
b_full[:m,0]=b
b_full[m:,0]=c
state_full = q_state+remaining_constant_keys+[self.t]
fA = sympy.lambdify(state_full,A_full)
fb = sympy.lambdify(state_full,b_full)
feq = sympy.lambdify(state_full,eq)
feq_d = sympy.lambdify(state_full,eq_d)
factive = sympy.lambdify(state_full,eq_active)
position_derivatives = sympy.Matrix([self.derivative(item) for item in q])
if not not constants:
position_derivatives = position_derivatives.subs(constants)
f_position_derivatives = sympy.lambdify(state_full,position_derivatives)
@static_vars(ii=0)
def func(arg0,arg1,*args):
if pynamics.integrator==0:
state = arg0
time = arg1
if pynamics.integrator==1:
time = arg0
state = arg1
if func.ii%1000==0:
logger.info('integration at time {0:07.2f}'.format(time))
func.ii+=1
try:
kwargs = args[0]
except IndexError:
kwargs = {}
alpha = kwargs['alpha']
beta = kwargs['beta']
constant_values = [kwargs['constants'][item] for item in remaining_constant_keys]
state_i_full = list(state)+constant_values+[time]
Ai = numpy.array(fA(*state_i_full),dtype=float)
bi = numpy.array(fb(*state_i_full),dtype=float)
eqi = numpy.array(feq(*state_i_full),dtype = float)
eq_di = numpy.array(feq_d(*state_i_full),dtype = float)
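# Baumgarte-style constraint stabilization: the position- and
# velocity-level violations are fed back into the acceleration
# constraint (c <- c - 2*alpha*eq_d - beta**2*eq) so that numerical
# drift off the constraint manifold decays instead of accumulating.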
bi[m:] = bi[m:]-2*alpha*eq_di-beta**2*eqi
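# Constraint rows whose activation flag is at or below error_tolerance
# are dropped from A and b before solving, so individual constraints
# can be switched on and off during the integration.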
active = numpy.array(m*[1]+factive(*state_i_full).flatten().tolist())
f1 = numpy.eye(m+n)
f2 = f1[(active>self.error_tolerance).nonzero()[0],:]
Ai=(f2.dot(Ai)).dot(f2.T)
bi=f2.dot(bi)
x1 = numpy.array(f_position_derivatives(*state_i_full),dtype=float).flatten()
x2 = numpy.array(scipy.linalg.solve(Ai,bi)).flatten()
x3 = numpy.r_[x1,x2[:m]]
x4 = x3.flatten().tolist()
return x4
logger.info('done solving a = f/m and creating function')
return func
@staticmethod
def assembleconstrained(eq_dyn,eq_con,q_dyn,q_con):
logger.info('assembling constrained system')
AC1x_b1 = sympy.Matrix(eq_dyn)
C2x_b2 = sympy.Matrix(eq_con)
logger.info('solving Ax-b')
q_dyn = sympy.Matrix(q_dyn)
q_con = sympy.Matrix(q_con)
x = q_dyn.col_join(q_con)
logger.info('finding A, C1, C2')
MASS = AC1x_b1.jacobian(q_dyn)
C1 = AC1x_b1.jacobian(q_con)
C2 = C2x_b2.jacobian(x)
AA = sympy.Matrix.col_join(sympy.Matrix.row_join(MASS,C1),C2)
logger.info('finding b')
b1 = -AC1x_b1.subs(zip(x.T.tolist()[0],[0 for item in x]))
b2 = -C2x_b2.subs(zip(x.T.tolist()[0],[0 for item in x]))
b = b1.col_join(b2)
logger.info('finished assembling constrained system')
return AA,b,x
@classmethod
def solveconstraineddynamics(cls,eq_dyn,eq_con,q_dyn,q_con,method='LU'):
AA,b,x = cls.assembleconstrained(eq_dyn,eq_con,q_dyn,q_con)
AA_inv = AA.inv(method = method)
xx = AA_inv*b
x_dyn = xx[0:len(q_dyn),:]
x_con = xx[len(q_dyn):,:]
return x_dyn,x_con
def derivative(self,expression):
# for ii,a in enumerate(self.derivatives.keys()):
# if ii==0:
# result = expression.diff(a)*self.derivatives[a]
# else:
# result += expression.diff(a)*self.derivatives[a]
# return result
import sympy
all_differentiables = list(expression.atoms(Differentiable))
result = expression*0
for ii,a in enumerate(all_differentiables):
# if ii==0:
# result = expression.diff(a)*self.derivatives[a]
# else:
# result += expression.diff(a)*self.derivatives[a]
result += expression.diff(a)*self.derivatives[a]
result += expression.diff(self.t)
return result
def get_ini(self,state_variables = None):
state_variables = state_variables or self.get_state_variables()
return [self.ini[item] for item in state_variables]
def add_constraint(self, constraint):
self.constraints.append(constraint)
|
|
"""
Primary key changing capabilities and passive/non-passive cascading updates.
"""
from sqlalchemy.test.testing import eq_, ne_, \
assert_raises, assert_raises_message
import sqlalchemy as sa
from sqlalchemy.test import testing
from sqlalchemy import Integer, String, ForeignKey, Unicode
from sqlalchemy.test.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, create_session, backref
from sqlalchemy.orm.session import make_transient
from sqlalchemy.test.testing import eq_
from test.orm import _base, _fixtures
class NaturalPKTest(_base.MappedTest):
@classmethod
def define_tables(cls, metadata):
if testing.against('oracle'):
fk_args = dict(deferrable=True, initially='deferred')
else:
fk_args = dict(onupdate='cascade')
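# Oracle has no ON UPDATE CASCADE, so the tests fall back to a
# deferrable, initially-deferred FK there: the constraint is only
# checked at commit, which lets the parent and child rows be updated
# in either order.  On the other backends the database itself cascades
# the primary key change.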
users = Table('users', metadata,
Column('username', String(50), primary_key=True),
Column('fullname', String(100)),
test_needs_fk=True)
addresses = Table('addresses', metadata,
Column('email', String(50), primary_key=True),
Column('username', String(50),
ForeignKey('users.username', **fk_args)),
test_needs_fk=True)
items = Table('items', metadata,
Column('itemname', String(50), primary_key=True),
Column('description', String(100)),
test_needs_fk=True)
users_to_items = Table('users_to_items', metadata,
Column('username', String(50),
ForeignKey('users.username', **fk_args),
primary_key=True),
Column('itemname', String(50),
ForeignKey('items.itemname', **fk_args),
primary_key=True),
test_needs_fk=True)
@classmethod
def setup_classes(cls):
class User(_base.ComparableEntity):
pass
class Address(_base.ComparableEntity):
pass
class Item(_base.ComparableEntity):
pass
@testing.resolve_artifact_names
def test_entity(self):
mapper(User, users)
sess = create_session()
u1 = User(username='jack', fullname='jack')
sess.add(u1)
sess.flush()
assert sess.query(User).get('jack') is u1
u1.username = 'ed'
sess.flush()
def go():
assert sess.query(User).get('ed') is u1
self.assert_sql_count(testing.db, go, 0)
assert sess.query(User).get('jack') is None
sess.expunge_all()
u1 = sess.query(User).get('ed')
eq_(User(username='ed', fullname='jack'), u1)
@testing.resolve_artifact_names
def test_load_after_expire(self):
mapper(User, users)
sess = create_session()
u1 = User(username='jack', fullname='jack')
sess.add(u1)
sess.flush()
assert sess.query(User).get('jack') is u1
users.update(values={User.username:'jack'}).execute(username='ed')
# expire/refresh works off of the primary key. The PK is gone
# in this case, so there's no way to look it up; criterion-
# based session invalidation could solve this [ticket:911]
sess.expire(u1)
assert_raises(sa.orm.exc.ObjectDeletedError, getattr, u1, 'username')
sess.expunge_all()
assert sess.query(User).get('jack') is None
assert sess.query(User).get('ed').fullname == 'jack'
@testing.resolve_artifact_names
def test_flush_new_pk_after_expire(self):
mapper(User, users)
sess = create_session()
u1 = User(username='jack', fullname='jack')
sess.add(u1)
sess.flush()
assert sess.query(User).get('jack') is u1
sess.expire(u1)
u1.username = 'ed'
sess.flush()
sess.expunge_all()
assert sess.query(User).get('ed').fullname == 'jack'
@testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
@testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
def test_onetomany_passive(self):
self._test_onetomany(True)
def test_onetomany_nonpassive(self):
self._test_onetomany(False)
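# passive_updates=True tells the ORM to rely on the database's
# ON UPDATE CASCADE to fix up the child rows, so only the parent UPDATE
# is emitted; passive_updates=False makes the ORM load the children and
# issue their UPDATEs itself.  The assert_sql_count checks below verify
# exactly that difference.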
@testing.resolve_artifact_names
def _test_onetomany(self, passive_updates):
mapper(User, users, properties={
'addresses':relationship(Address, passive_updates=passive_updates)
})
mapper(Address, addresses)
sess = create_session()
u1 = User(username='jack', fullname='jack')
u1.addresses.append(Address(email='jack1'))
u1.addresses.append(Address(email='jack2'))
sess.add(u1)
sess.flush()
assert sess.query(Address).get('jack1') is u1.addresses[0]
u1.username = 'ed'
sess.flush()
assert u1.addresses[0].username == 'ed'
sess.expunge_all()
eq_([Address(username='ed'), Address(username='ed')],
sess.query(Address).all())
u1 = sess.query(User).get('ed')
u1.username = 'jack'
def go():
sess.flush()
if not passive_updates:
# test passive_updates=False;
#load addresses, update user, update 2 addresses
self.assert_sql_count(testing.db, go, 4)
else:
# test passive_updates=True; update user
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
assert User(username='jack', addresses=[
Address(username='jack'),
Address(username='jack')]) == \
sess.query(User).get('jack')
u1 = sess.query(User).get('jack')
u1.addresses = []
u1.username = 'fred'
sess.flush()
sess.expunge_all()
assert sess.query(Address).get('jack1').username is None
u1 = sess.query(User).get('fred')
eq_(User(username='fred', fullname='jack'), u1)
@testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
@testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
def test_manytoone_passive(self):
self._test_manytoone(True)
def test_manytoone_nonpassive(self):
self._test_manytoone(False)
@testing.resolve_artifact_names
def _test_manytoone(self, passive_updates):
mapper(User, users)
mapper(Address, addresses, properties={
'user':relationship(User, passive_updates=passive_updates)
})
sess = create_session()
a1 = Address(email='jack1')
a2 = Address(email='jack2')
u1 = User(username='jack', fullname='jack')
a1.user = u1
a2.user = u1
sess.add(a1)
sess.add(a2)
sess.flush()
u1.username = 'ed'
def go():
sess.flush()
if passive_updates:
self.assert_sql_count(testing.db, go, 1)
else:
self.assert_sql_count(testing.db, go, 3)
def go():
sess.flush()
self.assert_sql_count(testing.db, go, 0)
assert a1.username == a2.username == 'ed'
sess.expunge_all()
eq_([Address(username='ed'), Address(username='ed')],
sess.query(Address).all())
@testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
@testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
def test_onetoone_passive(self):
self._test_onetoone(True)
def test_onetoone_nonpassive(self):
self._test_onetoone(False)
@testing.resolve_artifact_names
def _test_onetoone(self, passive_updates):
mapper(User, users, properties={
"address":relationship(Address, passive_updates=passive_updates,
uselist=False)
})
mapper(Address, addresses)
sess = create_session()
u1 = User(username='jack', fullname='jack')
sess.add(u1)
sess.flush()
a1 = Address(email='jack1')
u1.address = a1
sess.add(a1)
sess.flush()
u1.username = 'ed'
def go():
sess.flush()
if passive_updates:
sess.expire(u1, ['address'])
self.assert_sql_count(testing.db, go, 1)
else:
self.assert_sql_count(testing.db, go, 2)
def go():
sess.flush()
self.assert_sql_count(testing.db, go, 0)
sess.expunge_all()
eq_([Address(username='ed')], sess.query(Address).all())
@testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
@testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
def test_bidirectional_passive(self):
self._test_bidirectional(True)
def test_bidirectional_nonpassive(self):
self._test_bidirectional(False)
@testing.resolve_artifact_names
def _test_bidirectional(self, passive_updates):
mapper(User, users)
mapper(Address, addresses, properties={
'user':relationship(User, passive_updates=passive_updates,
backref='addresses')})
sess = create_session()
a1 = Address(email='jack1')
a2 = Address(email='jack2')
u1 = User(username='jack', fullname='jack')
a1.user = u1
a2.user = u1
sess.add(a1)
sess.add(a2)
sess.flush()
u1.username = 'ed'
(ad1, ad2) = sess.query(Address).all()
eq_([Address(username='jack'), Address(username='jack')], [ad1, ad2])
def go():
sess.flush()
if passive_updates:
self.assert_sql_count(testing.db, go, 1)
else:
self.assert_sql_count(testing.db, go, 3)
eq_([Address(username='ed'), Address(username='ed')], [ad1, ad2])
sess.expunge_all()
eq_([Address(username='ed'), Address(username='ed')],
sess.query(Address).all())
u1 = sess.query(User).get('ed')
assert len(u1.addresses) == 2 # load addresses
u1.username = 'fred'
def go():
sess.flush()
# check that passive_updates is honored on the other side of the backref
if passive_updates:
self.assert_sql_count(testing.db, go, 1)
else:
self.assert_sql_count(testing.db, go, 3)
sess.expunge_all()
eq_([Address(username='fred'), Address(username='fred')],
sess.query(Address).all())
@testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
@testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
def test_manytomany_passive(self):
self._test_manytomany(True)
# mysqldb executemany() of the association table fails to
# report the correct row count
@testing.fails_if(lambda: testing.against('mysql')
and not testing.against('+zxjdbc'))
def test_manytomany_nonpassive(self):
self._test_manytomany(False)
@testing.resolve_artifact_names
def _test_manytomany(self, passive_updates):
mapper(User, users, properties={
'items':relationship(Item, secondary=users_to_items,
backref='users',
passive_updates=passive_updates)})
mapper(Item, items)
sess = create_session()
u1 = User(username='jack')
u2 = User(username='fred')
i1 = Item(itemname='item1')
i2 = Item(itemname='item2')
u1.items.append(i1)
u1.items.append(i2)
i2.users.append(u2)
sess.add(u1)
sess.add(u2)
sess.flush()
r = sess.query(Item).all()
# ComparableEntity can't handle a comparison with the backrefs
# involved....
eq_(Item(itemname='item1'), r[0])
eq_(['jack'], [u.username for u in r[0].users])
eq_(Item(itemname='item2'), r[1])
eq_(['jack', 'fred'], [u.username for u in r[1].users])
u2.username='ed'
def go():
sess.flush()
go()
def go():
sess.flush()
self.assert_sql_count(testing.db, go, 0)
sess.expunge_all()
r = sess.query(Item).all()
eq_(Item(itemname='item1'), r[0])
eq_(['jack'], [u.username for u in r[0].users])
eq_(Item(itemname='item2'), r[1])
eq_(['ed', 'jack'], sorted([u.username for u in r[1].users]))
sess.expunge_all()
u2 = sess.query(User).get(u2.username)
u2.username='wendy'
sess.flush()
r = sess.query(Item).with_parent(u2).all()
eq_(Item(itemname='item2'), r[0])
class TransientExceptionTest(_fixtures.FixtureTest):
run_inserts = None
@testing.resolve_artifact_names
def test_transient_exception(self):
"""An object that goes from a pk value to transient/pending
doesn't count as a "pk" switch.
"""
mapper(User, users)
mapper(Address, addresses, properties={'user':relationship(User)})
sess = create_session()
u1 = User(id=5, name='u1')
ad1 = Address(email_address='e1', user=u1)
sess.add_all([u1, ad1])
sess.flush()
make_transient(u1)
u1.id = None
u1.username='u2'
sess.add(u1)
sess.flush()
eq_(ad1.user_id, 5)
sess.expire_all()
eq_(ad1.user_id, 5)
ne_(u1.id, 5)
ne_(u1.id, None)
eq_(sess.query(User).count(), 2)
class ReversePKsTest(_base.MappedTest):
"""reverse the primary keys of two entities and ensure bookkeeping
succeeds."""
@classmethod
def define_tables(cls, metadata):
Table(
'user', metadata,
Column('code', Integer, primary_key=True),
Column('status', Integer, primary_key=True),
Column('username', Unicode(50), nullable=False),
)
@classmethod
def setup_classes(cls):
class User(_base.ComparableEntity):
def __init__(self, code, status, username):
self.code = code
self.status = status
self.username = username
@testing.resolve_artifact_names
def test_reverse(self):
PUBLISHED, EDITABLE, ARCHIVED = 1, 2, 3
mapper(User, user)
session = sa.orm.sessionmaker()()
a_published = User(1, PUBLISHED, u'a')
session.add(a_published)
session.commit()
a_editable = User(1, EDITABLE, u'a')
session.add(a_editable)
session.commit()
# do the switch in both directions -
# before the fix, one or the other would raise an error
# depending on platform dictionary ordering
a_published.status = ARCHIVED
a_editable.status = PUBLISHED
session.commit()
assert session.query(User).get([1, PUBLISHED]) is a_editable
assert session.query(User).get([1, ARCHIVED]) is a_published
a_published.status = PUBLISHED
a_editable.status = EDITABLE
session.commit()
assert session.query(User).get([1, PUBLISHED]) is a_published
assert session.query(User).get([1, EDITABLE]) is a_editable
class SelfReferentialTest(_base.MappedTest):
# mssql, mysql don't allow
# ON UPDATE on self-referential keys
__unsupported_on__ = ('mssql','mysql')
@classmethod
def define_tables(cls, metadata):
if testing.against('oracle'):
fk_args = dict(deferrable=True, initially='deferred')
else:
fk_args = dict(onupdate='cascade')
Table('nodes', metadata,
Column('name', String(50), primary_key=True),
Column('parent', String(50),
ForeignKey('nodes.name', **fk_args)),
test_needs_fk=True
)
@classmethod
def setup_classes(cls):
class Node(_base.ComparableEntity):
pass
@testing.resolve_artifact_names
def test_one_to_many(self):
mapper(Node, nodes, properties={
'children': relationship(Node,
backref=sa.orm.backref('parentnode',
remote_side=nodes.c.name,
passive_updates=False),
passive_updates=False)})
sess = create_session()
n1 = Node(name='n1')
n1.children.append(Node(name='n11'))
n1.children.append(Node(name='n12'))
n1.children.append(Node(name='n13'))
sess.add(n1)
sess.flush()
n1.name = 'new n1'
sess.flush()
eq_(n1.children[1].parent, 'new n1')
eq_(['new n1', 'new n1', 'new n1'],
[n.parent
for n in sess.query(Node).filter(
Node.name.in_(['n11', 'n12', 'n13']))])
@testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
@testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
def test_many_to_one_passive(self):
self._test_many_to_one(True)
def test_many_to_one_nonpassive(self):
self._test_many_to_one(False)
@testing.resolve_artifact_names
def _test_many_to_one(self, passive):
mapper(Node, nodes, properties={
'parentnode':relationship(Node,
remote_side=nodes.c.name,
passive_updates=passive)
}
)
sess = create_session()
n1 = Node(name='n1')
n11 = Node(name='n11', parentnode=n1)
n12 = Node(name='n12', parentnode=n1)
n13 = Node(name='n13', parentnode=n1)
sess.add_all([n1, n11, n12, n13])
sess.flush()
n1.name = 'new n1'
sess.flush()
if passive:
sess.expire_all()
eq_(['new n1', 'new n1', 'new n1'],
[n.parent
for n in sess.query(Node).filter(
Node.name.in_(['n11', 'n12', 'n13']))])
class NonPKCascadeTest(_base.MappedTest):
@classmethod
def define_tables(cls, metadata):
if testing.against('oracle'):
fk_args = dict(deferrable=True, initially='deferred')
else:
fk_args = dict(onupdate='cascade')
Table('users', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('username', String(50), unique=True),
Column('fullname', String(100)),
test_needs_fk=True)
Table('addresses', metadata,
Column('id', Integer, primary_key=True,
test_needs_autoincrement=True),
Column('email', String(50)),
Column('username', String(50),
ForeignKey('users.username', **fk_args)),
test_needs_fk=True
)
@classmethod
def setup_classes(cls):
class User(_base.ComparableEntity):
pass
class Address(_base.ComparableEntity):
pass
@testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
@testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
def test_onetomany_passive(self):
self._test_onetomany(True)
def test_onetomany_nonpassive(self):
self._test_onetomany(False)
@testing.resolve_artifact_names
def _test_onetomany(self, passive_updates):
mapper(User, users, properties={
'addresses':relationship(Address,
passive_updates=passive_updates)})
mapper(Address, addresses)
sess = create_session()
u1 = User(username='jack', fullname='jack')
u1.addresses.append(Address(email='jack1'))
u1.addresses.append(Address(email='jack2'))
sess.add(u1)
sess.flush()
a1 = u1.addresses[0]
eq_(sa.select([addresses.c.username]).execute().fetchall(),
[('jack',), ('jack',)])
assert sess.query(Address).get(a1.id) is u1.addresses[0]
u1.username = 'ed'
sess.flush()
assert u1.addresses[0].username == 'ed'
eq_(sa.select([addresses.c.username]).execute().fetchall(),
[('ed',), ('ed',)])
sess.expunge_all()
eq_([Address(username='ed'), Address(username='ed')],
sess.query(Address).all())
u1 = sess.query(User).get(u1.id)
u1.username = 'jack'
def go():
sess.flush()
if not passive_updates:
# test passive_updates=False; load addresses,
# update user, update 2 addresses
self.assert_sql_count(testing.db, go, 4)
else:
# test passive_updates=True; update user
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
assert User(username='jack',
addresses=[Address(username='jack'),
Address(username='jack')]) == \
sess.query(User).get(u1.id)
sess.expunge_all()
u1 = sess.query(User).get(u1.id)
u1.addresses = []
u1.username = 'fred'
sess.flush()
sess.expunge_all()
a1 = sess.query(Address).get(a1.id)
eq_(a1.username, None)
eq_(sa.select([addresses.c.username]).execute().fetchall(),
[(None,), (None,)])
u1 = sess.query(User).get(u1.id)
eq_(User(username='fred', fullname='jack'), u1)
class CascadeToFKPKTest(_base.MappedTest, testing.AssertsCompiledSQL):
"""A primary key mutation cascades onto a foreign key that is itself a
primary key."""
@classmethod
def define_tables(cls, metadata):
if testing.against('oracle'):
fk_args = dict(deferrable=True, initially='deferred')
else:
fk_args = dict(onupdate='cascade')
Table('users', metadata,
Column('username', String(50), primary_key=True),
test_needs_fk=True)
Table('addresses', metadata,
Column('username', String(50),
ForeignKey('users.username', **fk_args),
primary_key=True
),
Column('email', String(50), primary_key=True),
Column('etc', String(50)),
test_needs_fk=True
)
@classmethod
def setup_classes(cls):
class User(_base.ComparableEntity):
pass
class Address(_base.ComparableEntity):
pass
@testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
@testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
def test_onetomany_passive(self):
self._test_onetomany(True)
# PG etc. need passive=True to allow PK->PK cascade
@testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc')
def test_onetomany_nonpassive(self):
self._test_onetomany(False)
def test_o2m_change_passive(self):
self._test_o2m_change(True)
def test_o2m_change_nonpassive(self):
self._test_o2m_change(False)
@testing.resolve_artifact_names
def _test_o2m_change(self, passive_updates):
"""Change the PK of a related entity to another.
"on update cascade" is not involved here, so the mapper has
to do the UPDATE itself.
"""
mapper(User, users, properties={
'addresses':relationship(Address,
passive_updates=passive_updates)})
mapper(Address, addresses)
sess = create_session()
a1 = Address(username='ed', email='ed@host1')
u1 = User(username='ed', addresses=[a1])
u2 = User(username='jack')
sess.add_all([a1, u1, u2])
sess.flush()
a1.username = 'jack'
sess.flush()
def test_o2m_move_passive(self):
self._test_o2m_move(True)
def test_o2m_move_nonpassive(self):
self._test_o2m_move(False)
@testing.resolve_artifact_names
def _test_o2m_move(self, passive_updates):
"""Move the related entity to a different collection,
changing its PK.
"""
mapper(User, users, properties={
'addresses':relationship(Address,
passive_updates=passive_updates)})
mapper(Address, addresses)
sess = create_session()
a1 = Address(username='ed', email='ed@host1')
u1 = User(username='ed', addresses=[a1])
u2 = User(username='jack')
sess.add_all([a1, u1, u2])
sess.flush()
u1.addresses.remove(a1)
u2.addresses.append(a1)
sess.flush()
@testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE '
'but requires referential integrity')
@testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
def test_change_m2o_passive(self):
self._test_change_m2o(True)
@testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc')
def test_change_m2o_nonpassive(self):
self._test_change_m2o(False)
@testing.resolve_artifact_names
def _test_change_m2o(self, passive_updates):
mapper(User, users)
mapper(Address, addresses, properties={
'user':relationship(User, passive_updates=passive_updates)
})
sess = create_session()
u1 = User(username='jack')
a1 = Address(user=u1, email='foo@bar')
sess.add_all([u1, a1])
sess.flush()
u1.username='edmodified'
sess.flush()
eq_(a1.username, 'edmodified')
sess.expire_all()
eq_(a1.username, 'edmodified')
def test_move_m2o_passive(self):
self._test_move_m2o(True)
def test_move_m2o_nonpassive(self):
self._test_move_m2o(False)
@testing.resolve_artifact_names
def _test_move_m2o(self, passive_updates):
# tests [ticket:1856]
mapper(User, users)
mapper(Address, addresses, properties={
'user':relationship(User, passive_updates=passive_updates)
})
sess = create_session()
u1 = User(username='jack')
u2 = User(username='ed')
a1 = Address(user=u1, email='foo@bar')
sess.add_all([u1, u2, a1])
sess.flush()
a1.user = u2
sess.flush()
@testing.resolve_artifact_names
def test_rowswitch_doesntfire(self):
mapper(User, users)
mapper(Address, addresses, properties={
'user':relationship(User, passive_updates=True)
})
sess = create_session()
u1 = User(username='ed')
a1 = Address(user=u1, email='ed@host1')
sess.add(u1)
sess.add(a1)
sess.flush()
sess.delete(u1)
sess.delete(a1)
u2 = User(username='ed')
a2 = Address(user=u2, email='ed@host1', etc='foo')
sess.add(u2)
sess.add(a2)
from sqlalchemy.test.assertsql import CompiledSQL
# test that the primary key columns of addresses are not
# being updated as well, since this is a row switch.
self.assert_sql_execution(testing.db,
sess.flush,
CompiledSQL(
"UPDATE addresses SET etc=:etc WHERE "
"addresses.username = :addresses_username AND"
" addresses.email = :addresses_email",
{'etc': 'foo', 'addresses_username':'ed',
'addresses_email':'ed@host1'} ),
)
@testing.resolve_artifact_names
def _test_onetomany(self, passive_updates):
"""Change the PK of a related entity via foreign key cascade.
For databases that require "on update cascade", the mapper
has to identify the row by the new value, not the old, when
it does the update.
"""
mapper(User, users, properties={
'addresses':relationship(Address,
passive_updates=passive_updates)})
mapper(Address, addresses)
sess = create_session()
a1, a2 = Address(username='ed', email='ed@host1'),\
Address(username='ed', email='ed@host2')
u1 = User(username='ed', addresses=[a1, a2])
sess.add(u1)
sess.flush()
eq_(a1.username, 'ed')
eq_(a2.username, 'ed')
eq_(sa.select([addresses.c.username]).execute().fetchall(),
[('ed',), ('ed',)])
u1.username = 'jack'
a2.email='ed@host3'
sess.flush()
eq_(a1.username, 'jack')
eq_(a2.username, 'jack')
eq_(sa.select([addresses.c.username]).execute().fetchall(),
[('jack',), ('jack', )])
class JoinedInheritanceTest(_base.MappedTest):
"""Test cascades of pk->pk/fk on joined table inh."""
# mssql doesn't allow ON UPDATE on self-referential keys
__unsupported_on__ = ('mssql',)
@classmethod
def define_tables(cls, metadata):
if testing.against('oracle'):
fk_args = dict(deferrable=True, initially='deferred')
else:
fk_args = dict(onupdate='cascade')
Table('person', metadata,
Column('name', String(50), primary_key=True),
Column('type', String(50), nullable=False),
test_needs_fk=True)
Table('engineer', metadata,
Column('name', String(50), ForeignKey('person.name', **fk_args),
primary_key=True),
Column('primary_language', String(50)),
Column('boss_name', String(50),
ForeignKey('manager.name', **fk_args)),
test_needs_fk=True
)
Table('manager', metadata,
Column('name', String(50),
ForeignKey('person.name', **fk_args),
primary_key=True),
Column('paperwork', String(50)),
test_needs_fk=True
)
@classmethod
def setup_classes(cls):
class Person(_base.ComparableEntity):
pass
class Engineer(Person):
pass
class Manager(Person):
pass
@testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
@testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
def test_pk_passive(self):
self._test_pk(True)
# PG etc. need passive=True to allow PK->PK cascade
@testing.fails_on_everything_except('sqlite', 'oracle', '+zxjdbc')
def test_pk_nonpassive(self):
self._test_pk(False)
@testing.fails_on('sqlite', 'sqlite doesnt support ON UPDATE CASCADE')
@testing.fails_on('oracle', 'oracle doesnt support ON UPDATE CASCADE')
def test_fk_passive(self):
self._test_fk(True)
# PG etc. need passive=True to allow PK->PK cascade
@testing.fails_on_everything_except('sqlite', 'mysql+zxjdbc',
'postgresql+zxjdbc')
def test_fk_nonpassive(self):
self._test_fk(False)
@testing.resolve_artifact_names
def _test_pk(self, passive_updates):
mapper(Person, person, polymorphic_on=person.c.type,
polymorphic_identity='person',
passive_updates=passive_updates)
mapper(Engineer, engineer, inherits=Person,
polymorphic_identity='engineer', properties={
'boss':relationship(Manager,
primaryjoin=manager.c.name==engineer.c.boss_name,
passive_updates=passive_updates
)
})
mapper(Manager, manager, inherits=Person,
polymorphic_identity='manager')
sess = sa.orm.sessionmaker()()
e1 = Engineer(name='dilbert', primary_language='java')
sess.add(e1)
sess.commit()
e1.name = 'wally'
e1.primary_language = 'c++'
sess.commit()
@testing.resolve_artifact_names
def _test_fk(self, passive_updates):
mapper(Person, person, polymorphic_on=person.c.type,
polymorphic_identity='person',
passive_updates=passive_updates)
mapper(Engineer, engineer, inherits=Person,
polymorphic_identity='engineer', properties={
'boss':relationship(Manager,
primaryjoin=manager.c.name==engineer.c.boss_name,
passive_updates=passive_updates
)
})
mapper(Manager, manager, inherits=Person,
polymorphic_identity='manager')
sess = sa.orm.sessionmaker()()
m1 = Manager(name='dogbert', paperwork='lots')
e1, e2 = \
Engineer(name='dilbert', primary_language='java', boss=m1),\
Engineer(name='wally', primary_language='c++', boss=m1)
sess.add_all([
e1, e2, m1
])
sess.commit()
m1.name = 'pointy haired'
e1.primary_language = 'scala'
e2.primary_language = 'cobol'
sess.commit()
|
|
#!/usr/bin/env python
import os, sys
new_path = [ os.path.join( os.getcwd(), "lib" ) ]
new_path.extend( sys.path[1:] ) # remove scripts/ from the path
sys.path = new_path
from galaxy import eggs
import pkg_resources
pkg_resources.require( "SQLAlchemy >= 0.4" )
import time, ConfigParser, shutil
from datetime import datetime, timedelta
from time import strftime
from optparse import OptionParser
import galaxy.model.mapping
import sqlalchemy as sa
from galaxy.model.orm import and_, eagerload
assert sys.version_info[:2] >= ( 2, 4 )
def main():
parser = OptionParser()
parser.add_option( "-d", "--days", dest="days", action="store", type="int", help="number of days (60)", default=60 )
parser.add_option( "-r", "--remove_from_disk", action="store_true", dest="remove_from_disk", help="remove datasets from disk when purged", default=False )
parser.add_option( "-i", "--info_only", action="store_true", dest="info_only", help="info about the requested action", default=False )
parser.add_option( "-f", "--force_retry", action="store_true", dest="force_retry", help="performs the requested actions, but ignores whether it might have been done before. Useful when -r wasn't used, but should have been", default=False )
parser.add_option( "-1", "--delete_userless_histories", action="store_true", dest="delete_userless_histories", default=False, help="delete userless histories and datasets" )
parser.add_option( "-2", "--purge_histories", action="store_true", dest="purge_histories", default=False, help="purge deleted histories" )
parser.add_option( "-3", "--purge_datasets", action="store_true", dest="purge_datasets", default=False, help="purge deleted datasets" )
parser.add_option( "-4", "--purge_libraries", action="store_true", dest="purge_libraries", default=False, help="purge deleted libraries" )
parser.add_option( "-5", "--purge_folders", action="store_true", dest="purge_folders", default=False, help="purge deleted library folders" )
parser.add_option( "-6", "--delete_datasets", action="store_true", dest="delete_datasets", default=False, help="mark deletable datasets as deleted and purge associated dataset instances" )
( options, args ) = parser.parse_args()
ini_file = args[0]
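# Exactly one of the action flags is expected; the xor chain below
# rejects the common mistakes of passing none, or two, of them.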
if not ( options.purge_folders ^ options.delete_userless_histories ^ \
options.purge_libraries ^ options.purge_histories ^ \
options.purge_datasets ^ options.delete_datasets ):
parser.print_help()
sys.exit(0)
if options.remove_from_disk and options.info_only:
parser.error( "remove_from_disk and info_only are mutually exclusive" )
conf_parser = ConfigParser.ConfigParser( {'here':os.getcwd()} )
conf_parser.read( ini_file )
configuration = {}
for key, value in conf_parser.items( "app:main" ):
configuration[key] = value
if 'database_connection' in configuration:
database_connection = configuration['database_connection']
else:
database_connection = "sqlite:///%s?isolation_level=IMMEDIATE" % configuration["database_file"]
file_path = configuration['file_path']
app = CleanupDatasetsApplication( database_connection=database_connection, file_path=file_path )
cutoff_time = datetime.utcnow() - timedelta( days=options.days )
now = strftime( "%Y-%m-%d %H:%M:%S" )
print "##########################################"
print "\n# %s - Handling stuff older than %i days" % ( now, options.days )
if options.info_only:
print "# Displaying info only ( --info_only )\n"
elif options.remove_from_disk:
print "Datasets will be removed from disk.\n"
else:
print "Datasets will NOT be removed from disk.\n"
if options.delete_userless_histories:
delete_userless_histories( app, cutoff_time, info_only = options.info_only, force_retry = options.force_retry )
elif options.purge_histories:
purge_histories( app, cutoff_time, options.remove_from_disk, info_only = options.info_only, force_retry = options.force_retry )
elif options.purge_datasets:
purge_datasets( app, cutoff_time, options.remove_from_disk, info_only = options.info_only, force_retry = options.force_retry )
elif options.purge_libraries:
purge_libraries( app, cutoff_time, options.remove_from_disk, info_only = options.info_only, force_retry = options.force_retry )
elif options.purge_folders:
purge_folders( app, cutoff_time, options.remove_from_disk, info_only = options.info_only, force_retry = options.force_retry )
elif options.delete_datasets:
delete_datasets( app, cutoff_time, options.remove_from_disk, info_only = options.info_only, force_retry = options.force_retry )
sys.exit(0)
def delete_userless_histories( app, cutoff_time, info_only = False, force_retry = False ):
# Deletes userless histories whose update_time value is older than the cutoff_time.
# The purge history script will handle marking DatasetInstances as deleted.
# Nothing is removed from disk yet.
history_count = 0
start = time.time()
if force_retry:
histories = app.sa_session.query( app.model.History ) \
.filter( and_( app.model.History.table.c.user_id==None,
app.model.History.table.c.update_time < cutoff_time ) )
else:
histories = app.sa_session.query( app.model.History ) \
.filter( and_( app.model.History.table.c.user_id==None,
app.model.History.table.c.deleted==False,
app.model.History.table.c.update_time < cutoff_time ) )
for history in histories:
if not info_only:
print "Deleting history id ", history.id
history.deleted = True
app.sa_session.add( history )
app.sa_session.flush()
history_count += 1
stop = time.time()
print "Deleted %d histories" % history_count
print "Elapsed time: ", stop - start
print "##########################################"
def purge_histories( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Purges deleted histories whose update_time is older than the cutoff_time.
# The dataset associations of each history are also marked as deleted.
# The Purge Dataset method will purge each Dataset as necessary
# history.purged == True simply means that it can no longer be undeleted
# i.e. all associated datasets are marked as deleted
history_count = 0
start = time.time()
if force_retry:
histories = app.sa_session.query( app.model.History ) \
.filter( and_( app.model.History.table.c.deleted==True,
app.model.History.table.c.update_time < cutoff_time ) ) \
.options( eagerload( 'datasets' ) )
else:
histories = app.sa_session.query( app.model.History ) \
.filter( and_( app.model.History.table.c.deleted==True,
app.model.History.table.c.purged==False,
app.model.History.table.c.update_time < cutoff_time ) ) \
.options( eagerload( 'datasets' ) )
for history in histories:
for dataset_assoc in history.datasets:
_purge_dataset_instance( dataset_assoc, app, remove_from_disk, info_only = info_only ) #mark a DatasetInstance as deleted, clear associated files, and mark the Dataset as deleted if it is deletable
if not info_only:
# TODO: should the Delete DefaultHistoryPermissions be deleted here? This was incorrectly
# done in the _list_delete() method of the history controller, so copied it here. Not sure
# if we should ever delete info like this from the db though, so commented out for now...
#for dhp in history.default_permissions:
# dhp.delete()
print "Purging history id ", history.id
history.purged = True
app.sa_session.add( history )
app.sa_session.flush()
history_count += 1
stop = time.time()
print 'Purged %d histories.' % history_count
print "Elapsed time: ", stop - start
print "##########################################"
def purge_libraries( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Purges deleted libraries whose update_time is older than the cutoff_time.
# The dataset associations of each library are also marked as deleted.
# The Purge Dataset method will purge each Dataset as necessary
# library.purged == True simply means that it can no longer be undeleted
# i.e. all associated LibraryDatasets/folders are marked as deleted
library_count = 0
start = time.time()
if force_retry:
libraries = app.sa_session.query( app.model.Library ) \
.filter( and_( app.model.Library.table.c.deleted==True,
app.model.Library.table.c.update_time < cutoff_time ) )
else:
libraries = app.sa_session.query( app.model.Library ) \
.filter( and_( app.model.Library.table.c.deleted==True,
app.model.Library.table.c.purged==False,
app.model.Library.table.c.update_time < cutoff_time ) )
for library in libraries:
_purge_folder( library.root_folder, app, remove_from_disk, info_only = info_only )
if not info_only:
print "Purging library id ", library.id
library.purged = True
app.sa_session.add( library )
app.sa_session.flush()
library_count += 1
stop = time.time()
print '# Purged %d libraries.' % library_count
print "Elapsed time: ", stop - start
print "##########################################"
def purge_folders( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Purges deleted folders whose update_time is older than the cutoff_time.
# The dataset associations of each folder are also marked as deleted.
# The Purge Dataset method will purge each Dataset as necessary
# libraryFolder.purged == True simply means that it can no longer be undeleted
# i.e. all associated LibraryDatasets/folders are marked as deleted
folder_count = 0
start = time.time()
if force_retry:
folders = app.sa_session.query( app.model.LibraryFolder ) \
.filter( and_( app.model.LibraryFolder.table.c.deleted==True,
app.model.LibraryFolder.table.c.update_time < cutoff_time ) )
else:
folders = app.sa_session.query( app.model.LibraryFolder ) \
.filter( and_( app.model.LibraryFolder.table.c.deleted==True,
app.model.LibraryFolder.table.c.purged==False,
app.model.LibraryFolder.table.c.update_time < cutoff_time ) )
for folder in folders:
_purge_folder( folder, app, remove_from_disk, info_only = info_only )
folder_count += 1
stop = time.time()
print '# Purged %d folders.' % folder_count
print "Elapsed time: ", stop - start
print "##########################################"
def delete_datasets( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Marks datasets as deleted if associated items are all deleted.
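# Candidate datasets are found by outer-joining Dataset to its history
# and library associations and keeping rows whose association is
# deleted and older than the cutoff (with --force_retry, only the age
# check is applied); each candidate is then re-checked with
# _dataset_is_deletable() so a dataset that still has an active
# association is skipped.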
start = time.time()
if force_retry:
history_dataset_ids_query = sa.select( ( app.model.Dataset.table.c.id,
app.model.Dataset.table.c.state ),
whereclause = app.model.HistoryDatasetAssociation.table.c.update_time < cutoff_time,
from_obj = [ sa.outerjoin( app.model.Dataset.table,
app.model.HistoryDatasetAssociation.table ) ] )
library_dataset_ids_query = sa.select( ( app.model.Dataset.table.c.id,
app.model.Dataset.table.c.state ),
whereclause = app.model.LibraryDatasetDatasetAssociation.table.c.update_time < cutoff_time,
from_obj = [ sa.outerjoin( app.model.Dataset.table,
app.model.LibraryDatasetDatasetAssociation.table ) ] )
else:
# We really only need the id column here, but sqlalchemy barfs when trying to select only 1 column
history_dataset_ids_query = sa.select( ( app.model.Dataset.table.c.id,
app.model.Dataset.table.c.state ),
whereclause = sa.and_( app.model.Dataset.table.c.deleted == False,
app.model.HistoryDatasetAssociation.table.c.update_time < cutoff_time,
app.model.HistoryDatasetAssociation.table.c.deleted == True ),
from_obj = [ sa.outerjoin( app.model.Dataset.table,
app.model.HistoryDatasetAssociation.table ) ] )
library_dataset_ids_query = sa.select( ( app.model.Dataset.table.c.id,
app.model.Dataset.table.c.state ),
whereclause = sa.and_( app.model.Dataset.table.c.deleted == False,
app.model.LibraryDatasetDatasetAssociation.table.c.update_time < cutoff_time,
app.model.LibraryDatasetDatasetAssociation.table.c.deleted == True ),
from_obj = [ sa.outerjoin( app.model.Dataset.table,
app.model.LibraryDatasetDatasetAssociation.table ) ] )
history_dataset_ids = [ row.id for row in history_dataset_ids_query.execute() ]
library_dataset_ids = [ row.id for row in library_dataset_ids_query.execute() ]
dataset_ids = history_dataset_ids + library_dataset_ids
skip = []
deleted_dataset_count = 0
deleted_instance_count = 0
for dataset_id in dataset_ids:
print "######### Processing dataset id:", dataset_id
dataset = app.sa_session.query( app.model.Dataset ).get( dataset_id )
if dataset.id not in skip and _dataset_is_deletable( dataset ):
deleted_dataset_count += 1
for dataset_instance in dataset.history_associations + dataset.library_associations:
print "Associated Dataset instance: ", dataset_instance.__class__.__name__, dataset_instance.id
_purge_dataset_instance( dataset_instance, app, remove_from_disk, include_children=True, info_only=info_only, is_deletable=True )
deleted_instance_count += 1
skip.append( dataset.id )
stop = time.time()
print "Examined %d datasets, marked %d as deleted and purged %d dataset instances" % ( len( skip ), deleted_dataset_count, deleted_instance_count )
print "Total elapsed time: ", stop - start
print "##########################################"
def purge_datasets( app, cutoff_time, remove_from_disk, info_only = False, force_retry = False ):
# Purges deleted datasets whose update_time is older than cutoff_time. Files may or may
# not be removed from disk.
dataset_count = 0
disk_space = 0
start = time.time()
if force_retry:
datasets = app.sa_session.query( app.model.Dataset ) \
.filter( and_( app.model.Dataset.table.c.deleted==True,
app.model.Dataset.table.c.purgable==True,
app.model.Dataset.table.c.update_time < cutoff_time ) )
else:
datasets = app.sa_session.query( app.model.Dataset ) \
.filter( and_( app.model.Dataset.table.c.deleted==True,
app.model.Dataset.table.c.purgable==True,
app.model.Dataset.table.c.purged==False,
app.model.Dataset.table.c.update_time < cutoff_time ) )
for dataset in datasets:
file_size = dataset.file_size
_purge_dataset( app, dataset, remove_from_disk, info_only = info_only )
dataset_count += 1
try:
disk_space += file_size
except:
pass
stop = time.time()
print 'Purged %d datasets' % dataset_count
if remove_from_disk:
print 'Freed disk space: ', disk_space
print "Elapsed time: ", stop - start
print "##########################################"
def _purge_dataset_instance( dataset_instance, app, remove_from_disk, include_children=True, info_only=False, is_deletable=False ):
# A dataset_instance is either an HDA or an LDDA. Purging a dataset instance marks the instance as deleted,
# and marks the associated dataset as deleted if it is not associated with another active DatasetInstance.
if not info_only:
print "Deleting dataset_instance ", str( dataset_instance ), " id ", dataset_instance.id
dataset_instance.mark_deleted( include_children = include_children )
dataset_instance.clear_associated_files()
app.sa_session.add( dataset_instance )
app.sa_session.flush()
app.sa_session.refresh( dataset_instance.dataset )
if is_deletable or _dataset_is_deletable( dataset_instance.dataset ):
# Calling methods may have already checked _dataset_is_deletable, if so, is_deletable should be True
_delete_dataset( dataset_instance.dataset, app, remove_from_disk, info_only=info_only, is_deletable=is_deletable )
#need to purge children here
if include_children:
for child in dataset_instance.children:
_purge_dataset_instance( child, app, remove_from_disk, include_children = include_children, info_only = info_only )
def _dataset_is_deletable( dataset ):
#a dataset is deletable when it no longer has any non-deleted associations
return not bool( dataset.active_history_associations or dataset.active_library_associations )
def _delete_dataset( dataset, app, remove_from_disk, info_only=False, is_deletable=False ):
#marks a base dataset as deleted, hdas/ldas associated with dataset can no longer be undeleted
    #metadata files attached to associated dataset instances are removed now
if not is_deletable and not _dataset_is_deletable( dataset ):
print "This Dataset (%i) is not deletable, associated Metadata Files will not be removed.\n" % ( dataset.id )
else:
# Mark all associated MetadataFiles as deleted and purged and remove them from disk
metadata_files = []
#lets create a list of metadata files, then perform actions on them
for hda in dataset.history_associations:
for metadata_file in app.sa_session.query( app.model.MetadataFile ) \
.filter( app.model.MetadataFile.table.c.hda_id==hda.id ):
metadata_files.append( metadata_file )
for lda in dataset.library_associations:
for metadata_file in app.sa_session.query( app.model.MetadataFile ) \
.filter( app.model.MetadataFile.table.c.lda_id==lda.id ):
metadata_files.append( metadata_file )
for metadata_file in metadata_files:
print "The following metadata files attached to associations of Dataset '%s' have been purged:" % dataset.id
if not info_only:
if remove_from_disk:
try:
print "Removing disk file ", metadata_file.file_name
os.unlink( metadata_file.file_name )
except Exception, e:
print "Error, exception: %s caught attempting to purge metadata file %s\n" %( str( e ), metadata_file.file_name )
metadata_file.purged = True
app.sa_session.add( metadata_file )
app.sa_session.flush()
metadata_file.deleted = True
app.sa_session.add( metadata_file )
app.sa_session.flush()
print "%s" % metadata_file.file_name
print "Deleting dataset id", dataset.id
dataset.deleted = True
app.sa_session.add( dataset )
app.sa_session.flush()
def _purge_dataset( app, dataset, remove_from_disk, info_only = False ):
if dataset.deleted:
try:
if dataset.purgable and _dataset_is_deletable( dataset ):
if not info_only:
# Remove files from disk and update the database
if remove_from_disk:
# TODO: should permissions on the dataset be deleted here?
print "Removing disk, file ", dataset.file_name
os.unlink( dataset.file_name )
# Remove associated extra files from disk if they exist
if dataset.extra_files_path and os.path.exists( dataset.extra_files_path ):
shutil.rmtree( dataset.extra_files_path ) #we need to delete the directory and its contents; os.unlink would always fail on a directory
print "Purging dataset id", dataset.id
dataset.purged = True
app.sa_session.add( dataset )
app.sa_session.flush()
else:
print "This dataset (%i) is not purgable, the file (%s) will not be removed.\n" % ( dataset.id, dataset.file_name )
except OSError, exc:
print "Error, dataset file has already been removed: %s" % str( exc )
print "Purging dataset id", dataset.id
dataset.purged = True
app.sa_session.add( dataset )
app.sa_session.flush()
except Exception, exc:
print "Error attempting to purge data file: ", dataset.file_name, " error: ", str( exc )
else:
print "Error: '%s' has not previously been deleted, so it cannot be purged\n" % dataset.file_name
def _purge_folder( folder, app, remove_from_disk, info_only = False ):
"""Purges a folder and its contents, recursively"""
for ld in folder.datasets:
print "Deleting library dataset id ", ld.id
ld.deleted = True
for ldda in [ld.library_dataset_dataset_association] + ld.expired_datasets:
_purge_dataset_instance( ldda, app, remove_from_disk, info_only = info_only ) #mark a DatasetInstance as deleted, clear associated files, and mark the Dataset as deleted if it is deletable
for sub_folder in folder.folders:
_purge_folder( sub_folder, app, remove_from_disk, info_only = info_only )
if not info_only:
# TODO: should the folder permissions be deleted here?
print "Purging folder id ", folder.id
folder.purged = True
app.sa_session.add( folder )
app.sa_session.flush()
class CleanupDatasetsApplication( object ):
"""Encapsulates the state of a Universe application"""
def __init__( self, database_connection=None, file_path=None ):
if database_connection is None:
raise Exception( "CleanupDatasetsApplication requires a database_connection value" )
if file_path is None:
raise Exception( "CleanupDatasetsApplication requires a file_path value" )
self.database_connection = database_connection
self.file_path = file_path
# Setup the database engine and ORM
self.model = galaxy.model.mapping.init( self.file_path, self.database_connection, engine_options={}, create_tables=False )
@property
def sa_session( self ):
"""
Returns a SQLAlchemy session -- currently just gets the current
session from the threadlocal session context, but this is provided
to allow migration toward a more SQLAlchemy 0.4 style of use.
"""
return self.model.context.current
if __name__ == "__main__": main()
|
|
'''
Module containing useful encryption functions.
Some require openssl to be installed.
'''
import glob
import os
from M2Crypto import BIO, SMIME, X509
from subprocess import Popen, PIPE
import tempfile
import logging
# hashlib only available in python 2.5+
try:
from hashlib import md5
except ImportError:
from md5 import md5
# logging configuration
log = logging.getLogger('SSM')
class EncryptException(Exception):
pass
def from_file(filename):
'''
Convenience function to read entire file into string.
'''
f = open(filename, 'r')
s = f.read()
f.close()
return s
def file_is_closed(file):
"""
Return True if 'lsof <file>' returns no output.
To do this, we have to call the lsof command using a pipe.
This is Linux-specific.
"""
# /usr/sbin/lsof is SL-specific.
p1 = Popen(["/usr/sbin/lsof", file],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, error = p1.communicate()
# If the error output isn't empty, raise an exception
if not error == "":
# Only the first line of the error from lsof is interesting.
error_lines = str(error).splitlines()
raise IOError(error_lines[0])
# If there's output, the file is open.
return output == ""
def check_cert_key(certificate, key):
'''
Check that a certificate and a key match, using openssl directly to fetch
the modulus of each, which must be the same.
'''
# Two things the same have the same modulus.
if certificate == key:
return False
p1 = Popen(["openssl", "x509", "-noout", "-modulus"],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
modulus1, error = p1.communicate(certificate)
if (error != ''):
log.error(error)
return False
p2 = Popen(["openssl", "rsa", "-noout", "-modulus"],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
modulus2, error = p2.communicate(key)
if (error != ''):
log.error(error)
return False
return (modulus1 == modulus2)
def sign_message(text, cert_path, key_path):
'''
Sign the specified message using the certificate and key in the files specified.
Returns the signed message as an SMIME string, suitable for transmission.
'''
try:
p1 = Popen(["openssl", "smime", "-sign", "-inkey", key_path, "-signer", cert_path, "-text"],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
signed_msg, error = p1.communicate(text)
if (error != ''):
log.error(error)
print error
return signed_msg
except OSError, e:
log.error("Failed to sign message: %s" % e)
raise EncryptException("Message signing failed. Check cert and key permissions.")
# Using M2Crypto...
# This signature code with v0.16 of m2crypto doesn't work as expected, in
# that it generates a signature the same size as the original message,
# rather than a constant-size signature 'blob' as in the original OpenSSL
# command. This results in a message doubling in size, which is OK in cases
# of small (<100k) messages.
# # Make a MemoryBuffer of the message.
# buf = BIO.MemoryBuffer(text)
#
# # Seed the PRNG.
# Rand.load_file('randpool.dat', -1)
#
# # Instantiate an SMIME object; set it up; sign the buffer.
# s = SMIME.SMIME()
# s.load_key(key, certificate)
# p7 = s.sign(buf)
#
# # buf gets stomped during signing, create another one
# buf = BIO.MemoryBuffer(text)
#
# # headers- optional
# out = BIO.MemoryBuffer()
#
# # write out the signature and the buffer
# s.write(out, p7, buf)
#
# return out.read()
def encrypt_message(text, certificate):
'''
Encrypt the specified message using the certificate string.
Returns the encrypted SMIME text suitable for transmission
'''
# store the certificate in a file
tmpfd, tmpname = tempfile.mkstemp(prefix='cert')
os.write(tmpfd, certificate)
os.close(tmpfd)
# encrypt
p1 = Popen(["openssl", "smime", "-encrypt", "-des3", tmpname],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
enc_txt, error = p1.communicate(text)
if (error != ''):
log.error(error)
# tidy
os.remove(tmpname)
return enc_txt
# # Using M2Crypto...
#
# # The reason not to use this code is again to do with the size of the
# # message, in that it is much faster to call out to OpenSSL rather than use
# # the m2crypto library for anything larger than 100k.
#
# buf = BIO.MemoryBuffer(text)
# Rand.load_file('randpool.dat', -1)
# s = SMIME.SMIME()
#
# x509 = X509.load_cert_string(certificate)
# sk = X509.X509_Stack()
# sk.push(x509)
#
# s.set_x509_stack(sk)
#
# s.set_cipher(SMIME.Cipher('des_ede3_cbc'))
# #s.set_cipher(SMIME.Cipher('aes_128_cbc'))
#
# p7 = s.encrypt(buf)
#
# out = BIO.MemoryBuffer()
# s.write(out, p7)
# return out.read()
def verify_message(signed_text, capath, check_crl):
'''
Verify the signed message has been signed by the certificate (attached to the
supplied SMIME message) it claims to have, by one of the accepted CAs in
capath.
    Returns a tuple of the signer's certificate subject (DN) and the plain text of the
    message, if it has been verified.
'''
# This ensures that openssl knows that the string is finished.
# It makes no difference if the signed message is correct, but
# prevents it from hanging in the case of an empty string.
signed_text += "\n\n"
signer = get_signer_cert(signed_text)
if not verify_certificate(signer, capath, check_crl):
raise EncryptException("Unverified signer")
# The -noverify flag removes the certificate verification. The certificate
# is verified above; this check would also check that the certificate
# is allowed to sign with SMIME, which host certificates sometimes aren't.
p1 = Popen(["openssl", "smime", "-verify", "-CApath", capath, "-noverify", "-text"],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
message, error = p1.communicate(signed_text)
# Interesting problem here - we get a message 'Verification successful'
# to standard error. We don't want to log this as an error each time,
# but we do want to see if there's a genuine error...
log.info(str(error).strip())
signer_x509 = X509.load_cert_string(signer)
return str(signer_x509.get_subject()), message
# # Using M2Crypto...
#
# # (you can only use this code if you're also using m2crypto to sign the
# # message)
#
# s = SMIME.SMIME()
#
# # Read the signer's certificate from the message, and verify it. Note, if
# # we don't do this first, then the verify process will segfault below.
#
# signer = get_signer_cert(signed_text)
#
#
# if not verify_certificate(signer, capath, check_crl):
# raise RuntimeError("Unverified signer")
#
#
# # Create X509 stack including just the signer certificate (which we will
# # read from the message)
#
# sk = X509.X509_Stack()
# sk.push(signer)
# s.set_x509_stack(sk)
#
# # Create X509 certificate store, including all the certificates that
# # we might need in the chain to verify the signer
#
# st = load_certificate_store(capath)
# s.set_x509_store(st)
#
# blob = BIO.MemoryBuffer(signed_text)
#
# # See note against other write_close call below for reasons for this
# blob.write_close()
#
# p7, data = SMIME.smime_load_pkcs7_bio(blob)
# v = s.verify(p7, data)
#
# signer_x509 = X509.load_cert_string(signer)
# return str(signer_x509.get_subject()), v
def decrypt_message(encrypted_text, certificate, key, capath):
'''
Decrypt the specified message using the certificate and key contained in the
named PEM files. The capath should point to a directory holding all the
CAs that we accept
This decryption function can be used whether or not OpenSSL is used to
encrypt the data
'''
s = SMIME.SMIME()
blob = BIO.MemoryBuffer(encrypted_text)
# m2Crypto v0.17? Then need to add this line to smime_load_pkcs7_bio,
# in SMIME.py, at line 98
# m2.bio_set_mem_eof_return(p7_bio._ptr(),0)
#
# Otherwise you get a 'not enough data' error from SMIME module
# Alternatively, blob.write_close() also seems to fix this, but this
# might need to be revisited if a later M2Crypto is used
blob.write_close()
s.load_key(key, certificate)
p7, data = SMIME.smime_load_pkcs7_bio(blob)
###########
# Write data to a temporary file, then decrypt it
# Workaround because the above doesn't work with M2Crypto v0.17
#tmpfd,tmpname = tempfile.mkstemp()
#os.write(tmpfd,encrypted_text)
#os.close(tmpfd)
#p7, data = SMIME.smime_load_pkcs7(tmpname)
#os.remove(tmpname)
##########
out = s.decrypt(p7)
return out
################################################################################
# Verify that the certificate is signed by a CA whose certificate is stored in
# capath.
#
# Return True if the certificate is valid
#def verify_certificate(certificate, capath):
# x509 = X509.load_cert(certificate)
# st = load_certificate_store(capath)
# if x509.verify() == 1:
# return True
# else:
# return False
#
#def load_certificate_store(capath):
# '''
# Load all the certificates in the specified directory into a certificate store
# object.
#
# Returns the certificate store
# '''
# st = X509.X509_Store()
# #st.load_locations(capath) -- doesn't work; possible bug in M2Crypto
#
# for cert in glob.glob(capath + '/*.0'):
# st.load_info(cert)
#
# return st
def verify_certificate(certificate, capath, check_crls=True):
'''
Verify that the certificate is signed by a CA whose certificate is stored in
capath. There are two variants of this function, one will load the certificate
from a string, the other will use an X509 object.
Returns True if verified
'''
verified = False
if check_crls:
# Use openssl
verified = verify_cert_and_crls(certificate, capath)
else:
# Use m2crypto
x509 = X509.load_cert_string(certificate)
verified = verify_certificate_x509(x509, capath)
return verified
def verify_cert_and_crls(certificate, capath):
'''
Verify the certificate against the CA certs in capath. Note that this uses
openssl directly because the python libraries don't offer this.
Note also that I've had to compare strings in the output of openssl to check
for verification, which may make this brittle.
Returns True if the certificate is verified.
'''
p1 = Popen(["openssl", "verify", "-CApath", capath, "-crl_check_all"],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
message, error = p1.communicate(certificate)
# I think this is unlikely ever to happen
if (error != ''):
log.error(error)
# There was a sticky problem here.
#
# None of the python openssl libraries go as far as checking CRLs,
# so I had to resort to calling openssl directly.
# However, 'openssl verify' returns 0 whatever happens, so we can't
    # use the return code to determine whether the verification was
# successful.
# If it is successful, openssl prints 'OK'
# If it fails, openssl prints 'error'
# So:
log.info("Certificate verification: " + str(message).strip())
return ("OK" in message and not "error" in message)
def verify_certificate_x509(x509, capath):
count = 0
for cert in glob.glob(capath + '/*.0'):
ca = X509.load_cert(cert)
pkey = ca.get_pubkey()
if x509.verify(pkey):
count += 1
break
return (count > 0)
def get_certificate_subject(certificate):
'''
Returns the certificate subject's DN.
'''
x509 = X509.load_cert_string(certificate)
return str(x509.get_subject())
def get_signer_cert(signed_text):
'''
Read the signer's certificate from the specified message, and return the
certificate object.
Returns an X509 object for the signer's certificate
'''
# have to resort to calling out to the openssl command line client in order
# to extract the certificate from the signed message, because I can't
# figure out how to achieve this using the M2Crypto API
p1 = Popen(["openssl", "smime", "-pk7out"],
stdin=PIPE, stdout=PIPE, stderr=PIPE)
p2 = Popen(["openssl", "pkcs7", "-print_certs"],
stdin=p1.stdout, stdout=PIPE, stderr=PIPE)
p1.stdin.write(signed_text)
cert_string, error = p2.communicate()
if (error != ''):
log.error(error)
return cert_string
def get_signer_cert_x509(signed_text):
'''
Return the signer's certificate from signed text as an X509 object.
'''
cert_string = get_signer_cert(signed_text)
return (X509.load_cert_string(cert_string))
def message_hash(msg):
"""
Compute MD5 hash of a message.
"""
m5 = md5(msg)
return m5.hexdigest()
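# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal sign/verify round trip using the helpers above, assuming a host
# certificate and key in PEM format plus a directory of accepted CA
# certificates. All three paths below are placeholders.
def _example_sign_and_verify(cert_path='/path/to/hostcert.pem',
                             key_path='/path/to/hostkey.pem',
                             capath='/path/to/ca/certificates'):
    message = 'example message body\n'
    signed = sign_message(message, cert_path, key_path)
    # check_crl=False skips the CRL check and verifies the signer with M2Crypto only.
    signer_dn, plain_text = verify_message(signed, capath, check_crl=False)
    log.info('Verified message signed by %s' % signer_dn)
    return plain_text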
|
|
from __future__ import division
from shapely.geometry import Point, LineString
import random
import math
import numpy as np
class RRTFamilyPathPlanner():
"""Plans path using an algorithm from the RRT family.
Contains methods for simple RRT based search, RRTstar based search and informed RRTstar based search.
"""
def initialise(self, environment, bounds, start_pose, goal_region, object_radius, steer_distance, num_iterations, resolution, runForFullIterations):
"""Initialises the planner with information about the environment and parameters for the rrt path planers
Args:
environment (A yaml environment): Environment where the planner will be run. Includes obstacles.
bounds( (int int int int) ): min x, min y, max x, and max y coordinates of the bounds of the world.
start_pose( (float float) ): Starting x and y coordinates of the object in question.
goal_region (Polygon): A polygon representing the region that we want our object to go to.
object_radius (float): Radius of the object.
steer_distance (float): Limits the length of the branches
            num_iterations (int): How many points are sampled for the creation of the tree
            resolution (int): Number of segments used to approximate a quarter circle around a point.
            runForFullIterations (bool): If False, RRT and RRTStar return the first path found; if True, they sample all num_iterations points and return the shortest path found.
Returns:
None
"""
self.env = environment
self.obstacles = environment.obstacles
self.bounds = bounds
self.minx, self.miny, self.maxx, self.maxy = bounds
self.start_pose = start_pose
self.goal_region = goal_region
self.obj_radius = object_radius
self.N = num_iterations
self.resolution = resolution
self.steer_distance = steer_distance
self.V = set()
self.E = set()
self.child_to_parent_dict = dict() #key = child, value = parent
self.runForFullIterations = runForFullIterations
self.goal_pose = (goal_region.centroid.coords[0])
def path(self, environment, bounds, start_pose, goal_region, object_radius, steer_distance, num_iterations, resolution, runForFullIterations, RRT_Flavour):
"""Returns a path from the start_pose to the goal region in the current environment using the specified RRT-variant algorithm.
Args:
environment (A yaml environment): Environment where the planner will be run. Includes obstacles.
bounds( (int int int int) ): min x, min y, max x, and max y coordinates of the bounds of the world.
start_pose( (float float) ): Starting x and y coordinates of the object in question.
goal_region (Polygon): A polygon representing the region that we want our object to go to.
object_radius (float): Radius of the object.
steer_distance (float): Limits the length of the branches
            num_iterations (int): How many points are sampled for the creation of the tree
            resolution (int): Number of segments used to approximate a quarter circle around a point.
            runForFullIterations (bool): If False, RRT and RRTStar return the first path found; if True, they sample all num_iterations points and return the shortest path found.
RRT_Flavour (str): A string representing what type of algorithm to use.
Options are 'RRT', 'RRT*', and 'InformedRRT*'. Anything else returns None,None,None.
Returns:
path (list<(int,int)>): A list of tuples/coordinates representing the nodes in a path from start to the goal region
self.V (set<(int,int)>): A set of Vertices (coordinates) of nodes in the tree
self.E (set<(int,int),(int,int)>): A set of Edges connecting one node to another node in the tree
"""
self.env = environment
self.initialise(environment, bounds, start_pose, goal_region, object_radius, steer_distance, num_iterations, resolution, runForFullIterations)
# Define start and goal in terms of coordinates. The goal is the centroid of the goal polygon.
x0, y0 = start_pose
x1, y1 = goal_region.centroid.coords[0]
start = (x0, y0)
goal = (x1, y1)
        # Handle edge case where the start is already at the goal
        if start == goal:
            path = [start, goal]
            self.V.update([start, goal])
            self.E.update([(start, goal)])
        # There might also be a straight path to goal, consider this case before invoking algorithm
        elif self.isEdgeCollisionFree(start, goal):
            path = [start, goal]
            self.V.update([start, goal])
            self.E.update([(start, goal)])
# Run the appropriate RRT algorithm according to RRT_Flavour
else:
if RRT_Flavour == "RRT":
path, self.V, self.E = self.RRTSearch()
elif RRT_Flavour == "RRT*":
path, self.V, self.E = self.RRTStarSearch()
elif RRT_Flavour == "InformedRRT*":
path, self.V, self.E = self.InformedRRTStarSearch()
else:
# The RRT flavour has no defined algorithm, therefore return None for all values
return None, None, None
return path, self.V, self.E
def RRTSearch(self):
"""Returns path using RRT algorithm.
        Builds a tree exploring from the start node until it reaches the goal region. It works by sampling random points in the map and connecting them to
        the tree that is grown on each iteration of the algorithm.
Returns:
path (list<(int,int)>): A list of tuples/coordinates representing the nodes in a path from start to the goal region
self.V (set<(int,int)>): A set of Vertices (coordinates) of nodes in the tree
self.E (set<(int,int),(int,int)>): A set of Edges connecting one node to another node in the tree
"""
# Initialize path and tree to be empty.
path = []
path_length = float('inf')
tree_size = 0
path_size = 0
self.V.add(self.start_pose)
goal_centroid = self.get_centroid(self.goal_region)
# Iteratively sample N random points in environment to build tree
for i in xrange(self.N):
if(random.random()>=1.95): # Change to a value under 1 to bias search towards goal, right now this line doesn't run
random_point = goal_centroid
else:
random_point = self.get_collision_free_random_point()
# The new point to be added to the tree is not the sampled point, but a colinear point with it and the nearest point in the tree.
# This keeps the branches short
nearest_point = self.find_nearest_point(random_point)
new_point = self.steer(nearest_point, random_point)
# If there is no obstacle between nearest point and sampled point, add the new point to the tree.
if self.isEdgeCollisionFree(nearest_point, new_point):
self.V.add(new_point)
self.E.add((nearest_point, new_point))
self.setParent(nearest_point, new_point)
# If new point of the tree is at the goal region, we can find a path in the tree from start node to goal.
if self.isAtGoalRegion(new_point):
if not self.runForFullIterations: # If not running for full iterations, terminate as soon as a path is found.
path, tree_size, path_size, path_length = self.find_path(self.start_pose, new_point)
break
else: # If running for full iterations, we return the shortest path found.
tmp_path, tmp_tree_size, tmp_path_size, tmp_path_length = self.find_path(self.start_pose, new_point)
if tmp_path_length < path_length:
path_length = tmp_path_length
path = tmp_path
tree_size = tmp_tree_size
path_size = tmp_path_size
# If no path is found, then path would be an empty list.
return path, self.V, self.E
def RRTStarSearch(self):
"""Returns path using RRTStar algorithm.
Uses the same structure as RRTSearch, except there's an additional 'rewire' call when adding nodes to the tree.
This can be seen as a way to optimise the branches of the subtree where the new node is being added.
Returns:
path (list<(int,int)>): A list of tuples/coordinates representing the nodes in a path from start to the goal region
self.V (set<(int,int)>): A set of Vertices (coordinates) of nodes in the tree
self.E (set<(int,int),(int,int)>): A set of Edges connecting one node to another node in the tree
"""
# Code is very similar to RRTSearch, so for simplicity's sake only the main differences have been commented.
path = []
path_length = float('inf')
tree_size = 0
path_size = 0
self.V.add(self.start_pose)
goal_centroid = self.get_centroid(self.goal_region)
for i in xrange(self.N):
if(random.random()>=1.95):
random_point = goal_centroid
else:
random_point = self.get_collision_free_random_point()
nearest_point = self.find_nearest_point(random_point)
new_point = self.steer(nearest_point, random_point)
if self.isEdgeCollisionFree(nearest_point, new_point):
# Find the nearest set of points around the new point
nearest_set = self.find_nearest_set(new_point)
min_point = self.find_min_point(nearest_set, nearest_point, new_point)
self.V.add(new_point)
self.E.add((min_point, new_point))
self.setParent(min_point, new_point)
# Main difference between RRT and RRT*, modify the points in the nearest set to optimise local path costs.
self.rewire(nearest_set, min_point, new_point)
if self.isAtGoalRegion(new_point):
if not self.runForFullIterations:
path, tree_size, path_size, path_length = self.find_path(self.start_pose, new_point)
break
else:
tmp_path, tmp_tree_size, tmp_path_size, tmp_path_length = self.find_path(self.start_pose, new_point)
if tmp_path_length < path_length:
path_length = tmp_path_length
path = tmp_path
tree_size = tmp_tree_size
path_size = tmp_path_size
return path, self.V, self.E
def InformedRRTStarSearch(self):
"""Returns path using informed RRTStar algorithm.
Uses the same structure as RRTStarSearch, except that once a path is found, sampling is restricted to an ellipse
containing the shortest path found.
Returns:
path (list<(int,int)>): A list of tuples/coordinates representing the nodes in a path from start to the goal region
self.V (set<(int,int)>): A set of Vertices (coordinates) of nodes in the tree
self.E (set<(int,int),(int,int)>): A set of Edges connecting one node to another node in the tree
"""
# Code is very similar to RRTStarSearch, so for simplicity's sake only the main differences have been commented.
path = []
path_length = float('inf')
c_best = float('inf') # Max length we expect to find in our 'informed' sample space starts as infinite
tree_size = 0
path_size = 0
self.V.add(self.start_pose)
goal_centroid = self.get_centroid(self.goal_region)
solution_set = set()
start_obj = Point(self.start_pose).buffer(self.obj_radius, self.resolution)
# The following equations define the space of the environment that is being sampled.
c_min = start_obj.distance(self.goal_region)
x_center = np.matrix([[(self.start_pose[0] + self.goal_pose[0]) / 2.0],[(self.start_pose[1] + self.goal_pose[1]) / 2.0], [0]])
a_1 = np.matrix([[(self.goal_pose[0] - self.start_pose[0]) / c_min],[(self.goal_pose[1] - self.start_pose[1]) / c_min], [0]])
id1_t = np.matrix([1.0,0,0])
M = np.dot(a_1, id1_t)
U,S,Vh = np.linalg.svd(M, 1, 1)
C = np.dot(np.dot(U, np.diag([1.0,1.0, np.linalg.det(U) * np.linalg.det(np.transpose(Vh))])), Vh)
for i in xrange(self.N):
# The main difference in this algorithm is that we limit our sample space.
            # Sample space is defined by c_best (the length of the best path found so far, which bounds the sampling ellipse),
            # c_min (the distance between start and goal), x_center (the midpoint between start and goal) and C (the rotation matrix of the ellipse).
            # Only c_best changes whenever a new path is found.
random_point = self.sample(c_best, c_min, x_center, C)
nearest_point = self.find_nearest_point(random_point)
new_point = self.steer(nearest_point, random_point)
if self.isEdgeCollisionFree(nearest_point, new_point):
nearest_set = self.find_nearest_set(new_point)
min_point = self.find_min_point(nearest_set, nearest_point, new_point)
self.V.add(new_point)
self.E.add((min_point, new_point))
self.setParent(min_point, new_point)
self.rewire(nearest_set, min_point, new_point)
if self.isAtGoalRegion(new_point):
solution_set.add(new_point)
tmp_path, tmp_tree_size, tmp_path_size, tmp_path_length = self.find_path(self.start_pose, new_point)
if tmp_path_length < path_length:
path_length = tmp_path_length
path = tmp_path
tree_size = tmp_tree_size
path_size = tmp_path_size
                        c_best = tmp_path_length # c_best is updated every time a better path is found, shrinking the sample space.
return path, self.V, self.E
"""
******************************************************************************************************************************************
***************************************************** Helper Functions *******************************************************************
******************************************************************************************************************************************
"""
def sample(self, c_max, c_min, x_center, C):
if c_max < float('inf'):
r= [c_max /2.0, math.sqrt(c_max**2 - c_min**2)/2.0, math.sqrt(c_max**2 - c_min**2)/2.0]
L = np.diag(r)
x_ball = self.sample_unit_ball()
random_point = np.dot(np.dot(C,L), x_ball) + x_center
random_point = (random_point[(0,0)], random_point[(1,0)])
else:
random_point = self.get_collision_free_random_point()
return random_point
    def sample_unit_ball(self):
        # Uniformly sample a point from the unit disk: draw a, b in [0, 1),
        # make sure a <= b, then use b as the radius and 2*pi*a/b as the angle.
        a = random.random()
        b = random.random()
        if b < a:
            a, b = b, a
        sample = (b*math.cos(2*math.pi*a/b), b*math.sin(2*math.pi*a/b))
        return np.array([[sample[0]], [sample[1]], [0]])
def find_nearest_set(self, new_point):
points = set()
ball_radius = self.find_ball_radius()
for vertex in self.V:
euc_dist = self.euclidian_dist(new_point, vertex)
if euc_dist < ball_radius:
points.add(vertex)
return points
    def find_ball_radius(self):
        # Shrinking neighbourhood radius used by RRT*: proportional to
        # (log(n) / n)**(1/d) for a tree of n nodes in d dimensions,
        # capped by the steer distance so rewiring stays local.
        unit_ball_volume = math.pi
        n = len(self.V)
        dimensions = 2.0
        gamma = (2**dimensions)*(1.0 + 1.0/dimensions) * (self.maxx - self.minx) * (self.maxy - self.miny)
        ball_radius = min(((gamma/unit_ball_volume) * math.log(n) / n)**(1.0/dimensions), self.steer_distance)
        return ball_radius
def find_min_point(self, nearest_set, nearest_point, new_point):
min_point = nearest_point
min_cost = self.cost(nearest_point) + self.linecost(nearest_point, new_point)
for vertex in nearest_set:
if self.isEdgeCollisionFree(vertex, new_point):
temp_cost = self.cost(vertex) + self.linecost(vertex, new_point)
if temp_cost < min_cost:
min_point = vertex
min_cost = temp_cost
return min_point
def rewire(self, nearest_set, min_point, new_point):
# Discards edges in the nearest_set that lead to a longer path than going through the new_point first
# Then add an edge from new_point to the vertex in question and update its parent accordingly.
for vertex in nearest_set - set([min_point]):
if self.isEdgeCollisionFree(vertex, new_point):
if self.cost(vertex) > self.cost(new_point) + self.linecost(vertex, new_point):
parent_point = self.getParent(vertex)
self.E.discard((parent_point, vertex))
self.E.discard((vertex, parent_point))
self.E.add((new_point, vertex))
self.setParent(new_point, vertex)
def cost(self, vertex):
path, tree_size, path_size, path_length = self.find_path(self.start_pose, vertex)
return path_length
def linecost(self, point1, point2):
return self.euclidian_dist(point1, point2)
def getParent(self, vertex):
return self.child_to_parent_dict[vertex]
def setParent(self, parent, child):
self.child_to_parent_dict[child] = parent
def get_random_point(self):
x = self.minx + random.random() * (self.maxx - self.minx)
y = self.miny + random.random() * (self.maxy - self.miny)
return (x, y)
def get_collision_free_random_point(self):
# Run until a valid point is found
while True:
point = self.get_random_point()
# Pick a point, if no obstacle overlaps with a circle centered at point with some obj_radius then return said point.
buffered_point = Point(point).buffer(self.obj_radius, self.resolution)
if self.isPointCollisionFree(buffered_point):
return point
def isPointCollisionFree(self, point):
for obstacle in self.obstacles:
if obstacle.contains(point):
return False
return True
def find_nearest_point(self, random_point):
closest_point = None
min_dist = float('inf')
for vertex in self.V:
euc_dist = self.euclidian_dist(random_point, vertex)
if euc_dist < min_dist:
min_dist = euc_dist
closest_point = vertex
return closest_point
def isOutOfBounds(self, point):
if((point[0] - self.obj_radius) < self.minx):
return True
if((point[1] - self.obj_radius) < self.miny):
return True
if((point[0] + self.obj_radius) > self.maxx):
return True
if((point[1] + self.obj_radius) > self.maxy):
return True
return False
def isEdgeCollisionFree(self, point1, point2):
if self.isOutOfBounds(point2):
return False
line = LineString([point1, point2])
expanded_line = line.buffer(self.obj_radius, self.resolution)
for obstacle in self.obstacles:
if expanded_line.intersects(obstacle):
return False
return True
def steer(self, from_point, to_point):
fromPoint_buffered = Point(from_point).buffer(self.obj_radius, self.resolution)
toPoint_buffered = Point(to_point).buffer(self.obj_radius, self.resolution)
if fromPoint_buffered.distance(toPoint_buffered) < self.steer_distance:
return to_point
else:
from_x, from_y = from_point
to_x, to_y = to_point
theta = math.atan2(to_y - from_y, to_x- from_x)
new_point = (from_x + self.steer_distance * math.cos(theta), from_y + self.steer_distance * math.sin(theta))
return new_point
def isAtGoalRegion(self, point):
buffered_point = Point(point).buffer(self.obj_radius, self.resolution)
intersection = buffered_point.intersection(self.goal_region)
inGoal = intersection.area / buffered_point.area
return inGoal >= 0.5
def euclidian_dist(self, point1, point2):
return math.sqrt((point2[0] - point1[0])**2 + (point2[1] - point1[1])**2)
def find_path(self, start_point, end_point):
# Returns a path by backtracking through the tree formed by one of the RRT algorithms starting at the end_point until reaching start_node.
path = [end_point]
tree_size, path_size, path_length = len(self.V), 1, 0
current_node = end_point
previous_node = None
target_node = start_point
while current_node != target_node:
parent = self.getParent(current_node)
path.append(parent)
previous_node = current_node
current_node = parent
path_length += self.euclidian_dist(current_node, previous_node)
path_size += 1
path.reverse()
return path, tree_size, path_size, path_length
    def get_centroid(self, region):
        # Parse the centroid coordinates out of the WKT string, e.g. "POINT (x y)".
        centroid = region.centroid.wkt
        filtered_vals = centroid[centroid.find("(")+1:centroid.find(")")]
        filtered_x = filtered_vals[0:filtered_vals.find(" ")]
        filtered_y = filtered_vals[filtered_vals.find(" ") + 1:]
        (x,y) = (float(filtered_x), float(filtered_y))
        return (x,y)
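# --- Hypothetical usage sketch (not part of the original module) ---
# Shows how the planner above can be driven end to end. The stub environment
# only provides the 'obstacles' attribute the class reads; every numeric value
# below (bounds, radii, iteration count) is made up for the demonstration.
if __name__ == '__main__':
    from shapely.geometry import Polygon
    class _StubEnvironment(object):
        def __init__(self, obstacles):
            self.obstacles = obstacles
    environment = _StubEnvironment([Polygon([(4, 4), (6, 4), (6, 6), (4, 6)])])
    goal_region = Polygon([(9, 9), (10, 9), (10, 10), (9, 10)])
    planner = RRTFamilyPathPlanner()
    path, V, E = planner.path(environment, (0, 0, 10, 10), (1, 1), goal_region,
                              object_radius=0.3, steer_distance=0.5,
                              num_iterations=3000, resolution=3,
                              runForFullIterations=False, RRT_Flavour="RRT")
    print("Found a path with %d nodes out of a tree of %d vertices" % (len(path), len(V)))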
|
|
# Test the runpy module
import unittest
import os
import os.path
import sys
import re
import tempfile
from test.test_support import verbose, run_unittest, forget, check_impl_detail
from test.script_helper import (temp_dir, make_script, compile_script,
make_pkg, make_zip_script, make_zip_pkg)
if check_impl_detail(pypy=True):
no_lone_pyc_file = True
else:
no_lone_pyc_file = False
from runpy import _run_code, _run_module_code, run_module, run_path
# Note: This module can't safely test _run_module_as_main as it
# runs its tests in the current process, which would mess with the
# real __main__ module (usually test.regrtest)
# See test_cmd_line_script for a test that executes that code path
# Set up the test code and expected results
class RunModuleCodeTest(unittest.TestCase):
"""Unit tests for runpy._run_code and runpy._run_module_code"""
expected_result = ["Top level assignment", "Lower level reference"]
test_source = (
"# Check basic code execution\n"
"result = ['Top level assignment']\n"
"def f():\n"
" result.append('Lower level reference')\n"
"f()\n"
"# Check the sys module\n"
"import sys\n"
"run_argv0 = sys.argv[0]\n"
"run_name_in_sys_modules = __name__ in sys.modules\n"
"if run_name_in_sys_modules:\n"
" module_in_sys_modules = globals() is sys.modules[__name__].__dict__\n"
"# Check nested operation\n"
"import runpy\n"
"nested = runpy._run_module_code('x=1\\n', mod_name='<run>')\n"
)
def test_run_code(self):
saved_argv0 = sys.argv[0]
d = _run_code(self.test_source, {})
self.assertEqual(d["result"], self.expected_result)
self.assertIs(d["__name__"], None)
self.assertIs(d["__file__"], None)
self.assertIs(d["__loader__"], None)
self.assertIs(d["__package__"], None)
self.assertIs(d["run_argv0"], saved_argv0)
self.assertNotIn("run_name", d)
self.assertIs(sys.argv[0], saved_argv0)
def test_run_module_code(self):
initial = object()
name = "<Nonsense>"
file = "Some other nonsense"
loader = "Now you're just being silly"
package = '' # Treat as a top level module
d1 = dict(initial=initial)
saved_argv0 = sys.argv[0]
d2 = _run_module_code(self.test_source,
d1,
name,
file,
loader,
package)
self.assertNotIn("result", d1)
self.assertIs(d2["initial"], initial)
self.assertEqual(d2["result"], self.expected_result)
self.assertEqual(d2["nested"]["x"], 1)
self.assertIs(d2["__name__"], name)
self.assertTrue(d2["run_name_in_sys_modules"])
self.assertTrue(d2["module_in_sys_modules"])
self.assertIs(d2["__file__"], file)
self.assertIs(d2["run_argv0"], file)
self.assertIs(d2["__loader__"], loader)
self.assertIs(d2["__package__"], package)
self.assertIs(sys.argv[0], saved_argv0)
self.assertNotIn(name, sys.modules)
class RunModuleTest(unittest.TestCase):
"""Unit tests for runpy.run_module"""
def expect_import_error(self, mod_name):
try:
run_module(mod_name)
except ImportError:
pass
else:
self.fail("Expected import error for " + mod_name)
def test_invalid_names(self):
# Builtin module
self.expect_import_error("sys")
# Non-existent modules
self.expect_import_error("sys.imp.eric")
self.expect_import_error("os.path.half")
self.expect_import_error("a.bee")
self.expect_import_error(".howard")
self.expect_import_error("..eaten")
# Package without __main__.py
self.expect_import_error("multiprocessing")
def test_library_module(self):
run_module("runpy")
def _add_pkg_dir(self, pkg_dir):
os.mkdir(pkg_dir)
pkg_fname = os.path.join(pkg_dir, "__init__"+os.extsep+"py")
pkg_file = open(pkg_fname, "w")
pkg_file.close()
return pkg_fname
def _make_pkg(self, source, depth, mod_base="runpy_test"):
pkg_name = "__runpy_pkg__"
test_fname = mod_base+os.extsep+"py"
pkg_dir = sub_dir = tempfile.mkdtemp()
if verbose: print " Package tree in:", sub_dir
sys.path.insert(0, pkg_dir)
if verbose: print " Updated sys.path:", sys.path[0]
for i in range(depth):
sub_dir = os.path.join(sub_dir, pkg_name)
pkg_fname = self._add_pkg_dir(sub_dir)
if verbose: print " Next level in:", sub_dir
if verbose: print " Created:", pkg_fname
mod_fname = os.path.join(sub_dir, test_fname)
mod_file = open(mod_fname, "w")
mod_file.write(source)
mod_file.close()
if verbose: print " Created:", mod_fname
mod_name = (pkg_name+".")*depth + mod_base
return pkg_dir, mod_fname, mod_name
def _del_pkg(self, top, depth, mod_name):
for entry in list(sys.modules):
if entry.startswith("__runpy_pkg__"):
del sys.modules[entry]
if verbose: print " Removed sys.modules entries"
del sys.path[0]
if verbose: print " Removed sys.path entry"
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
try:
os.remove(os.path.join(root, name))
except OSError, ex:
if verbose: print ex # Persist with cleaning up
for name in dirs:
fullname = os.path.join(root, name)
try:
os.rmdir(fullname)
except OSError, ex:
if verbose: print ex # Persist with cleaning up
try:
os.rmdir(top)
if verbose: print " Removed package tree"
except OSError, ex:
if verbose: print ex # Persist with cleaning up
def _check_module(self, depth):
pkg_dir, mod_fname, mod_name = (
self._make_pkg("x=1\n", depth))
forget(mod_name)
try:
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name) # Read from source
self.assertIn("x", d1)
self.assertTrue(d1["x"] == 1)
del d1 # Ensure __loader__ entry doesn't keep file open
if not no_lone_pyc_file:
__import__(mod_name)
os.remove(mod_fname)
if not sys.dont_write_bytecode:
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name) # Read from bytecode
self.assertIn("x", d2)
self.assertTrue(d2["x"] == 1)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def _check_package(self, depth):
pkg_dir, mod_fname, mod_name = (
self._make_pkg("x=1\n", depth, "__main__"))
pkg_name, _, _ = mod_name.rpartition(".")
forget(mod_name)
try:
if verbose: print "Running from source:", pkg_name
d1 = run_module(pkg_name) # Read from source
self.assertIn("x", d1)
self.assertTrue(d1["x"] == 1)
del d1 # Ensure __loader__ entry doesn't keep file open
if not no_lone_pyc_file:
__import__(mod_name)
os.remove(mod_fname)
if not sys.dont_write_bytecode:
if verbose: print "Running from compiled:", pkg_name
d2 = run_module(pkg_name) # Read from bytecode
self.assertIn("x", d2)
self.assertTrue(d2["x"] == 1)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, pkg_name)
if verbose: print "Package executed successfully"
def _add_relative_modules(self, base_dir, source, depth):
if depth <= 1:
raise ValueError("Relative module test needs depth > 1")
pkg_name = "__runpy_pkg__"
module_dir = base_dir
for i in range(depth):
parent_dir = module_dir
module_dir = os.path.join(module_dir, pkg_name)
# Add sibling module
sibling_fname = os.path.join(module_dir, "sibling"+os.extsep+"py")
sibling_file = open(sibling_fname, "w")
sibling_file.close()
if verbose: print " Added sibling module:", sibling_fname
# Add nephew module
uncle_dir = os.path.join(parent_dir, "uncle")
self._add_pkg_dir(uncle_dir)
if verbose: print " Added uncle package:", uncle_dir
cousin_dir = os.path.join(uncle_dir, "cousin")
self._add_pkg_dir(cousin_dir)
if verbose: print " Added cousin package:", cousin_dir
nephew_fname = os.path.join(cousin_dir, "nephew"+os.extsep+"py")
nephew_file = open(nephew_fname, "w")
nephew_file.close()
if verbose: print " Added nephew module:", nephew_fname
def _check_relative_imports(self, depth, run_name=None):
contents = r"""\
from __future__ import absolute_import
from . import sibling
from ..uncle.cousin import nephew
"""
pkg_dir, mod_fname, mod_name = (
self._make_pkg(contents, depth))
try:
self._add_relative_modules(pkg_dir, contents, depth)
pkg_name = mod_name.rpartition('.')[0]
if verbose: print "Running from source:", mod_name
d1 = run_module(mod_name, run_name=run_name) # Read from source
self.assertIn("__package__", d1)
self.assertTrue(d1["__package__"] == pkg_name)
self.assertIn("sibling", d1)
self.assertIn("nephew", d1)
del d1 # Ensure __loader__ entry doesn't keep file open
if not no_lone_pyc_file:
__import__(mod_name)
os.remove(mod_fname)
if not sys.dont_write_bytecode:
if verbose: print "Running from compiled:", mod_name
d2 = run_module(mod_name, run_name=run_name) # Read from bytecode
self.assertIn("__package__", d2)
self.assertTrue(d2["__package__"] == pkg_name)
self.assertIn("sibling", d2)
self.assertIn("nephew", d2)
del d2 # Ensure __loader__ entry doesn't keep file open
finally:
self._del_pkg(pkg_dir, depth, mod_name)
if verbose: print "Module executed successfully"
def test_run_module(self):
for depth in range(4):
if verbose: print "Testing package depth:", depth
self._check_module(depth)
def test_run_package(self):
for depth in range(1, 4):
if verbose: print "Testing package depth:", depth
self._check_package(depth)
def test_explicit_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing relative imports at depth:", depth
self._check_relative_imports(depth)
def test_main_relative_import(self):
for depth in range(2, 5):
if verbose: print "Testing main relative imports at depth:", depth
self._check_relative_imports(depth, "__main__")
class RunPathTest(unittest.TestCase):
"""Unit tests for runpy.run_path"""
# Based on corresponding tests in test_cmd_line_script
test_source = """\
# Script may be run with optimisation enabled, so don't rely on assert
# statements being executed
def assertEqual(lhs, rhs):
if lhs != rhs:
raise AssertionError('%r != %r' % (lhs, rhs))
def assertIs(lhs, rhs):
if lhs is not rhs:
raise AssertionError('%r is not %r' % (lhs, rhs))
# Check basic code execution
result = ['Top level assignment']
def f():
result.append('Lower level reference')
f()
assertEqual(result, ['Top level assignment', 'Lower level reference'])
# Check the sys module
import sys
assertIs(globals(), sys.modules[__name__].__dict__)
argv0 = sys.argv[0]
"""
def _make_test_script(self, script_dir, script_basename, source=None):
if source is None:
source = self.test_source
return make_script(script_dir, script_basename, source)
def _check_script(self, script_name, expected_name, expected_file,
expected_argv0, expected_package):
result = run_path(script_name)
self.assertEqual(result["__name__"], expected_name)
self.assertEqual(result["__file__"], expected_file)
self.assertIn("argv0", result)
self.assertEqual(result["argv0"], expected_argv0)
self.assertEqual(result["__package__"], expected_package)
def _check_import_error(self, script_name, msg):
msg = re.escape(msg)
self.assertRaisesRegexp(ImportError, msg, run_path, script_name)
def test_basic_script(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = self._make_test_script(script_dir, mod_name)
self._check_script(script_name, "<run_path>", script_name,
script_name, None)
def test_script_compiled(self):
with temp_dir() as script_dir:
mod_name = 'script'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = compile_script(script_name)
os.remove(script_name)
self._check_script(compiled_name, "<run_path>", compiled_name,
compiled_name, None)
def test_directory(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
self._check_script(script_dir, "<run_path>", script_name,
script_dir, '')
def test_directory_compiled(self):
if no_lone_pyc_file:
return
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = compile_script(script_name)
os.remove(script_name)
self._check_script(script_dir, "<run_path>", compiled_name,
script_dir, '')
def test_directory_error(self):
with temp_dir() as script_dir:
mod_name = 'not_main'
script_name = self._make_test_script(script_dir, mod_name)
msg = "can't find '__main__' module in %r" % script_dir
self._check_import_error(script_dir, msg)
def test_zipfile(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
self._check_script(zip_name, "<run_path>", fname, zip_name, '')
def test_zipfile_compiled(self):
with temp_dir() as script_dir:
mod_name = '__main__'
script_name = self._make_test_script(script_dir, mod_name)
compiled_name = compile_script(script_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', compiled_name)
self._check_script(zip_name, "<run_path>", fname, zip_name, '')
def test_zipfile_error(self):
with temp_dir() as script_dir:
mod_name = 'not_main'
script_name = self._make_test_script(script_dir, mod_name)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
msg = "can't find '__main__' module in %r" % zip_name
self._check_import_error(zip_name, msg)
def test_main_recursion_error(self):
with temp_dir() as script_dir, temp_dir() as dummy_dir:
mod_name = '__main__'
source = ("import runpy\n"
"runpy.run_path(%r)\n") % dummy_dir
script_name = self._make_test_script(script_dir, mod_name, source)
zip_name, fname = make_zip_script(script_dir, 'test_zip', script_name)
msg = "recursion depth exceeded"
self.assertRaisesRegexp(RuntimeError, msg, run_path, zip_name)
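# --- Hypothetical usage sketch (not part of the test module) ---
# Illustrates the behaviour exercised by RunPathTest: runpy.run_path executes a
# script file and hands back the resulting module globals. The temporary script
# below exists only for this demonstration.
def _example_run_path():
    import runpy
    import tempfile
    handle, script = tempfile.mkstemp(suffix='.py')
    os.write(handle, b"x = 1\n")
    os.close(handle)
    try:
        result = runpy.run_path(script)
        assert result["x"] == 1
    finally:
        os.remove(script)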
def test_main():
run_unittest(RunModuleCodeTest, RunModuleTest, RunPathTest)
if __name__ == "__main__":
test_main()
|
|
"""
shared options and groups
The principle here is to define options once, but *not* instantiate them
globally. One reason is that options with action='append' can carry state
between parses. pip parses general options twice internally, and shouldn't
pass on state. To be consistent, all options will follow this design.
"""
from __future__ import absolute_import
import copy
from optparse import OptionGroup, SUPPRESS_HELP, Option
from pip.locations import CA_BUNDLE_PATH, USER_CACHE_DIR, src_prefix
def make_option_group(group, parser):
"""
Return an OptionGroup object
group -- assumed to be dict with 'name' and 'options' keys
parser -- an optparse Parser
"""
option_group = OptionGroup(parser, group['name'])
for option in group['options']:
option_group.add_option(option.make())
return option_group
class OptionMaker(object):
"""Class that stores the args/kwargs that would be used to make an Option,
for making them later, and uses deepcopy's to reset state."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def make(self):
args_copy = copy.deepcopy(self.args)
kwargs_copy = copy.deepcopy(self.kwargs)
return Option(*args_copy, **kwargs_copy)
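# A brief, hypothetical illustration of the design note in the module docstring:
# because OptionMaker.make() deep-copies its stored args/kwargs, an option with
# action='append' handed out twice does not share its mutable default between
# parses. The '--example' option below is invented purely for this sketch.
def _example_option_state_isolation():
    example = OptionMaker('--example', dest='example', action='append', default=[])
    first = example.make()
    second = example.make()
    # Each Option gets its own default list, so appended values cannot leak
    # from one parse into the next.
    assert first.default is not second.default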
###########
# options #
###########
help_ = OptionMaker(
'-h', '--help',
dest='help',
action='help',
help='Show help.')
isolated_mode = OptionMaker(
"--isolated",
dest="isolated_mode",
action="store_true",
default=False,
help=(
"Run pip in an isolated mode, ignoring environment variables and user "
"configuration."
),
)
require_virtualenv = OptionMaker(
# Run only if inside a virtualenv, bail if not.
'--require-virtualenv', '--require-venv',
dest='require_venv',
action='store_true',
default=False,
help=SUPPRESS_HELP)
verbose = OptionMaker(
'-v', '--verbose',
dest='verbose',
action='count',
default=0,
help='Give more output. Option is additive, and can be used up to 3 times.'
)
version = OptionMaker(
'-V', '--version',
dest='version',
action='store_true',
help='Show version and exit.')
quiet = OptionMaker(
'-q', '--quiet',
dest='quiet',
action='count',
default=0,
help='Give less output.')
log = OptionMaker(
"--log", "--log-file", "--local-log",
dest="log",
metavar="path",
help="Path to a verbose appending log."
)
log_explicit_levels = OptionMaker(
    # Writes the log levels explicitly to the log
'--log-explicit-levels',
dest='log_explicit_levels',
action='store_true',
default=False,
help=SUPPRESS_HELP)
no_input = OptionMaker(
# Don't ask for input
'--no-input',
dest='no_input',
action='store_true',
default=False,
help=SUPPRESS_HELP)
proxy = OptionMaker(
'--proxy',
dest='proxy',
type='str',
default='',
help="Specify a proxy in the form [user:passwd@]proxy.server:port.")
retries = OptionMaker(
'--retries',
dest='retries',
type='int',
default=3,
help="Maximum number of retries each connection should attempt "
"(default %default times).")
timeout = OptionMaker(
'--timeout', '--default-timeout',
metavar='sec',
dest='timeout',
type='float',
default=15,
help='Set the socket timeout (default %default seconds).')
default_vcs = OptionMaker(
# The default version control system for editables, e.g. 'svn'
'--default-vcs',
dest='default_vcs',
type='str',
default='',
help=SUPPRESS_HELP)
skip_requirements_regex = OptionMaker(
# A regex to be used to skip requirements
'--skip-requirements-regex',
dest='skip_requirements_regex',
type='str',
default='',
help=SUPPRESS_HELP)
exists_action = OptionMaker(
# Option when path already exist
'--exists-action',
dest='exists_action',
type='choice',
choices=['s', 'i', 'w', 'b'],
default=[],
action='append',
metavar='action',
help="Default action when a path already exists: "
"(s)witch, (i)gnore, (w)ipe, (b)ackup.")
cert = OptionMaker(
'--cert',
dest='cert',
type='str',
default=CA_BUNDLE_PATH,
metavar='path',
help="Path to alternate CA bundle.")
client_cert = OptionMaker(
'--client-cert',
dest='client_cert',
type='str',
default=None,
metavar='path',
help="Path to SSL client certificate, a single file containing the "
"private key and the certificate in PEM format.")
no_check_certificate = OptionMaker(
"--no-check-certificate",
dest="no_check_certificate",
action="store_true",
default=False,
help="Don't validate SSL certificates.",
)
index_url = OptionMaker(
'-i', '--index-url', '--pypi-url',
dest='index_url',
metavar='URL',
default='https://pypi.python.org/simple/',
help='Base URL of Python Package Index (default %default).')
extra_index_url = OptionMaker(
'--extra-index-url',
dest='extra_index_urls',
metavar='URL',
action='append',
default=[],
help='Extra URLs of package indexes to use in addition to --index-url.')
no_index = OptionMaker(
'--no-index',
dest='no_index',
action='store_true',
default=False,
help='Ignore package index (only looking at --find-links URLs instead).')
find_links = OptionMaker(
'-f', '--find-links',
dest='find_links',
action='append',
default=[],
metavar='url',
help="If a url or path to an html file, then parse for links to archives. "
"If a local path or file:// url that's a directory, then look for "
"archives in the directory listing.")
# TODO: Remove after 6.0
use_mirrors = OptionMaker(
'-M', '--use-mirrors',
dest='use_mirrors',
action='store_true',
default=False,
help=SUPPRESS_HELP)
# TODO: Remove after 6.0
mirrors = OptionMaker(
'--mirrors',
dest='mirrors',
metavar='URL',
action='append',
default=[],
help=SUPPRESS_HELP)
allow_external = OptionMaker(
"--allow-external",
dest="allow_external",
action="append",
default=[],
metavar="PACKAGE",
help="Allow the installation of a package even if it is externally hosted",
)
allow_all_external = OptionMaker(
"--allow-all-external",
dest="allow_all_external",
action="store_true",
default=False,
help="Allow the installation of all packages that are externally hosted",
)
# Remove after 7.0
no_allow_external = OptionMaker(
"--no-allow-external",
dest="allow_all_external",
action="store_false",
default=False,
help=SUPPRESS_HELP,
)
# Remove --allow-insecure after 7.0
allow_unsafe = OptionMaker(
"--allow-unverified", "--allow-insecure",
dest="allow_unverified",
action="append",
default=[],
metavar="PACKAGE",
help="Allow the installation of a package even if it is hosted "
"in an insecure and unverifiable way",
)
# Remove after 7.0
no_allow_unsafe = OptionMaker(
"--no-allow-insecure",
dest="allow_all_insecure",
action="store_false",
default=False,
help=SUPPRESS_HELP
)
# Remove after 1.5
process_dependency_links = OptionMaker(
"--process-dependency-links",
dest="process_dependency_links",
action="store_true",
default=False,
help="Enable the processing of dependency links.",
)
requirements = OptionMaker(
'-r', '--requirement',
dest='requirements',
action='append',
default=[],
metavar='file',
help='Install from the given requirements file. '
'This option can be used multiple times.')
editable = OptionMaker(
'-e', '--editable',
dest='editables',
action='append',
default=[],
metavar='path/url',
help=('Install a project in editable mode (i.e. setuptools '
'"develop mode") from a local project path or a VCS url.'),
)
src = OptionMaker(
'--src', '--source', '--source-dir', '--source-directory',
dest='src_dir',
metavar='dir',
default=src_prefix,
help='Directory to check out editable projects into. '
'The default in a virtualenv is "<venv path>/src". '
'The default for global installs is "<current dir>/src".'
)
use_wheel = OptionMaker(
'--use-wheel',
dest='use_wheel',
action='store_true',
help=SUPPRESS_HELP,
)
no_use_wheel = OptionMaker(
'--no-use-wheel',
dest='use_wheel',
action='store_false',
default=True,
    help=('Do not find and prefer wheel archives when searching indexes and '
'find-links locations.'),
)
cache_dir = OptionMaker(
"--cache-dir",
dest="cache_dir",
default=USER_CACHE_DIR,
metavar="dir",
help="Store the cache data in <dir>."
)
no_cache = OptionMaker(
"--no-cache-dir",
dest="cache_dir",
action="store_false",
help="Disable the cache.",
)
download_cache = OptionMaker(
'--download-cache',
dest='download_cache',
default=None,
help=SUPPRESS_HELP)
no_deps = OptionMaker(
'--no-deps', '--no-dependencies',
dest='ignore_dependencies',
action='store_true',
default=False,
help="Don't install package dependencies.")
build_dir = OptionMaker(
'-b', '--build', '--build-dir', '--build-directory',
dest='build_dir',
metavar='dir',
help='Directory to unpack packages into and build in.'
)
install_options = OptionMaker(
'--install-option',
dest='install_options',
action='append',
metavar='options',
help="Extra arguments to be supplied to the setup.py install "
"command (use like --install-option=\"--install-scripts=/usr/local/"
"bin\"). Use multiple --install-option options to pass multiple "
"options to setup.py install. If you are using an option with a "
"directory path, be sure to use absolute path.")
global_options = OptionMaker(
'--global-option',
dest='global_options',
action='append',
metavar='options',
help="Extra global options to be supplied to the setup.py "
"call before the install command.")
no_clean = OptionMaker(
'--no-clean',
action='store_true',
default=False,
help="Don't clean up build directories.")
disable_pip_version_check = OptionMaker(
"--disable-pip-version-check",
dest="disable_pip_version_check",
action="store_true",
default=False,
help="Don't periodically check PyPI to determine whether a new version "
"of pip is available for download.")
##########
# groups #
##########
general_group = {
'name': 'General Options',
'options': [
help_,
isolated_mode,
require_virtualenv,
verbose,
version,
quiet,
log,
log_explicit_levels,
no_input,
proxy,
retries,
timeout,
default_vcs,
skip_requirements_regex,
exists_action,
cert,
client_cert,
no_check_certificate,
cache_dir,
no_cache,
disable_pip_version_check,
]
}
index_group = {
'name': 'Package Index Options',
'options': [
index_url,
extra_index_url,
no_index,
find_links,
use_mirrors,
mirrors,
allow_external,
allow_all_external,
no_allow_external,
allow_unsafe,
no_allow_unsafe,
process_dependency_links,
]
}
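# Illustrative sketch (not part of the original module): one way these group
# dicts might be attached to an optparse parser. It assumes OptionMaker exposes
# a make() helper that builds an optparse.Option, which may differ from the
# real helper used elsewhere in this module; treat the snippet as an example,
# not the canonical API.
#
#     from optparse import OptionGroup, OptionParser
#
#     parser = OptionParser()
#     group = OptionGroup(parser, general_group['name'])
#     for option_maker in general_group['options']:
#         group.add_option(option_maker.make())
#     parser.add_option_group(group)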
|
|
# Copyright 2016 Hewlett Packard Enterprise Development Company LP.
# Copyright 2017 IBM Corp
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from cinderclient import exceptions as cinder_exceptions
from cinderclient.v3 import client as client
from oslo_log import log
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import keystone
from ironic.conf import CONF
LOG = log.getLogger(__name__)
AVAILABLE = 'available'
IN_USE = 'in-use'
_CINDER_SESSION = None
def _get_cinder_session():
global _CINDER_SESSION
if not _CINDER_SESSION:
_CINDER_SESSION = keystone.get_session('cinder')
return _CINDER_SESSION
def get_client():
"""Get a cinder client connection.
:returns: A cinder client.
"""
params = {
'connect_retries': CONF.cinder.retries
}
# TODO(jtaryma): Add support for noauth
# NOTE(TheJulia): If a URL is provided for cinder, we will pass
# along the URL to python-cinderclient. Otherwise the library
# handles keystone url autodetection.
if CONF.cinder.url:
params['endpoint_override'] = CONF.cinder.url
if CONF.keystone.region_name:
params['region_name'] = CONF.keystone.region_name
params['session'] = _get_cinder_session()
return client.Client(**params)
def is_volume_available(volume):
"""Check if a volume is available for a connection.
:param volume: The object representing the volume.
:returns: Boolean if volume is available.
"""
return (volume.status == AVAILABLE or
(volume.status == IN_USE and
volume.multiattach))
def is_volume_attached(node, volume):
"""Check if a volume is attached to the supplied node.
:param node: The object representing the node.
:param volume: The object representing the volume from cinder.
:returns: Boolean indicating if the volume is attached. Returns True if
cinder shows the volume as presently attached, otherwise
returns False.
"""
attachments = volume.attachments
if attachments is not None:
for attachment in attachments:
if attachment.get('server_id') in (node.instance_uuid, node.uuid):
return True
return False
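# Illustrative sketch (assumption, not part of the original module): the cinder
# attachment entries inspected above look roughly like
#
#     volume.attachments == [{'server_id': '<instance or node uuid>',
#                             'attachment_id': '<attachment uuid>', ...}]
#
# so a volume counts as attached if its server_id matches either the nova
# instance UUID or the bare node UUID.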
def _get_attachment_id(node, volume):
"""Return the attachment ID for a node to a volume.
:param node: The object representing the node.
:param volume: The object representing the volume from cinder.
:returns: The UUID of the attachment in cinder, if present. Otherwise
returns None.
"""
# NOTE(TheJulia): This is under the belief that there is a single
# attachment for each node that represents all possible attachment
# information as multiple types can be submitted in a single request.
attachments = volume.attachments
if attachments is not None:
for attachment in attachments:
if attachment.get('server_id') in (node.instance_uuid, node.uuid):
return attachment.get('attachment_id')
def _create_metadata_dictionary(node, action):
"""Create a volume metadata dictionary utilizing the node UUID.
:param node: Object representing a node.
:param action: String value representing the last action.
:returns: Metadata dictionary for volume.
"""
label = "ironic_node_%s" % node.uuid
return {
label: {
'instance_uuid': node.instance_uuid,
'last_seen': datetime.datetime.utcnow().isoformat(),
'last_action': action}}
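# Illustrative sketch (assumption, not part of the original module): the shape
# of the dictionary returned above, shown only to document the structure; the
# UUID values are placeholders.
#
#     _create_metadata_dictionary(node, 'attached')
#     # => {'ironic_node_<node.uuid>': {
#     #         'instance_uuid': '<node.instance_uuid>',
#     #         'last_seen': '2017-01-01T00:00:00.000000',
#     #         'last_action': 'attached'}}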
def _init_client_for_operations(task):
"""Obtain cinder client and return it for use.
:param task: TaskManager instance representing the operation.
:returns: A cinder client.
:raises: StorageError If an exception is encountered creating the client.
"""
node = task.node
try:
return get_client()
except Exception as e:
msg = (_('Failed to initialize cinder client for node %(uuid)s: %('
'err)s') % {'uuid': node.uuid, 'err': e})
LOG.error(msg)
raise exception.StorageError(msg)
def attach_volumes(task, volume_list, connector):
"""Attach volumes to a node.
Enumerate through the provided list of volumes and attach the volumes
to the node defined in the task utilizing the provided connector
information.
If an attachment appears to already exist, we will skip attempting to
attach the volume. If use of the volume fails, a user may need to
remove any lingering pre-existing/unused attachment records since
we have no way to validate if the connector profile data differs
from what was provided to cinder.
:param task: TaskManager instance representing the operation.
:param volume_list: List of volume_id UUID values representing volumes.
:param connector: Dictionary object representing the node sufficiently
to attach a volume. This value can vary based upon
the node's configuration, capability, and ultimately
the back-end storage driver. As cinder was designed
around iSCSI, the 'ip' and 'initiator' keys are
generally expected by cinder drivers.
                      For Fibre Channel, the key 'wwpns' can be used
with a list of port addresses.
Some drivers support a 'multipath' boolean key,
although it is generally False. The 'host' key
is generally used for logging by drivers.
Example:
{
'wwpns': ['list','of','port','wwns'],
'ip': 'ip address',
'initiator': 'initiator iqn',
'multipath': False,
'host': 'hostname',
}
:raises: StorageError If storage subsystem exception is raised.
:raises: TypeError If the supplied volume_list is not a list.
:returns: List of connected volumes, including volumes that were
already connected to desired nodes. The returned list
can be relatively consistent depending on the end storage
driver that the volume is configured for, however
the 'driver_volume_type' key should not be relied upon
as it is a free-form value returned by the driver.
The accompanying 'data' key contains the actual target
details which will indicate either target WWNs and a LUN
or a target portal and IQN. It also always contains
volume ID in cinder and ironic. Except for these two IDs,
each driver may return somewhat different data although
the same keys are used if the target is FC or iSCSI,
so any logic should be based upon the returned contents.
For already attached volumes, the structure contains
'already_attached': True key-value pair. In such case,
connection info for the node is already in the database,
'data' structure contains only basic info of volume ID in
cinder and ironic, so any logic based on that should
retrieve it from the database.
Example:
[{
'driver_volume_type': 'fibre_channel'
'data': {
'encrypted': False,
'target_lun': 1,
'target_wwn': ['1234567890123', '1234567890124'],
'volume_id': '00000000-0000-0000-0000-000000000001',
'ironic_volume_id':
'11111111-0000-0000-0000-000000000001'}
},
{
'driver_volume_type': 'iscsi'
'data': {
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000002',
            'target_portal': '127.0.0.1:3260',
'volume_id': '00000000-0000-0000-0000-000000000002',
'ironic_volume_id':
'11111111-0000-0000-0000-000000000002',
'target_lun': 2}
},
{
'already_attached': True
'data': {
'volume_id': '00000000-0000-0000-0000-000000000002',
'ironic_volume_id':
'11111111-0000-0000-0000-000000000002'}
}]
"""
node = task.node
client = _init_client_for_operations(task)
connected = []
for volume_id in volume_list:
try:
volume = client.volumes.get(volume_id)
except cinder_exceptions.ClientException as e:
msg = (_('Failed to get volume %(vol_id)s from cinder for node '
'%(uuid)s: %(err)s') %
{'vol_id': volume_id, 'uuid': node.uuid, 'err': e})
LOG.error(msg)
raise exception.StorageError(msg)
if is_volume_attached(node, volume):
LOG.debug('Volume %(vol_id)s is already attached to node '
'%(uuid)s. Skipping attachment.',
{'vol_id': volume_id, 'uuid': node.uuid})
# NOTE(jtaryma): Actual connection info of already connected
# volume will be provided by nova. Adding this dictionary to
# 'connected' list so it contains also already connected volumes.
connection = {'data': {'ironic_volume_uuid': volume.uuid,
'volume_id': volume_id},
'already_attached': True}
connected.append(connection)
continue
try:
client.volumes.reserve(volume_id)
except cinder_exceptions.ClientException as e:
msg = (_('Failed to reserve volume %(vol_id)s for node %(node)s: '
                      '%(err)s') %
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
LOG.error(msg)
raise exception.StorageError(msg)
try:
# Provide connector information to cinder
connection = client.volumes.initialize_connection(volume_id,
connector)
if 'volume_id' not in connection['data']:
connection['data']['volume_id'] = volume_id
connection['data']['ironic_volume_uuid'] = volume.uuid
connected.append(connection)
except cinder_exceptions.ClientException as e:
msg = (_('Failed to initialize connection for volume '
'%(vol_id)s to node %(node)s: %(err)s') %
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
LOG.error(msg)
raise exception.StorageError(msg)
LOG.info('Successfully initialized volume %(vol_id)s for '
'node %(node)s.', {'vol_id': volume_id, 'node': node.uuid})
instance_uuid = node.instance_uuid or node.uuid
try:
# NOTE(TheJulia): The final step of the cinder volume
# attachment process involves updating the volume
# database record to indicate that the attachment has
# been completed, which moves the volume to the
# 'attached' state. This action also sets a mountpoint
# for the volume, if known. In our use case, there is
# no way for us to know what the mountpoint is inside of
# the operating system, thus we send None.
client.volumes.attach(volume_id, instance_uuid, None)
except cinder_exceptions.ClientException as e:
msg = (_('Failed to inform cinder that the attachment for volume '
'%(vol_id)s for node %(node)s has been completed: '
'%(err)s') %
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
LOG.error(msg)
raise exception.StorageError(msg)
try:
# Set metadata to assist a user in volume identification
client.volumes.set_metadata(
volume_id,
_create_metadata_dictionary(node, 'attached'))
except cinder_exceptions.ClientException as e:
LOG.warning('Failed to update volume metadata for volume '
'%(vol_id)s for node %(node)s: %(err)s',
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
return connected
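# Illustrative usage sketch (assumption, not part of the original module): how
# a caller might invoke attach_volumes() with an iSCSI-style connector. The
# connector values and volume UUID are placeholders.
#
#     connector = {'ip': '192.0.2.10',
#                  'initiator': 'iqn.2017-01.org.openstack:node',
#                  'multipath': False,
#                  'host': 'node-hostname'}
#     results = attach_volumes(task, ['<cinder volume uuid>'], connector)
#     for result in results:
#         if result.get('already_attached'):
#             continue  # connection info is already recorded elsewhere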
def detach_volumes(task, volume_list, connector, allow_errors=False):
"""Detach a list of volumes from a provided connector detail.
Enumerates through a provided list of volumes and issues
detachment requests utilizing the connector information
that describes the node.
:param task: The TaskManager task representing the request.
:param volume_list: The list of volume id values to detach.
:param connector: Dictionary object representing the node sufficiently
to attach a volume. This value can vary based upon
the node's configuration, capability, and ultimately
the back-end storage driver. As cinder was designed
around iSCSI, the 'ip' and 'initiator' keys are
                      generally expected. For Fibre Channel, the key
'wwpns' can be used with a list of port addresses.
Some drivers support a 'multipath' boolean key,
although it is generally False. The 'host' key
is generally used for logging by drivers.
Example:
{
'wwpns': ['list','of','port','wwns']
'ip': 'ip address',
'initiator': 'initiator iqn',
'multipath': False,
'host': 'hostname'
}
:param allow_errors: Boolean value governing if errors that are returned
are treated as warnings instead of exceptions.
Default False.
:raises: TypeError If the supplied volume_list is not a sequence.
:raises: StorageError
"""
def _handle_errors(msg):
if allow_errors:
LOG.warning(msg)
else:
LOG.error(msg)
raise exception.StorageError(msg)
client = _init_client_for_operations(task)
node = task.node
for volume_id in volume_list:
try:
volume = client.volumes.get(volume_id)
except cinder_exceptions.ClientException as e:
_handle_errors(_('Failed to get volume %(vol_id)s from cinder for '
'node %(node)s: %(err)s') %
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
# If we do not raise an exception, we should move on to
# the next volume since the volume could have been deleted
# before we're attempting to power off the node.
continue
if not is_volume_attached(node, volume):
LOG.debug('Volume %(vol_id)s is not attached to node '
'%(uuid)s: Skipping detachment.',
{'vol_id': volume_id, 'uuid': node.uuid})
continue
try:
client.volumes.begin_detaching(volume_id)
except cinder_exceptions.ClientException as e:
_handle_errors(_('Failed to request detach for volume %(vol_id)s '
'from cinder for node %(node)s: %(err)s') %
{'vol_id': volume_id, 'node': node.uuid, 'err': e}
)
# NOTE(jtaryma): This operation only updates the volume status, so
# we can proceed the process of actual detachment if allow_errors
# is set to True.
try:
# Remove the attachment
client.volumes.terminate_connection(volume_id, connector)
except cinder_exceptions.ClientException as e:
_handle_errors(_('Failed to detach volume %(vol_id)s from node '
'%(node)s: %(err)s') %
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
# Skip proceeding with this method if we're not raising
# errors. This will leave the volume in the detaching
# state, but in that case something very unexpected
            # has occurred.
continue
# Attempt to identify the attachment id value to provide
# accessible relationship data to leave in the cinder API
# to enable reconciliation.
attachment_id = _get_attachment_id(node, volume)
try:
# Update the API attachment record
client.volumes.detach(volume_id, attachment_id)
except cinder_exceptions.ClientException as e:
_handle_errors(_('Failed to inform cinder that the detachment for '
'volume %(vol_id)s from node %(node)s has been '
'completed: %(err)s') %
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
# NOTE(jtaryma): This operation mainly updates the volume status,
# so we can proceed the process of volume updating if allow_errors
# is set to True.
try:
# Set metadata to assist in volume identification.
client.volumes.set_metadata(
volume_id,
_create_metadata_dictionary(node, 'detached'))
except cinder_exceptions.ClientException as e:
LOG.warning('Failed to update volume %(vol_id)s metadata for node '
'%(node)s: %(err)s',
{'vol_id': volume_id, 'node': node.uuid, 'err': e})
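# Illustrative usage sketch (assumption, not part of the original module):
# detaching the same volumes during teardown, with allow_errors=True so a
# stale or half-removed attachment does not block the operation.
#
#     detach_volumes(task, ['<cinder volume uuid>'], connector,
#                    allow_errors=True)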
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.linalg.linalg_impl.tridiagonal_solve."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.linalg import linalg_impl
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
_sample_diags = np.array([[2, 1, 4, 0], [1, 3, 2, 2], [0, 1, -1, 1]])
_sample_rhs = np.array([1, 2, 3, 4])
_sample_result = np.array([-9, 5, -4, 4])
# Flag, indicating that test should be run only with partial_pivoting=True
FLAG_REQUIRES_PIVOTING = "FLAG_REQUIRES_PIVOT"
# Flag, indicating that test shouldn't be parameterized by different values of
# partial_pivoting, etc.
FLAG_NO_PARAMETERIZATION = "FLAG_NO_PARAMETERIZATION"
def flags(*args):
def decorator(f):
for flag in args:
setattr(f, flag, True)
return f
return decorator
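# Illustrative note (not from the original file): a test marked with
# @flags(FLAG_REQUIRES_PIVOTING), e.g.
#
#   @flags(FLAG_REQUIRES_PIVOTING)
#   def testSomethingNeedingPivoting(self):  # hypothetical name
#     ...
#
# only receives a "_pivoting" variant from the parameterization loop in
# __main__ below, i.e. it is never run with partial_pivoting=False.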
def _tfconst(array):
return constant_op.constant(array, dtypes.float64)
def _tf_ones(shape):
return array_ops.ones(shape, dtype=dtypes.float64)
class TridiagonalSolveOpTest(test.TestCase):
def _test(self,
diags,
rhs,
expected,
diags_format="compact",
transpose_rhs=False,
conjugate_rhs=False):
with self.cached_session():
pivoting = True
if hasattr(self, "pivoting"):
pivoting = self.pivoting
if test_util.is_xla_enabled() and pivoting:
# Pivoting is not supported by xla backends.
return
result = linalg_impl.tridiagonal_solve(
diags,
rhs,
diags_format,
transpose_rhs,
conjugate_rhs,
partial_pivoting=pivoting)
self.assertAllClose(self.evaluate(result), expected)
def _testWithLists(self,
diags,
rhs,
expected,
diags_format="compact",
transpose_rhs=False,
conjugate_rhs=False):
self._test(
_tfconst(diags), _tfconst(rhs), _tfconst(expected), diags_format,
transpose_rhs, conjugate_rhs)
def _assertRaises(self, diags, rhs, diags_format="compact"):
pivoting = True
if hasattr(self, "pivoting"):
pivoting = self.pivoting
if test_util.is_xla_enabled() and pivoting:
# Pivoting is not supported by xla backends.
return
with self.assertRaises(ValueError):
linalg_impl.tridiagonal_solve(
diags, rhs, diags_format, partial_pivoting=pivoting)
# Tests with various dtypes
def testReal(self):
for dtype in dtypes.float32, dtypes.float64:
self._test(
diags=constant_op.constant(_sample_diags, dtype),
rhs=constant_op.constant(_sample_rhs, dtype),
expected=constant_op.constant(_sample_result, dtype))
def testComplex(self):
for dtype in dtypes.complex64, dtypes.complex128:
self._test(
diags=constant_op.constant(_sample_diags, dtype) * (1 + 1j),
rhs=constant_op.constant(_sample_rhs, dtype) * (1 - 1j),
expected=constant_op.constant(_sample_result, dtype) * (1 - 1j) /
(1 + 1j))
# Tests with small matrix sizes
def test3x3(self):
self._testWithLists(
diags=[[2, -1, 0], [1, 3, 1], [0, -1, -2]],
rhs=[1, 2, 3],
expected=[-3, 2, 7])
def test2x2(self):
self._testWithLists(
diags=[[2, 0], [1, 3], [0, 1]], rhs=[1, 4], expected=[-5, 3])
def test2x2Complex(self):
for dtype in dtypes.complex64, dtypes.complex128:
self._test(
diags=constant_op.constant([[2j, 0j], [1j, 3j], [0j, 1j]], dtype),
rhs=constant_op.constant([1 - 1j, 4 - 4j], dtype),
expected=constant_op.constant([5 + 5j, -3 - 3j], dtype))
def test1x1(self):
self._testWithLists(diags=[[0], [3], [0]], rhs=[6], expected=[2])
def test0x0(self):
if test_util.is_xla_enabled():
# The following test crashes with XLA due to slicing 0 length tensors.
return
self._test(
diags=constant_op.constant(0, shape=(3, 0), dtype=dtypes.float32),
rhs=constant_op.constant(0, shape=(0, 1), dtype=dtypes.float32),
expected=constant_op.constant(0, shape=(0, 1), dtype=dtypes.float32))
def test2x2WithMultipleRhs(self):
self._testWithLists(
diags=[[2, 0], [1, 3], [0, 1]],
rhs=[[1, 2, 3], [4, 8, 12]],
expected=[[-5, -10, -15], [3, 6, 9]])
def test1x1WithMultipleRhs(self):
self._testWithLists(
diags=[[0], [3], [0]], rhs=[[6, 9, 12]], expected=[[2, 3, 4]])
def test1x1NotInvertible(self):
if test_util.is_xla_enabled():
# XLA implementation does not check invertibility.
return
with self.assertRaises(errors_impl.InvalidArgumentError):
self._testWithLists(diags=[[0], [0], [0]], rhs=[[6, 9, 12]], expected=[])
def test2x2NotInvertible(self):
if test_util.is_xla_enabled():
# XLA implementation does not check invertibility.
return
with self.assertRaises(errors_impl.InvalidArgumentError):
self._testWithLists(
diags=[[3, 0], [1, 3], [0, 1]], rhs=[1, 4], expected=[])
# Other edge cases
@flags(FLAG_REQUIRES_PIVOTING)
def testCaseRequiringPivoting(self):
# Without partial pivoting (e.g. Thomas algorithm) this would fail.
self._testWithLists(
diags=[[2, -1, 1, 0], [1, 4, 1, -1], [0, 2, -2, 3]],
rhs=[1, 2, 3, 4],
expected=[8, -3.5, 0, -4])
@flags(FLAG_REQUIRES_PIVOTING)
def testCaseRequiringPivotingLastRows(self):
self._testWithLists(
diags=[[2, 1, -1, 0], [1, -1, 2, 1], [0, 1, -6, 1]],
rhs=[1, 2, -1, -2],
expected=[5, -2, -5, 3])
def testNotInvertible(self):
if test.is_gpu_available(cuda_only=True) or test_util.is_xla_enabled():
# CuSparse gtsv routines don't raise errors for non-invertible
# matrices.
return
with self.assertRaises(errors_impl.InvalidArgumentError):
self._testWithLists(
diags=[[2, -1, 1, 0], [1, 4, 1, -1], [0, 2, 0, 3]],
rhs=[1, 2, 3, 4],
expected=[8, -3.5, 0, -4])
def testDiagonal(self):
self._testWithLists(
diags=[[0, 0, 0, 0], [1, 2, -1, -2], [0, 0, 0, 0]],
rhs=[1, 2, 3, 4],
expected=[1, 1, -3, -2])
def testUpperTriangular(self):
self._testWithLists(
diags=[[2, 4, -1, 0], [1, 3, 1, 2], [0, 0, 0, 0]],
rhs=[1, 6, 4, 4],
expected=[13, -6, 6, 2])
def testLowerTriangular(self):
self._testWithLists(
diags=[[0, 0, 0, 0], [2, -1, 3, 1], [0, 1, 4, 2]],
rhs=[4, 5, 6, 1],
expected=[2, -3, 6, -11])
# Multiple right-hand sides and batching
def testWithTwoRightHandSides(self):
self._testWithLists(
diags=_sample_diags,
rhs=np.transpose([_sample_rhs, 2 * _sample_rhs]),
expected=np.transpose([_sample_result, 2 * _sample_result]))
def testBatching(self):
self._testWithLists(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=np.array([_sample_result, -2 * _sample_result]))
def testWithTwoBatchingDimensions(self):
self._testWithLists(
diags=np.array([[_sample_diags, -_sample_diags, _sample_diags],
[-_sample_diags, _sample_diags, -_sample_diags]]),
rhs=np.array([[_sample_rhs, 2 * _sample_rhs, 3 * _sample_rhs],
[4 * _sample_rhs, 5 * _sample_rhs, 6 * _sample_rhs]]),
expected=np.array(
[[_sample_result, -2 * _sample_result, 3 * _sample_result],
[-4 * _sample_result, 5 * _sample_result, -6 * _sample_result]]))
def testBatchingAndTwoRightHandSides(self):
rhs = np.transpose([_sample_rhs, 2 * _sample_rhs])
expected_result = np.transpose([_sample_result, 2 * _sample_result])
self._testWithLists(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([rhs, 2 * rhs]),
expected=np.array([expected_result, -2 * expected_result]))
# Various input formats
def testSequenceFormat(self):
self._test(
diags=(_tfconst([2, 1, 4]), _tfconst([1, 3, 2, 2]), _tfconst([1, -1,
1])),
rhs=_tfconst([1, 2, 3, 4]),
expected=_tfconst([-9, 5, -4, 4]),
diags_format="sequence")
def testSequenceFormatWithDummyElements(self):
dummy = 20
self._test(
diags=(_tfconst([2, 1, 4,
dummy]), _tfconst([1, 3, 2,
2]), _tfconst([dummy, 1, -1, 1])),
rhs=_tfconst([1, 2, 3, 4]),
expected=_tfconst([-9, 5, -4, 4]),
diags_format="sequence")
def testSequenceFormatWithBatching(self):
self._test(
diags=(_tfconst([[2, 1, 4], [-2, -1, -4]]),
_tfconst([[1, 3, 2, 2],
[-1, -3, -2, -2]]), _tfconst([[1, -1, 1], [-1, 1,
-1]])),
rhs=_tfconst([[1, 2, 3, 4], [1, 2, 3, 4]]),
expected=_tfconst([[-9, 5, -4, 4], [9, -5, 4, -4]]),
diags_format="sequence")
def testMatrixFormat(self):
self._testWithLists(
diags=[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]],
rhs=[1, 2, 3, 4],
expected=[-9, 5, -4, 4],
diags_format="matrix")
def testMatrixFormatWithMultipleRightHandSides(self):
self._testWithLists(
diags=[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]],
rhs=[[1, -1], [2, -2], [3, -3], [4, -4]],
expected=[[-9, 9], [5, -5], [-4, 4], [4, -4]],
diags_format="matrix")
def testMatrixFormatWithBatching(self):
self._testWithLists(
diags=[[[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4], [0, 0, 1, 2]],
[[-1, -2, 0, 0], [-1, -3, -1, 0], [0, 1, -2, -4], [0, 0, -1,
-2]]],
rhs=[[1, 2, 3, 4], [1, 2, 3, 4]],
expected=[[-9, 5, -4, 4], [9, -5, 4, -4]],
diags_format="matrix")
def testRightHandSideAsColumn(self):
self._testWithLists(
diags=_sample_diags,
rhs=np.transpose([_sample_rhs]),
expected=np.transpose([_sample_result]),
diags_format="compact")
# Tests with transpose and adjoint
def testTransposeRhs(self):
self._testWithLists(
diags=_sample_diags,
rhs=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=np.array([_sample_result, 2 * _sample_result]).T,
transpose_rhs=True)
def testConjugateRhs(self):
self._testWithLists(
diags=_sample_diags,
rhs=np.transpose([_sample_rhs * (1 + 1j), _sample_rhs * (1 - 2j)]),
expected=np.transpose(
[_sample_result * (1 - 1j), _sample_result * (1 + 2j)]),
conjugate_rhs=True)
def testAdjointRhs(self):
self._testWithLists(
diags=_sample_diags,
rhs=np.array([_sample_rhs * (1 + 1j), _sample_rhs * (1 - 2j)]),
expected=np.array(
[_sample_result * (1 - 1j), _sample_result * (1 + 2j)]).T,
transpose_rhs=True,
conjugate_rhs=True)
def testTransposeRhsWithBatching(self):
self._testWithLists(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([[_sample_rhs, 2 * _sample_rhs],
[3 * _sample_rhs, 4 * _sample_rhs]]),
expected=np.array([[_sample_result, 2 * _sample_result],
[-3 * _sample_result,
-4 * _sample_result]]).transpose(0, 2, 1),
transpose_rhs=True)
def testTransposeRhsWithRhsAsVector(self):
self._testWithLists(
diags=_sample_diags,
rhs=_sample_rhs,
expected=_sample_result,
transpose_rhs=True)
def testConjugateRhsWithRhsAsVector(self):
self._testWithLists(
diags=_sample_diags,
rhs=_sample_rhs * (1 + 1j),
expected=_sample_result * (1 - 1j),
conjugate_rhs=True)
def testTransposeRhsWithRhsAsVectorAndBatching(self):
self._testWithLists(
diags=np.array([_sample_diags, -_sample_diags]),
rhs=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=np.array([_sample_result, -2 * _sample_result]),
transpose_rhs=True)
# Gradient tests
def _gradientTest(
self,
diags,
rhs,
y, # output = reduce_sum(y * tridiag_solve(diags, rhs))
expected_grad_diags, # expected gradient of output w.r.t. diags
expected_grad_rhs, # expected gradient of output w.r.t. rhs
diags_format="compact",
transpose_rhs=False,
conjugate_rhs=False,
feed_dict=None):
expected_grad_diags = _tfconst(expected_grad_diags)
expected_grad_rhs = _tfconst(expected_grad_rhs)
with backprop.GradientTape() as tape_diags:
with backprop.GradientTape() as tape_rhs:
tape_diags.watch(diags)
tape_rhs.watch(rhs)
if test_util.is_xla_enabled():
# Pivoting is not supported by xla backends.
return
x = linalg_impl.tridiagonal_solve(
diags,
rhs,
diagonals_format=diags_format,
transpose_rhs=transpose_rhs,
conjugate_rhs=conjugate_rhs)
res = math_ops.reduce_sum(x * y)
with self.cached_session() as sess:
actual_grad_diags = sess.run(
tape_diags.gradient(res, diags), feed_dict=feed_dict)
actual_rhs_diags = sess.run(
tape_rhs.gradient(res, rhs), feed_dict=feed_dict)
self.assertAllClose(expected_grad_diags, actual_grad_diags)
self.assertAllClose(expected_grad_rhs, actual_rhs_diags)
def _gradientTestWithLists(self,
diags,
rhs,
y,
expected_grad_diags,
expected_grad_rhs,
diags_format="compact",
transpose_rhs=False,
conjugate_rhs=False):
self._gradientTest(
_tfconst(diags), _tfconst(rhs), _tfconst(y), expected_grad_diags,
expected_grad_rhs, diags_format, transpose_rhs, conjugate_rhs)
def testGradientSimple(self):
self._gradientTestWithLists(
diags=_sample_diags,
rhs=_sample_rhs,
y=[1, 3, 2, 4],
expected_grad_diags=[[-5, 0, 4, 0], [9, 0, -4, -16], [0, 0, 5, 16]],
expected_grad_rhs=[1, 0, -1, 4])
def testGradientWithMultipleRhs(self):
self._gradientTestWithLists(
diags=_sample_diags,
rhs=[[1, 2], [2, 4], [3, 6], [4, 8]],
y=[[1, 5], [2, 6], [3, 7], [4, 8]],
expected_grad_diags=([[-20, 28, -60, 0], [36, -35, 60, 80],
[0, 63, -75, -80]]),
expected_grad_rhs=[[0, 2], [1, 3], [1, 7], [0, -10]])
def _makeDataForGradientWithBatching(self):
y = np.array([1, 3, 2, 4])
grad_diags = np.array([[-5, 0, 4, 0], [9, 0, -4, -16], [0, 0, 5, 16]])
grad_rhs = np.array([1, 0, -1, 4])
diags_batched = np.array(
[[_sample_diags, 2 * _sample_diags, 3 * _sample_diags],
[4 * _sample_diags, 5 * _sample_diags, 6 * _sample_diags]])
rhs_batched = np.array([[_sample_rhs, -_sample_rhs, _sample_rhs],
[-_sample_rhs, _sample_rhs, -_sample_rhs]])
y_batched = np.array([[y, y, y], [y, y, y]])
expected_grad_diags_batched = np.array(
[[grad_diags, -grad_diags / 4, grad_diags / 9],
[-grad_diags / 16, grad_diags / 25, -grad_diags / 36]])
expected_grad_rhs_batched = np.array(
[[grad_rhs, grad_rhs / 2, grad_rhs / 3],
[grad_rhs / 4, grad_rhs / 5, grad_rhs / 6]])
return (y_batched, diags_batched, rhs_batched, expected_grad_diags_batched,
expected_grad_rhs_batched)
def testGradientWithBatchDims(self):
y, diags, rhs, expected_grad_diags, expected_grad_rhs = \
self._makeDataForGradientWithBatching()
self._gradientTestWithLists(
diags=diags,
rhs=rhs,
y=y,
expected_grad_diags=expected_grad_diags,
expected_grad_rhs=expected_grad_rhs)
@test_util.run_deprecated_v1
def testGradientWithUnknownShapes(self):
def placeholder(rank):
return array_ops.placeholder(
dtypes.float64, shape=(None for _ in range(rank)))
y, diags, rhs, expected_grad_diags, expected_grad_rhs = \
self._makeDataForGradientWithBatching()
diags_placeholder = placeholder(rank=4)
rhs_placeholder = placeholder(rank=3)
y_placeholder = placeholder(rank=3)
self._gradientTest(
diags=diags_placeholder,
rhs=rhs_placeholder,
y=y_placeholder,
expected_grad_diags=expected_grad_diags,
expected_grad_rhs=expected_grad_rhs,
feed_dict={
diags_placeholder: diags,
rhs_placeholder: rhs,
y_placeholder: y
})
# Invalid input shapes
@flags(FLAG_NO_PARAMETERIZATION)
def testInvalidShapesCompactFormat(self):
def test_raises(diags_shape, rhs_shape):
self._assertRaises(_tf_ones(diags_shape), _tf_ones(rhs_shape), "compact")
test_raises((5, 4, 4), (5, 4))
test_raises((5, 3, 4), (4, 5))
test_raises((5, 3, 4), (5))
test_raises((5), (5, 4))
@flags(FLAG_NO_PARAMETERIZATION)
def testInvalidShapesSequenceFormat(self):
def test_raises(diags_tuple_shapes, rhs_shape):
diagonals = tuple(_tf_ones(shape) for shape in diags_tuple_shapes)
self._assertRaises(diagonals, _tf_ones(rhs_shape), "sequence")
test_raises(((5, 4), (5, 4)), (5, 4))
test_raises(((5, 4), (5, 4), (5, 6)), (5, 4))
test_raises(((5, 3), (5, 4), (5, 6)), (5, 4))
test_raises(((5, 6), (5, 4), (5, 3)), (5, 4))
test_raises(((5, 4), (7, 4), (5, 4)), (5, 4))
test_raises(((5, 4), (7, 4), (5, 4)), (3, 4))
@flags(FLAG_NO_PARAMETERIZATION)
def testInvalidShapesMatrixFormat(self):
def test_raises(diags_shape, rhs_shape):
self._assertRaises(_tf_ones(diags_shape), _tf_ones(rhs_shape), "matrix")
test_raises((5, 4, 7), (5, 4))
test_raises((5, 4, 4), (3, 4))
test_raises((5, 4, 4), (5, 3))
# Tests with placeholders
def _testWithPlaceholders(self,
diags_shape,
rhs_shape,
diags_feed,
rhs_feed,
expected,
diags_format="compact"):
if context.executing_eagerly():
return
diags = array_ops.placeholder(dtypes.float64, shape=diags_shape)
rhs = array_ops.placeholder(dtypes.float64, shape=rhs_shape)
if test_util.is_xla_enabled() and self.pivoting:
# Pivoting is not supported by xla backends.
return
x = linalg_impl.tridiagonal_solve(
diags, rhs, diags_format, partial_pivoting=self.pivoting)
with self.cached_session() as sess:
result = sess.run(x, feed_dict={diags: diags_feed, rhs: rhs_feed})
self.assertAllClose(result, expected)
@test_util.run_deprecated_v1
def testCompactFormatAllDimsUnknown(self):
self._testWithPlaceholders(
diags_shape=[None, None],
rhs_shape=[None],
diags_feed=_sample_diags,
rhs_feed=_sample_rhs,
expected=_sample_result)
@test_util.run_deprecated_v1
def testCompactFormatUnknownMatrixSize(self):
self._testWithPlaceholders(
diags_shape=[3, None],
rhs_shape=[4],
diags_feed=_sample_diags,
rhs_feed=_sample_rhs,
expected=_sample_result)
@test_util.run_deprecated_v1
def testCompactFormatUnknownRhsCount(self):
self._testWithPlaceholders(
diags_shape=[3, 4],
rhs_shape=[4, None],
diags_feed=_sample_diags,
rhs_feed=np.transpose([_sample_rhs, 2 * _sample_rhs]),
expected=np.transpose([_sample_result, 2 * _sample_result]))
@test_util.run_deprecated_v1
def testCompactFormatUnknownBatchSize(self):
self._testWithPlaceholders(
diags_shape=[None, 3, 4],
rhs_shape=[None, 4],
diags_feed=np.array([_sample_diags, -_sample_diags]),
rhs_feed=np.array([_sample_rhs, 2 * _sample_rhs]),
expected=np.array([_sample_result, -2 * _sample_result]))
@test_util.run_deprecated_v1
def testMatrixFormatWithUnknownDims(self):
if context.executing_eagerly():
return
def test_with_matrix_shapes(matrix_shape, rhs_shape=None):
matrix = np.array([[1, 2, 0, 0], [1, 3, 1, 0], [0, -1, 2, 4],
[0, 0, 1, 2]])
rhs = np.array([1, 2, 3, 4])
x = np.array([-9, 5, -4, 4])
self._testWithPlaceholders(
diags_shape=matrix_shape,
rhs_shape=rhs_shape,
diags_feed=matrix,
rhs_feed=np.transpose([rhs, 2 * rhs]),
expected=np.transpose([x, 2 * x]),
diags_format="matrix")
test_with_matrix_shapes(matrix_shape=[4, 4], rhs_shape=[None, None])
test_with_matrix_shapes(matrix_shape=[None, 4], rhs_shape=[None, None])
test_with_matrix_shapes(matrix_shape=[4, None], rhs_shape=[None, None])
test_with_matrix_shapes(matrix_shape=[None, None], rhs_shape=[None, None])
test_with_matrix_shapes(matrix_shape=[4, 4])
test_with_matrix_shapes(matrix_shape=[None, 4])
test_with_matrix_shapes(matrix_shape=[4, None])
test_with_matrix_shapes(matrix_shape=[None, None])
test_with_matrix_shapes(matrix_shape=None, rhs_shape=[None, None])
test_with_matrix_shapes(matrix_shape=None)
@test_util.run_deprecated_v1
def testSequenceFormatWithUnknownDims(self):
if context.executing_eagerly():
return
if test_util.is_xla_enabled() and self.pivoting:
# Pivoting is not supported by xla backends.
return
superdiag = array_ops.placeholder(dtypes.float64, shape=[None])
diag = array_ops.placeholder(dtypes.float64, shape=[None])
subdiag = array_ops.placeholder(dtypes.float64, shape=[None])
rhs = array_ops.placeholder(dtypes.float64, shape=[None])
x = linalg_impl.tridiagonal_solve((superdiag, diag, subdiag),
rhs,
diagonals_format="sequence",
partial_pivoting=self.pivoting)
with self.cached_session() as sess:
result = sess.run(
x,
feed_dict={
subdiag: [20, 1, -1, 1],
diag: [1, 3, 2, 2],
superdiag: [2, 1, 4, 20],
rhs: [1, 2, 3, 4]
})
self.assertAllClose(result, [-9, 5, -4, 4])
# Benchmark
class TridiagonalSolveBenchmark(test.Benchmark):
sizes = [(100000, 1, 1), (1000000, 1, 1), (10000000, 1, 1), (100000, 10, 1),
(100000, 100, 1), (10000, 1, 10), (10000, 1, 100)]
pivoting_options = [(True, "pivoting"), (False, "no_pivoting")]
def _generateData(self, matrix_size, batch_size, num_rhs, seed=42):
np.random.seed(seed)
data = np.random.normal(size=(batch_size, matrix_size, 3 + num_rhs))
diags = np.stack([data[:, :, 0], data[:, :, 1], data[:, :, 2]], axis=-2)
rhs = data[:, :, 3:]
return (variables.Variable(diags, dtype=dtypes.float64),
variables.Variable(rhs, dtype=dtypes.float64))
def _generateMatrixData(self, matrix_size, batch_size, num_rhs, seed=42):
np.random.seed(seed)
import scipy.sparse as sparse # pylint:disable=g-import-not-at-top
    # By being strictly diagonally dominant, we guarantee invertibility.
diag = 2 * np.abs(np.random.randn(matrix_size)) + 4.1
subdiag = 2 * np.abs(np.random.randn(matrix_size - 1))
superdiag = 2 * np.abs(np.random.randn(matrix_size - 1))
matrix = sparse.diags([superdiag, diag, subdiag], [1, 0, -1]).toarray()
vector = np.random.randn(batch_size, matrix_size, num_rhs)
return (variables.Variable(np.tile(matrix, (batch_size, 1, 1))),
variables.Variable(vector))
def _benchmark(self, generate_data_fn, test_name_format_string):
devices = [("/cpu:0", "cpu")]
if test.is_gpu_available(cuda_only=True):
devices += [("/gpu:0", "gpu")]
for device_option, pivoting_option, size_option in \
itertools.product(devices, self.pivoting_options, self.sizes):
device_id, device_name = device_option
pivoting, pivoting_name = pivoting_option
matrix_size, batch_size, num_rhs = size_option
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device(device_id):
diags, rhs = generate_data_fn(matrix_size, batch_size, num_rhs)
# Pivoting is not supported by XLA backends.
        if test_util.is_xla_enabled() and pivoting:
return
x = linalg_impl.tridiagonal_solve(
diags, rhs, partial_pivoting=pivoting)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(x),
min_iters=10,
store_memory_usage=False,
name=test_name_format_string.format(device_name, matrix_size,
batch_size, num_rhs,
pivoting_name))
def benchmarkTridiagonalSolveOp_WithMatrixInput(self):
self._benchmark(
self._generateMatrixData,
test_name_format_string=(
"tridiagonal_solve_matrix_format_{}_matrix_size_{}_"
"batch_size_{}_num_rhs_{}_{}"))
def benchmarkTridiagonalSolveOp(self):
self._benchmark(
        self._generateData,
test_name_format_string=("tridiagonal_solve_{}_matrix_size_{}_"
"batch_size_{}_num_rhs_{}_{}"))
if __name__ == "__main__":
for name, fun in dict(TridiagonalSolveOpTest.__dict__).items():
if not name.startswith("test"):
continue
if hasattr(fun, FLAG_NO_PARAMETERIZATION):
continue
# Replace testFoo with testFoo_pivoting and testFoo_noPivoting, setting
# self.pivoting to corresponding value.
delattr(TridiagonalSolveOpTest, name)
def decor(test_fun, pivoting):
def wrapped(instance):
instance.pivoting = pivoting
test_fun(instance)
return wrapped
setattr(TridiagonalSolveOpTest, name + "_pivoting",
decor(fun, pivoting=True))
if not hasattr(fun, FLAG_REQUIRES_PIVOTING):
setattr(TridiagonalSolveOpTest, name + "_noPivoting",
decor(fun, pivoting=False))
test.main()
|
|
# Copyright (C) 2013-2014 by Zhang Li <zhangli10 at baidu.com>
# All rights reserved.
#
# vim-javacomplete_ex:
# improved vim-javacomplete, add following features:
# 1. complete a class name.
# 2. add 'import' statement for a given class.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the project nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
import vim
import os
import re
import string
import subprocess
import threading
import zlib
import base64
import zipfile
def __GetBootstrapClassPath():
## mkdir
temp_dir = "/tmp/JavaAutoImport.%s.%s" % (os.getlogin(), os.getpid())
os.mkdir(temp_dir)
## write class file
temp_classfile = temp_dir + "/_.class"
with open(temp_classfile, "w") as classfile:
classfile_data = zlib.decompress(base64.b64decode(
## simple java class displaying Property "sun.boot.class.path":
"""
eJxtUMtOwkAUPVeghVoFQfCJysZUFyVxi3Fj4oooCYaNCzLFCZb0QdqpCZ+lCzUu
/AA/yngHTYiRWdzXnHPm3Pn8ev8AcIaWBROVEjZQLaJmcd60UEfDxJaJbYJx7ke+
uiDknJMBIX8Z30tCuetH8joLPZncCi/gST4UfkRoOHfdiXgU7UBE43ZfJX407mii
1Y+zZCSvfA02hq4G2SiiZGLHxi72CLU0i1wvjpU7CkSaulOhHmzso2niwMYhjgg0
JFQW+jfeRI7Un1F/lioZst0444v6jxk/bvfYiWI/UoQdwupYql4ST2WiZoRjZ4nn
/yN2uESNYE51F/D29WVCA7Rg8CfrswLSO3O0uGtyJs6F01fQExdsjKMxH+YYZmPt
F+rMqYD9jJVq7g35FxQWDItvWYZrzV2fP1T+BtBRZv0= """))
classfile.write(classfile_data)
## read classpath from pipe
    classpath = string.join((os.getenv("CLASSPATH", ""), temp_dir), ":")
pipe = subprocess.Popen((__java(), "-cp", classpath, "_"), stdout=subprocess.PIPE)
bootstrap_classpath = pipe.stdout.read().strip()
## clean
os.remove(temp_classfile)
os.rmdir(temp_dir)
return bootstrap_classpath
## configuration values
__java = lambda: vim.eval("g:JavaCompleteEx_JavaHome") + "/bin/java"
__bootstrapClassPath = __GetBootstrapClassPath()
__classname_mapping = {}
__classpath = lambda: __bootstrapClassPath + ":" + vim.eval("g:JavaCompleteEx_ClassPath")
__classpath_current = ""
def __AddRelativeFilename_into_ClassNameMapping(relative_filename, classname_mapping):
if re.match(r"(\w+/)*\w+\.class", relative_filename):
classname_with_scope_splits = relative_filename.replace(".class", "").split("/")
classname, scope = (
classname_with_scope_splits[-1],
string.join(classname_with_scope_splits[ :-1], "."))
if not classname_mapping.has_key(classname):
classname_mapping[classname] = []
classname_mapping[classname].append(scope)
def __GetClassNameMappingFromDir(dirpath):
classname_mapping = {}
try:
## walk dirpath
for root, dirs, filenames in os.walk(dirpath):
for filename in filenames:
relative_filename = os.path.relpath(root + "/" + filename, dirpath)
__AddRelativeFilename_into_ClassNameMapping(relative_filename, classname_mapping)
return classname_mapping
except Exception:
return {}
def __GetClassNameMappingFromJar(jar_filename):
classname_mapping = {}
try:
for relative_filename in zipfile.ZipFile(jar_filename, "r").namelist():
__AddRelativeFilename_into_ClassNameMapping(relative_filename, classname_mapping)
return classname_mapping
except Exception:
return {}
def __GetClassNameWithScope(classname_mapping, classname):
if classname_mapping.has_key(classname):
return list((scope +"." + classname) for scope in classname_mapping[classname])
return []
def __UpdateClassNameMapping():
global __classpath_current
if __classpath_current == __classpath(): ## classpath not changed -- no need to update
return
__classname_mapping.clear()
## process classpath:
for classpath in map(string.strip, __classpath().split(":")):
## try add jar
if classpath.endswith(".jar"):
for classname, scopes in __GetClassNameMappingFromJar(classpath).items():
if not __classname_mapping.has_key(classname):
__classname_mapping[classname] = []
__classname_mapping[classname].extend(scopes)
## try add dir
else:
for classname, scopes in __GetClassNameMappingFromDir(classpath).items():
if not __classname_mapping.has_key(classname):
__classname_mapping[classname] = []
__classname_mapping[classname].extend(scopes)
## update classpath_current
__classpath_current = __classpath()
## utility vim functions
def __vim_cur_classname_with_scope(*args):
curline = "<%s>" % __vim_curline()
word_l = __vim_getx() + 1
word_r = __vim_getx()
while curline[word_l - 1].isalnum() or curline[word_l - 1] == '_' or curline[word_l - 1] == '.':
word_l -= 1
while curline[word_r + 1].isalnum() or curline[word_r + 1] == '_' or curline[word_r + 1] == '.':
word_r += 1
return curline[word_l : word_r + 1]
__vim_numlines = lambda *args: int(vim.eval("line('$')"))
__vim_gety = lambda *args: int(vim.eval("line('.')")) - 1
__vim_getx = lambda *args: int(vim.eval(" col('.')")) - 1
__vim_addline = lambda *args: vim.eval("append('%d','%s')" % (args[0] + 1, args[1]))
__vim_getline = lambda *args: vim.eval("getline('%d')" % (args[0] + 1))
__vim_curline = lambda *args: vim.eval("getline('.')")
__vim_curword = lambda *args: vim.eval("expand('<cword>')")
def __vim_InsertImport(classname_with_scope):
new_import = "import %s;" % classname_with_scope
line_items = []
## extract all non-empty lines
for nline in range(0, __vim_numlines()):
if __vim_getline(nline) != "":
line_items.append( (nline, __vim_getline(nline)))
line_items.append( (__vim_numlines(), ""))
    ## existing imports found -- insert the new import alphabetically
last_import_item = None
for line_item in line_items:
if re.match(r"import [\w.]+;", line_item[1]):
last_import_item = line_item
if line_item[1] >= new_import:
if line_item[1] != new_import:
__vim_addline(line_item[0] - 1, new_import);
return True
else:
return False
if last_import_item != None: ## add to last import line
__vim_addline(last_import_item[0], new_import);
return True
    ## no existing imports -- add the new import before the code, keeping any 'package bla.bla' line first
if re.match(r"package [\w.]+;", line_items[0][1]):
__vim_addline(0, new_import); ## first line is 'package bla', add to next line
else:
__vim_addline(-1, new_import); ## add to first line
return True
def __vim_interface_JavaCompleteEx_AddImport():
__UpdateClassNameMapping()
index_userinput = 0
classname_with_scope = __GetClassNameWithScope(__classname_mapping, __vim_curword())
## no candidate
if classname_with_scope.__len__() == 0:
print "JavaCompleteEx: classname '%s' not found in any scope." % __vim_curword()
return
else:
## multiple candidate -- select one from user input
if classname_with_scope.__len__() > 1:
for index_classname in enumerate(classname_with_scope):
print "candidate [%d]: %s" % index_classname
try: ## will ignore invalid user input
index_userinput = int(vim.eval("input('select one candidate: ', '0')"))
vim.command(":redraw!")
except:
print "JavaCompleteEx: invalid input."
return
## insert selected classname
if __vim_InsertImport(classname_with_scope[index_userinput]):
print "JavaCompleteEx: class '%s' import done." % classname_with_scope[index_userinput]
return
else:
print "JavaCompleteEx: class '%s' already imported." % classname_with_scope[index_userinput]
return
def __vim_interface_JavaCompleteEx_CompleteClassName(findstart, base):
classname_with_scope = __vim_cur_classname_with_scope()
if int(findstart) == 1:
curline = __vim_curline()
start = __vim_getx()
if classname_with_scope == "" or re.match(r"^[A-Z][A-Za-z0-9_]*$", classname_with_scope):
while start > 0 and (curline[start - 1].isalnum() or curline[start - 1] == "_"):
start -= 1
return start
else:
complete_items = []
if classname_with_scope == "" or re.match(r"^[A-Z][A-Za-z0-9_]*$", classname_with_scope):
for classname, scopes in __classname_mapping.items():
if classname.startswith(base):
for scope in scopes:
complete_items.append({
"word": classname,
"menu": scope,
"kind": "c"})
complete_items.sort(key=lambda item: item["word"])
return complete_items.__repr__()
## update mapping immediately after loading plugin
## updating is done in another thread, so that users are not blocked
if __name__ == "__main__":
update_thread = threading.Thread(target=__UpdateClassNameMapping)
update_thread.start()
__classpath_current = __classpath()
|
|
#!/usr/bin/env python3
# Copyright 2016 The Fontbakery Authors
# Copyright 2017 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unittests to check the functionality of Google Fonts Tools"""
import os
import re
from glob import glob
import unittest
import subprocess
class TestSubcommands(unittest.TestCase):
"""Functional tests to determine that bin/gftools runs correctly"""
def setUp(self):
self.bin_path = os.path.join('bin')
self.maxDiff = None
def test_list_subcommands_has_all_scripts(self):
"""Tests if the output from running gftools --list-subcommands
matches the scripts within the bin folder"""
        scripts = [re.sub(r'\.\w*$', '', f.replace('gftools-', '')) for f in \
os.listdir(self.bin_path) if f.startswith('gftools-')]
subcommands = subprocess.check_output(['python',
os.path.join('bin', 'gftools'),
'--list-subcommands'], encoding="utf-8").split()
self.assertEqual(sorted(scripts), sorted(subcommands))
class TestGFToolsScripts(unittest.TestCase):
"""Functional tests to determine whether each script can execute successfully"""
def setUp(self):
self.get_path = lambda name: os.path.join('bin', 'gftools-' + name + '.py')
self.example_dir = os.path.join('data', 'test', 'cabin')
self.example_font = os.path.join(self.example_dir, 'Cabin-Regular.ttf')
self.example_family = glob(os.path.join("data", "test", "mavenpro", "*.ttf"))
self.example_vf_font = os.path.join("data", "test", 'Lora-Roman-VF.ttf')
self.example_vf_stat = os.path.join("data", "test", 'lora_stat.yaml')
self.example_builder_config = os.path.join("data", "test", 'builder_test.yaml')
self.src_vtt_font = os.path.join("data", "test", "Inconsolata[wdth,wght].ttf")
self.gf_family_dir = os.path.join('data', 'test', 'mock_googlefonts', 'ofl', 'abel')
self.nam_file = os.path.join('data', 'test', 'arabic_unique-glyphs.nam')
self.blacklisted_scripts = [
['python', self.get_path('build-contributors')], # requires source folder of git commits
['python', self.get_path('check-category')], # Requires GF key
['python', self.get_path('check-gf-github')], # Requires github credentials
['python', self.get_path('build-font2ttf')], # Requires fontforge
['python', self.get_path('generate-glyphdata')], # Generates desired_glyph_data.json
['python', self.get_path('metadata-vs-api')], # Requires an API key
['python', self.get_path('update-version')], # Needs to know the current font version and the next version to set
['python', self.get_path('family-html-snippet')], # Requires GF api token
            ['python', self.get_path('qa')], # Has separate checks
            ['python', self.get_path('sanity-check')], # Very old; doesn't follow the new spec. Should be deprecated.
]
self.dir_before_tests = os.listdir(self.example_dir)
def tearDown(self):
"""Clears the example folder of any files created during the unit tests"""
files_to_delete = set(os.listdir(self.example_dir)) - set(self.dir_before_tests)
for f in files_to_delete:
os.remove(os.path.join(self.example_dir, f))
def check_script(self, command):
"""Template for unit testing the python scripts"""
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
stdout, stderr = process.communicate()
self.assertNotIn('Err', stderr, ' '.join(command) + ':\n\n' + stderr)
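    # Illustrative template (not from the original file): coverage for a new
    # script would follow the same pattern; 'my-script' is a hypothetical name
    # used only for this example.
    #
    #     def test_my_script(self):
    #         self.check_script(['python', self.get_path('my-script'), self.example_font])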
    def test_add_font(self):
self.check_script(['python', self.get_path('add-font'), self.gf_family_dir])
def test_build_ofl(self):
self.check_script(['python', self.get_path('build-ofl'), self.example_dir])
def test_check_bbox(self):
self.check_script(['python', self.get_path('check-bbox'), self.example_font, '--glyphs', '--extremes'])
def test_check_copyright_notices(self):
self.check_script(['python', self.get_path('check-copyright-notices')])
def test_check_font_version(self):
self.check_script(['python', self.get_path('check-font-version'), self.example_font])
def test_check_name(self):
self.check_script(['python', self.get_path('check-name'), self.example_font])
def test_check_vtt_compatibility(self):
self.check_script(['python', self.get_path('check-vtt-compatibility'), self.example_font, self.example_font])
def test_compare_font(self):
self.check_script(['python', self.get_path('compare-font'), self.example_font, self.example_font])
def test_find_features(self):
self.check_script(['python', self.get_path('find-features'), self.example_font])
def test_fix_ascii_fontmetadata(self):
self.check_script(['python', self.get_path('fix-ascii-fontmetadata'), self.example_font])
def test_fix_cmap(self):
self.check_script(['python', self.get_path('fix-cmap'), self.example_font])
def test_fix_familymetadata(self):
self.check_script(['python', self.get_path('fix-familymetadata'), self.example_font])
def test_fix_fsselection(self):
self.check_script(['python', self.get_path('fix-fsselection'), self.example_font])
def test_fix_fstype(self):
self.check_script(['python', self.get_path('fix-fstype'), self.example_font])
def test_fix_gasp(self):
self.check_script(['python', self.get_path('fix-gasp'), self.example_font])
def test_fix_glyph_private_encoding(self):
self.check_script(['python', self.get_path('fix-glyph-private-encoding'), self.example_font])
def test_fix_glyphs(self):
self.check_script(['python', self.get_path('fix-glyphs')])
def test_fix_hinting(self):
self.check_script(['python', self.get_path('fix-hinting'), self.example_font])
def test_fix_isfixedpitch(self):
self.check_script(['python', self.get_path('fix-isfixedpitch'), "--fonts", self.example_font])
def test_fix_nameids(self):
self.check_script(['python', self.get_path('fix-nameids'), self.example_font])
def test_fix_nonhinting(self):
self.check_script(['python', self.get_path('fix-nonhinting'), self.example_font, self.example_font + '.fix'])
def test_fix_ttfautohint(self):
self.check_script(['python', self.get_path('fix-ttfautohint'), self.example_font])
def test_fix_vendorid(self):
self.check_script(['python', self.get_path('fix-vendorid'), self.example_font])
def test_fix_vertical_metrics(self):
self.check_script(['python', self.get_path('fix-vertical-metrics'), self.example_font])
def test_font_diff(self):
self.check_script(['python', self.get_path('font-diff'), self.example_font, self.example_font])
    def test_font_weights_coverage(self):
self.check_script(['python', self.get_path('font-weights-coverage'), self.example_font])
def test_fix_font(self):
self.check_script(['python', self.get_path('fix-font'), self.example_font])
def test_fix_family(self):
self.check_script(['python', self.get_path('fix-family')] + self.example_family)
def test_list_italicangle(self):
self.check_script(['python', self.get_path('list-italicangle'), self.example_font])
def test_list_panose(self):
self.check_script(['python', self.get_path('list-panose'), self.example_font])
def test_list_variable_source(self):
self.check_script(['python', self.get_path('list-variable-source')])
def test_list_weightclass(self):
self.check_script(['python', self.get_path('list-weightclass'), self.example_font])
def test_list_widthclass(self):
self.check_script(['python', self.get_path('list-widthclass'), self.example_font])
def test_nametable_from_filename(self):
self.check_script(['python', self.get_path('nametable-from-filename'), self.example_font])
def test_namelist(self):
self.check_script(['python', self.get_path('namelist'), self.example_font])
def test_ots(self):
self.check_script(['python', self.get_path('ots'), self.example_font])
def test_rangify(self):
self.check_script(['python', self.get_path('rangify'), self.nam_file])
def test_test_gf_coverage(self):
self.check_script(['python', self.get_path('test-gf-coverage'), self.example_font])
def test_ttf2cp(self):
self.check_script(['python', self.get_path('ttf2cp'), self.example_font])
def test_unicode_names(self):
self.check_script(['python', self.get_path('unicode-names'), "--nam_file", self.nam_file])
def test_update_families(self):
self.check_script(['python', self.get_path('update-families'), self.example_font])
def test_update_version(self):
self.check_script(['python', self.get_path('update-version'), self.example_font])
def test_varfont_info(self):
self.check_script(['python', self.get_path('varfont-info'), self.example_vf_font])
def test_what_subsets(self):
self.check_script(['python', self.get_path('what-subsets'), self.example_font])
def test_rename_font(self):
self.check_script(['python', self.get_path('rename-font'), self.example_font, "Foobar"])
# Temporarily disabling this until we close issue #13
# (https://github.com/googlefonts/tools/issues/13)
# See also https://github.com/googlefonts/fontbakery/issues/1535
# def test_update_families(self):
# self.check_script(['python', self.get_path('update-families'), self.example_font])
def test_update_nameids(self):
self.check_script(['python', self.get_path('update-nameids'), self.example_font, "-c", "Foobar"])
def test_check_vtt_compile(self):
self.check_script(['python', self.get_path('check-vtt-compile'), self.src_vtt_font])
def test_gen_stat(self):
self.check_script(
['python', self.get_path('gen-stat'), self.example_vf_font, "--axis-order", "wght"]
)
def test_gen_stat2(self):
self.check_script(
['python', self.get_path('gen-stat'), self.example_vf_font, "--src", self.example_vf_stat]
)
def test_builder(self):
self.check_script(['python', self.get_path('builder'), self.example_builder_config])
if __name__ == '__main__':
unittest.main()
|
|
from contextlib import contextmanager
import collections
import random
import threading
import time
from typing import TypeVar, Generic, Iterable, List, Callable, Any
import ray
from ray.util.iter_metrics import MetricsContext, SharedMetrics
# The type of an iterator element.
T = TypeVar("T")
U = TypeVar("U")
def from_items(
items: List[T], num_shards: int = 2, repeat: bool = False
) -> "ParallelIterator[T]":
"""Create a parallel iterator from an existing set of objects.
The objects will be divided round-robin among the number of shards.
Args:
items (list): The list of items to iterate over.
num_shards (int): The number of worker actors to create.
repeat (bool): Whether to cycle over the items forever.
"""
shards = [[] for _ in range(num_shards)]
for i, item in enumerate(items):
shards[i % num_shards].append(item)
name = "from_items[{}, {}, shards={}{}]".format(
items and type(items[0]).__name__ or "None",
len(items),
num_shards,
", repeat=True" if repeat else "",
)
return from_iterators(shards, repeat=repeat, name=name)
def from_range(
n: int, num_shards: int = 2, repeat: bool = False
) -> "ParallelIterator[int]":
"""Create a parallel iterator over the range 0..n.
The range will be partitioned sequentially among the number of shards.
Args:
n (int): The max end of the range of numbers.
num_shards (int): The number of worker actors to create.
repeat (bool): Whether to cycle over the range forever.
"""
generators = []
shard_size = n // num_shards
for i in range(num_shards):
start = i * shard_size
if i == num_shards - 1:
end = n
else:
end = (i + 1) * shard_size
generators.append(range(start, end))
name = (
f"from_range[{n}, shards={num_shards}" f"{', repeat=True' if repeat else ''}]"
)
return from_iterators(
generators,
repeat=repeat,
name=name,
)
def from_iterators(
generators: List[Iterable[T]], repeat: bool = False, name=None
) -> "ParallelIterator[T]":
"""Create a parallel iterator from a list of iterables.
    An iterable can be a container (list, str, tuple, set, etc.),
a generator, or a custom class that implements __iter__ or __getitem__.
An actor will be created for each iterable.
Examples:
>>> # Create using a list of generators.
>>> from_iterators([range(100), range(100)])
>>> # Certain generators are not serializable.
>>> from_iterators([(x for x in range(100))])
... TypeError: can't pickle generator objects
>>> # So use lambda functions instead.
>>> # Lambda functions are serializable.
>>> from_iterators([lambda: (x for x in range(100))])
Args:
generators (list): A list of Python iterables or lambda
functions that produce an iterable when called. We allow lambda
functions since certain generators might not be serializable,
but a lambda that returns it can be.
repeat (bool): Whether to cycle over the iterators forever.
name (str): Optional name to give the iterator.
"""
worker_cls = ray.remote(ParallelIteratorWorker)
actors = [worker_cls.remote(g, repeat) for g in generators]
if not name:
name = "from_iterators[shards={}{}]".format(
len(generators), ", repeat=True" if repeat else ""
)
return from_actors(actors, name=name)
def from_actors(
actors: List["ray.actor.ActorHandle"], name=None
) -> "ParallelIterator[T]":
"""Create a parallel iterator from an existing set of actors.
Each actor must subclass the ParallelIteratorWorker interface.
Args:
actors (list): List of actors that each implement
ParallelIteratorWorker.
name (str): Optional name to give the iterator.
"""
if not name:
name = f"from_actors[shards={len(actors)}]"
return ParallelIterator([_ActorSet(actors, [])], name, parent_iterators=[])
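# A minimal sketch (assumed usage, not taken from this module's docs) of how
# from_actors() is typically fed: the actor class subclasses
# ParallelIteratorWorker (defined below) and forwards an item generator to its
# __init__.
#
#     class CustomWorker(ParallelIteratorWorker):
#         def __init__(self):
#             ParallelIteratorWorker.__init__(
#                 self, lambda: iter(range(5)), False)
#
#     remote_cls = ray.remote(CustomWorker)
#     it = from_actors([remote_cls.remote() for _ in range(2)])
#     # it.gather_sync().take(4) -> [0, 0, 1, 1]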
class ParallelIterator(Generic[T]):
"""A parallel iterator over a set of remote actors.
This can be used to iterate over a fixed set of task results
(like an actor pool), or a stream of data (e.g., a fixed range of numbers,
an infinite stream of RLlib rollout results).
This class is **serializable** and can be passed to other remote
tasks and actors. However, each shard should be read from at most one
process at a time.
Examples:
>>> # Applying a function over items in parallel.
>>> it = ray.util.iter.from_items([1, 2, 3], num_shards=2)
... <__main__.ParallelIterator object>
>>> it = it.for_each(lambda x: x * 2).gather_sync()
... <__main__.LocalIterator object>
>>> print(list(it))
... [2, 4, 6]
>>> # Creating from generators.
>>> it = ray.util.iter.from_iterators([range(3), range(3)])
... <__main__.ParallelIterator object>
>>> print(list(it.gather_sync()))
... [0, 0, 1, 1, 2, 2]
>>> # Accessing the individual shards of an iterator.
>>> it = ray.util.iter.from_range(10, num_shards=2)
... <__main__.ParallelIterator object>
>>> it0 = it.get_shard(0)
... <__main__.LocalIterator object>
>>> print(list(it0))
... [0, 1, 2, 3, 4]
>>> it1 = it.get_shard(1)
... <__main__.LocalIterator object>
>>> print(list(it1))
... [5, 6, 7, 8, 9]
>>> # Gathering results from actors synchronously in parallel.
>>> it = ray.util.iter.from_actors(workers)
... <__main__.ParallelIterator object>
>>> it = it.batch_across_shards()
... <__main__.LocalIterator object>
>>> print(next(it))
... [worker_1_result_1, worker_2_result_1]
>>> print(next(it))
... [worker_1_result_2, worker_2_result_2]
"""
def __init__(
self,
actor_sets: List["_ActorSet"],
name: str,
parent_iterators: List["ParallelIterator[Any]"],
):
"""Create a parallel iterator (this is an internal function)."""
# We track multiple sets of actors to support parallel .union().
self.actor_sets = actor_sets
self.name = name
# keep explicit reference to parent iterator for repartition
self.parent_iterators = parent_iterators
def __iter__(self):
raise TypeError(
"You must use it.gather_sync() or it.gather_async() to "
"iterate over the results of a ParallelIterator."
)
def __str__(self):
return repr(self)
def __repr__(self):
return f"ParallelIterator[{self.name}]"
def _with_transform(self, local_it_fn, name):
"""Helper function to create new Parallel Iterator"""
return ParallelIterator(
[a.with_transform(local_it_fn) for a in self.actor_sets],
name=self.name + name,
parent_iterators=self.parent_iterators,
)
def transform(
self, fn: Callable[[Iterable[T]], Iterable[U]]
) -> "ParallelIterator[U]":
"""Remotely transform the iterator.
        This is an advanced version of for_each that allows you to apply arbitrary
generator transformations over the iterator. Prefer to use .for_each()
when possible for simplicity.
Args:
fn (func): function to use to transform the iterator. The function
should pass through instances of _NextValueNotReady that appear
in its input iterator. Note that this function is only called
**once** over the input iterator.
Returns:
ParallelIterator[U]: a parallel iterator.
Examples:
>>> def f(it):
... for x in it:
... if x % 2 == 0:
... yield x
>>> from_range(10, 1).transform(f).gather_sync().take(5)
... [0, 2, 4, 6, 8]
"""
return self._with_transform(
lambda local_it: local_it.transform(fn), ".transform()"
)
def for_each(
self, fn: Callable[[T], U], max_concurrency=1, resources=None
) -> "ParallelIterator[U]":
"""Remotely apply fn to each item in this iterator.
        If `max_concurrency` == 1 then `fn` will be executed serially by each
        shard.
`max_concurrency` should be used to achieve a high degree of
parallelism without the overhead of increasing the number of shards
(which are actor based). If `max_concurrency` is not 1, this function
provides no semantic guarantees on the output order.
Results will be returned as soon as they are ready.
A performance note: When executing concurrently, this function
        maintains its own internal buffer. If `num_async` is `n` and
        `max_concurrency` is `k`, then the total number of buffered objects
        could be up to `n + k - 1`.
Args:
fn (func): function to apply to each item.
max_concurrency (int): max number of concurrent calls to fn per
shard. If 0, then apply all operations concurrently.
resources (dict): resources that the function requires to execute.
This has the same default as `ray.remote` and is only used
when `max_concurrency > 1`.
Returns:
ParallelIterator[U]: a parallel iterator whose elements have `fn`
applied.
Examples:
            >>> sorted(from_range(4).for_each(
            ...     lambda x: x * 2,
            ...     max_concurrency=2,
            ...     resources={"num_cpus": 0.1}).gather_sync().take(4))
            ... [0, 2, 4, 6]
"""
assert max_concurrency >= 0, "max_concurrency must be non-negative."
return self._with_transform(
lambda local_it: local_it.for_each(fn, max_concurrency, resources),
".for_each()",
)
def filter(self, fn: Callable[[T], bool]) -> "ParallelIterator[T]":
"""Remotely filter items from this iterator.
Args:
fn (func): returns False for items to drop from the iterator.
Examples:
>>> it = from_items([0, 1, 2]).filter(lambda x: x > 0)
>>> next(it.gather_sync())
... [1, 2]
"""
return self._with_transform(lambda local_it: local_it.filter(fn), ".filter()")
def batch(self, n: int) -> "ParallelIterator[List[T]]":
"""Remotely batch together items in this iterator.
Args:
n (int): Number of items to batch together.
Examples:
>>> next(from_range(10, 1).batch(4).gather_sync())
... [0, 1, 2, 3]
"""
return self._with_transform(lambda local_it: local_it.batch(n), f".batch({n})")
def flatten(self) -> "ParallelIterator[T[0]]":
"""Flatten batches of items into individual items.
Examples:
>>> next(from_range(10, 1).batch(4).flatten())
... 0
"""
return self._with_transform(lambda local_it: local_it.flatten(), ".flatten()")
def combine(self, fn: Callable[[T], List[U]]) -> "ParallelIterator[U]":
"""Transform and then combine items horizontally.
This is the equivalent of for_each(fn).flatten() (flat map).
"""
it = self.for_each(fn).flatten()
it.name = self.name + ".combine()"
return it
def local_shuffle(
self, shuffle_buffer_size: int, seed: int = None
) -> "ParallelIterator[T]":
"""Remotely shuffle items of each shard independently
Args:
shuffle_buffer_size (int): The algorithm fills a buffer with
shuffle_buffer_size elements and randomly samples elements from
this buffer, replacing the selected elements with new elements.
For perfect shuffling, this argument should be greater than or
equal to the largest iterator size.
seed (int): Seed to use for
randomness. Default value is None.
Returns:
A ParallelIterator with a local shuffle applied on the base
iterator
Examples:
>>> it = from_range(10, 1).local_shuffle(shuffle_buffer_size=2)
>>> it = it.gather_sync()
>>> next(it)
0
>>> next(it)
2
>>> next(it)
3
>>> next(it)
1
"""
return self._with_transform(
lambda local_it: local_it.shuffle(shuffle_buffer_size, seed),
".local_shuffle(shuffle_buffer_size={}, seed={})".format(
shuffle_buffer_size, str(seed) if seed is not None else "None"
),
)
def repartition(
self, num_partitions: int, batch_ms: int = 0
) -> "ParallelIterator[T]":
"""Returns a new ParallelIterator instance with num_partitions shards.
The new iterator contains the same data in this instance except with
num_partitions shards. The data is split in round-robin fashion for
the new ParallelIterator.
Args:
num_partitions (int): The number of shards to use for the new
ParallelIterator
batch_ms (int): Batches items for batch_ms milliseconds
on each shard before retrieving it.
Increasing batch_ms increases latency but improves throughput.
Returns:
A ParallelIterator with num_partitions number of shards and the
data of this ParallelIterator split round-robin among the new
number of shards.
Examples:
>>> it = from_range(8, 2)
>>> it = it.repartition(3)
>>> list(it.get_shard(0))
[0, 4, 3, 7]
>>> list(it.get_shard(1))
[1, 5]
>>> list(it.get_shard(2))
[2, 6]
"""
# initialize the local iterators for all the actors
all_actors = []
for actor_set in self.actor_sets:
actor_set.init_actors()
all_actors.extend(actor_set.actors)
def base_iterator(num_partitions, partition_index, timeout=None):
futures = {}
for a in all_actors:
futures[
a.par_iter_slice_batch.remote(
step=num_partitions, start=partition_index, batch_ms=batch_ms
)
] = a
while futures:
pending = list(futures)
if timeout is None:
# First try to do a batch wait for efficiency.
ready, _ = ray.wait(pending, num_returns=len(pending), timeout=0)
# Fall back to a blocking wait.
if not ready:
ready, _ = ray.wait(pending, num_returns=1)
else:
ready, _ = ray.wait(
pending, num_returns=len(pending), timeout=timeout
)
for obj_ref in ready:
actor = futures.pop(obj_ref)
try:
batch = ray.get(obj_ref)
futures[
actor.par_iter_slice_batch.remote(
step=num_partitions,
start=partition_index,
batch_ms=batch_ms,
)
] = actor
for item in batch:
yield item
except StopIteration:
pass
# Always yield after each round of wait with timeout.
if timeout is not None:
yield _NextValueNotReady()
def make_gen_i(i):
return lambda: base_iterator(num_partitions, i)
name = self.name + f".repartition[num_partitions={num_partitions}]"
generators = [make_gen_i(s) for s in range(num_partitions)]
worker_cls = ray.remote(ParallelIteratorWorker)
actors = [worker_cls.remote(g, repeat=False) for g in generators]
# need explicit reference to self so actors in this instance do not die
return ParallelIterator([_ActorSet(actors, [])], name, parent_iterators=[self])
def gather_sync(self) -> "LocalIterator[T]":
"""Returns a local iterable for synchronous iteration.
New items will be fetched from the shards on-demand as the iterator
is stepped through.
This is the equivalent of batch_across_shards().flatten().
Examples:
>>> it = from_range(100, 1).gather_sync()
>>> next(it)
... 0
>>> next(it)
... 1
>>> next(it)
... 2
"""
it = self.batch_across_shards().flatten()
it.name = f"{self}.gather_sync()"
return it
def batch_across_shards(self) -> "LocalIterator[List[T]]":
"""Iterate over the results of multiple shards in parallel.
Examples:
>>> it = from_iterators([range(3), range(3)])
>>> next(it.batch_across_shards())
... [0, 0]
"""
def base_iterator(timeout=None):
active = []
for actor_set in self.actor_sets:
actor_set.init_actors()
active.extend(actor_set.actors)
futures = [a.par_iter_next.remote() for a in active]
while active:
try:
yield ray.get(futures, timeout=timeout)
futures = [a.par_iter_next.remote() for a in active]
# Always yield after each round of gets with timeout.
if timeout is not None:
yield _NextValueNotReady()
except TimeoutError:
yield _NextValueNotReady()
except StopIteration:
# Find and remove the actor that produced StopIteration.
results = []
for a, f in zip(list(active), futures):
try:
results.append(ray.get(f))
except StopIteration:
active.remove(a)
if results:
yield results
futures = [a.par_iter_next.remote() for a in active]
name = f"{self}.batch_across_shards()"
return LocalIterator(base_iterator, SharedMetrics(), name=name)
def gather_async(self, batch_ms=0, num_async=1) -> "LocalIterator[T]":
"""Returns a local iterable for asynchronous iteration.
New items will be fetched from the shards asynchronously as soon as
the previous one is computed. Items arrive in non-deterministic order.
Arguments:
batch_ms (int): Batches items for batch_ms milliseconds
on each shard before retrieving it.
Increasing batch_ms increases latency but improves throughput.
If this value is 0, then items are returned immediately.
num_async (int): The max number of async requests in flight
per actor. Increasing this improves the amount of pipeline
parallelism in the iterator.
Examples:
>>> it = from_range(100, 1).gather_async()
>>> next(it)
... 3
>>> next(it)
... 0
>>> next(it)
... 1
"""
if num_async < 1:
raise ValueError("queue depth must be positive")
if batch_ms < 0:
            raise ValueError("batch time must be non-negative")
# Forward reference to the returned iterator.
local_iter = None
def base_iterator(timeout=None):
all_actors = []
for actor_set in self.actor_sets:
actor_set.init_actors()
all_actors.extend(actor_set.actors)
futures = {}
for _ in range(num_async):
for a in all_actors:
futures[a.par_iter_next_batch.remote(batch_ms)] = a
while futures:
pending = list(futures)
if timeout is None:
# First try to do a batch wait for efficiency.
ready, _ = ray.wait(pending, num_returns=len(pending), timeout=0)
# Fall back to a blocking wait.
if not ready:
ready, _ = ray.wait(pending, num_returns=1)
else:
ready, _ = ray.wait(
pending, num_returns=len(pending), timeout=timeout
)
for obj_ref in ready:
actor = futures.pop(obj_ref)
try:
local_iter.shared_metrics.get().current_actor = actor
batch = ray.get(obj_ref)
futures[actor.par_iter_next_batch.remote(batch_ms)] = actor
for item in batch:
yield item
except StopIteration:
pass
# Always yield after each round of wait with timeout.
if timeout is not None:
yield _NextValueNotReady()
name = f"{self}.gather_async()"
local_iter = LocalIterator(base_iterator, SharedMetrics(), name=name)
return local_iter
def take(self, n: int) -> List[T]:
"""Return up to the first n items from this iterator."""
return self.gather_sync().take(n)
def show(self, n: int = 20):
"""Print up to the first n items from this iterator."""
return self.gather_sync().show(n)
def union(self, other: "ParallelIterator[T]") -> "ParallelIterator[T]":
"""Return an iterator that is the union of this and the other."""
if not isinstance(other, ParallelIterator):
raise TypeError(
f"other must be of type ParallelIterator, got {type(other)}"
)
actor_sets = []
actor_sets.extend(self.actor_sets)
actor_sets.extend(other.actor_sets)
# if one of these iterators is a result of a repartition, we need to
# keep an explicit reference to its parent iterator
return ParallelIterator(
actor_sets,
f"ParallelUnion[{self}, {other}]",
parent_iterators=self.parent_iterators + other.parent_iterators,
)
def select_shards(self, shards_to_keep: List[int]) -> "ParallelIterator[T]":
"""Return a child iterator that only iterates over given shards.
It is the user's responsibility to ensure child iterators are operating
over disjoint sub-sets of this iterator's shards.
"""
if len(self.actor_sets) > 1:
raise ValueError("select_shards() is not allowed after union()")
if len(shards_to_keep) == 0:
raise ValueError("at least one shard must be selected")
old_actor_set = self.actor_sets[0]
new_actors = [
a for (i, a) in enumerate(old_actor_set.actors) if i in shards_to_keep
]
assert len(new_actors) == len(shards_to_keep), "Invalid actor index"
new_actor_set = _ActorSet(new_actors, old_actor_set.transforms)
return ParallelIterator(
[new_actor_set],
f"{self}.select_shards({len(shards_to_keep)} total)",
parent_iterators=self.parent_iterators,
)
def num_shards(self) -> int:
"""Return the number of worker actors backing this iterator."""
return sum(len(a.actors) for a in self.actor_sets)
def shards(self) -> List["LocalIterator[T]"]:
"""Return the list of all shards."""
return [self.get_shard(i) for i in range(self.num_shards())]
def get_shard(
self, shard_index: int, batch_ms: int = 0, num_async: int = 1
) -> "LocalIterator[T]":
"""Return a local iterator for the given shard.
The iterator is guaranteed to be serializable and can be passed to
remote tasks or actors.
Arguments:
shard_index (int): Index of the shard to gather.
batch_ms (int): Batches items for batch_ms milliseconds
before retrieving it.
Increasing batch_ms increases latency but improves throughput.
If this value is 0, then items are returned immediately.
num_async (int): The max number of requests in flight.
Increasing this improves the amount of pipeline
parallelism in the iterator.
"""
if num_async < 1:
raise ValueError("num async must be positive")
if batch_ms < 0:
            raise ValueError("batch time must be non-negative")
a, t = None, None
i = shard_index
for actor_set in self.actor_sets:
if i < len(actor_set.actors):
a = actor_set.actors[i]
t = actor_set.transforms
break
else:
i -= len(actor_set.actors)
if a is None:
raise ValueError("Shard index out of range", shard_index, self.num_shards())
def base_iterator(timeout=None):
queue = collections.deque()
ray.get(a.par_iter_init.remote(t))
for _ in range(num_async):
queue.append(a.par_iter_next_batch.remote(batch_ms))
while True:
try:
batch = ray.get(queue.popleft(), timeout=timeout)
queue.append(a.par_iter_next_batch.remote(batch_ms))
for item in batch:
yield item
# Always yield after each round of gets with timeout.
if timeout is not None:
yield _NextValueNotReady()
except TimeoutError:
yield _NextValueNotReady()
except StopIteration:
break
name = self.name + f".shard[{shard_index}]"
return LocalIterator(base_iterator, SharedMetrics(), name=name)
class LocalIterator(Generic[T]):
"""An iterator over a single shard of data.
It implements similar transformations as ParallelIterator[T], but the
transforms will be applied locally and not remotely in parallel.
This class is **serializable** and can be passed to other remote
tasks and actors. However, it should be read from at most one process at
a time."""
# If a function passed to LocalIterator.for_each() has this method,
# we will call it at the beginning of each data fetch call. This can be
# used to measure the underlying wait latency for measurement purposes.
ON_FETCH_START_HOOK_NAME = "_on_fetch_start"
thread_local = threading.local()
def __init__(
self,
base_iterator: Callable[[], Iterable[T]],
shared_metrics: SharedMetrics,
local_transforms: List[Callable[[Iterable], Any]] = None,
timeout: int = None,
name=None,
):
"""Create a local iterator (this is an internal function).
Args:
base_iterator (func): A function that produces the base iterator.
This is a function so that we can ensure LocalIterator is
serializable.
shared_metrics (SharedMetrics): Existing metrics context or a new
context. Should be the same for each chained iterator.
local_transforms (list): A list of transformation functions to be
applied on top of the base iterator. When iteration begins, we
create the base iterator and apply these functions. This lazy
creation ensures LocalIterator is serializable until you start
iterating over it.
timeout (int): Optional timeout in seconds for this iterator, after
which _NextValueNotReady will be returned. This avoids
blocking.
name (str): Optional name for this iterator.
"""
assert isinstance(shared_metrics, SharedMetrics)
self.base_iterator = base_iterator
self.built_iterator = None
self.local_transforms = local_transforms or []
self.shared_metrics = shared_metrics
self.timeout = timeout
self.name = name or "unknown"
@staticmethod
def get_metrics() -> MetricsContext:
"""Return the current metrics context.
This can only be called within an iterator function."""
if (
not hasattr(LocalIterator.thread_local, "metrics")
or LocalIterator.thread_local.metrics is None
):
raise ValueError("Cannot access context outside an iterator.")
return LocalIterator.thread_local.metrics
def _build_once(self):
if self.built_iterator is None:
it = iter(self.base_iterator(self.timeout))
for fn in self.local_transforms:
it = fn(it)
self.built_iterator = it
@contextmanager
def _metrics_context(self):
self.thread_local.metrics = self.shared_metrics.get()
yield
def __iter__(self):
self._build_once()
return self.built_iterator
def __next__(self):
self._build_once()
return next(self.built_iterator)
def __str__(self):
return repr(self)
def __repr__(self):
return f"LocalIterator[{self.name}]"
def transform(self, fn: Callable[[Iterable[T]], Iterable[U]]) -> "LocalIterator[U]":
# TODO(ekl) can we automatically handle NextValueNotReady here?
def apply_transform(it):
for item in fn(it):
yield item
return LocalIterator(
self.base_iterator,
self.shared_metrics,
self.local_transforms + [apply_transform],
name=self.name + ".transform()",
)
def for_each(
self, fn: Callable[[T], U], max_concurrency=1, resources=None
) -> "LocalIterator[U]":
if max_concurrency == 1:
def apply_foreach(it):
for item in it:
if isinstance(item, _NextValueNotReady):
yield item
else:
# Keep retrying the function until it returns a valid
# value. This allows for non-blocking functions.
while True:
with self._metrics_context():
result = fn(item)
yield result
if not isinstance(result, _NextValueNotReady):
break
else:
if resources is None:
resources = {}
def apply_foreach(it):
cur = []
remote = ray.remote(fn).options(**resources)
remote_fn = remote.remote
for item in it:
if isinstance(item, _NextValueNotReady):
yield item
else:
if max_concurrency and len(cur) >= max_concurrency:
finished, cur = ray.wait(cur)
yield from ray.get(finished)
cur.append(remote_fn(item))
while cur:
finished, cur = ray.wait(cur)
yield from ray.get(finished)
if hasattr(fn, LocalIterator.ON_FETCH_START_HOOK_NAME):
unwrapped = apply_foreach
def add_wait_hooks(it):
it = unwrapped(it)
new_item = True
while True:
# Avoids calling on_fetch_start repeatedly if we are
# yielding _NextValueNotReady.
if new_item:
with self._metrics_context():
fn._on_fetch_start()
new_item = False
item = next(it)
if not isinstance(item, _NextValueNotReady):
new_item = True
yield item
apply_foreach = add_wait_hooks
return LocalIterator(
self.base_iterator,
self.shared_metrics,
self.local_transforms + [apply_foreach],
name=self.name + ".for_each()",
)
def filter(self, fn: Callable[[T], bool]) -> "LocalIterator[T]":
def apply_filter(it):
for item in it:
with self._metrics_context():
if isinstance(item, _NextValueNotReady) or fn(item):
yield item
return LocalIterator(
self.base_iterator,
self.shared_metrics,
self.local_transforms + [apply_filter],
name=self.name + ".filter()",
)
def batch(self, n: int) -> "LocalIterator[List[T]]":
def apply_batch(it):
batch = []
for item in it:
if isinstance(item, _NextValueNotReady):
yield item
else:
batch.append(item)
if len(batch) >= n:
yield batch
batch = []
if batch:
yield batch
return LocalIterator(
self.base_iterator,
self.shared_metrics,
self.local_transforms + [apply_batch],
name=self.name + f".batch({n})",
)
def flatten(self) -> "LocalIterator[T[0]]":
def apply_flatten(it):
for item in it:
if isinstance(item, _NextValueNotReady):
yield item
else:
for subitem in item:
yield subitem
return LocalIterator(
self.base_iterator,
self.shared_metrics,
self.local_transforms + [apply_flatten],
name=self.name + ".flatten()",
)
def shuffle(self, shuffle_buffer_size: int, seed: int = None) -> "LocalIterator[T]":
"""Shuffle items of this iterator
Args:
shuffle_buffer_size (int): The algorithm fills a buffer with
shuffle_buffer_size elements and randomly samples elements from
this buffer, replacing the selected elements with new elements.
For perfect shuffling, this argument should be greater than or
equal to the largest iterator size.
seed (int): Seed to use for
randomness. Default value is None.
Returns:
A new LocalIterator with shuffling applied
"""
shuffle_random = random.Random(seed)
def apply_shuffle(it):
buffer = []
for item in it:
if isinstance(item, _NextValueNotReady):
yield item
else:
buffer.append(item)
if len(buffer) >= shuffle_buffer_size:
yield buffer.pop(shuffle_random.randint(0, len(buffer) - 1))
while len(buffer) > 0:
yield buffer.pop(shuffle_random.randint(0, len(buffer) - 1))
return LocalIterator(
self.base_iterator,
self.shared_metrics,
self.local_transforms + [apply_shuffle],
name=self.name
+ ".shuffle(shuffle_buffer_size={}, seed={})".format(
shuffle_buffer_size, str(seed) if seed is not None else "None"
),
)
def combine(self, fn: Callable[[T], List[U]]) -> "LocalIterator[U]":
it = self.for_each(fn).flatten()
it.name = self.name + ".combine()"
return it
def zip_with_source_actor(self):
def zip_with_source(item):
metrics = LocalIterator.get_metrics()
if metrics.current_actor is None:
raise ValueError("Could not identify source actor of item")
return metrics.current_actor, item
it = self.for_each(zip_with_source)
it.name = self.name + ".zip_with_source_actor()"
return it
def take(self, n: int) -> List[T]:
"""Return up to the first n items from this iterator."""
out = []
for item in self:
out.append(item)
if len(out) >= n:
break
return out
def show(self, n: int = 20):
"""Print up to the first n items from this iterator."""
i = 0
for item in self:
print(item)
i += 1
if i >= n:
break
def duplicate(self, n) -> List["LocalIterator[T]"]:
"""Copy this iterator `n` times, duplicating the data.
The child iterators will be prioritized by how much of the parent
stream they have consumed. That is, we will not allow children to fall
behind, since that can cause infinite memory buildup in this operator.
Returns:
List[LocalIterator[T]]: child iterators that each have a copy
of the data of this iterator.
"""
if n < 2:
raise ValueError("Number of copies must be >= 2")
queues = []
for _ in range(n):
queues.append(collections.deque())
def fill_next(timeout):
self.timeout = timeout
item = next(self)
for q in queues:
q.append(item)
def make_next(i):
def gen(timeout):
while True:
my_len = len(queues[i])
max_len = max(len(q) for q in queues)
# Yield to let other iterators that have fallen behind
# process more items.
if my_len < max_len:
yield _NextValueNotReady()
else:
if len(queues[i]) == 0:
try:
fill_next(timeout)
except StopIteration:
return
yield queues[i].popleft()
return gen
iterators = []
for i in range(n):
iterators.append(
LocalIterator(
make_next(i),
self.shared_metrics,
[],
name=self.name + f".duplicate[{i}]",
)
)
return iterators
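    # A minimal usage sketch (illustrative, not from the original source): the
    # children share one pass over the parent stream, so they should be read
    # roughly in lock-step. Reading only one child while ignoring the others
    # makes that child yield _NextValueNotReady placeholders until the others
    # catch up.
    #
    #     left, right = from_range(4, 1).gather_sync().duplicate(2)
    #     pairs = list(zip(left, right))  # -> [(0, 0), (1, 1), (2, 2), (3, 3)]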
def union(
self,
*others: "LocalIterator[T]",
deterministic: bool = False,
round_robin_weights: List[float] = None,
) -> "LocalIterator[T]":
"""Return an iterator that is the union of this and the others.
Args:
deterministic (bool): If deterministic=True, we alternate between
reading from one iterator and the others. Otherwise we return
items from iterators as they become ready.
round_robin_weights (list): List of weights to use for round robin
mode. For example, [2, 1] will cause the iterator to pull twice
as many items from the first iterator as the second.
[2, 1, "*"] will cause as many items to be pulled as possible
from the third iterator without blocking. This overrides the
deterministic flag.
"""
for it in others:
if not isinstance(it, LocalIterator):
raise ValueError(f"other must be of type LocalIterator, got {type(it)}")
active = []
parent_iters = [self] + list(others)
shared_metrics = SharedMetrics(parents=[p.shared_metrics for p in parent_iters])
timeout = None if deterministic else 0
if round_robin_weights:
if len(round_robin_weights) != len(parent_iters):
raise ValueError(
"Length of round robin weights must equal number of "
"iterators total."
)
timeouts = [0 if w == "*" else None for w in round_robin_weights]
else:
timeouts = [timeout] * len(parent_iters)
round_robin_weights = [1] * len(parent_iters)
for i, it in enumerate(parent_iters):
active.append(
LocalIterator(
it.base_iterator,
shared_metrics,
it.local_transforms,
timeout=timeouts[i],
)
)
active = list(zip(round_robin_weights, active))
def build_union(timeout=None):
while True:
for weight, it in list(active):
if weight == "*":
                        max_pull = 100  # TODO(ekl) how to best bound this?
else:
max_pull = _randomized_int_cast(weight)
try:
for _ in range(max_pull):
item = next(it)
if isinstance(item, _NextValueNotReady):
if timeout is not None:
yield item
break
else:
yield item
except StopIteration:
active.remove((weight, it))
if not active:
break
return LocalIterator(
build_union,
shared_metrics,
[],
name=f"LocalUnion[{self}, {', '.join(map(str, others))}]",
)
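# A small sketch (illustrative, not from the original docs) of the weighted
# round-robin behaviour described in LocalIterator.union(): with
# round_robin_weights=[2, 1] the union pulls two items from the first iterator
# for every one item pulled from the second.
#
#     evens = from_range(10, 1).for_each(lambda x: 2 * x).gather_sync()
#     odds = from_range(10, 1).for_each(lambda x: 2 * x + 1).gather_sync()
#     merged = evens.union(odds, round_robin_weights=[2, 1])
#     # merged.take(6) -> expected order [0, 2, 1, 4, 6, 3]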
class ParallelIteratorWorker(object):
"""Worker actor for a ParallelIterator.
Actors that are passed to iter.from_actors() must subclass this interface.
"""
def __init__(self, item_generator: Any, repeat: bool):
"""Create an iterator worker.
Subclasses must call this init function.
Args:
item_generator (obj): A Python iterable or lambda function
that produces a generator when called. We allow lambda
functions since the generator itself might not be serializable,
but a lambda that returns it can be.
repeat (bool): Whether to loop over the iterator forever.
"""
def make_iterator():
if callable(item_generator):
return item_generator()
else:
return item_generator
if repeat:
def cycle():
while True:
it = iter(make_iterator())
if it is item_generator:
                        raise ValueError(
                            "Cannot iterate over {0} multiple times. "
                            "Please pass in the base iterable or "
                            "lambda: {0} instead.".format(item_generator)
                        )
for item in it:
yield item
self.item_generator = cycle()
else:
self.item_generator = make_iterator()
self.transforms = []
self.local_it = None
self.next_ith_buffer = None
def par_iter_init(self, transforms):
"""Implements ParallelIterator worker init."""
it = LocalIterator(lambda timeout: self.item_generator, SharedMetrics())
for fn in transforms:
it = fn(it)
assert it is not None, fn
self.local_it = iter(it)
def par_iter_next(self):
"""Implements ParallelIterator worker item fetch."""
assert self.local_it is not None, "must call par_iter_init()"
return next(self.local_it)
def par_iter_next_batch(self, batch_ms: int):
"""Batches par_iter_next."""
batch = []
if batch_ms == 0:
batch.append(self.par_iter_next())
return batch
t_end = time.time() + (0.001 * batch_ms)
while time.time() < t_end:
try:
batch.append(self.par_iter_next())
except StopIteration:
if len(batch) == 0:
raise StopIteration
else:
pass
return batch
def par_iter_slice(self, step: int, start: int):
"""Iterates in increments of step starting from start."""
assert self.local_it is not None, "must call par_iter_init()"
if self.next_ith_buffer is None:
self.next_ith_buffer = collections.defaultdict(list)
index_buffer = self.next_ith_buffer[start]
if len(index_buffer) > 0:
return index_buffer.pop(0)
else:
for j in range(step):
try:
val = next(self.local_it)
self.next_ith_buffer[j].append(val)
except StopIteration:
pass
if not self.next_ith_buffer[start]:
raise StopIteration
return self.next_ith_buffer[start].pop(0)
def par_iter_slice_batch(self, step: int, start: int, batch_ms: int):
"""Batches par_iter_slice."""
batch = []
if batch_ms == 0:
batch.append(self.par_iter_slice(step, start))
return batch
t_end = time.time() + (0.001 * batch_ms)
while time.time() < t_end:
try:
batch.append(self.par_iter_slice(step, start))
except StopIteration:
if len(batch) == 0:
raise StopIteration
else:
pass
return batch
def _randomized_int_cast(float_value):
base = int(float_value)
remainder = float_value - base
if random.random() < remainder:
base += 1
return base
class _NextValueNotReady(Exception):
"""Indicates that a local iterator has no value currently available.
This is used internally to implement the union() of multiple blocking
local generators."""
pass
class _ActorSet(object):
"""Helper class that represents a set of actors and transforms."""
def __init__(
self,
actors: List["ray.actor.ActorHandle"],
transforms: List[Callable[["LocalIterator"], "LocalIterator"]],
):
self.actors = actors
self.transforms = transforms
def init_actors(self):
ray.get([a.par_iter_init.remote(self.transforms) for a in self.actors])
def with_transform(self, fn):
return _ActorSet(self.actors, self.transforms + [fn])
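if __name__ == "__main__":
    # Small self-contained demo (added here as an illustration; not part of the
    # original module): build a parallel iterator, apply a remote transform,
    # and gather the results locally.
    ray.init()
    it = from_items([1, 2, 3, 4], num_shards=2)
    doubled = it.for_each(lambda x: x * 2)
    # Shards are read in an interleaved order, so sort for a stable output.
    print(sorted(doubled.gather_sync().take(4)))  # -> [2, 4, 6, 8]
    ray.shutdown()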
|
|
#! /usr/bin/env python
import os, sys
sys.path.append(os.path.expanduser('~/InstallingSoftware/pythons/'))
import imagetools
from import_tools import *
fl=sys.argv[-1]
ending=""
#fl='/nfs/slac/g/ki/ki18/anja/SUBARU/MACS0416-24/W-S-Z+_2010-11-04/SCIENCE/SUPA0125892_7OCF.fits'
#crfl='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/data_filter_results/results_2.2/data_SCIENCE_cosmics/SEGMENTATION_BB_CRN-cosmics_MACS0416-24_W-S-Z+.SUPA0125892_7.fits'
header=pyfits.open(fl)[0].header
#OBJECT=header['MYOBJ']
OBJECT='MACS0416-24'
FILTER=header['FILTER']
CCDnum=header['IMAGEID']
BASE=os.path.basename(fl).split('OCF.')[0]
OFB='%s_%s_%s' % (OBJECT,FILTER,BASE,)
image=imagetools.GetImage(fl)
compare_dir='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/data_filter_results/results_2.2/data_SCIENCE_compare/'
plot_dir='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/data_filter_results/results_2.2/plot_SCIENCE_SS/'
BBCRfl='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/data_filter_results/results_2.2/data_SCIENCE_cosmics/SEGMENTATION_BB_CRNitschke.%s%s.fits' % (BASE,ending)
CR_segfl='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/data_filter_results/results_2.2/data_SCIENCE_cosmics/SEGMENTATION_CRNitshke.%s%s.fits' % (BASE,ending)
CR_filtfl='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/data_filter_results/results_2.2/data_SCIENCE_cosmics/FILTERED_CRNitschke.%s%s.fits' % (BASE,ending)
#fl_original=compare_dir+'BBout_ORIGINAL_%s_%s.%s.fits' % (OBJECT,FILTER,BASE)
fl_woblend=compare_dir+'BBout_WOblend.%s%s.fits' % (BASE,ending)
fl_revised=compare_dir+'BBrevised_*_BBCR.%s%s.fits' % (BASE,ending)
#fl_erase=compare_dir+'BB_ERASED_'+bthresh1_tag+'_BBCR_%s_%s.%s.fits' % (OBJECT,FILTER,BASE)
#fl_revised=compare_dir+'BBrevised_'+bthresh1_tag+'_BBCR_%s_%s.%s.fits' % (OBJECT,FILTER,BASE)
#im_erased=pyfits.open(fl_erased)[0].data
#im_erased.max()
BBCRseg=imagetools.GetImage(BBCRfl)
BBCRseg=asarray(BBCRseg,dtype=int)
crheader=pyfits.open(BBCRfl)[0].header
seeing=crheader['MYSEEING']
seeing_str=('%.3f' % (seeing)).replace('0.','pt')
OFB_seeing=OFB.replace('SUPA',seeing_str+'_SUPA')
filtim=imagetools.GetImage(CR_filtfl)
## get properties of the masks
import skimage
from skimage import measure
cr_regs=skimage.measure.regionprops(label_image=BBCRseg, intensity_image=image)
cr_labels=arange(BBCRseg.max(),dtype=int)+1
cr_e=asarray([cr_regs[i-1].eccentricity for i in cr_labels])
cr_diam=asarray([cr_regs[i-1].equivalent_diameter for i in cr_labels])
cr_solidity=asarray([cr_regs[i-1].solidity for i in cr_labels])
cr_max=asarray([cr_regs[i-1].max_intensity for i in cr_labels])
cr_mean=asarray([cr_regs[i-1].mean_intensity for i in cr_labels])
cr_area=asarray([cr_regs[i-1].area for i in cr_labels])
conn8=ones((3,3),dtype=bool)
conn4=scipy.ndimage.generate_binary_structure(2,1) # 4-connectivity structure, needed by binary_erosion below
CRslices=scipy.ndimage.find_objects(BBCRseg)
def cr_any_label(labels):
boolim=zeros(BBCRseg.shape,dtype=bool)
for l in labels:
boolim+=BBCRseg==l
return boolim
## see if skewness and kurtosis does anything
def skew_kurt_2D(Z):
h,w = np.shape(Z)
x = range(w)
y = range(h)
X,Y = np.meshgrid(x,y)
#Centroid (mean)
cx = np.sum(Z*X)/np.sum(Z)
cy = np.sum(Z*Y)/np.sum(Z)
###Standard deviation
x2 = (range(w) - cx)**2
y2 = (range(h) - cy)**2
X2,Y2 = np.meshgrid(x2,y2)
#Find the variance
vx = np.sum(Z*X2)/np.sum(Z)
vy = np.sum(Z*Y2)/np.sum(Z)
#SD is the sqrt of the variance
sx,sy = np.sqrt(vx),np.sqrt(vy)
###Skewness
x3 = (range(w) - cx)**3
y3 = (range(h) - cy)**3
X3,Y3 = np.meshgrid(x3,y3)
    #Find the third central moment
m3x = np.sum(Z*X3)/np.sum(Z)
m3y = np.sum(Z*Y3)/np.sum(Z)
#Skewness is the third central moment divided by SD cubed
skx = m3x/sx**3
sky = m3y/sy**3
###Kurtosis
x4 = (range(w) - cx)**4
y4 = (range(h) - cy)**4
X4,Y4 = np.meshgrid(x4,y4)
#Find the fourth central moment
m4x = np.sum(Z*X4)/np.sum(Z)
m4y = np.sum(Z*Y4)/np.sum(Z)
#Kurtosis is the fourth central moment divided by SD to the fourth power
kx = m4x/sx**4
ky = m4y/sy**4
#Centroid x: cx #Centroid y: cy
#StdDev x: sx #StdDev y: sy
#Skewness x: skx #Skewness y: sky
#Kurtosis x: kx #Kurtosis y: ky
return skx,sky,kx,ky
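## quick illustrative check (not part of the pipeline): a symmetric patch has zero
## skewness along both axes, e.g. skew_kurt_2D(ones((5,5))) gives approximately
## (skx,sky,kx,ky) = (0.0, 0.0, 1.7, 1.7)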
cr_skxs,cr_skys,cr_kxs,cr_kys=[],[],[],[]
## make the final cuts
MaxInside8s=[]
removed_labels=[]
for i,sl in enumerate(CRslices):
l=i+1
spots=BBCRseg[sl]==l
patch=image[sl]
max_pos_pt=scipy.ndimage.measurements.maximum_position(patch,spots)
max_spot=zeros(patch.shape,dtype=bool)
max_spot[max_pos_pt]=1
#now make sure max isn't on the edge and is in an open8 portion
insides_spots=scipy.ndimage.binary_erosion(spots,conn4)
open8_spots=scipy.ndimage.binary_opening(spots,conn8)
MaxInside8=(max_spot*insides_spots*open8_spots).any()
MaxInside8s.append(MaxInside8)
#now get clipped eccentricity
clip_spots=binary_propagation(max_spot,mask=spots)
try:
reg=skimage.measure.regionprops(clip_spots)[0]
except TypeError:
if 1 in clip_spots.shape:
cr_skxs.append(nan);cr_skys.append(nan);cr_kxs.append(nan);cr_kys.append(nan)
continue
e_clip=reg.eccentricity
e_orig=cr_e[i]
#now get skewness and kurtosis
skx,sky,kx,ky=skew_kurt_2D(patch-patch.min())
cr_skxs.append(skx);cr_skys.append(sky);cr_kxs.append(kx);cr_kys.append(ky)
if e_clip>e_orig:
cr_e[i]=e_clip
if e_clip>.8 and e_orig<.8:
removed_labels.append(l)
#######Xspots_starlike=cr_any_label(cr_labels[starlike])
#######Xspots_not_starlike=cr_any_label(cr_labels[logical_not(starlike)])
#######CLIPseg,CLIPseg_Nlabels=scipy.ndimage.label(Xspots_starlike,conn8)
#######CLIPslices=scipy.ndimage.find_objects(CLIPseg)
########f=figure()
########skx,sky,kx,ky=skew_kurt_2D(sp);f.add_subplot(321);title('sp: skx=%.2f,sky=%.2f,kx=%.2f,ky=%.2f' % (skx,sky,kx,ky));imshow(sp,interpolation='nearest',origin='lower left')
########skx,sky,kx,ky=skew_kurt_2D(sp);f.add_subplot(322);title('sp: skx=%.2f,sky=%.2f,kx=%.2f,ky=%.2f' % (skx,sky,kx,ky));imshow(sp,interpolation='nearest',origin='lower left')
########skx,sky,kx,ky=skew_kurt_2D(p);f.add_subplot(323);title('p: skx=%.2f,sky=%.2f,kx=%.2f,ky=%.2f' % (skx,sky,kx,ky));imshow(p,interpolation='nearest',origin='lower left')
########skx,sky,kx,ky=skew_kurt_2D(pppp);f.add_subplot(324);title('pppp: skx=%.2f,sky=%.2f,kx=%.2f,ky=%.2f' % (skx,sky,kx,ky));imshow(pppp,interpolation='nearest',origin='lower left')
########skx,sky,kx,ky=skew_kurt_2D(pp);f.add_subplot(325);title('pp: skx=%.2f,sky=%.2f,kx=%.2f,ky=%.2f' % (skx,sky,kx,ky));imshow(pp,interpolation='nearest',origin='lower left')
########skx,sky,kx,ky=skew_kurt_2D(ppp);f.add_subplot(326);title('ppp: skx=%.2f,sky=%.2f,kx=%.2f,ky=%.2f' % (skx,sky,kx,ky));imshow(ppp,interpolation='nearest',origin='lower left')
########show()
cr_skxs=asarray(cr_skxs).__abs__();cr_skys=asarray(cr_skys).__abs__();cr_kxs=asarray(cr_kxs).__abs__();cr_kys=asarray(cr_kys).__abs__()
cr_kmax=asarray([max(ky,kx) for ky,kx in zip(cr_kys,cr_kxs)])
cr_skmax=asarray([max(sky,skx) for sky,skx in zip(cr_skys,cr_skxs)])
MaxInside8s=asarray(MaxInside8s)
removed_labels=asarray(removed_labels)
starlike=(cr_e<.8)*(cr_area>9)*(cr_area<50)*(cr_max<30000)*MaxInside8s*(cr_kmax<4.1)*(cr_skmax<.88)
starlike_labels=cr_labels[starlike]
Xspots_starlike=cr_any_label(starlike_labels)
Xspots_not_starlike=cr_any_label(cr_labels[logical_not(starlike)])
## save plots of star postage stamps and things that missed the cut
#params=['e=%.2f , area=%i' % (cr_e[i],cr_area[i]) for i in cr_labels-1]
#params=['skxy=%.2f/%.2f|kxy=%.2f/%.2f' % (cr_skxs[i],cr_skys[i],cr_kxs[i],cr_kys[i]) for i in cr_labels-1]
params=['sk=%.2f|k=%.2f' % (cr_skmax[i],cr_kmax[i]) for i in cr_labels-1]
import img_scale
zmin,zmax,ziter=img_scale.range_from_zscale(image,contrast=.25)
fig=imagetools.plotlabels(ll=starlike_labels,segments=BBCRseg,slices=CRslices,params=params,background=image,zscale=(zmin,zmax))
fig.suptitle('Possible Stars Picked from blocked_blender.2.2.py Masks\neccentricity<.8 & 9<area<50 & max intensity<30,000 & 3x3 inside mask shape')
fig.savefig(plot_dir+'pltSS_Star_Candidates_'+OFB_seeing)
if len(removed_labels):
fig=imagetools.plotlabels(ll=removed_labels,segments=BBCRseg,slices=CRslices,params=params,background=image,zscale=(zmin,zmax))
fig.suptitle('Not Starlike: eccentricity<.8 & when clipped eccentricity>.8')
fig.savefig(plot_dir+'pltSS_Star_Candidates-Removed-Clip_e_raise_'+OFB_seeing)
starlike_not8=(cr_e<.8)*(cr_area>5)*(cr_area<50)*(cr_max<30000)*logical_not(MaxInside8s)*(cr_kmax<4.1)*(cr_skmax<.88)
if starlike_not8.any():
fig=imagetools.plotlabels(cr_labels[starlike_not8],segments=BBCRseg,slices=CRslices,params=params,background=image,zscale=(zmin,zmax))
fig.suptitle('Not Starlike: Would be starlike, but no conn8 in the shape')
fig.savefig(plot_dir+'pltSS_Star_Candidates-Removed-open8_'+OFB_seeing)
starlike_e=(cr_e>.8)*(cr_e<.84)*(cr_area>9)*(cr_area<50)*(cr_max<30000)*MaxInside8s*(cr_kmax<4.1)*(cr_skmax<.88)
if starlike_e.any():
fig=imagetools.plotlabels(ll=cr_labels[starlike_e],segments=BBCRseg,slices=CRslices,params=params,background=image,zscale=(zmin,zmax))
fig.suptitle('Not Starlike: .8<eccentricity<.84')
fig.savefig(plot_dir+'pltSS_Star_Candidates-Removed-e_to_pt84_'+OFB_seeing)
starlike_gt30000=(cr_e<.8)*(cr_area>9)*(cr_area<50)*MaxInside8s*(cr_max>30000)*(cr_kmax<4.1)*(cr_skmax<.88)
if starlike_gt30000.any():
fig=imagetools.plotlabels(ll=cr_labels[starlike_gt30000],segments=BBCRseg,slices=CRslices,params=params,background=image,zscale=(zmin,zmax))
fig.suptitle('Not Starlike: Greater than 30,000')
fig.savefig(plot_dir+'pltSS_Star_Candidates-Removed-greater_than_30000_'+OFB_seeing)
starlike_skew_kurt=(cr_e<.8)*(cr_area>9)*(cr_area<50)*(cr_max<30000)*MaxInside8s*((cr_kmax>=4.1)+(cr_skmax>=.88))
if starlike_skew_kurt.any():
print 'skewness and kurtosis cut removed: ',starlike_skew_kurt.sum()
fig=imagetools.plotlabels(ll=cr_labels[starlike_skew_kurt],segments=BBCRseg,slices=CRslices,params=params,background=image,zscale=(zmin,zmax))
fig.suptitle('Not Starlike: too skewed or large kurtosis')
fig.savefig(plot_dir+'pltSS_Star_Candidates-Removed-skew_kurt'+OFB_seeing)
#f=imagetools.ImageWithSpots([image,filtim],Xspots_starlike,name1='image',name2='filtered image',nameX='Possible Stars',ignore_scale=True,mode='box')
#f.savefig(plot_dir+'pltSS_Star_Candidates-full_image_'+OFB_seeing)
## Save KeepOrRM image and the final image with masks included
KeepOrRM=zeros(Xspots_starlike.shape,dtype=int)
KeepOrRM[Xspots_starlike]=-1
KeepOrRM[Xspots_not_starlike]=1
hdu=pyfits.PrimaryHDU(asarray(KeepOrRM,dtype=int))
hdu.header=crheader
fl_KeepOrRM=BBCRfl.replace('SEGMENTATION_BB_CRNitschke','SEGMENTATION_KeepOrRM-starlike_cosmics')
hdu.writeto(fl_KeepOrRM,clobber=False)
final_im=image.copy()
final_im[Xspots_not_starlike]=0
hdu=pyfits.PrimaryHDU(asarray(final_im,dtype=float))
hdu.header=crheader
fl_final=BBCRfl.replace('SEGMENTATION_BB_CRNitschke','StarRMout_KeepOrRM-purified_cosmics')
hdu.writeto(fl_final,clobber=False)
files2check=[fl,fl_woblend,fl_revised,fl_KeepOrRM,fl_final]
print '\nds9 -zscale -tile mode column '+' '.join(files2check)+' -zscale -lock frame image -lock crosshair image -geometry 2000x2000 &'
## plot star column in eccentricity vs. diameter space
from matplotlib import collections
fig, ax_d = subplots(figsize=(14,11))
ax_d.plot(cr_e, cr_diam, 'b.')
ax_d.plot(cr_e[starlike], cr_diam[starlike], 'bo')
star_e,star_diam,star_area=(cr_e[starlike], cr_diam[starlike], cr_area[starlike])
median_star_area=median(star_area)
median_star_diam=median(star_diam)
fwhm=seeing/.202 #convert to pixels
star_diam_fwhm_ratio=median_star_diam/fwhm
fig.suptitle('Plot of eccentricity vs. effective diameter (blue) or vs. area (red) \n Seeing=%.2f" & FWHM Star = %.2f pixels & Median Diameter = %.2f & Ratio FWHM/Median(Diam)=%.2f' % (seeing,fwhm,median_star_diam,star_diam_fwhm_ratio))
ax_d.set_xlabel('eccentricity')
# Make the y-axis label and tick labels match the line color.
ax_d.set_ylabel(r'Effective Diameter = $\sqrt{4/\pi \times area}$', color='b')
for tl in ax_d.get_yticklabels():
tl.set_color('b')
ax_a = ax_d.twinx()
ax_a.plot(cr_e, cr_area, 'r.')
ax_a.plot(cr_e[starlike], cr_area[starlike], 'ro')
ax_a.set_ylabel('Area [pixels]', color='r')
for tl in ax_a.get_yticklabels():
tl.set_color('r')
collection_a = collections.BrokenBarHCollection(xranges=[(0,.8)],yrange=[9,50],facecolor='red', alpha=0.5)
collection_d = collections.BrokenBarHCollection(xranges=[(0,.8)],yrange=[sqrt(4/pi*9),sqrt(4/pi*50)],facecolor='blue', alpha=0.5)
ax_a.add_collection(collection_a)
ax_d.add_collection(collection_d)
fig.savefig(plot_dir+'pltSS_e_vs_diam_and_area_'+OFB_seeing)
## print stats
print "\nfor fl: %s \n\tseeing=%.2f" % (fl,seeing)
CRseg_tot_num=BBCRseg.max()
CRseg_removed_stars=starlike.sum()
print "\t# CR masks started with: %s\n\t# CR masks finished with: %s\n\t# CR masks deleted/starlike: %s" % (CRseg_tot_num,CRseg_tot_num-CRseg_removed_stars,CRseg_removed_stars)
print "\nBBCR_stats",BASE,CRseg_removed_stars
## save SEGMENTATION_BBSS_CRN-cosmics (the new file that replaces SEGMENTATION_BB_CRN-cosmics in the pipeline)
BBSSCRseg,BBSSCRseg_Nlabels=scipy.ndimage.label(Xspots_not_starlike,conn8)
hdu=pyfits.PrimaryHDU(data=BBSSCRseg,header=crheader)
BBSSCRfl=BBCRfl.replace('SEGMENTATION_BB_CRNitschke','SEGMENTATION_BBSS_CRNitschke')
hdu.writeto(BBSSCRfl,clobber=False)
|
|
import matplotlib.pyplot as plt
import numpy as np
import pathlib
import scipy.interpolate
import sys
import typing
import vorpy.pickle
def zero_crossing_times (t_v:np.ndarray, f_v:np.ndarray, *, orientation:int=0) -> typing.Tuple[np.ndarray, np.ndarray]:
"""
Returns piecewise-linearly-computed approximations of the actual zero crossing times of the function (t,f(t))
    (where the function is defined on the elements of t_v by t_v[i] |-> f_v[i]), and the pairs of indices [of t_v]
that the zero crossings occur between.
The orientation parameter may be used to specify the orientation of zero crossings to return.
- orientation == 0 : return all zero crossings
- orientation < 0 : return negatively oriented zero crossings (where the function goes from positive to negative)
- orientation > 0 : return positively oriented zero crossings (where the function goes from negative to positive)
"""
if len(t_v) != len(f_v):
raise TypeError(f'expected len(t_v) == len(f_v), but got len(t_v) = {len(t_v)} and len(f_v) = {len(f_v)}')
# zc stands for zero crossing.
# Non-positive elements of this indicate a zero crossing.
zc_discriminant_v = f_v[:-1] * f_v[1:]
# Consider only strictly negative discriminant as indicating a zero crossing. This will not pick up
# cases where there is a repeated zero, or where the function touches but doesn't cross zero.
zc_v = zc_discriminant_v < 0
zc_index_v = np.where(zc_v)[0]
assert np.all(zc_index_v < len(t_v)-1)
if orientation != 0:
zc_orientation_v = np.sign(f_v[zc_index_v+1] - f_v[zc_index_v])
assert np.all(zc_orientation_v != 0), 'this should be true by construction (following the zc_discriminant_v < 0 condition)'
zc_index_v = zc_index_v[zc_orientation_v == np.sign(orientation)]
assert np.all(np.sign(f_v[zc_index_v+1]) != np.sign(f_v[zc_index_v]))
assert np.all(f_v[zc_index_v+1]*f_v[zc_index_v] < 0), 'this should be equivalent to the sign check, but is done using discriminant'
if orientation != 0:
assert np.all(np.sign(f_v[zc_index_v+1]) == np.sign(orientation))
zc_index_pair_t = np.ndarray((len(zc_index_v),2), dtype=int)
zc_index_pair_t[:,0] = zc_index_v
zc_index_pair_t[:,1] = zc_index_v+1
assert np.all(zc_index_pair_t < len(t_v)), 'each element of zc_index_pair_t should be a valid index for both t_v and f_v'
# Make tensors quantifying the intervals containing the zero crossings.
# Note here that because zc_index_pair_t is a 2-tensor, and t_v and f_v are 1-tensors,
# zc_interval_t_v and zc_interval_f_v will be a 2-tensor whose rows are the interval bounds.
zc_interval_t_v = t_v[zc_index_pair_t]
zc_interval_f_v = f_v[zc_index_pair_t]
assert zc_interval_t_v.shape == (len(zc_index_v),2)
assert zc_interval_f_v.shape == (len(zc_index_v),2)
# For each zero crossing, use a piecewise linear interpolation of f_v to solve for a better
# approximation of the exact time it crosses zero.
zc_t_delta_v = np.diff(zc_interval_t_v, axis=1).reshape(-1)
zc_f_delta_v = np.diff(zc_interval_f_v, axis=1).reshape(-1)
zc_t_v = zc_interval_t_v[:,0] - zc_interval_f_v[:,0]*zc_t_delta_v/zc_f_delta_v
## Numerical sanity check (the bound is based on the max number encountered in the solution for the respective component of zc_t_v).
#assert np.all(np.interp(zc_t_v, t_v, f_v) < 1.0e-8*np.max(zc_interval_f_v, axis=1))
return zc_t_v, zc_index_pair_t
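# Illustrative check (not part of the original script): for f(t) = sin(t) sampled
# densely on [0, 2*pi] the only strict sign change is near t = pi, and it is
# negatively oriented (the function passes from positive to negative there):
#
#     t = np.linspace(0.0, 2.0 * np.pi, 1001)
#     zc_t, _ = zero_crossing_times(t, np.sin(t), orientation=-1)
#     # zc_t is approximately [3.14159...]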
def critical_points (t_v:np.ndarray, f_v:np.ndarray, *, orientation:int=0) -> typing.Tuple[np.ndarray, np.ndarray]:
"""
Returns a tensor C of shape (k,2), where the ith critical point (t_i,f_i) is (C[i,0], C[i,1]), and the pairs of
indices [of t_v] that the critical points occur between.
"""
if len(t_v) != len(f_v):
raise TypeError(f'expected len(t_v) == len(f_v), but got len(t_v) = {len(t_v)} and len(f_v) = {len(f_v)}')
# Use a symmetric definition of derivative.
discrete_deriv_f_v = (f_v[2:] - f_v[:-2]) / (t_v[2:] - t_v[:-2])
critical_point_t_v, critical_point_index_pair_t = zero_crossing_times(t_v[1:-1], discrete_deriv_f_v, orientation=orientation)
critical_point_t = np.ndarray((len(critical_point_t_v),2), dtype=critical_point_t_v.dtype)
critical_point_t[:,0] = critical_point_t_v
critical_point_t[:,1] = np.interp(critical_point_t_v, t_v, f_v)
return critical_point_t, critical_point_index_pair_t
def local_maxima (t_v:np.ndarray, f_v:np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray]:
    return critical_points(t_v, f_v, orientation=-1)
def local_minima (t_v:np.ndarray, f_v:np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray]:
    return critical_points(t_v, f_v, orientation=1)
def compute_lambda_v (x_v:np.ndarray, *, name_o:typing.Optional[str]=None) -> typing.Tuple[np.ndarray, typing.Tuple[float, float], float]:
prefix = '' if name_o is None else f'{name_o} '
pos_diff_v = np.diff(x_v)
lambda_v = pos_diff_v[1:] / pos_diff_v[:-1]
print(f'{prefix}lambda_v = {lambda_v}')
if len(lambda_v) > 0:
lambda_range = (np.min(lambda_v), np.max(lambda_v))
lambda_range_size = lambda_range[1] - lambda_range[0]
else:
lambda_range = (np.nan, np.nan)
lambda_range_size = np.nan
print(f'{prefix}lambda_v in range {lambda_range}')
print(f'{prefix}lambda_v range size = {lambda_range_size}')
return lambda_v, lambda_range, lambda_range_size
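# Illustrative check (not part of the original script): for geometrically spaced
# event times the successive spacing ratios are constant, so the reported range
# collapses to a single value:
#
#     compute_lambda_v(np.array([1.0, 2.0, 4.0, 8.0]), name_o='demo')
#     # lambda_v == [2.0, 2.0], lambda_range == (2.0, 2.0), range size == 0.0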
def main (*, pickle_p:pathlib.Path, plot_p:pathlib.Path, plot_momentum=False) -> None:
data_d = vorpy.pickle.unpickle(pickle_filename=pickle_p, log_out=sys.stdout)
results = data_d['results']
t_v = results.t_v
R_v = results.y_t[:,0,0]
p_R_v = results.y_t[:,1,0]
R_local_maximum_t, _ = local_maxima(t_v, R_v)
R_lambda_v, R_lambda_range, R_lambda_range_size = compute_lambda_v(R_local_maximum_t[:,0], name_o='R')
#R_quasiperiod = R_local_maximum_t[1,0] - R_local_maximum_t[0,0]
theta_v = results.y_t[:,0,1]
p_theta_v = results.y_t[:,1,1]
#theta_critical_point_t, _ = critical_points(t_v, theta_v)
theta_local_maximum_t, _ = local_maxima(t_v, theta_v)
theta_local_minimum_t, _ = local_minima(t_v, theta_v)
theta_lambda_v, theta_lambda_range, theta_lambda_range_size = compute_lambda_v(theta_local_maximum_t[:,0], name_o='theta')
#theta_quasiperiod = theta_local_maximum_t[1,0] - theta_local_maximum_t[0,0]
w_v = results.y_t[:,0,2]
w_zero_crossing_v, _ = zero_crossing_times(t_v, w_v)
w_zero_crossing_pos_v, _ = zero_crossing_times(t_v, w_v, orientation=1)
w_zero_crossing_neg_v, _ = zero_crossing_times(t_v, w_v, orientation=-1)
w_lambda_v, w_lambda_range, w_lambda_range_size = compute_lambda_v(w_zero_crossing_pos_v, name_o='w')
p_w_v = results.y_t[:,1,2]
rho_v = np.sqrt(R_v**2 + w_v**2)
J_v = 2*(R_v*p_R_v + w_v*p_w_v)
J_initial = J_v[0]
J_mean = np.mean(J_v)
sqrt_R_initial = np.sqrt(R_v[0])
P_R_initial = 2.0*sqrt_R_initial*p_R_v[0]
P_theta_initial = p_theta_v[0]/sqrt_R_initial + 2.0*sqrt_R_initial*p_w_v[0]
H_initial = (P_R_initial**2 + P_theta_initial**2)/2 - 1.0/(8.0*np.pi*np.sqrt(R_v[0]**2 + w_v[0]**2))
# Collate lambda values
lambda_v = []
if R_lambda_range_size < 1.0e-4:
lambda_v.extend(R_lambda_v)
if theta_lambda_range_size < 1.0e-4:
lambda_v.extend(theta_lambda_v)
if w_lambda_range_size < 1.0e-4:
lambda_v.extend(w_lambda_v)
lambda_v = np.array(lambda_v)
if len(lambda_v) > 0:
lambda_range = (np.min(lambda_v), np.max(lambda_v))
        lambda_range_size = lambda_range[1] - lambda_range[0]
else:
lambda_range = (np.nan, np.nan)
lambda_range_size = np.nan
if np.isfinite(lambda_range_size) and lambda_range_size < 1.0e-4 and len(w_zero_crossing_pos_v) >= 2:
lam = np.mean(lambda_v)
## Record lambda vs J_initial
#vorpy.pickle.pickle(
#data=dict(
#coordinates_name='QuadraticCylindrical',
#qp_initial=results.y_t[0],
#lam=lam,
#J_initial=J_initial,
#),
#pickle_filename=str(pickle_p)+'.J_vs_lam.pickle',
#log_out=sys.stdout,
#)
quasiperiod_t_range = (w_zero_crossing_pos_v[0], w_zero_crossing_pos_v[1])
quasiperiod = np.diff(quasiperiod_t_range)[0]
theta_delta = theta_local_maximum_t[1,1] - theta_local_maximum_t[0,1]
y_t_interpolator = scipy.interpolate.interp1d(t_v, results.y_t, axis=0)
extrapolated_t_v = np.linspace(quasiperiod_t_range[0], quasiperiod_t_range[1], 10000)
extrapolated_y_t = y_t_interpolator(extrapolated_t_v)
extrapolated_R_v = extrapolated_y_t[:,0,0]
extrapolated_theta_v = extrapolated_y_t[:,0,1]
extrapolated_w_v = extrapolated_y_t[:,0,2]
extrapolated_p_R_v = extrapolated_y_t[:,1,0]
extrapolated_p_theta_v = extrapolated_y_t[:,1,1]
extrapolated_p_w_v = extrapolated_y_t[:,1,2]
# Transform the extrapolated curve
extrapolated_t_v -= quasiperiod_t_range[0]
extrapolated_t_v *= lam
extrapolated_t_v += quasiperiod_t_range[1]
#extrapolated_R_v[:] = 0.5*np.log(lam*np.exp(2.0*extrapolated_R_v))
extrapolated_R_v *= lam
extrapolated_theta_v += theta_delta
extrapolated_w_v *= lam
extrapolated_p_R_v /= lam
extrapolated_p_w_v /= lam
# TODO: extrapolate momentum
# Sample the actual solution curve at the extrapolated time values and compare.
valid_t_mask_v = extrapolated_t_v <= t_v[-1]
valid_t_v = extrapolated_t_v[valid_t_mask_v]
sampled_y_t = y_t_interpolator(valid_t_v)
        extrapolation_error_v = np.max(np.abs(sampled_y_t - extrapolated_y_t[valid_t_mask_v,:,:]), axis=0)
extrapolation_error = np.max(extrapolation_error_v)
print(f'\n\nextrapolation_error_v = {extrapolation_error_v}\n\n')
print(f'\n\nextrapolation_error = {extrapolation_error}\n\n')
else:
print('NO UNIQUE LAMBDA, SKIPPING QUASI-PERIODIC SOLUTION SOLVE')
lam = None
extrapolated_t_v = None
extrapolated_y_t = None
extrapolation_error = None
row_count = 2 if plot_momentum else 1
col_count = 2
#size = 8
size = 5
fig,axis_vv = plt.subplots(row_count, col_count, squeeze=False, figsize=(size*col_count,size*row_count))
qp_t = np.ndarray((len(t_v),2,3), dtype=float)
# Change coordinates back into Cartesian
sqrt_R_v = np.sqrt(R_v)
qp_t[:,0,0] = sqrt_R_v*np.cos(theta_v)
qp_t[:,0,1] = sqrt_R_v*np.sin(theta_v)
qp_t[:,0,2] = w_v
qp_t[:,1,0] = 2*sqrt_R_v*p_R_v*np.cos(theta_v) - p_theta_v*np.sin(theta_v)/sqrt_R_v
qp_t[:,1,1] = 2*sqrt_R_v*p_R_v*np.sin(theta_v) + p_theta_v*np.cos(theta_v)/sqrt_R_v
qp_t[:,1,2] = p_w_v
# Sanity check
euclidean_J_v = qp_t[:,0,0]*qp_t[:,1,0] + qp_t[:,0,1]*qp_t[:,1,1] + 2*qp_t[:,0,2]*qp_t[:,1,2]
J_v_error = np.max(np.abs(euclidean_J_v - J_v))
print(f'J_v_error = {J_v_error}')
qp_t_interpolator = scipy.interpolate.interp1d(t_v, qp_t, axis=0)
if extrapolated_y_t is not None:
extrapolated_qp_t = np.ndarray((len(extrapolated_t_v),2,3), dtype=float)
# Change coordinates back into cylindrical
extrapolated_R_v = extrapolated_y_t[:,0,0]
extrapolated_p_R_v = extrapolated_y_t[:,1,0]
extrapolated_theta_v = extrapolated_y_t[:,0,1]
extrapolated_p_theta_v = extrapolated_y_t[:,1,1]
extrapolated_w_v = extrapolated_y_t[:,0,2]
extrapolated_p_w_v = extrapolated_y_t[:,1,2]
# Change coordinates back into Cartesian
sqrt_extrapolated_R_v = np.sqrt(extrapolated_R_v)
extrapolated_qp_t[:,0,0] = sqrt_extrapolated_R_v*np.cos(extrapolated_theta_v)
extrapolated_qp_t[:,0,1] = sqrt_extrapolated_R_v*np.sin(extrapolated_theta_v)
extrapolated_qp_t[:,0,2] = extrapolated_w_v
extrapolated_qp_t[:,1,0] = 2*sqrt_extrapolated_R_v*extrapolated_p_R_v*np.cos(extrapolated_theta_v) - extrapolated_p_theta_v*np.sin(extrapolated_theta_v)/sqrt_extrapolated_R_v
extrapolated_qp_t[:,1,1] = 2*sqrt_extrapolated_R_v*extrapolated_p_R_v*np.sin(extrapolated_theta_v) + extrapolated_p_theta_v*np.cos(extrapolated_theta_v)/sqrt_extrapolated_R_v
extrapolated_qp_t[:,1,2] = extrapolated_p_w_v
source_t_mask_v = (quasiperiod_t_range[0] <= t_v) & (t_v <= quasiperiod_t_range[1])
else:
extrapolated_qp_t = None
axis = axis_vv[0][0]
#axis.set_title(f'Plot of (x(t),y(t))\nInitial conditions (x,y,z,p_x,p_y,p_z):\n{tuple(qp_t[0,:,:].reshape(-1).tolist())}\nPurple segment: source fundamental domain\nOrange segment: extrapolated fundamental domain')
axis.set_aspect(1.0)
axis.plot([0], [0], '.', color='black')
axis.plot(qp_t[:,0,0], qp_t[:,0,1])
if extrapolated_qp_t is not None:
axis.plot(qp_t[source_t_mask_v,0,0], qp_t[source_t_mask_v,0,1], color='purple')
axis.plot(extrapolated_qp_t[:,0,0], extrapolated_qp_t[:,0,1], color='orange')
# Make the plot square
axis_xlim_old = axis.get_xlim()
axis_ylim_old = axis.get_ylim()
axis_x_size = abs(axis_xlim_old[1] - axis_xlim_old[0])
axis_y_size = abs(axis_ylim_old[1] - axis_ylim_old[0])
axis_size = max(axis_x_size, axis_y_size)
if axis_x_size < axis_size:
difference = axis_size - axis_x_size
axis.set_xlim(axis_xlim_old[0]-difference/2.0, axis_xlim_old[1]+difference/2.0)
if axis_y_size < axis_size:
difference = axis_size - axis_y_size
axis.set_ylim(axis_ylim_old[0]-difference/2.0, axis_ylim_old[1]+difference/2.0)
axis = axis_vv[1][0]
#axis.set_title(f'(p_x(t),p_y(t))\npurple: source fund. domain\norange: extrap\'ed fund. domain')
axis.set_aspect(1.0)
axis.plot([0], [0], '.', color='black')
axis.plot(qp_t[:,1,0], qp_t[:,1,1])
if extrapolated_qp_t is not None:
axis.plot(qp_t[source_t_mask_v,1,0], qp_t[source_t_mask_v,1,1], color='purple')
axis.plot(extrapolated_qp_t[:,1,0], extrapolated_qp_t[:,1,1], color='orange')
# Make the plot square
axis_xlim_old = axis.get_xlim()
axis_ylim_old = axis.get_ylim()
axis_x_size = abs(axis_xlim_old[1] - axis_xlim_old[0])
axis_y_size = abs(axis_ylim_old[1] - axis_ylim_old[0])
axis_size = max(axis_x_size, axis_y_size)
if axis_x_size < axis_size:
difference = axis_size - axis_x_size
axis.set_xlim(axis_xlim_old[0]-difference/2.0, axis_xlim_old[1]+difference/2.0)
if axis_y_size < axis_size:
difference = axis_size - axis_y_size
axis.set_ylim(axis_ylim_old[0]-difference/2.0, axis_ylim_old[1]+difference/2.0)
#axis = axis_vv[0][1]
#axis.set_title(f'(t,R(t))\npurple: source fund. domain\norange: extrap\'ed fund. domain')
#axis.axhline(0, color='black')
#axis.plot(t_v, R_v)
#if extrapolated_y_t is not None:
#axis.plot(t_v[source_t_mask_v], R_v[source_t_mask_v], color='purple')
#axis.plot(extrapolated_t_v, extrapolated_y_t[:,0,0], color='orange')
#axis.axvline(quasiperiod_t_range[0], color='black', alpha=0.5)
#axis.axvline(quasiperiod_t_range[1], color='black', alpha=0.5)
#for R_local_maximum in R_local_maximum_t:
#axis.axvline(R_local_maximum[0], color='green', alpha=0.3)
#axis.axhline(R_local_maximum[1], color='green', alpha=0.3)
#axis = axis_vv[1][1]
#axis.set_title(f'(t,p_R(t)) (R = log(r))\npurple: source fund. domain\norange: extrap\'ed fund. domain')
#axis.axhline(0, color='black')
#axis.plot(t_v, p_R_v)
#if extrapolated_y_t is not None:
#axis.plot(t_v[source_t_mask_v], p_R_v[source_t_mask_v], color='purple')
#axis.plot(extrapolated_t_v, extrapolated_y_t[:,1,0], color='orange')
#axis.axvline(quasiperiod_t_range[0], color='black', alpha=0.5)
#axis.axvline(quasiperiod_t_range[1], color='black', alpha=0.5)
#axis = axis_vv[0][2]
#axis.set_title(f'(t,theta(t))\ntheta_lambda range = {theta_lambda_range}\ntheta_lambda range size = {theta_lambda_range_size}')
#axis.axhline(0, color='black')
#axis.plot(t_v, theta_v)
#if extrapolated_y_t is not None:
#axis.plot(t_v[source_t_mask_v], theta_v[source_t_mask_v], color='purple')
#axis.plot(extrapolated_t_v, extrapolated_y_t[:,0,1], color='orange')
#axis.axvline(quasiperiod_t_range[0], color='black', alpha=0.5)
#axis.axvline(quasiperiod_t_range[1], color='black', alpha=0.5)
#for theta_local_maximum in theta_local_maximum_t:
#axis.axvline(theta_local_maximum[0], color='green', alpha=0.3)
#axis.axhline(theta_local_maximum[1], color='green', alpha=0.3)
#axis = axis_vv[1][2]
#axis.set_title(f'(t,p_theta(t))\nlambda used for extrapolation = {lam}\nextrapolation error = {extrapolation_error}')
#axis.axhline(0, color='black')
#axis.plot(t_v, p_theta_v)
#if extrapolated_y_t is not None:
#axis.plot(t_v[source_t_mask_v], p_theta_v[source_t_mask_v], color='purple')
#axis.plot(extrapolated_t_v, extrapolated_y_t[:,1,1], color='orange')
#axis.axvline(quasiperiod_t_range[0], color='black', alpha=0.5)
#axis.axvline(quasiperiod_t_range[1], color='black', alpha=0.5)
#axis = axis_vv[0][3]
axis = axis_vv[0][1]
#axis.set_title(f'(t,w(t))\nw_lambda range = {w_lambda_range}\nw_lambda range size = {w_lambda_range_size}')
#axis.set_title(f'Plot of (t,z(t))\nH = {H_initial}, J = {J_initial}\nlambda = {lam}')
axis.axhline(0, color='black')
#axis.plot(t_v, w_v)
axis.plot(t_v, w_v/4) # w = 4*z
if extrapolated_y_t is not None:
#axis.plot(t_v[source_t_mask_v], w_v[source_t_mask_v], color='purple')
axis.plot(t_v[source_t_mask_v], w_v[source_t_mask_v]/4, color='purple')
#axis.plot(extrapolated_t_v, extrapolated_y_t[:,0,2], color='orange')
axis.plot(extrapolated_t_v, extrapolated_y_t[:,0,2]/4, color='orange')
axis.axvline(quasiperiod_t_range[0], color='black', alpha=0.5)
axis.axvline(quasiperiod_t_range[1], color='black', alpha=0.5)
#for w_zero_crossing in w_zero_crossing_v:
#axis.axvline(w_zero_crossing, color='black', alpha=0.3)
for w_zero_crossing_pos in w_zero_crossing_pos_v:
axis.axvline(w_zero_crossing_pos, color='green', alpha=0.3)
for w_zero_crossing_neg in w_zero_crossing_neg_v:
axis.axvline(w_zero_crossing_neg, color='red', alpha=0.3)
axis = axis_vv[1][1]
#axis.set_title(f'(t,p_w(t))\nw_lambda range = {w_lambda_range}\nw_lambda range size = {w_lambda_range_size}')
#axis.set_title(f'Plot of (t,p_z(t))\nH = {H_initial}, J = {J_initial}\nlambda = {lam}')
axis.axhline(0, color='black')
#axis.plot(t_v, w_v)
axis.plot(t_v, p_w_v*4) # w = 4*z, so p_z = 4*p_w
if extrapolated_y_t is not None:
#axis.plot(t_v[source_t_mask_v], w_v[source_t_mask_v], color='purple')
axis.plot(t_v[source_t_mask_v], p_w_v[source_t_mask_v]*4, color='purple')
#axis.plot(extrapolated_t_v, extrapolated_y_t[:,0,2], color='orange')
axis.plot(extrapolated_t_v, extrapolated_y_t[:,1,2]*4, color='orange')
axis.axvline(quasiperiod_t_range[0], color='black', alpha=0.5)
axis.axvline(quasiperiod_t_range[1], color='black', alpha=0.5)
#for w_zero_crossing in w_zero_crossing_v:
#axis.axvline(w_zero_crossing, color='black', alpha=0.3)
for w_zero_crossing_pos in w_zero_crossing_pos_v:
axis.axvline(w_zero_crossing_pos, color='green', alpha=0.3)
for w_zero_crossing_neg in w_zero_crossing_neg_v:
axis.axvline(w_zero_crossing_neg, color='red', alpha=0.3)
#axis = axis_vv[1][3]
#axis.set_title(f'(t,p_w(t))\nJ_initial = {J_initial}')
#axis.axhline(0, color='black')
#axis.plot(t_v, p_w_v)
#if extrapolated_y_t is not None:
#axis.plot(t_v[source_t_mask_v], p_w_v[source_t_mask_v], color='purple')
#axis.plot(extrapolated_t_v, extrapolated_y_t[:,1,2], color='orange')
#axis.axvline(quasiperiod_t_range[0], color='black', alpha=0.5)
#axis.axvline(quasiperiod_t_range[1], color='black', alpha=0.5)
#axis = axis_vv[0][4]
#axis.set_title(f'(t,rho(t))\nrho = sqrt(R^2 + w^2)\nH_initial = {H_initial}, J_initial = {J_initial}')
#axis.axhline(0, color='black')
#axis.plot(t_v, rho_v)
fig.tight_layout()
plot_p.parent.mkdir(parents=True, exist_ok=True)
plt.savefig(str(plot_p), bbox_inches='tight')
print(f'wrote to file "{plot_p}"')
# VERY important to do this -- otherwise your memory will slowly fill up!
# Not sure which one is actually sufficient -- apparently none of them are, YAY!
plt.clf()
plt.cla()
plt.close()
plt.close(fig)
plt.close('all')
del fig
del axis_vv
if __name__ == '__main__':
if len(sys.argv) != 3:
print(f'Usage: {sys.argv[0]} <pickle> <plot.png>')
sys.exit(-1)
main(pickle_p=pathlib.Path(sys.argv[1]), plot_p=pathlib.Path(sys.argv[2]))
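# --- Illustrative sketch (not part of the original script) ---
# The change of coordinates applied to qp_t above, factored into a standalone
# helper for clarity: x = sqrt(R)*cos(theta), y = sqrt(R)*sin(theta), w kept
# as-is, with the conjugate momenta transformed accordingly. The helper name
# and the (2, 3) array layout (row 0 = positions, row 1 = momenta) are
# assumptions chosen to match the arrays used in this file; nothing calls it.
def quadratic_cylindrical_to_cartesian(qp):
    """Map (R, theta, w, p_R, p_theta, p_w) to (x, y, w, p_x, p_y, p_w)."""
    import numpy as np
    R, theta, w = qp[0]
    p_R, p_theta, p_w = qp[1]
    sqrt_R = np.sqrt(R)
    x = sqrt_R*np.cos(theta)
    y = sqrt_R*np.sin(theta)
    p_x = 2.0*sqrt_R*p_R*np.cos(theta) - p_theta*np.sin(theta)/sqrt_R
    p_y = 2.0*sqrt_R*p_R*np.sin(theta) + p_theta*np.cos(theta)/sqrt_R
    return np.array([[x, y, w], [p_x, p_y, p_w]])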
|
|
#
# Nodes used as utilities and support for transforms etc.
# These often make up sets including both Nodes and ExprNodes
# so it is convenient to have them in a separate module.
#
import Nodes
import ExprNodes
from Nodes import Node
from ExprNodes import AtomicExprNode
from PyrexTypes import c_ptr_type
class TempHandle(object):
# THIS IS DEPRECATED, USE LetRefNode instead
temp = None
needs_xdecref = False
def __init__(self, type):
self.type = type
self.needs_cleanup = type.is_pyobject
def ref(self, pos):
return TempRefNode(pos, handle=self, type=self.type)
def cleanup_ref(self, pos):
return CleanupTempRefNode(pos, handle=self, type=self.type)
class TempRefNode(AtomicExprNode):
# THIS IS DEPRECATED, USE LetRefNode instead
# handle TempHandle
def analyse_types(self, env):
assert self.type == self.handle.type
def analyse_target_types(self, env):
assert self.type == self.handle.type
def analyse_target_declaration(self, env):
pass
def calculate_result_code(self):
result = self.handle.temp
if result is None: result = "<error>" # might be called and overwritten
return result
def generate_result_code(self, code):
pass
def generate_assignment_code(self, rhs, code):
if self.type.is_pyobject:
rhs.make_owned_reference(code)
# TODO: analyse control flow to see if this is necessary
code.put_xdecref(self.result(), self.ctype())
code.putln('%s = %s;' % (self.result(), rhs.result_as(self.ctype())))
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
class CleanupTempRefNode(TempRefNode):
# THIS IS DEPRECATED, USE LetRefNode instead
# handle TempHandle
def generate_assignment_code(self, rhs, code):
pass
def generate_execution_code(self, code):
if self.type.is_pyobject:
code.put_decref_clear(self.result(), self.type)
self.handle.needs_cleanup = False
class TempsBlockNode(Node):
# THIS IS DEPRECATED, USE LetNode instead
"""
Creates a block which allocates temporary variables.
This is used by transforms to output constructs that need
to make use of a temporary variable. Simply pass the types
of the needed temporaries to the constructor.
The variables can be referred to using a TempRefNode
(which can be constructed by calling the handle's ref() method).
"""
# temps [TempHandle]
# body StatNode
child_attrs = ["body"]
def generate_execution_code(self, code):
for handle in self.temps:
handle.temp = code.funcstate.allocate_temp(
handle.type, manage_ref=handle.needs_cleanup)
self.body.generate_execution_code(code)
for handle in self.temps:
if handle.needs_cleanup:
if handle.needs_xdecref:
code.put_xdecref_clear(handle.temp, handle.type)
else:
code.put_decref_clear(handle.temp, handle.type)
code.funcstate.release_temp(handle.temp)
def analyse_control_flow(self, env):
self.body.analyse_control_flow(env)
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.body.analyse_expressions(env)
def generate_function_definitions(self, env, code):
self.body.generate_function_definitions(env, code)
def annotate(self, code):
self.body.annotate(code)
class ResultRefNode(AtomicExprNode):
# A reference to the result of an expression. The result_code
# must be set externally (usually a temp name).
subexprs = []
lhs_of_first_assignment = False
def __init__(self, expression=None, pos=None, type=None, may_hold_none=True):
self.expression = expression
self.pos = None
self.may_hold_none = may_hold_none
if expression is not None:
self.pos = expression.pos
if hasattr(expression, "type"):
self.type = expression.type
if pos is not None:
self.pos = pos
if type is not None:
self.type = type
assert self.pos is not None
def analyse_types(self, env):
if self.expression is not None:
self.type = self.expression.type
def infer_type(self, env):
if self.expression is not None:
return self.expression.infer_type(env)
if self.type is not None:
return self.type
assert False, "cannot infer type of ResultRefNode"
def may_be_none(self):
if not self.type.is_pyobject:
return False
return self.may_hold_none
def _DISABLED_may_be_none(self):
# not sure if this is safe - the expression may not be the
# only value that gets assigned
if self.expression is not None:
return self.expression.may_be_none()
if self.type is not None:
return self.type.is_pyobject
return True # play safe
def is_simple(self):
return True
def result(self):
try:
return self.result_code
except AttributeError:
if self.expression is not None:
self.result_code = self.expression.result()
return self.result_code
def generate_evaluation_code(self, code):
pass
def generate_result_code(self, code):
pass
def generate_disposal_code(self, code):
pass
def generate_assignment_code(self, rhs, code):
if self.type.is_pyobject:
rhs.make_owned_reference(code)
if not self.lhs_of_first_assignment:
code.put_decref(self.result(), self.ctype())
code.putln('%s = %s;' % (self.result(), rhs.result_as(self.ctype())))
rhs.generate_post_assignment_code(code)
rhs.free_temps(code)
def allocate_temps(self, env):
pass
def release_temp(self, env):
pass
def free_temps(self, code):
pass
class LetNodeMixin:
def set_temp_expr(self, lazy_temp):
self.lazy_temp = lazy_temp
self.temp_expression = lazy_temp.expression
def setup_temp_expr(self, code):
self.temp_expression.generate_evaluation_code(code)
self.temp_type = self.temp_expression.type
if self.temp_type.is_array:
self.temp_type = c_ptr_type(self.temp_type.base_type)
self._result_in_temp = self.temp_expression.result_in_temp()
if self._result_in_temp:
self.temp = self.temp_expression.result()
else:
self.temp_expression.make_owned_reference(code)
self.temp = code.funcstate.allocate_temp(
self.temp_type, manage_ref=True)
code.putln("%s = %s;" % (self.temp, self.temp_expression.result()))
self.temp_expression.generate_disposal_code(code)
self.temp_expression.free_temps(code)
self.lazy_temp.result_code = self.temp
def teardown_temp_expr(self, code):
if self._result_in_temp:
self.temp_expression.generate_disposal_code(code)
self.temp_expression.free_temps(code)
else:
if self.temp_type.is_pyobject:
code.put_decref_clear(self.temp, self.temp_type)
code.funcstate.release_temp(self.temp)
class EvalWithTempExprNode(ExprNodes.ExprNode, LetNodeMixin):
# A wrapper around a subexpression that moves an expression into a
# temp variable and provides it to the subexpression.
subexprs = ['temp_expression', 'subexpression']
def __init__(self, lazy_temp, subexpression):
self.set_temp_expr(lazy_temp)
self.pos = subexpression.pos
self.subexpression = subexpression
# if called after type analysis, we already know the type here
self.type = self.subexpression.type
def infer_type(self, env):
return self.subexpression.infer_type(env)
def result(self):
return self.subexpression.result()
def analyse_types(self, env):
self.temp_expression.analyse_types(env)
self.subexpression.analyse_types(env)
self.type = self.subexpression.type
def free_subexpr_temps(self, code):
self.subexpression.free_temps(code)
def generate_subexpr_disposal_code(self, code):
self.subexpression.generate_disposal_code(code)
def generate_evaluation_code(self, code):
self.setup_temp_expr(code)
self.subexpression.generate_evaluation_code(code)
self.teardown_temp_expr(code)
LetRefNode = ResultRefNode
class LetNode(Nodes.StatNode, LetNodeMixin):
# Implements a local temporary variable scope. Imagine this
# syntax being present:
# let temp = VALUE:
# BLOCK (can modify temp)
# if temp is an object, decref
#
# Usually used after analysis phase, but forwards analysis methods
# to its children
child_attrs = ['temp_expression', 'body']
def __init__(self, lazy_temp, body):
self.set_temp_expr(lazy_temp)
self.pos = body.pos
self.body = body
def analyse_control_flow(self, env):
self.body.analyse_control_flow(env)
def analyse_declarations(self, env):
self.temp_expression.analyse_declarations(env)
self.body.analyse_declarations(env)
def analyse_expressions(self, env):
self.temp_expression.analyse_expressions(env)
self.body.analyse_expressions(env)
def generate_execution_code(self, code):
self.setup_temp_expr(code)
self.body.generate_execution_code(code)
self.teardown_temp_expr(code)
class TempResultFromStatNode(ExprNodes.ExprNode):
# An ExprNode wrapper around a StatNode that executes the StatNode
# body. Requires a ResultRefNode that it sets up to refer to its
# own temp result. The StatNode must assign a value to the result
# node, which then becomes the result of this node.
subexprs = []
child_attrs = ['body']
def __init__(self, result_ref, body):
self.result_ref = result_ref
self.pos = body.pos
self.body = body
self.type = result_ref.type
self.is_temp = 1
def analyse_declarations(self, env):
self.body.analyse_declarations(env)
def analyse_types(self, env):
self.body.analyse_expressions(env)
def generate_result_code(self, code):
self.result_ref.result_code = self.result()
self.body.generate_execution_code(code)
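# --- Illustrative sketch (not part of this module) ---
# A minimal sketch of how a transform would typically combine the helpers
# above: evaluate an expression once into a temp via LetRefNode, then build a
# node that refers to that temp and wrap both in an EvalWithTempExprNode.
# Only constructors defined in this file are used; the helper name and the
# `build_body` callback are assumptions for illustration, not Cython API.
def _let_example(expression, build_body):
    ref = LetRefNode(expression)          # lazily refers to the evaluated temp
    return EvalWithTempExprNode(ref, build_body(ref))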
|
|
import numpy as np
import matplotlib.pyplot as plt  # used for the figures below
from snob import nips_search3 as mixture
#from snob import mixture_ka as mixture
# Generate data from the example in Section 13.4 of P & A (2015)
np.random.seed(888)
N = 1000
weight = np.array([0.3, 0.3, 0.3, 0.1])
mu = np.array([
[-4, -4],
[-4, -4],
[2, 2],
[-1, -6]
])
cov = np.array([
[
[1, 0.5],
[0.5, 1]
],
[
[6, -2],
[-2, 6]
],
[
[2, -1],
[-1, 2]
],
[
[0.125, 0],
[0, 0.125]
]
])
y = np.vstack([np.random.multivariate_normal(
mu[i], cov[i], size=int(N * weight[i])) \
for i in range(len(weight))])
#
y = np.loadtxt("cluster_example.txt")
#y = np.loadtxt("s4.txt")
from sklearn import datasets
y, _ = datasets.make_blobs(n_samples=1000, n_features=2, centers=50,
cluster_std=1, center_box=(-20, 20))
#y = np.loadtxt("a3.txt")
search_model = mixture.GaussianMixture(covariance_type="full", predict_mixtures=10)
foo = search_model.search(y)
fig, ax = plt.subplots()
ax.scatter(y.T[0], y.T[1])
means = foo[0]
scat = ax.scatter(means.T[0], means.T[1], c=foo[2], edgecolor="k", s=50)
plt.colorbar(scat)
raise a
# Generate some fake data
N = 1000
D = 2
K = 16
covariance_type = "full"
raise a
from snob import mixture_ka as mixture
model1 = mixture.GaussianMixture()
mu_1, cov_1, weight_1, meta_1 = model1.fit(y, 1)
model2 = mixture.GaussianMixture()
mu_2, cov_2, weight_2, meta_2 = model2.fit(y, 2)
N, D = y.shape
K = 1
Q_K = (0.5 * D * (D + 3) * K) + (K - 1)
Q_K2 = (0.5 * D * (D + 3) * (K + 1)) + (K + 1 - 1)
# Calculate message lengths according to our simplified expression.
import scipy
exp_I1 = (1 - D/2.0) * np.log(2) + 0.5 * Q_K * np.log(N) + 0.5 * np.log(Q_K * np.pi) \
- 0.5 * np.sum(np.log(weight_1)) - scipy.special.gammaln(K) - N * D * np.log(0.001) \
- meta_1["log_likelihood"].sum() + (D*(D+3))/4.0 * np.sum(np.log(weight_1)) \
- (D + 2)/2.0 * np.sum(np.log(np.linalg.det(cov_1)))
# Calculate the deltas in message length, according to our expression.
actual_delta_I = meta_2["message_length"] - meta_1["message_length"]
expected_delta_I = np.log(2) \
+ np.log(N)/2.0 - np.log(K) - 0.5 * (np.sum(np.log(weight_2)) - np.sum(np.log(weight_1))) \
- D * np.log(2)/2.0 + D * (D+3)/4.0 * (np.log(N) + np.sum(np.log(weight_2)) - np.sum(np.log(weight_1))) - (D + 2)/2.0 * (np.sum(np.log(np.linalg.det(cov_2))) - np.sum(np.log(np.linalg.det(cov_1)))) \
+ 0.25 * (2 * np.log(Q_K2/Q_K) - (D * (D+3) + 2) * np.log(2*np.pi)) \
+ meta_2["log_likelihood"] - meta_1["log_likelihood"]
expected_delta_I = expected_delta_I/np.log(2)
dk = 1
expected_delta_I2 = dk * (
(1 - D/2.) * np.log(2) + 0.25 * (D*(D+3) + 2) * np.log(N/(2*np.pi))) \
+ 0.5 * (D*(D+3)/2. - 1) * (np.sum(np.log(weight_2)) - np.sum(np.log(weight_1))) \
- np.sum([np.log(K + _) for _ in range(dk)]) \
- meta_2["log_likelihood"].sum() + meta_1["log_likelihood"].sum() \
+ 0.5 * np.log(Q_K2/float(Q_K)) \
+ (D + 2)/2.0 * (np.sum(np.log(np.linalg.det(cov_1))) - np.sum(np.log(np.linalg.det(cov_2))))
expected_delta_I2 = expected_delta_I2/np.log(2)
# OK, let's see if we can estimate the learning rate \gamma
def _evaluate_gaussian(y, mu, cov):
N, D = y.shape
Cinv = np.linalg.inv(cov)
scale = 1.0/np.sqrt((2*np.pi)**D * np.linalg.det(cov))#
#Cinv**(-0.5)
d = y - mu
return scale * np.exp(-0.5 * np.sum(d.T * np.dot(Cinv, d.T), axis=0))
model = mixture.GaussianMixture()
mu1, cov1, weight1, meta1 = model.fit(y, 1)
x = []
yvals = []
evaluated = []
prediction = []
for k in range(1, 10):
model = mixture.GaussianMixture()
mu, cov, weight, meta = model.fit(y, k)
yvals.append(meta["log_likelihood"].sum())
evaluated.append(np.sum(weight * np.vstack([_evaluate_gaussian(y, mu[i], cov[i]) for i in range(k)]).T))
x.append(k)
if k < 2:
prediction.append(np.nan)
else:
func = mixture._approximate_log_likelihood_improvement(y, mu1, cov1,
weight1, meta1["log_likelihood"].sum(), *yvals[1:])
prediction.append(func(k + 1))
x = np.array(x)
yvals = np.array(yvals)
#ax.scatter(x, yvals)
foo = np.diff(yvals) / np.array(evaluated)[:-1]
cost_function = lambda x, *p: p[0] / np.exp(x) #+ p[1]
import scipy.optimize as op
p_opt, p_cov = op.curve_fit(cost_function, x[:-1][:2], foo[:2], p0=np.ones(1))
fig, ax = plt.subplots()
ax.scatter(x[:-1], foo)
ax.plot(x[:-1], cost_function(x[:-1], *p_opt))
model = mixture.GaussianMixture()
mu, cov, weight, meta = model.fit(y, 1)
model2 = mixture.GaussianMixture()
mu2, cov2, weight2, meta2 = model2.fit(y, 2)
model3 = mixture.GaussianMixture()
mu3, cov3, weight3, meta3 = model3.fit(y, 3)
func = mixture._approximate_log_likelihood_improvement(y, mu, cov, weight,
meta["log_likelihood"].sum(), *[meta2["log_likelihood"].sum()])
fig, ax = plt.subplots()
ax.scatter(x, yvals)
ax.scatter(x, prediction)
ax.plot(x, [func(xi + 1) for xi in x], c='r')
#ax.plot(x[:-1][1:], [func(xi) for xi in x[:-1][1:]])
raise a
# OK, let's see if we can estimate the learning rate \gamma
def _evaluate_gaussian(y, mu, cov):
N, D = y.shape
Cinv = np.linalg.inv(cov)
scale = 1.0/np.sqrt((2*np.pi)**D * np.linalg.det(cov))#
#Cinv**(-0.5)
d = y - mu
return scale * np.exp(-0.5 * np.sum(d.T * np.dot(Cinv, d.T), axis=0))
other = np.log(2) \
+ np.log(N)/2.0 - np.log(K) - 0.5 * (np.sum(np.log(weight_2)) - np.sum(np.log(weight_1))) \
- D * np.log(2)/2.0 + D * (D+3)/4.0 * (np.log(N) + np.sum(np.log(weight_2)) - np.sum(np.log(weight_1))) \
- (D + 2)/2.0 * (np.sum(np.log(np.linalg.det(cov_2))) - np.sum(np.log(np.linalg.det(cov_1)))) \
+ 0.25 * (2 * np.log(Q_K2/Q_K) - (D * (D+3) + 2) * np.log(2*np.pi))
gamma = K * _evaluate_gaussian(y, mu_1[0], cov_1[0]).sum() * (actual_delta_I - other)
# OK, now use gamma to estimate K = 3
K = 2
Q_K3 = (0.5 * D * (D + 3) * K) + (K - 1)
# Let us assume the determinants of covariance matrices will decrease:
cov_3_est = K / (K + 1) * np.linalg.det(cov_2)
cov_3_est = np.hstack([cov_3_est.min(), cov_3_est])
est_weight_3 = np.array([1/3., 1/3., 1/3.])
I_K3_to_K2 = np.log(2) \
+ np.log(N)/2.0 - np.log(K) - 0.5 * (np.sum(np.log(est_weight_3)) - np.sum(np.log(weight_2))) \
- D * np.log(2)/2.0 + D * (D+3)/4.0 * (np.log(N) + np.sum(np.log(est_weight_3)) - np.sum(np.log(weight_2))) \
- (D + 2)/2.0 * (np.sum(np.log(cov_3_est)) - np.sum(np.log(np.linalg.det(cov_2)))) \
+ 0.25 * (2 * np.log(Q_K3/Q_K2) - (D * (D+3) + 2) * np.log(2*np.pi)) \
+ gamma/(K+1) * np.sum(weight_2 * np.vstack([_evaluate_gaussian(y, mu_2[i], cov_2[i]) for i in range(2)]).T)
raise a
delta_I = np.log(2) + 0.5 * np.log(N) - np.log(K) \
+ 0.5 * (D*(D+3)/2.0 * np.log(N) - D * np.log(2)) \
+ 0.5 * (np.sum(np.log(np.linalg.det(cov_2))) - np.sum(np.log(np.linalg.det(cov_1)))) \
+ 0.5 * (np.log(Q_K2/Q_K) - np.log(2*np.pi)/2.0 * (D * (D + 3) + 2))
raise a
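# --- Illustrative cross-check (not part of the original experiment) ---
# _evaluate_gaussian above is just the multivariate normal density, so it can
# be sanity-checked against scipy.stats. The sample data here is made up for
# the check, and since the script halts at the `raise a` markers this function
# is never run automatically; call it by hand if wanted.
def _check_evaluate_gaussian():
    from scipy import stats
    rng = np.random.RandomState(0)
    y_check = rng.normal(size=(100, 2))
    mu_check = np.zeros(2)
    cov_check = np.array([[1.0, 0.3], [0.3, 2.0]])
    ours = _evaluate_gaussian(y_check, mu_check, cov_check)
    theirs = stats.multivariate_normal(mean=mu_check, cov=cov_check).pdf(y_check)
    assert np.allclose(ours, theirs)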
|
|
"""
This module provides WSGI application to serve the Home Assistant API.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/http/
"""
import asyncio
import json
import logging
import ssl
from ipaddress import ip_network
from pathlib import Path
import os
import voluptuous as vol
from aiohttp import web
from aiohttp.web_exceptions import HTTPUnauthorized, HTTPMovedPermanently
import homeassistant.helpers.config_validation as cv
import homeassistant.remote as rem
import homeassistant.util as hass_util
from homeassistant.const import (
SERVER_PORT, CONTENT_TYPE_JSON, ALLOWED_CORS_HEADERS,
EVENT_HOMEASSISTANT_STOP, EVENT_HOMEASSISTANT_START)
from homeassistant.core import is_callback
from homeassistant.util.logging import HideSensitiveDataFilter
from .auth import auth_middleware
from .ban import ban_middleware
from .const import (
KEY_USE_X_FORWARDED_FOR, KEY_TRUSTED_NETWORKS,
KEY_BANS_ENABLED, KEY_LOGIN_THRESHOLD,
KEY_DEVELOPMENT, KEY_AUTHENTICATED)
from .static import FILE_SENDER, CACHING_FILE_SENDER, staticresource_middleware
from .util import get_real_ip
DOMAIN = 'http'
REQUIREMENTS = ('aiohttp_cors==0.5.0',)
CONF_API_PASSWORD = 'api_password'
CONF_SERVER_HOST = 'server_host'
CONF_SERVER_PORT = 'server_port'
CONF_BASE_URL = 'base_url'
CONF_DEVELOPMENT = 'development'
CONF_SSL_CERTIFICATE = 'ssl_certificate'
CONF_SSL_KEY = 'ssl_key'
CONF_CORS_ORIGINS = 'cors_allowed_origins'
CONF_USE_X_FORWARDED_FOR = 'use_x_forwarded_for'
CONF_TRUSTED_NETWORKS = 'trusted_networks'
CONF_LOGIN_ATTEMPTS_THRESHOLD = 'login_attempts_threshold'
CONF_IP_BAN_ENABLED = 'ip_ban_enabled'
# TLS configuration follows the best-practice guidelines specified here:
# https://wiki.mozilla.org/Security/Server_Side_TLS
# Intermediate guidelines are followed.
SSL_VERSION = ssl.PROTOCOL_SSLv23
SSL_OPTS = ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3
if hasattr(ssl, 'OP_NO_COMPRESSION'):
SSL_OPTS |= ssl.OP_NO_COMPRESSION
CIPHERS = "ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:" \
"ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:" \
"ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:" \
"DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:" \
"ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:" \
"ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:" \
"ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES256-SHA384:" \
"ECDHE-ECDSA-AES256-SHA:ECDHE-RSA-AES256-SHA:" \
"DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-RSA-AES256-SHA256:" \
"DHE-RSA-AES256-SHA:ECDHE-ECDSA-DES-CBC3-SHA:" \
"ECDHE-RSA-DES-CBC3-SHA:EDH-RSA-DES-CBC3-SHA:AES128-GCM-SHA256:" \
"AES256-GCM-SHA384:AES128-SHA256:AES256-SHA256:AES128-SHA:" \
"AES256-SHA:DES-CBC3-SHA:!DSS"
_LOGGER = logging.getLogger(__name__)
DEFAULT_SERVER_HOST = '0.0.0.0'
DEFAULT_DEVELOPMENT = '0'
DEFAULT_LOGIN_ATTEMPT_THRESHOLD = -1
HTTP_SCHEMA = vol.Schema({
vol.Optional(CONF_API_PASSWORD, default=None): cv.string,
vol.Optional(CONF_SERVER_HOST, default=DEFAULT_SERVER_HOST): cv.string,
vol.Optional(CONF_SERVER_PORT, default=SERVER_PORT):
vol.All(vol.Coerce(int), vol.Range(min=1, max=65535)),
vol.Optional(CONF_BASE_URL): cv.string,
vol.Optional(CONF_DEVELOPMENT, default=DEFAULT_DEVELOPMENT): cv.string,
vol.Optional(CONF_SSL_CERTIFICATE, default=None): cv.isfile,
vol.Optional(CONF_SSL_KEY, default=None): cv.isfile,
vol.Optional(CONF_CORS_ORIGINS, default=[]): vol.All(cv.ensure_list,
[cv.string]),
vol.Optional(CONF_USE_X_FORWARDED_FOR, default=False): cv.boolean,
vol.Optional(CONF_TRUSTED_NETWORKS, default=[]):
vol.All(cv.ensure_list, [ip_network]),
vol.Optional(CONF_LOGIN_ATTEMPTS_THRESHOLD,
default=DEFAULT_LOGIN_ATTEMPT_THRESHOLD): cv.positive_int,
vol.Optional(CONF_IP_BAN_ENABLED, default=True): cv.boolean
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: HTTP_SCHEMA,
}, extra=vol.ALLOW_EXTRA)
@asyncio.coroutine
def async_setup(hass, config):
"""Set up the HTTP API and debug interface."""
conf = config.get(DOMAIN)
if conf is None:
conf = HTTP_SCHEMA({})
api_password = conf[CONF_API_PASSWORD]
server_host = conf[CONF_SERVER_HOST]
server_port = conf[CONF_SERVER_PORT]
development = conf[CONF_DEVELOPMENT] == '1'
ssl_certificate = conf[CONF_SSL_CERTIFICATE]
ssl_key = conf[CONF_SSL_KEY]
cors_origins = conf[CONF_CORS_ORIGINS]
use_x_forwarded_for = conf[CONF_USE_X_FORWARDED_FOR]
trusted_networks = conf[CONF_TRUSTED_NETWORKS]
is_ban_enabled = conf[CONF_IP_BAN_ENABLED]
login_threshold = conf[CONF_LOGIN_ATTEMPTS_THRESHOLD]
if api_password is not None:
logging.getLogger('aiohttp.access').addFilter(
HideSensitiveDataFilter(api_password))
server = HomeAssistantWSGI(
hass,
development=development,
server_host=server_host,
server_port=server_port,
api_password=api_password,
ssl_certificate=ssl_certificate,
ssl_key=ssl_key,
cors_origins=cors_origins,
use_x_forwarded_for=use_x_forwarded_for,
trusted_networks=trusted_networks,
login_threshold=login_threshold,
is_ban_enabled=is_ban_enabled
)
@asyncio.coroutine
def stop_server(event):
"""Callback to stop the server."""
yield from server.stop()
@asyncio.coroutine
def start_server(event):
"""Callback to start the server."""
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, stop_server)
yield from server.start()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_server)
hass.http = server
host = conf.get(CONF_BASE_URL)
if host:
port = None
elif server_host != DEFAULT_SERVER_HOST:
host = server_host
port = server_port
else:
host = hass_util.get_local_ip()
port = server_port
hass.config.api = rem.API(host, api_password, port,
ssl_certificate is not None)
return True
class HomeAssistantWSGI(object):
"""WSGI server for Home Assistant."""
def __init__(self, hass, development, api_password, ssl_certificate,
ssl_key, server_host, server_port, cors_origins,
use_x_forwarded_for, trusted_networks,
login_threshold, is_ban_enabled):
"""Initialize the WSGI Home Assistant server."""
import aiohttp_cors
middlewares = [auth_middleware, staticresource_middleware]
if is_ban_enabled:
middlewares.insert(0, ban_middleware)
self.app = web.Application(middlewares=middlewares, loop=hass.loop)
self.app['hass'] = hass
self.app[KEY_USE_X_FORWARDED_FOR] = use_x_forwarded_for
self.app[KEY_TRUSTED_NETWORKS] = trusted_networks
self.app[KEY_BANS_ENABLED] = is_ban_enabled
self.app[KEY_LOGIN_THRESHOLD] = login_threshold
self.app[KEY_DEVELOPMENT] = development
self.hass = hass
self.development = development
self.api_password = api_password
self.ssl_certificate = ssl_certificate
self.ssl_key = ssl_key
self.server_host = server_host
self.server_port = server_port
self._handler = None
self.server = None
if cors_origins:
self.cors = aiohttp_cors.setup(self.app, defaults={
host: aiohttp_cors.ResourceOptions(
allow_headers=ALLOWED_CORS_HEADERS,
allow_methods='*',
) for host in cors_origins
})
else:
self.cors = None
def register_view(self, view):
"""Register a view with the WSGI server.
The view argument must be a class that inherits from HomeAssistantView.
It is optional to instantiate it before registering; this method will
handle it either way.
"""
if isinstance(view, type):
# Instantiate the view, if needed
view = view()
if not hasattr(view, 'url'):
class_name = view.__class__.__name__
raise AttributeError(
'{0} missing required attribute "url"'.format(class_name)
)
if not hasattr(view, 'name'):
class_name = view.__class__.__name__
raise AttributeError(
'{0} missing required attribute "name"'.format(class_name)
)
view.register(self.app.router)
def register_redirect(self, url, redirect_to):
"""Register a redirect with the server.
If given this must be either a string or callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax.
"""
def redirect(request):
"""Redirect to location."""
raise HTTPMovedPermanently(redirect_to)
self.app.router.add_route('GET', url, redirect)
def register_static_path(self, url_root, path, cache_length=31):
"""Register a folder to serve as a static path.
Specify optional cache length of asset in days.
"""
if os.path.isdir(path):
self.app.router.add_static(url_root, path)
return
filepath = Path(path)
@asyncio.coroutine
def serve_file(request):
"""Serve file from disk."""
res = yield from CACHING_FILE_SENDER.send(request, filepath)
return res
# aiohttp supports regex matching for variables. Use that as a temporary
# workaround for the MD5 cache-busting suffix in frontend filenames.
# Turns something like /static/dev-panel.html into
# /static/{filename:dev-panel(-[a-z0-9]{32}|)\.html}
base, ext = url_root.rsplit('.', 1)
base, file = base.rsplit('/', 1)
regex = r"{}(-[a-z0-9]{{32}}|)\.{}".format(file, ext)
url_pattern = "{}/{{filename:{}}}".format(base, regex)
self.app.router.add_route('GET', url_pattern, serve_file)
@asyncio.coroutine
def start(self):
"""Start the wsgi server."""
cors_added = set()
if self.cors is not None:
for route in list(self.app.router.routes()):
if hasattr(route, 'resource'):
route = route.resource
if route in cors_added:
continue
self.cors.add(route)
cors_added.add(route)
if self.ssl_certificate:
try:
context = ssl.SSLContext(SSL_VERSION)
context.options |= SSL_OPTS
context.set_ciphers(CIPHERS)
context.load_cert_chain(self.ssl_certificate, self.ssl_key)
except OSError as error:
_LOGGER.error("Could not read SSL certificate from %s: %s",
self.ssl_certificate, error)
context = None
return
else:
context = None
# Aiohttp freezes apps after start so that no changes can be made.
# However in Home Assistant components can be discovered after boot.
# This will now raise a RuntimeError.
# To work around this we now fake that we are frozen.
# A more appropriate fix would be to create a new app and
# re-register all redirects, views, static paths.
self.app._frozen = True # pylint: disable=protected-access
self._handler = self.app.make_handler()
try:
self.server = yield from self.hass.loop.create_server(
self._handler, self.server_host, self.server_port, ssl=context)
except OSError as error:
_LOGGER.error("Failed to create HTTP server at port %d: %s",
self.server_port, error)
self.app._frozen = False # pylint: disable=protected-access
@asyncio.coroutine
def stop(self):
"""Stop the wsgi server."""
if self.server:
self.server.close()
yield from self.server.wait_closed()
yield from self.app.shutdown()
if self._handler:
yield from self._handler.finish_connections(60.0)
yield from self.app.cleanup()
class HomeAssistantView(object):
"""Base view for all views."""
url = None
extra_urls = []
requires_auth = True # Views inheriting from this class can override this
# pylint: disable=no-self-use
def json(self, result, status_code=200):
"""Return a JSON response."""
msg = json.dumps(
result, sort_keys=True, cls=rem.JSONEncoder).encode('UTF-8')
return web.Response(
body=msg, content_type=CONTENT_TYPE_JSON, status=status_code)
def json_message(self, error, status_code=200):
"""Return a JSON message response."""
return self.json({'message': error}, status_code)
@asyncio.coroutine
# pylint: disable=no-self-use
def file(self, request, fil):
"""Return a file."""
assert isinstance(fil, str), 'only string paths allowed'
response = yield from FILE_SENDER.send(request, Path(fil))
return response
def register(self, router):
"""Register the view with a router."""
assert self.url is not None, 'No url set for view'
urls = [self.url] + self.extra_urls
for method in ('get', 'post', 'delete', 'put'):
handler = getattr(self, method, None)
if not handler:
continue
handler = request_handler_factory(self, handler)
for url in urls:
router.add_route(method, url, handler)
# aiohttp_cors does not work with class based views
# self.app.router.add_route('*', self.url, self, name=self.name)
# for url in self.extra_urls:
# self.app.router.add_route('*', url, self)
def request_handler_factory(view, handler):
"""Factory to wrap our handler classes."""
assert asyncio.iscoroutinefunction(handler) or is_callback(handler), \
"Handler should be a coroutine or a callback."
@asyncio.coroutine
def handle(request):
"""Handle incoming request."""
if not request.app['hass'].is_running:
return web.Response(status=503)
remote_addr = get_real_ip(request)
authenticated = request.get(KEY_AUTHENTICATED, False)
if view.requires_auth and not authenticated:
raise HTTPUnauthorized()
_LOGGER.info('Serving %s to %s (auth: %s)',
request.path, remote_addr, authenticated)
result = handler(request, **request.match_info)
if asyncio.iscoroutine(result):
result = yield from result
if isinstance(result, web.StreamResponse):
# The method handler returned a ready-made Response, how nice of it
return result
status_code = 200
if isinstance(result, tuple):
result, status_code = result
if isinstance(result, str):
result = result.encode('utf-8')
elif result is None:
result = b''
elif not isinstance(result, bytes):
assert False, ('Result should be None, string, bytes or Response. '
'Got: {}').format(result)
return web.Response(body=result, status=status_code)
return handle
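# --- Illustrative sketch (not part of this module) ---
# What the cache-busting pattern built in register_static_path() looks like for
# a concrete url_root. The sample path is made up; the string manipulation
# mirrors the code above and yields the pattern documented in its comment.
def _example_cache_busting_pattern(url_root='/static/dev-panel.html'):
    base, ext = url_root.rsplit('.', 1)
    base, file = base.rsplit('/', 1)
    regex = r"{}(-[a-z0-9]{{32}}|)\.{}".format(file, ext)
    # -> '/static/{filename:dev-panel(-[a-z0-9]{32}|)\.html}'
    return "{}/{{filename:{}}}".format(base, regex)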
|
|
"""
Tests for DatetimeArray
"""
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import DatetimeArray
class TestDatetimeArrayComparisons:
# TODO: merge this into tests/arithmetic/test_datetime64 once it is
# sufficiently robust
def test_cmp_dt64_arraylike_tznaive(self, comparison_op):
# arbitrary tz-naive DatetimeIndex
op = comparison_op
dti = pd.date_range("2016-01-1", freq="MS", periods=9, tz=None)
arr = DatetimeArray(dti)
assert arr.freq == dti.freq
assert arr.tz == dti.tz
right = dti
expected = np.ones(len(arr), dtype=bool)
if comparison_op.__name__ in ["ne", "gt", "lt"]:
# for these the comparisons should be all-False
expected = ~expected
result = op(arr, arr)
tm.assert_numpy_array_equal(result, expected)
for other in [right, np.array(right)]:
# TODO: add list and tuple, and object-dtype once those
# are fixed in the constructor
result = op(arr, other)
tm.assert_numpy_array_equal(result, expected)
result = op(other, arr)
tm.assert_numpy_array_equal(result, expected)
class TestDatetimeArray:
def test_astype_to_same(self):
arr = DatetimeArray._from_sequence(
["2000"], dtype=DatetimeTZDtype(tz="US/Central")
)
result = arr.astype(DatetimeTZDtype(tz="US/Central"), copy=False)
assert result is arr
@pytest.mark.parametrize("dtype", ["datetime64[ns]", "datetime64[ns, UTC]"])
@pytest.mark.parametrize(
"other", ["datetime64[ns]", "datetime64[ns, UTC]", "datetime64[ns, CET]"]
)
def test_astype_copies(self, dtype, other):
# https://github.com/pandas-dev/pandas/pull/32490
ser = pd.Series([1, 2], dtype=dtype)
orig = ser.copy()
warn = None
if (dtype == "datetime64[ns]") ^ (other == "datetime64[ns]"):
# deprecated in favor of tz_localize
warn = FutureWarning
with tm.assert_produces_warning(warn):
t = ser.astype(other)
t[:] = pd.NaT
tm.assert_series_equal(ser, orig)
@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"])
def test_astype_int(self, dtype):
arr = DatetimeArray._from_sequence([pd.Timestamp("2000"), pd.Timestamp("2001")])
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
result = arr.astype(dtype)
if np.dtype(dtype).kind == "u":
expected_dtype = np.dtype("uint64")
else:
expected_dtype = np.dtype("int64")
with tm.assert_produces_warning(FutureWarning):
# astype(int..) deprecated
expected = arr.astype(expected_dtype)
assert result.dtype == expected_dtype
tm.assert_numpy_array_equal(result, expected)
def test_tz_setter_raises(self):
arr = DatetimeArray._from_sequence(
["2000"], dtype=DatetimeTZDtype(tz="US/Central")
)
with pytest.raises(AttributeError, match="tz_localize"):
arr.tz = "UTC"
def test_setitem_str_impute_tz(self, tz_naive_fixture):
# Like for getitem, if we are passed a naive-like string, we impute
# our own timezone.
tz = tz_naive_fixture
data = np.array([1, 2, 3], dtype="M8[ns]")
dtype = data.dtype if tz is None else DatetimeTZDtype(tz=tz)
arr = DatetimeArray(data, dtype=dtype)
expected = arr.copy()
ts = pd.Timestamp("2020-09-08 16:50").tz_localize(tz)
setter = str(ts.tz_localize(None))
# Setting a scalar tznaive string
expected[0] = ts
arr[0] = setter
tm.assert_equal(arr, expected)
# Setting a listlike of tznaive strings
expected[1] = ts
arr[:2] = [setter, setter]
tm.assert_equal(arr, expected)
def test_setitem_different_tz_raises(self):
data = np.array([1, 2, 3], dtype="M8[ns]")
arr = DatetimeArray(data, copy=False, dtype=DatetimeTZDtype(tz="US/Central"))
with pytest.raises(TypeError, match="Cannot compare tz-naive and tz-aware"):
arr[0] = pd.Timestamp("2000")
ts = pd.Timestamp("2000", tz="US/Eastern")
with pytest.raises(ValueError, match="US/Central"):
with tm.assert_produces_warning(
FutureWarning, match="mismatched timezones"
):
arr[0] = ts
# once deprecation is enforced
# assert arr[0] == ts.tz_convert("US/Central")
def test_setitem_clears_freq(self):
a = DatetimeArray(pd.date_range("2000", periods=2, freq="D", tz="US/Central"))
a[0] = pd.Timestamp("2000", tz="US/Central")
assert a.freq is None
@pytest.mark.parametrize(
"obj",
[
pd.Timestamp.now(),
pd.Timestamp.now().to_datetime64(),
pd.Timestamp.now().to_pydatetime(),
],
)
def test_setitem_objects(self, obj):
# make sure we accept datetime64 and datetime in addition to Timestamp
dti = pd.date_range("2000", periods=2, freq="D")
arr = dti._data
arr[0] = obj
assert arr[0] == obj
def test_repeat_preserves_tz(self):
dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central")
arr = DatetimeArray(dti)
repeated = arr.repeat([1, 1])
# preserves tz and values, but not freq
expected = DatetimeArray(arr.asi8, freq=None, dtype=arr.dtype)
tm.assert_equal(repeated, expected)
def test_value_counts_preserves_tz(self):
dti = pd.date_range("2000", periods=2, freq="D", tz="US/Central")
arr = DatetimeArray(dti).repeat([4, 3])
result = arr.value_counts()
# Note: not tm.assert_index_equal, since `freq`s do not match
assert result.index.equals(dti)
arr[-2] = pd.NaT
result = arr.value_counts(dropna=False)
expected = pd.Series([4, 2, 1], index=[dti[0], dti[1], pd.NaT])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("method", ["pad", "backfill"])
def test_fillna_preserves_tz(self, method):
dti = pd.date_range("2000-01-01", periods=5, freq="D", tz="US/Central")
arr = DatetimeArray(dti, copy=True)
arr[2] = pd.NaT
fill_val = dti[1] if method == "pad" else dti[3]
expected = DatetimeArray._from_sequence(
[dti[0], dti[1], fill_val, dti[3], dti[4]],
dtype=DatetimeTZDtype(tz="US/Central"),
)
result = arr.fillna(method=method)
tm.assert_extension_array_equal(result, expected)
# assert that arr and dti were not modified in-place
assert arr[2] is pd.NaT
assert dti[2] == pd.Timestamp("2000-01-03", tz="US/Central")
def test_fillna_2d(self):
dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific")
dta = dti._data.reshape(3, 2).copy()
dta[0, 1] = pd.NaT
dta[1, 0] = pd.NaT
res1 = dta.fillna(method="pad")
expected1 = dta.copy()
expected1[1, 0] = dta[0, 0]
tm.assert_extension_array_equal(res1, expected1)
res2 = dta.fillna(method="backfill")
expected2 = dta.copy()
expected2[1, 0] = dta[2, 0]
expected2[0, 1] = dta[1, 1]
tm.assert_extension_array_equal(res2, expected2)
# with different ordering for underlying ndarray; behavior should
# be unchanged
dta2 = dta._from_backing_data(dta._ndarray.copy(order="F"))
assert dta2._ndarray.flags["F_CONTIGUOUS"]
assert not dta2._ndarray.flags["C_CONTIGUOUS"]
tm.assert_extension_array_equal(dta, dta2)
res3 = dta2.fillna(method="pad")
tm.assert_extension_array_equal(res3, expected1)
res4 = dta2.fillna(method="backfill")
tm.assert_extension_array_equal(res4, expected2)
# test the DataFrame method while we're here
df = pd.DataFrame(dta)
res = df.fillna(method="pad")
expected = pd.DataFrame(expected1)
tm.assert_frame_equal(res, expected)
res = df.fillna(method="backfill")
expected = pd.DataFrame(expected2)
tm.assert_frame_equal(res, expected)
def test_array_interface_tz(self):
tz = "US/Central"
data = DatetimeArray(pd.date_range("2017", periods=2, tz=tz))
result = np.asarray(data)
expected = np.array(
[
pd.Timestamp("2017-01-01T00:00:00", tz=tz),
pd.Timestamp("2017-01-02T00:00:00", tz=tz),
],
dtype=object,
)
tm.assert_numpy_array_equal(result, expected)
result = np.asarray(data, dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = np.asarray(data, dtype="M8[ns]")
expected = np.array(
["2017-01-01T06:00:00", "2017-01-02T06:00:00"], dtype="M8[ns]"
)
tm.assert_numpy_array_equal(result, expected)
def test_array_interface(self):
data = DatetimeArray(pd.date_range("2017", periods=2))
expected = np.array(
["2017-01-01T00:00:00", "2017-01-02T00:00:00"], dtype="datetime64[ns]"
)
result = np.asarray(data)
tm.assert_numpy_array_equal(result, expected)
result = np.asarray(data, dtype=object)
expected = np.array(
[pd.Timestamp("2017-01-01T00:00:00"), pd.Timestamp("2017-01-02T00:00:00")],
dtype=object,
)
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("index", [True, False])
def test_searchsorted_different_tz(self, index):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = DatetimeArray(data, freq="D").tz_localize("Asia/Tokyo")
if index:
arr = pd.Index(arr)
expected = arr.searchsorted(arr[2])
result = arr.searchsorted(arr[2].tz_convert("UTC"))
assert result == expected
expected = arr.searchsorted(arr[2:6])
result = arr.searchsorted(arr[2:6].tz_convert("UTC"))
tm.assert_equal(result, expected)
@pytest.mark.parametrize("index", [True, False])
def test_searchsorted_tzawareness_compat(self, index):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = DatetimeArray(data, freq="D")
if index:
arr = pd.Index(arr)
mismatch = arr.tz_localize("Asia/Tokyo")
msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
with pytest.raises(TypeError, match=msg):
arr.searchsorted(mismatch[0])
with pytest.raises(TypeError, match=msg):
arr.searchsorted(mismatch)
with pytest.raises(TypeError, match=msg):
mismatch.searchsorted(arr[0])
with pytest.raises(TypeError, match=msg):
mismatch.searchsorted(arr)
@pytest.mark.parametrize(
"other",
[
1,
np.int64(1),
1.0,
np.timedelta64("NaT"),
pd.Timedelta(days=2),
"invalid",
np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9,
np.arange(10).view("timedelta64[ns]") * 24 * 3600 * 10 ** 9,
pd.Timestamp.now().to_period("D"),
],
)
@pytest.mark.parametrize("index", [True, False])
def test_searchsorted_invalid_types(self, other, index):
data = np.arange(10, dtype="i8") * 24 * 3600 * 10 ** 9
arr = DatetimeArray(data, freq="D")
if index:
arr = pd.Index(arr)
msg = "|".join(
[
"searchsorted requires compatible dtype or scalar",
"value should be a 'Timestamp', 'NaT', or array of those. Got",
]
)
with pytest.raises(TypeError, match=msg):
arr.searchsorted(other)
def test_shift_fill_value(self):
dti = pd.date_range("2016-01-01", periods=3)
dta = dti._data
expected = DatetimeArray(np.roll(dta._data, 1))
fv = dta[-1]
for fill_value in [fv, fv.to_pydatetime(), fv.to_datetime64()]:
result = dta.shift(1, fill_value=fill_value)
tm.assert_datetime_array_equal(result, expected)
dta = dta.tz_localize("UTC")
expected = expected.tz_localize("UTC")
fv = dta[-1]
for fill_value in [fv, fv.to_pydatetime()]:
result = dta.shift(1, fill_value=fill_value)
tm.assert_datetime_array_equal(result, expected)
def test_shift_value_tzawareness_mismatch(self):
dti = pd.date_range("2016-01-01", periods=3)
dta = dti._data
fv = dta[-1].tz_localize("UTC")
for invalid in [fv, fv.to_pydatetime()]:
with pytest.raises(TypeError, match="Cannot compare"):
dta.shift(1, fill_value=invalid)
dta = dta.tz_localize("UTC")
fv = dta[-1].tz_localize(None)
for invalid in [fv, fv.to_pydatetime(), fv.to_datetime64()]:
with pytest.raises(TypeError, match="Cannot compare"):
dta.shift(1, fill_value=invalid)
def test_shift_requires_tzmatch(self):
# since filling is setitem-like, we require a matching timezone,
# not just matching tz-awareness
dti = pd.date_range("2016-01-01", periods=3, tz="UTC")
dta = dti._data
fill_value = pd.Timestamp("2020-10-18 18:44", tz="US/Pacific")
msg = "Timezones don't match. 'UTC' != 'US/Pacific'"
with pytest.raises(ValueError, match=msg):
with tm.assert_produces_warning(
FutureWarning, match="mismatched timezones"
):
dta.shift(1, fill_value=fill_value)
# once deprecation is enforced
# expected = dta.shift(1, fill_value=fill_value.tz_convert("UTC"))
# tm.assert_equal(result, expected)
def test_tz_localize_t2d(self):
dti = pd.date_range("1994-05-12", periods=12, tz="US/Pacific")
dta = dti._data.reshape(3, 4)
result = dta.tz_localize(None)
expected = dta.ravel().tz_localize(None).reshape(dta.shape)
tm.assert_datetime_array_equal(result, expected)
roundtrip = expected.tz_localize("US/Pacific")
tm.assert_datetime_array_equal(roundtrip, dta)
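# --- Illustrative usage sketch (not a test) ---
# The behaviour exercised in test_setitem_str_impute_tz, written as plain
# usage: a tz-naive string assigned into a tz-aware DatetimeArray is
# interpreted in the array's own timezone. The concrete values are made up,
# and this assumes the same pandas version the tests above target.
def _setitem_str_impute_tz_example():
    data = np.array([1, 2, 3], dtype="M8[ns]")
    arr = DatetimeArray(data, dtype=DatetimeTZDtype(tz="US/Central"))
    arr[0] = "2020-09-08 16:50"  # naive string: imputed as US/Central
    assert arr[0] == pd.Timestamp("2020-09-08 16:50", tz="US/Central")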
|
|
from jinja2 import Undefined
from contextlib import contextmanager
from werkzeug.local import LocalStack, LocalProxy
from lektor.reporter import reporter
from lektor.utils import make_relative_url
_ctx_stack = LocalStack()
def url_to(*args, **kwargs):
"""Calculates a URL to another record."""
ctx = get_ctx()
if ctx is None:
raise RuntimeError('No context found')
return ctx.url_to(*args, **kwargs)
def get_asset_url(asset):
"""Calculates the asset URL relative to the current record."""
ctx = get_ctx()
if ctx is None:
raise RuntimeError('No context found')
asset = site_proxy.get_asset(asset)
if asset is None:
return Undefined('Asset not found')
info = ctx.build_state.get_file_info(asset.source_filename)
return '%s?h=%s' % (
ctx.source.url_to('!' + asset.url_path),
info.checksum[:8],
)
@LocalProxy
def site_proxy():
"""Returns the current pad."""
ctx = get_ctx()
if ctx is None:
return Undefined(hint='Cannot access the site from here', name='site')
return ctx.pad
@LocalProxy
def config_proxy():
"""Returns the current config."""
return site_proxy.db.config
def get_ctx():
"""Returns the current context."""
return _ctx_stack.top
def get_locale(default='en_US'):
"""Returns the current locale."""
ctx = get_ctx()
if ctx is not None:
rv = ctx.locale
if rv is not None:
return rv
return ctx.pad.db.config.site_locale
return default
class Context(object):
"""The context is a thread local object that provides the system with
general information about in which state it is. The context is created
whenever a source is processed and can be accessed by template engine and
other things.
It's considered read and write and also accumulates changes that happen
during processing of the object.
"""
def __init__(self, artifact):
self.artifact = artifact
self.source = artifact.source_obj
self.exc_info = None
self.build_state = self.artifact.build_state
self.pad = self.build_state.pad
# Processing information
self.referenced_dependencies = set()
self.sub_artifacts = []
self.flow_block_render_stack = []
self._forced_base_url = None
# General cache system where other things can put their temporary
# stuff in.
self.cache = {}
self._dependency_collectors = []
@property
def env(self):
"""The environment of the context."""
return self.pad.db.env
@property
def record(self):
"""If the source is a record it will be available here."""
rv = self.source
if rv is not None and rv.source_classification == 'record':
return rv
@property
def locale(self):
"""Returns the current locale if it's available, otherwise `None`.
This does not fall back to the site locale.
"""
source = self.source
if source is not None:
alt_cfg = self.pad.db.config['ALTERNATIVES'].get(source.alt)
if alt_cfg:
return alt_cfg['locale']
def push(self):
_ctx_stack.push(self)
def pop(self):
_ctx_stack.pop()
def __enter__(self):
self.push()
return self
def __exit__(self, exc_type, exc_value, tb):
self.pop()
@property
def base_url(self):
"""The URL path for the current context."""
if self._forced_base_url:
return self._forced_base_url
if self.source is not None:
return self.source.url_path
return '/'
def url_to(self, path, alt=None, absolute=False, external=False):
"""Returns a URL to another path."""
if self.source is None:
raise RuntimeError('Can only generate paths to other pages if '
'the context has a source document set.')
rv = self.source.url_to(path, alt=alt, absolute=True)
if absolute:
return rv
elif external:
return self.pad.make_absolute_url(rv)
return make_relative_url(self.base_url, rv)
def sub_artifact(self, *args, **kwargs):
"""Decorator version of :func:`add_sub_artifact`."""
def decorator(f):
self.add_sub_artifact(build_func=f, *args, **kwargs)
return f
return decorator
def add_sub_artifact(self, artifact_name, build_func=None,
sources=None, source_obj=None, config_hash=None):
"""Sometimes it can happen that while building an artifact another
artifact needs building. This function is generally used to record
this request.
"""
aft = self.build_state.new_artifact(
artifact_name=artifact_name,
sources=sources,
source_obj=source_obj,
config_hash=config_hash,
)
self.sub_artifacts.append((aft, build_func))
reporter.report_sub_artifact(aft)
def record_dependency(self, filename):
"""Records a dependency from processing."""
self.referenced_dependencies.add(filename)
for coll in self._dependency_collectors:
coll(filename)
@contextmanager
def gather_dependencies(self, func):
"""For the duration of the `with` block the provided function will be
invoked for all dependencies encountered.
"""
self._dependency_collectors.append(func)
try:
yield
finally:
self._dependency_collectors.pop()
@contextmanager
def changed_base_url(self, value):
"""Temporarily overrides the URL path of the context."""
old = self._forced_base_url
self._forced_base_url = value
try:
yield
finally:
self._forced_base_url = old
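# --- Illustrative usage sketch (not part of this module) ---
# How builder code typically drives the Context above: push it via the `with`
# statement, collect record_dependency() calls while rendering, and pop it on
# exit. `artifact` must be a real build artifact and `render` is a caller
# supplied callable, so this shows the shape of the API rather than running
# standalone.
def _build_with_context(artifact, render):
    deps = []
    with Context(artifact) as ctx:                  # pushes onto _ctx_stack
        with ctx.gather_dependencies(deps.append):  # collects record_dependency() calls
            result = render(ctx)                    # templates may call get_ctx()/url_to()
    return result, deps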
|
|
"""
.. module:: predictor
:synopsis: prediction method (for un-annotated text)
.. moduleauthor:: Liyuan Liu
"""
import torch
import torch.autograd as autograd
import numpy as np
import itertools
import sys
from tqdm import tqdm
from model.crf import CRFDecode_vb
from model.utils import *
class predict:
"""Base class for prediction, provide method to calculate f1 score and accuracy
args:
if_cuda: if use cuda to speed up
l_map: dictionary for labels
label_seq: type of decode function; set `True` to couple labels with the text, or `False` to insert labels into the text
batch_size: size of batch in decoding
"""
def __init__(self, if_cuda, l_map, label_seq = True, batch_size = 50):
self.if_cuda = if_cuda
self.l_map = l_map
self.r_l_map = revlut(l_map)
self.batch_size = batch_size
if label_seq:
self.decode_str = self.decode_l
else:
self.decode_str = self.decode_s
def decode_l(self, feature, label):
"""
decode a sentence coupled with label
args:
feature (list): words list
label (list): label list
"""
return '\n'.join(map(lambda t: t[0] + ' '+ self.r_l_map[t[1].item()], zip(feature, label)))
def decode_s(self, feature, label):
"""
decode a sentence into inline chunk markup (e.g. <PER> ... </PER>)
args:
feature (list): words list
label (list): label list
"""
chunks = ""
current = None
for f, y in zip(feature, label):
label = self.r_l_map[y.item()]
if label.startswith('B-'):
if current is not None:
chunks += "</"+current+"> "
current = label[2:]
chunks += "<"+current+"> " + f + " "
elif label.startswith('S-'):
if current is not None:
chunks += " </"+current+"> "
current = label[2:]
chunks += "<"+current+"> " + f + " </"+current+"> "
current = None
elif label.startswith('I-'):
if current is not None:
base = label[2:]
if base == current:
chunks += f+" "
else:
chunks += "</"+current+"> <"+base+"> " + f + " "
current = base
else:
current = label[2:]
chunks += "<"+current+"> " + f + " "
elif label.startswith('E-'):
if current is not None:
base = label[2:]
if base == current:
chunks += f + " </"+base+"> "
current = None
else:
chunks += "</"+current+"> <"+base+"> " + f + " </"+base+"> "
current = None
else:
current = label[2:]
chunks += "<"+current+"> " + f + " </"+current+"> "
current = None
else:
if current is not None:
chunks += "</"+current+"> "
chunks += f+" "
current = None
if current is not None:
chunks += "</"+current+"> "
return chunks
def output_batch(self, ner_model, documents, fout):
"""
decode the whole corpus in the chosen format; delegates to apply_model so subclasses can plug in their specific models
args:
ner_model: sequence labeling model
documents (list): list of documents, each a list of sentences (word lists)
fout: output file
"""
ner_model.eval()
d_len = len(documents)
for d_ind in tqdm( range(0, d_len), mininterval=1,
desc=' - Process', leave=False, file=sys.stdout):
fout.write('-DOCSTART- -DOCSTART- -DOCSTART-\n\n')
features = documents[d_ind]
f_len = len(features)
for ind in range(0, f_len, self.batch_size):
eind = min(f_len, ind + self.batch_size)
labels = self.apply_model(ner_model, features[ind: eind])
labels = torch.unbind(labels, 1)
for ind2 in range(ind, eind):
f = features[ind2]
l = labels[ind2 - ind][0: len(f) ]
fout.write(self.decode_str(features[ind2], l) + '\n\n')
def apply_model(self, ner_model, features):
"""
template function for apply_model
args:
ner_model: sequence labeling model
feature (list): list of words list
"""
return None
class predict_w(predict):
"""prediction class for word level model (LSTM-CRF)
args:
if_cuda: if use cuda to speed up
f_map: dictionary for words
l_map: dictionary for labels
pad_word: word padding
pad_label: label padding
start_label: start label
label_seq: type of decode function, set `True` to couple label with text, or set 'False' to insert label into test
batch_size: size of batch in decoding
caseless: caseless or not
"""
def __init__(self, if_cuda, f_map, l_map, pad_word, pad_label, start_label, label_seq = True, batch_size = 50, caseless=True):
predict.__init__(self, if_cuda, l_map, label_seq, batch_size)
self.decoder = CRFDecode_vb(len(l_map), start_label, pad_label)
self.pad_word = pad_word
self.f_map = f_map
self.l_map = l_map
self.caseless = caseless
def apply_model(self, ner_model, features):
"""
apply_model function for LSTM-CRF
args:
ner_model: sequence labeling model
feature (list): list of words list
"""
if self.caseless:
features = list(map(lambda t: list(map(lambda x: x.lower(), t)), features))
features = encode_safe(features, self.f_map, self.f_map['<unk>'])
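        # Pad every sentence to the longest length in the batch (+1 extra
        # position appended at the end) and build a byte mask that marks the
        # real tokens plus that final position.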
f_len = max(map(lambda t: len(t) + 1, features))
masks = torch.ByteTensor(list(map(lambda t: [1] * (len(t) + 1) + [0] * (f_len - len(t) - 1), features)))
word_features = torch.LongTensor(list(map(lambda t: t + [self.pad_word] * (f_len - len(t)), features)))
if self.if_cuda:
fea_v = autograd.Variable(word_features.transpose(0, 1)).cuda()
mask_v = masks.transpose(0, 1).cuda()
else:
fea_v = autograd.Variable(word_features.transpose(0, 1))
mask_v = masks.transpose(0, 1).contiguous()
scores, _ = ner_model(fea_v)
decoded = self.decoder.decode(scores.data, mask_v)
return decoded
class predict_wc(predict):
"""prediction class for LM-LSTM-CRF
args:
if_cuda: if use cuda to speed up
f_map: dictionary for words
c_map: dictionary for chars
l_map: dictionary for labels
pad_word: word padding
pad_char: word padding
pad_label: label padding
start_label: start label
label_seq: type of decode function, set `True` to couple label with text, or set 'False' to insert label into test
batch_size: size of batch in decoding
caseless: caseless or not
"""
def __init__(self, if_cuda, f_map, c_map, l_map, pad_word, pad_char, pad_label, start_label, label_seq = True, batch_size = 50, caseless=True):
predict.__init__(self, if_cuda, l_map, label_seq, batch_size)
self.decoder = CRFDecode_vb(len(l_map), start_label, pad_label)
self.pad_word = pad_word
self.pad_char = pad_char
self.f_map = f_map
self.c_map = c_map
self.l_map = l_map
self.caseless = caseless
def apply_model(self, ner_model, features):
"""
apply_model function for LM-LSTM-CRF
args:
ner_model: sequence labeling model
feature (list): list of words list
"""
char_features = encode2char_safe(features, self.c_map)
if self.caseless:
word_features = encode_safe(list(map(lambda t: list(map(lambda x: x.lower(), t)), features)), self.f_map, self.f_map['<unk>'])
else:
word_features = encode_safe(features, self.f_map, self.f_map['<unk>'])
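        # Build the character-level inputs: one forward character stream per
        # sentence, its reverse for the backward language model, and the
        # cumulative positions marking where each word ends in those streams.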
fea_len = [list( map( lambda t: len(t) + 1, f) ) for f in char_features]
forw_features = concatChar(char_features, self.c_map)
word_len = max(map(lambda t: len(t) + 1, word_features))
char_len = max(map(lambda t: len(t[0]) + word_len - len(t[1]), zip(forw_features, word_features)))
forw_t = list( map( lambda t: t + [self.pad_char] * ( char_len - len(t) ), forw_features ) )
back_t = torch.LongTensor( list( map( lambda t: t[::-1], forw_t ) ) )
forw_t = torch.LongTensor( forw_t )
forw_p = torch.LongTensor( list( map( lambda t: list(itertools.accumulate( t + [1] * (word_len - len(t) ) ) ), fea_len) ) )
back_p = torch.LongTensor( list( map( lambda t: [char_len - 1] + [ char_len - 1 - tup for tup in t[:-1] ], forw_p) ) )
masks = torch.ByteTensor(list(map(lambda t: [1] * (len(t) + 1) + [0] * (word_len - len(t) - 1), word_features)))
word_t = torch.LongTensor(list(map(lambda t: t + [self.pad_word] * (word_len - len(t)), word_features)))
if self.if_cuda:
f_f = autograd.Variable(forw_t.transpose(0, 1)).cuda()
f_p = autograd.Variable(forw_p.transpose(0, 1)).cuda()
b_f = autograd.Variable(back_t.transpose(0, 1)).cuda()
b_p = autograd.Variable(back_p.transpose(0, 1)).cuda()
w_f = autograd.Variable(word_t.transpose(0, 1)).cuda()
mask_v = masks.transpose(0, 1).cuda()
else:
f_f = autograd.Variable(forw_t.transpose(0, 1))
f_p = autograd.Variable(forw_p.transpose(0, 1))
b_f = autograd.Variable(back_t.transpose(0, 1))
b_p = autograd.Variable(back_p.transpose(0, 1))
w_f = autograd.Variable(word_t.transpose(0, 1))
mask_v = masks.transpose(0, 1)
scores = ner_model(f_f, f_p, b_f, b_p, w_f)
decoded = self.decoder.decode(scores.data, mask_v)
return decoded
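# Hypothetical usage sketch (not part of the original module): it assumes a
# trained LM-LSTM-CRF model plus the f_map / c_map / l_map dictionaries built
# during training (e.g. restored from a checkpoint), and that those maps
# contain the '<eof>', '\n', '<pad>' and '<start>' entries used elsewhere in
# this code base.
def demo_predict_wc(ner_model, f_map, c_map, l_map, documents, out_path,
                    if_cuda=False):
    """Decode `documents` (a list of documents, each a list of word lists)
    with a LM-LSTM-CRF model and write the labeled output to `out_path`."""
    decoder = predict_wc(if_cuda, f_map, c_map, l_map,
                         f_map['<eof>'], c_map['\n'], l_map['<pad>'],
                         l_map['<start>'], label_seq=True, batch_size=50)
    with open(out_path, 'w') as fout:
        decoder.output_batch(ner_model, documents, fout)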
|
|
# election/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import election_remote_retrieve, elections_import_from_master_server, elections_sync_out_list_for_api
from .models import Election
from .serializers import ElectionSerializer
from admin_tools.views import redirect_to_sign_in_page
from ballot.models import BallotReturnedListManager
from candidate.models import CandidateCampaignListManager
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.shortcuts import render
from election.models import ElectionManager
from exception.models import handle_record_found_more_than_one_exception, handle_record_not_found_exception, \
handle_record_not_saved_exception
from import_export_google_civic.controllers import retrieve_one_ballot_from_google_civic_api, \
store_one_ballot_from_google_civic_api
import json
from office.models import ContestOfficeListManager
from polling_location.models import PollingLocation
from position.models import PositionListManager
from rest_framework.views import APIView
from rest_framework.response import Response
from voter.models import voter_has_authority
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, get_voter_device_id, positive_value_exists
from wevote_settings.models import fetch_next_we_vote_election_id_integer
logger = wevote_functions.admin.get_logger(__name__)
@login_required
def election_all_ballots_retrieve_view(request, election_local_id=0):
"""
Reach out to Google and retrieve (for one election):
1) Polling locations (so we can use those addresses to retrieve a representative set of ballots)
    2) Cycle through those polling locations and cache the ballot items returned for each of them
:param request:
:return:
"""
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
try:
if positive_value_exists(election_local_id):
election_on_stage = Election.objects.get(id=election_local_id)
else:
election_on_stage = Election.objects.get(google_civic_election_id=google_civic_election_id)
election_local_id = election_on_stage.id
except Election.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
messages.add_message(request, messages.ERROR, 'Could not retrieve ballot data. More than one election found.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
except Election.DoesNotExist:
messages.add_message(request, messages.ERROR, 'Could not retrieve ballot data. Election could not be found.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
# Check to see if we have polling location data related to the region(s) covered by this election
# We request the ballot data for each polling location as a way to build up our local data
state = election_on_stage.get_election_state()
try:
polling_location_count_query = PollingLocation.objects.all()
polling_location_count_query = polling_location_count_query.filter(state__iexact=state)
polling_location_count = polling_location_count_query.count()
polling_location_list = PollingLocation.objects.all()
polling_location_list = polling_location_list.filter(state__iexact=state)
# We used to have a limit of 500 ballots to pull per election, but now retrieve all
# Ordering by "location_name" creates a bit of (locational) random order
polling_location_list = polling_location_list.order_by('location_name') # [:500]
except PollingLocation.DoesNotExist:
messages.add_message(request, messages.INFO,
'Could not retrieve ballot data for the {election_name}. '
'No polling locations exist for the state \'{state}\'. '
'Data needed from VIP.'.format(
election_name=election_on_stage.election_name,
state=state))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
if polling_location_count == 0:
messages.add_message(request, messages.ERROR,
'Could not retrieve ballot data for the {election_name}. '
'No polling locations returned for the state \'{state}\'. (error 2)'.format(
election_name=election_on_stage.election_name,
state=state))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
ballots_retrieved = 0
ballots_not_retrieved = 0
ballots_with_contests_retrieved = 0
# We used to only retrieve up to 500 locations from each state, but we don't limit now
# # We retrieve 10% of the total polling locations, which should give us coverage of the entire election
# number_of_polling_locations_to_retrieve = int(.1 * polling_location_count)
for polling_location in polling_location_list:
success = False
# Get the address for this polling place, and then retrieve the ballot from Google Civic API
text_for_map_search = polling_location.get_text_for_map_search()
one_ballot_results = retrieve_one_ballot_from_google_civic_api(
text_for_map_search, election_on_stage.google_civic_election_id)
if one_ballot_results['success']:
one_ballot_json = one_ballot_results['structured_json']
store_one_ballot_results = store_one_ballot_from_google_civic_api(one_ballot_json, 0,
polling_location.we_vote_id)
if store_one_ballot_results['success']:
success = True
if success:
ballots_retrieved += 1
else:
ballots_not_retrieved += 1
if one_ballot_results['contests_retrieved']:
ballots_with_contests_retrieved += 1
# We used to only retrieve up to 500 locations from each state, but we don't limit now
# # Break out of this loop, assuming we have a minimum number of ballots with contests retrieved
# # If we don't achieve the minimum number of ballots_with_contests_retrieved, break out at the emergency level
# emergency = (ballots_retrieved + ballots_not_retrieved) >= (3 * number_of_polling_locations_to_retrieve)
# if ((ballots_retrieved + ballots_not_retrieved) >= number_of_polling_locations_to_retrieve and
# ballots_with_contests_retrieved > 20) or emergency:
# break
if ballots_retrieved > 0:
total_retrieved = ballots_retrieved + ballots_not_retrieved
messages.add_message(request, messages.INFO,
'Ballot data retrieved from Google Civic for the {election_name}. '
'(ballots retrieved: {ballots_retrieved} '
'(with contests: {ballots_with_contests_retrieved}), '
'not retrieved: {ballots_not_retrieved}, '
'total: {total})'.format(
ballots_retrieved=ballots_retrieved,
ballots_not_retrieved=ballots_not_retrieved,
ballots_with_contests_retrieved=ballots_with_contests_retrieved,
election_name=election_on_stage.election_name,
total=total_retrieved))
else:
messages.add_message(request, messages.ERROR,
'Ballot data NOT retrieved from Google Civic for the {election_name}.'
' (not retrieved: {ballots_not_retrieved})'.format(
ballots_not_retrieved=ballots_not_retrieved,
election_name=election_on_stage.election_name))
return HttpResponseRedirect(reverse('election:election_summary', args=(election_local_id,)))
@login_required
def election_edit_view(request, election_local_id):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
election_local_id = convert_to_int(election_local_id)
election_on_stage_found = False
election_on_stage = Election()
if positive_value_exists(election_local_id):
try:
election_on_stage = Election.objects.get(id=election_local_id)
election_on_stage_found = True
except Election.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except Election.DoesNotExist:
# This is fine, create new
pass
else:
        # If here, we are creating a new election
pass
if election_on_stage_found:
template_values = {
'messages_on_stage': messages_on_stage,
'election': election_on_stage,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
}
return render(request, "election/election_edit.html", template_values)
@login_required()
def election_edit_process_view(request):
"""
Process the new or edit election forms
:param request:
:return:
"""
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
election_local_id = convert_to_int(request.POST.get('election_local_id', 0))
election_name = request.POST.get('election_name', False)
election_day_text = request.POST.get('election_day_text', False)
state_code = request.POST.get('state_code', False)
election_on_stage = Election()
election_changed = False
# Check to see if this election is already being used anywhere
election_on_stage_found = False
try:
election_query = Election.objects.filter(id=election_local_id)
if len(election_query):
election_on_stage = election_query[0]
election_on_stage_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
try:
if election_on_stage_found:
if convert_to_int(election_on_stage.google_civic_election_id) < 1000000:
# If here, this is an election created by Google Civic and we limit what fields to update
# Update
if state_code is not False:
election_on_stage.state_code = state_code
election_changed = True
if election_changed:
election_on_stage.save()
messages.add_message(request, messages.INFO, 'Google Civic-created election updated.')
else:
# If here, this is a We Vote created election
# Update
if election_name is not False:
election_on_stage.election_name = election_name
election_changed = True
if election_day_text is not False:
election_on_stage.election_day_text = election_day_text
election_changed = True
if state_code is not False:
election_on_stage.state_code = state_code
election_changed = True
if election_changed:
election_on_stage.save()
messages.add_message(request, messages.INFO, 'We Vote-created election updated.')
else:
# Create new
next_local_election_id_integer = fetch_next_we_vote_election_id_integer()
election_on_stage = Election(
google_civic_election_id=next_local_election_id_integer,
election_name=election_name,
election_day_text=election_day_text,
state_code=state_code,
)
election_on_stage.save()
messages.add_message(request, messages.INFO, 'New election saved.')
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
messages.add_message(request, messages.ERROR, 'Could not save election.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
@login_required()
def election_list_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
election_list_query = Election.objects.all()
election_list_query = election_list_query.order_by('election_day_text').reverse()
election_list = election_list_query
template_values = {
'messages_on_stage': messages_on_stage,
'election_list': election_list,
}
return render(request, 'election/election_list.html', template_values)
@login_required()
def election_remote_retrieve_view(request):
"""
Reach out to Google and retrieve the latest list of available elections
:param request:
:return:
"""
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
results = election_remote_retrieve()
if not results['success']:
messages.add_message(request, messages.INFO, results['status'])
else:
messages.add_message(request, messages.INFO, 'Upcoming elections retrieved from Google Civic.')
return HttpResponseRedirect(reverse('election:election_list', args=()))
@login_required()
def election_summary_view(request, election_local_id):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
election_local_id = convert_to_int(election_local_id)
election_on_stage_found = False
election_on_stage = Election()
try:
election_on_stage = Election.objects.get(id=election_local_id)
election_on_stage_found = True
except Election.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except Election.DoesNotExist:
# This is fine, proceed anyways
pass
if election_on_stage_found:
ballot_returned_list_manager = BallotReturnedListManager()
ballot_returned_list_results = ballot_returned_list_manager.retrieve_ballot_returned_list_for_election(
election_on_stage.google_civic_election_id)
if ballot_returned_list_results['success']:
ballot_returned_list = ballot_returned_list_results['ballot_returned_list']
else:
ballot_returned_list = []
template_values = {
'messages_on_stage': messages_on_stage,
'election': election_on_stage,
'ballot_returned_list': ballot_returned_list,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
}
return render(request, 'election/election_summary.html', template_values)
# TODO Which of these two do we standardize on?
class ElectionsSyncOutView(APIView):
"""
Export raw voter data to JSON format
"""
def get(self, request): # Removed: , format=None
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
results = elections_sync_out_list_for_api(voter_device_id)
if 'success' not in results:
json_data = results['json_data']
return HttpResponse(json.dumps(json_data), content_type='application/json')
elif not results['success']:
json_data = results['json_data']
return HttpResponse(json.dumps(json_data), content_type='application/json')
else:
election_list = results['election_list']
serializer = ElectionSerializer(election_list, many=True)
return Response(serializer.data)
# This page does not need to be protected.
class ExportElectionDataView(APIView):
def get(self, request, format=None):
election_list = Election.objects.all()
serializer = ElectionSerializer(election_list, many=True)
return Response(serializer.data)
@login_required
def elections_import_from_master_server_view(request):
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
results = elections_import_from_master_server()
if not results['success']:
messages.add_message(request, messages.ERROR, results['status'])
else:
messages.add_message(request, messages.INFO, 'Elections import completed. '
'Saved: {saved}, Updated: {updated}, '
'Not processed: {not_processed}'
''.format(saved=results['saved'],
updated=results['updated'],
not_processed=results['not_processed']))
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
@login_required()
def election_migration_view(request):
authority_required = {'admin'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
election_manager = ElectionManager()
we_vote_election = Election()
office_list_manager = ContestOfficeListManager()
candidate_list_manager = CandidateCampaignListManager()
position_list_manager = PositionListManager()
we_vote_election_office_list = []
google_civic_election_office_list = []
results = election_manager.retrieve_we_vote_elections()
we_vote_election_list = results['election_list']
state_code_list = []
for election in we_vote_election_list:
if election.state_code not in state_code_list:
state_code_list.append(election.state_code)
google_civic_election = Election()
results = election_manager.retrieve_google_civic_elections_in_state_list(state_code_list)
google_civic_election_list = results['election_list']
we_vote_election_id = convert_to_int(request.GET.get('we_vote_election_id', 0))
if not positive_value_exists(we_vote_election_id):
we_vote_election_id = convert_to_int(request.POST.get('we_vote_election_id', 0))
if positive_value_exists(we_vote_election_id):
results = election_manager.retrieve_election(we_vote_election_id)
if results['election_found']:
we_vote_election = results['election']
return_list_of_objects = True
results = office_list_manager.retrieve_all_offices_for_upcoming_election(we_vote_election_id,
return_list_of_objects)
if results['office_list_found']:
we_vote_election_office_list = results['office_list_objects']
# Go through each office and attach a list of candidates under this office
we_vote_election_office_list_new = []
for one_office in we_vote_election_office_list:
candidate_results = candidate_list_manager.retrieve_all_candidates_for_office(0, one_office.we_vote_id)
if candidate_results['candidate_list_found']:
candidate_list = candidate_results['candidate_list']
new_candidate_list = []
# Go through candidate_list and find the number of positions saved for each candidate
for candidate in candidate_list:
retrieve_public_positions = True # The alternate is positions for friends-only
position_list = position_list_manager.retrieve_all_positions_for_candidate_campaign(
retrieve_public_positions, 0, candidate.we_vote_id)
candidate.position_count = len(position_list) # This is wasteful (instead of using count), but ok
# Now find the candidates from the Google Civic Election that we might want to transfer data to
new_candidate_list.append(candidate)
one_office.candidate_list = new_candidate_list
else:
one_office.candidate_list = []
we_vote_election_office_list_new.append(one_office)
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
if not positive_value_exists(google_civic_election_id):
google_civic_election_id = convert_to_int(request.POST.get('google_civic_election_id', 0))
if positive_value_exists(google_civic_election_id):
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
google_civic_election = results['election']
return_list_of_objects = True
results = office_list_manager.retrieve_all_offices_for_upcoming_election(google_civic_election_id,
return_list_of_objects)
if results['office_list_found']:
google_civic_election_office_list = results['office_list_objects']
    # We want to transfer data between matching offices: each We Vote election office we_vote_id
    # maps to its Google Civic election counterpart
transfer_array = {}
transfer_array['wv01off1461'] = "wv02off269"
template_values = {
'messages_on_stage': messages_on_stage,
'we_vote_election': we_vote_election,
'we_vote_election_id': we_vote_election_id,
'we_vote_election_list': we_vote_election_list,
'we_vote_election_office_list': we_vote_election_office_list_new,
'google_civic_election': google_civic_election,
'google_civic_election_id': google_civic_election_id,
'google_civic_election_list': google_civic_election_list,
'google_civic_election_office_list': google_civic_election_office_list,
}
return render(request, 'election/election_migration.html', template_values)
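# Hypothetical wiring sketch (not part of this file): the reverse() calls in
# the views above assume URL names registered under an "election" namespace,
# roughly like the election/urls.py below; the exact regexes are assumptions.
#
#     from django.conf.urls import url
#     from election import views_admin
#
#     urlpatterns = [
#         url(r'^$', views_admin.election_list_view, name='election_list'),
#         url(r'^(?P<election_local_id>[0-9]+)/summary/$',
#             views_admin.election_summary_view, name='election_summary'),
#     ]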
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.ops.attention_wrapper."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import collections
import functools
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.contrib.seq2seq.python.ops import attention_wrapper as wrapper
from tensorflow.contrib.seq2seq.python.ops import helper as helper_py
from tensorflow.contrib.seq2seq.python.ops import basic_decoder
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import variables
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import test
from tensorflow.python.util import nest
# pylint: enable=g-import-not-at-top
# for testing
AttentionWrapperState = wrapper.AttentionWrapperState # pylint: disable=invalid-name
LSTMStateTuple = rnn_cell.LSTMStateTuple # pylint: disable=invalid-name
BasicDecoderOutput = basic_decoder.BasicDecoderOutput # pylint: disable=invalid-name
float32 = np.float32
int32 = np.int32
array = np.array
dtype = np.dtype
class ResultSummary(
collections.namedtuple('ResultSummary', ('shape', 'dtype', 'mean'))):
pass
def get_result_summary(x):
if isinstance(x, np.ndarray):
return ResultSummary(x.shape, x.dtype, x.mean())
return x
class AttentionWrapperTest(test.TestCase):
def assertAllCloseOrEqual(self, x, y, **kwargs):
if isinstance(x, np.ndarray) or isinstance(x, float):
return super(AttentionWrapperTest, self).assertAllClose(
x, y, atol=1e-4, **kwargs)
else:
self.assertAllEqual(x, y, **kwargs)
def testAttentionWrapperState(self):
num_fields = len(wrapper.AttentionWrapperState._fields) # pylint: disable=protected-access
state = wrapper.AttentionWrapperState(*([None] * num_fields))
new_state = state.clone(time=1)
self.assertEqual(state.time, None)
self.assertEqual(new_state.time, 1)
def _testWithAttention(self,
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=3,
alignment_history=False,
expected_final_alignment_history=None,
attention_layer_size=6,
name=''):
self._testWithMaybeMultiAttention(
is_multi=False,
create_attention_mechanisms=[create_attention_mechanism],
expected_final_output=expected_final_output,
expected_final_state=expected_final_state,
attention_mechanism_depths=[attention_mechanism_depth],
alignment_history=alignment_history,
expected_final_alignment_history=expected_final_alignment_history,
attention_layer_sizes=[attention_layer_size],
name=name)
def _testWithMaybeMultiAttention(self,
is_multi,
create_attention_mechanisms,
expected_final_output,
expected_final_state,
attention_mechanism_depths,
alignment_history=False,
expected_final_alignment_history=None,
attention_layer_sizes=None,
name=''):
# Allow is_multi to be True with a single mechanism to enable test for
# passing in a single mechanism in a list.
assert len(create_attention_mechanisms) == 1 or is_multi
encoder_sequence_length = [3, 2, 3, 1, 1]
decoder_sequence_length = [2, 0, 1, 2, 3]
batch_size = 5
encoder_max_time = 8
decoder_max_time = 4
input_depth = 7
encoder_output_depth = 10
cell_depth = 9
if attention_layer_sizes is None:
attention_depth = encoder_output_depth * len(create_attention_mechanisms)
else:
# Compute sum of attention_layer_sizes. Use encoder_output_depth if None.
attention_depth = sum([attention_layer_size or encoder_output_depth
for attention_layer_size in attention_layer_sizes])
decoder_inputs = array_ops.placeholder_with_default(
np.random.randn(batch_size, decoder_max_time,
input_depth).astype(np.float32),
shape=(None, None, input_depth))
encoder_outputs = array_ops.placeholder_with_default(
np.random.randn(batch_size, encoder_max_time,
encoder_output_depth).astype(np.float32),
shape=(None, None, encoder_output_depth))
attention_mechanisms = [
creator(num_units=depth,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length)
for creator, depth in zip(create_attention_mechanisms,
attention_mechanism_depths)]
with self.test_session(use_gpu=True) as sess:
with vs.variable_scope(
'root',
initializer=init_ops.random_normal_initializer(stddev=0.01, seed=3)):
cell = rnn_cell.LSTMCell(cell_depth)
cell = wrapper.AttentionWrapper(
cell,
attention_mechanisms if is_multi else attention_mechanisms[0],
attention_layer_size=(attention_layer_sizes if is_multi
else attention_layer_sizes[0]),
alignment_history=alignment_history)
helper = helper_py.TrainingHelper(decoder_inputs,
decoder_sequence_length)
my_decoder = basic_decoder.BasicDecoder(
cell=cell,
helper=helper,
initial_state=cell.zero_state(
dtype=dtypes.float32, batch_size=batch_size))
final_outputs, final_state, _ = decoder.dynamic_decode(my_decoder)
self.assertTrue(
isinstance(final_outputs, basic_decoder.BasicDecoderOutput))
self.assertTrue(
isinstance(final_state, wrapper.AttentionWrapperState))
self.assertTrue(
isinstance(final_state.cell_state, rnn_cell.LSTMStateTuple))
self.assertEqual((batch_size, None, attention_depth),
tuple(final_outputs.rnn_output.get_shape().as_list()))
self.assertEqual((batch_size, None),
tuple(final_outputs.sample_id.get_shape().as_list()))
self.assertEqual((batch_size, attention_depth),
tuple(final_state.attention.get_shape().as_list()))
self.assertEqual((batch_size, cell_depth),
tuple(final_state.cell_state.c.get_shape().as_list()))
self.assertEqual((batch_size, cell_depth),
tuple(final_state.cell_state.h.get_shape().as_list()))
if alignment_history:
if is_multi:
state_alignment_history = []
for history_array in final_state.alignment_history:
history = history_array.stack()
self.assertEqual(
(None, batch_size, None),
tuple(history.get_shape().as_list()))
state_alignment_history.append(history)
state_alignment_history = tuple(state_alignment_history)
else:
state_alignment_history = final_state.alignment_history.stack()
self.assertEqual(
(None, batch_size, None),
tuple(state_alignment_history.get_shape().as_list()))
# Remove the history from final_state for purposes of the
# remainder of the tests.
final_state = final_state._replace(alignment_history=()) # pylint: disable=protected-access
else:
state_alignment_history = ()
sess.run(variables.global_variables_initializer())
sess_results = sess.run({
'final_outputs': final_outputs,
'final_state': final_state,
'state_alignment_history': state_alignment_history,
})
final_output_info = nest.map_structure(get_result_summary,
sess_results['final_outputs'])
final_state_info = nest.map_structure(get_result_summary,
sess_results['final_state'])
print(name)
print('Copy/paste:\nexpected_final_output = %s' % str(final_output_info))
print('expected_final_state = %s' % str(final_state_info))
nest.map_structure(self.assertAllCloseOrEqual, expected_final_output,
final_output_info)
nest.map_structure(self.assertAllCloseOrEqual, expected_final_state,
final_state_info)
if alignment_history: # by default, the wrapper emits attention as output
final_alignment_history_info = nest.map_structure(
get_result_summary, sess_results['state_alignment_history'])
print('expected_final_alignment_history = %s' %
str(final_alignment_history_info))
nest.map_structure(
self.assertAllCloseOrEqual,
# outputs are batch major but the stacked TensorArray is time major
expected_final_alignment_history,
final_alignment_history_info)
def testBahdanauNotNormalized(self):
create_attention_mechanism = wrapper.BahdanauAttention
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0052250605),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040092287),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0020015112)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.0052052638),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.12500001)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testBahdanauNotNormalized')
def testBahdanauNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.BahdanauAttention, normalize=True)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.00597103),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040052128),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019996136)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.00595117),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
name='testBahdanauNormalized')
def testLuongNotNormalized(self):
create_attention_mechanism = wrapper.LuongAttention
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0052615386),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4666666666666666))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.004009536),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0020016613)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.0051812846),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
name='testLuongNotNormalized')
def testLuongScaled(self):
create_attention_mechanism = functools.partial(
wrapper.LuongAttention, scale=True)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0052615386),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.4666666666666666))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.004009536),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0020016613)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.0051812846),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
name='testLuongScaled')
def testNotUseAttentionLayer(self):
create_attention_mechanism = wrapper.BahdanauAttention
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 10), dtype=dtype('float32'), mean=0.117389656),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=4.5999999999999996))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0063607907),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.00323448)),
attention=ResultSummary(
shape=(5, 10), dtype=dtype('float32'), mean=0.117389656,),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.125),
alignment_history=())
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_layer_size=None,
name='testNotUseAttentionLayer')
def test_safe_cumprod(self):
# Create some random test input
test_input = np.random.uniform(size=(10, 20))
for axis in [0, 1]:
for exclusive in [True, False]:
with self.test_session():
# Compute cumprod with regular tf.cumprod
cumprod_output = math_ops.cumprod(
test_input, axis=axis, exclusive=exclusive).eval()
# Compute cumprod with safe_cumprod
safe_cumprod_output = wrapper.safe_cumprod(
test_input, axis=axis, exclusive=exclusive).eval()
for x, y in zip(cumprod_output.shape, safe_cumprod_output.shape):
self.assertEqual(x, y)
for x, y in zip(cumprod_output.flatten(),
safe_cumprod_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
def test_monotonic_attention(self):
def monotonic_attention_explicit(p_choose_i, previous_attention):
"""Explicitly compute monotonic attention distribution using numpy."""
# Base case for recurrence relation
out = [previous_attention[0]]
# Explicitly follow the recurrence relation
for j in range(1, p_choose_i.shape[0]):
out.append((1 - p_choose_i[j - 1])*out[j - 1] + previous_attention[j])
return p_choose_i*np.array(out)
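    # The recurrence implemented above is:
    #   q[0] = a[0],  q[j] = (1 - p[j-1]) * q[j-1] + a[j],  attention[j] = p[j] * q[j]
    # where p = p_choose_i and a = previous_attention.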
# Generate a random batch of choosing probabilities for seq. len. 20
p_choose_i = np.random.uniform(size=(10, 20)).astype(np.float32)
# Generate random previous attention distributions
previous_attention = np.random.uniform(size=(10, 20)).astype(np.float32)
previous_attention /= previous_attention.sum(axis=1).reshape((-1, 1))
# Create the output to test against
explicit_output = np.array([
monotonic_attention_explicit(p, a)
for p, a in zip(p_choose_i, previous_attention)])
# Compute output with TensorFlow function, for both calculation types
with self.test_session():
recursive_output = wrapper.monotonic_attention(
p_choose_i, previous_attention, 'recursive').eval()
self.assertEqual(recursive_output.ndim, explicit_output.ndim)
for x, y in zip(recursive_output.shape, explicit_output.shape):
self.assertEqual(x, y)
for x, y in zip(recursive_output.flatten(), explicit_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
# Generate new p_choose_i for parallel, which is unstable when p_choose_i[n]
# is close to 1
p_choose_i = np.random.uniform(0, 0.9, size=(10, 20)).astype(np.float32)
# Create new output to test against
explicit_output = np.array([
monotonic_attention_explicit(p, a)
for p, a in zip(p_choose_i, previous_attention)])
# Compute output with TensorFlow function, for both calculation types
with self.test_session():
parallel_output = wrapper.monotonic_attention(
p_choose_i, previous_attention, 'parallel').eval()
self.assertEqual(parallel_output.ndim, explicit_output.ndim)
for x, y in zip(parallel_output.shape, explicit_output.shape):
self.assertEqual(x, y)
for x, y in zip(parallel_output.flatten(), explicit_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
# Now, test hard mode, where probabilities must be 0 or 1
p_choose_i = np.random.choice(np.array([0, 1], np.float32), (10, 20))
previous_attention = np.zeros((10, 20), np.float32)
# Randomly choose input sequence indices at each timestep
random_idx = np.random.randint(0, previous_attention.shape[1],
previous_attention.shape[0])
previous_attention[np.arange(previous_attention.shape[0]), random_idx] = 1
# Create the output to test against
explicit_output = np.array([
monotonic_attention_explicit(p, a)
for p, a in zip(p_choose_i, previous_attention)])
# Compute output with TensorFlow function, for both calculation types
with self.test_session():
hard_output = wrapper.monotonic_attention(
# TensorFlow is unhappy when these are not wrapped as tf.constant
constant_op.constant(p_choose_i),
constant_op.constant(previous_attention),
'hard').eval()
self.assertEqual(hard_output.ndim, explicit_output.ndim)
for x, y in zip(hard_output.shape, explicit_output.shape):
self.assertEqual(x, y)
for x, y in zip(hard_output.flatten(), explicit_output.flatten()):
# Use assertAlmostEqual for the actual values due to floating point
self.assertAlmostEqual(x, y, places=5)
# Now, test recursively computing attention distributions vs. sampling
def sample(p_choose_i):
"""Generate a sequence of emit-ingest decisions from p_choose_i."""
output = np.zeros(p_choose_i.shape)
t_im1 = 0
for i in range(p_choose_i.shape[0]):
for j in range(t_im1, p_choose_i.shape[1]):
if np.random.uniform() <= p_choose_i[i, j]:
output[i, j] = 1
t_im1 = j
break
else:
t_im1 = p_choose_i.shape[1]
return output
# Now, the first axis is output timestep and second is input timestep
p_choose_i = np.random.uniform(size=(4, 5)).astype(np.float32)
# Generate the average of a bunch of samples
n_samples = 100000
sampled_output = np.mean(
[sample(p_choose_i) for _ in range(n_samples)], axis=0)
# Create initial previous_attention base case
recursive_output = [np.array([1] + [0]*(p_choose_i.shape[1] - 1),
np.float32)]
# Compute output with TensorFlow function, for both calculation types
with self.test_session():
for j in range(p_choose_i.shape[0]):
# Compute attention distribution for this output time step
recursive_output.append(wrapper.monotonic_attention(
# newaxis is for adding the expected batch dimension
p_choose_i[j][np.newaxis],
recursive_output[-1][np.newaxis], 'recursive').eval()[0])
# Stack together distributions; remove basecase
recursive_output = np.array(recursive_output[1:])
self.assertEqual(recursive_output.ndim, sampled_output.ndim)
for x, y in zip(recursive_output.shape, sampled_output.shape):
self.assertEqual(x, y)
for x, y in zip(recursive_output.flatten(), sampled_output.flatten()):
# Use a very forgiving threshold since we are sampling
self.assertAlmostEqual(x, y, places=2)
def testBahdanauMonotonicNotNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.BahdanauMonotonicAttention, sigmoid_noise=1.0,
sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.002122893),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.7333333333333334))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040002423),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019968653)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-5.9313523e-05),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032228071),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.050430927)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testBahdanauMonotonicNotNormalized')
def testBahdanauMonotonicNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.BahdanauMonotonicAttention, normalize=True,
sigmoid_noise=1.0, sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0025896581),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.8666666666666667))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040013152),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019973689)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-0.00069823361),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.028698336),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.046009291)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testBahdanauMonotonicNormalized')
def testLuongMonotonicNotNormalized(self):
create_attention_mechanism = functools.partial(
wrapper.LuongMonotonicAttention, sigmoid_noise=1.0,
sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0021257224),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.7333333333333334))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040003359),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.001996913)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-5.2024145e-05),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032198936),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.050387777)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testLuongMonotonicNotNormalized')
def testLuongMonotonicScaled(self):
create_attention_mechanism = functools.partial(
wrapper.LuongMonotonicAttention, scale=True, sigmoid_noise=1.0,
sigmoid_noise_seed=3)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 6), dtype=dtype('float32'), mean=-0.0021257224),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.7333333333333334))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040003359),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.001996913)),
attention=ResultSummary(
shape=(5, 6), dtype=dtype('float32'), mean=-5.2024145e-05),
time=3,
alignments=ResultSummary(
shape=(5, 8), dtype=dtype('float32'), mean=0.032198936),
alignment_history=())
expected_final_alignment_history = ResultSummary(
shape=(3, 5, 8), dtype=dtype('float32'), mean=0.050387777)
self._testWithAttention(
create_attention_mechanism,
expected_final_output,
expected_final_state,
attention_mechanism_depth=9,
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testLuongMonotonicScaled')
def testMultiAttention(self):
create_attention_mechanisms = (
wrapper.BahdanauAttention, wrapper.LuongAttention)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 7), dtype=dtype('float32'), mean=0.0011709079),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=3.2000000000000002))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0038725811),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019329828)),
attention=ResultSummary(
shape=(5, 7), dtype=dtype('float32'), mean=0.001174294),
time=3,
alignments=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
alignment_history=())
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125))
self._testWithMaybeMultiAttention(
True,
create_attention_mechanisms,
expected_final_output,
expected_final_state,
attention_mechanism_depths=[9, 9],
attention_layer_sizes=[3, 4],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testMultiAttention')
def testMultiAttentionNoAttentionLayer(self):
create_attention_mechanisms = (
wrapper.BahdanauAttention, wrapper.LuongAttention)
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 20), dtype=dtype('float32'), mean=0.11691988),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=7.2666666666666666))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0036486709),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0018835809)),
attention=ResultSummary(
shape=(5, 20), dtype=dtype('float32'), mean=0.11680689),
time=3,
alignments=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125)),
alignment_history=())
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125))
self._testWithMaybeMultiAttention(
is_multi=True,
create_attention_mechanisms=create_attention_mechanisms,
expected_final_output=expected_final_output,
expected_final_state=expected_final_state,
attention_mechanism_depths=[9, 9],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testMultiAttention')
def testSingleAttentionAsList(self):
create_attention_mechanisms = [wrapper.BahdanauAttention]
expected_final_output = BasicDecoderOutput(
rnn_output=ResultSummary(
shape=(5, 3, 3), dtype=dtype('float32'), mean=-0.0098485695),
sample_id=ResultSummary(
shape=(5, 3), dtype=dtype('int32'), mean=1.8))
expected_final_state = AttentionWrapperState(
cell_state=LSTMStateTuple(
c=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0040023471),
h=ResultSummary(
shape=(5, 9), dtype=dtype('float32'), mean=-0.0019979973)),
attention=ResultSummary(
shape=(5, 3), dtype=dtype('float32'), mean=-0.0098808752),
time=3,
alignments=(
ResultSummary(shape=(5, 8), dtype=dtype('float32'), mean=0.125),),
alignment_history=())
expected_final_alignment_history = (
ResultSummary(shape=(3, 5, 8), dtype=dtype('float32'), mean=0.125),)
self._testWithMaybeMultiAttention(
is_multi=True, # pass the AttentionMechanism wrapped in a list
create_attention_mechanisms=create_attention_mechanisms,
expected_final_output=expected_final_output,
expected_final_state=expected_final_state,
attention_mechanism_depths=[9],
attention_layer_sizes=[3],
alignment_history=True,
expected_final_alignment_history=expected_final_alignment_history,
name='testMultiAttention')
if __name__ == '__main__':
test.main()
|
|
from direct.interval.IntervalGlobal import *
from pandac.PandaModules import *
from DistributedNPCToonBase import *
from toontown.chat.ChatGlobals import *
from toontown.hood import ZoneUtil
from toontown.nametag.NametagGlobals import *
from toontown.quest import MultiTrackChoiceGui
from toontown.quest import QuestChoiceGui
from toontown.quest import QuestParser
from toontown.toonbase import TTLocalizer
from toontown.toontowngui import TeaserPanel
ChoiceTimeout = 20
class DistributedNPCToon(DistributedNPCToonBase):
def __init__(self, cr):
DistributedNPCToonBase.__init__(self, cr)
self.curQuestMovie = None
self.questChoiceGui = None
self.trackChoiceGui = None
def allowedToTalk(self):
return True
def delayDelete(self):
DistributedNPCToonBase.delayDelete(self)
if self.curQuestMovie:
curQuestMovie = self.curQuestMovie
self.curQuestMovie = None
curQuestMovie.timeout(fFinish=1)
curQuestMovie.cleanup()
def disable(self):
self.cleanupMovie()
DistributedNPCToonBase.disable(self)
def cleanupMovie(self):
self.clearChat()
self.ignore('chooseQuest')
if self.questChoiceGui:
self.questChoiceGui.destroy()
self.questChoiceGui = None
self.ignore(self.uniqueName('doneChatPage'))
if self.curQuestMovie:
self.curQuestMovie.timeout(fFinish=1)
self.curQuestMovie.cleanup()
self.curQuestMovie = None
if self.trackChoiceGui:
self.trackChoiceGui.destroy()
self.trackChoiceGui = None
def handleCollisionSphereEnter(self, collEntry):
base.cr.playGame.getPlace().fsm.request('quest', [self])
self.sendUpdate('avatarEnter', [])
self.nametag3d.setDepthTest(0)
self.nametag3d.setBin('fixed', 0)
def handleOkTeaser(self):
self.dialog.destroy()
del self.dialog
place = base.cr.playGame.getPlace()
if place:
place.fsm.request('walk')
def finishMovie(self, av, isLocalToon, elapsedTime):
self.cleanupMovie()
av.startLookAround()
self.startLookAround()
self.detectAvatars()
self.initPos()
if isLocalToon:
self.showNametag2d()
taskMgr.remove(self.uniqueName('lerpCamera'))
base.localAvatar.posCamera(0, 0)
base.cr.playGame.getPlace().setState('walk')
self.sendUpdate('setMovieDone', [])
self.nametag3d.clearDepthTest()
self.nametag3d.clearBin()
def setupCamera(self, mode):
camera.wrtReparentTo(render)
if mode == NPCToons.QUEST_MOVIE_QUEST_CHOICE or mode == NPCToons.QUEST_MOVIE_TRACK_CHOICE:
camera.posQuatInterval(1, (5, 9, self.getHeight() - 0.5), (155, -2, 0), other=self, blendType='easeOut').start()
else:
camera.posQuatInterval(1, (-5, 9, self.getHeight() - 0.5), (-150, -2, 0), other=self, blendType='easeOut').start()
def setMovie(self, mode, npcId, avId, quests, timestamp):
isLocalToon = avId == base.localAvatar.doId
if mode == NPCToons.QUEST_MOVIE_CLEAR:
self.cleanupMovie()
return
if mode == NPCToons.QUEST_MOVIE_TIMEOUT:
self.cleanupMovie()
if isLocalToon:
self.freeAvatar()
self.setPageNumber(0, -1)
self.clearChat()
self.startLookAround()
self.detectAvatars()
return
av = base.cr.doId2do.get(avId)
if av is None:
            self.notify.warning('Avatar %d not found in doId2do' % avId)
return
if mode == NPCToons.QUEST_MOVIE_REJECT:
rejectString = Quests.chooseQuestDialogReject()
rejectString = Quests.fillInQuestNames(rejectString, avName=av.name)
self.setChatAbsolute(rejectString, CFSpeech | CFTimeout)
if isLocalToon:
base.localAvatar.posCamera(0, 0)
base.cr.playGame.getPlace().setState('walk')
return
if mode == NPCToons.QUEST_MOVIE_TIER_NOT_DONE:
rejectString = Quests.chooseQuestDialogTierNotDone()
rejectString = Quests.fillInQuestNames(rejectString, avName=av.name)
self.setChatAbsolute(rejectString, CFSpeech | CFTimeout)
if isLocalToon:
base.localAvatar.posCamera(0, 0)
base.cr.playGame.getPlace().setState('walk')
return
self.setupAvatars(av)
fullString = ''
toNpcId = None
if isLocalToon:
self.hideNametag2d()
if mode == NPCToons.QUEST_MOVIE_COMPLETE:
questId, rewardId, toNpcId = quests
scriptId = 'quest_complete_' + str(questId)
if QuestParser.questDefined(scriptId):
self.curQuestMovie = QuestParser.NPCMoviePlayer(scriptId, av, self)
self.curQuestMovie.play()
return
if isLocalToon:
self.setupCamera(mode)
greetingString = Quests.chooseQuestDialog(questId, Quests.GREETING)
if greetingString:
fullString += greetingString + '\x07'
fullString += Quests.chooseQuestDialog(questId, Quests.COMPLETE) + '\x07'
if rewardId:
fullString += Quests.getReward(rewardId).getString()
leavingString = Quests.chooseQuestDialog(questId, Quests.LEAVING)
if leavingString:
fullString += '\x07' + leavingString
elif mode == NPCToons.QUEST_MOVIE_QUEST_CHOICE_CANCEL:
fullString = TTLocalizer.QuestMovieQuestChoiceCancel
elif mode == NPCToons.QUEST_MOVIE_TRACK_CHOICE_CANCEL:
fullString = TTLocalizer.QuestMovieTrackChoiceCancel
elif mode == NPCToons.QUEST_MOVIE_INCOMPLETE:
questId, completeStatus, toNpcId = quests
scriptId = 'quest_incomplete_' + str(questId)
if QuestParser.questDefined(scriptId):
if self.curQuestMovie:
self.curQuestMovie.timeout()
self.curQuestMovie.cleanup()
self.curQuestMovie = None
self.curQuestMovie = QuestParser.NPCMoviePlayer(scriptId, av, self)
self.curQuestMovie.play()
return
if isLocalToon:
self.setupCamera(mode)
greetingString = Quests.chooseQuestDialog(questId, Quests.GREETING)
if greetingString:
fullString += greetingString + '\x07'
fullString += Quests.chooseQuestDialog(questId, completeStatus)
leavingString = Quests.chooseQuestDialog(questId, Quests.LEAVING)
if leavingString:
fullString += '\x07' + leavingString
elif mode == NPCToons.QUEST_MOVIE_ASSIGN:
questId, rewardId, toNpcId = quests
scriptId = 'quest_assign_' + str(questId)
if QuestParser.questDefined(scriptId):
if self.curQuestMovie:
self.curQuestMovie.timeout()
self.curQuestMovie.cleanup()
self.curQuestMovie = None
self.curQuestMovie = QuestParser.NPCMoviePlayer(scriptId, av, self)
self.curQuestMovie.play()
return
if isLocalToon:
self.setupCamera(mode)
fullString += Quests.chooseQuestDialog(questId, Quests.QUEST)
leavingString = Quests.chooseQuestDialog(questId, Quests.LEAVING)
if leavingString:
fullString += '\x07' + leavingString
elif mode == NPCToons.QUEST_MOVIE_QUEST_CHOICE:
if isLocalToon:
self.setupCamera(mode)
self.setChatAbsolute(TTLocalizer.QuestMovieQuestChoice, CFSpeech)
if isLocalToon:
self.acceptOnce('chooseQuest', self.sendChooseQuest)
self.questChoiceGui = QuestChoiceGui.QuestChoiceGui()
print 'setquestgui'
self.questChoiceGui.setQuests(quests, npcId, ChoiceTimeout)
print 'gui setQuests'
return
elif mode == NPCToons.QUEST_MOVIE_TRACK_CHOICE:
if isLocalToon:
self.setupCamera(mode)
tracks = quests
self.setChatAbsolute(TTLocalizer.QuestMovieTrackChoice, CFSpeech)
if isLocalToon:
self.acceptOnce('chooseTrack', self.sendChooseTrack)
print 'loading gui'
self.trackChoiceGui = MultiTrackChoiceGui.MultiTrackChoiceGui(tracks, ChoiceTimeout)
print 'loaded'
return
fullString = Quests.fillInQuestNames(fullString, avName=av.name, fromNpcId=npcId, toNpcId=toNpcId)
self.acceptOnce(self.uniqueName('doneChatPage'), self.finishMovie, extraArgs=[av, isLocalToon])
self.clearChat()
self.setPageChat(avId, 0, fullString, 1)
def sendChooseQuest(self, questId):
if self.questChoiceGui:
self.questChoiceGui.destroy()
self.questChoiceGui = None
self.sendUpdate('chooseQuest', [questId])
def sendChooseTrack(self, trackId):
if self.trackChoiceGui:
self.trackChoiceGui.destroy()
self.trackChoiceGui = None
self.sendUpdate('chooseTrack', [trackId])
|
|
#!/usr/bin/env @PYTHON_EXECUTABLE@
"""
Description: Export a Siconos mechanics-IO HDF5 file in VTK format.
"""
# Lighter imports before command line parsing
from __future__ import print_function
import sys
import os
import getopt
#
# a replacement for vview --vtk-export
#
def usage(long=False):
print(__doc__); print()
print('Usage: {0} [--help] [--version] [--ascii] <HDF5>'
.format(os.path.split(sys.argv[0])[1]))
if long:
print()
print("""Options:
--help display this message
--version display version information
--ascii export file in ascii format
""")
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], '',
['help','version','ascii'])
except getopt.GetoptError as err:
sys.stderr.write('{0}\n'.format(str(err)))
usage()
exit(2)
ascii_mode = False
for o, a in opts:
if o == '--help':
usage(long=True)
exit(0)
if o == '--version':
print('{0} @SICONOS_VERSION@'.format(os.path.split(sys.argv[0])[1]))
exit(0)
    if o in ('--ascii',):
ascii_mode = True
min_time = None
max_time = None
cf_scale_factor = 1
normalcone_ratio = 1
time_scale_factor = 1
vtk_export_mode = True
if len(args) > 0:
io_filename = args[0]
else:
usage()
exit(1)
# Heavier imports after command line parsing
import vtk
from vtk.util import numpy_support
from math import atan2, pi
import bisect
from numpy.linalg import norm
import numpy
import random
from siconos.io.mechanics_hdf5 import MechanicsHdf5
# attach velocity
# contact points and associated forces are embedded on a PolyData source
class UnstructuredGridSource(vtk.vtkProgrammableSource):
def GetOutputPort(self):
# 3: UnstructuredGridOutput for vtkProgrammableSource
return vtk.vtkProgrammableSource.GetOutputPort(self, 3)
class ConvexSource(UnstructuredGridSource):
def __init__(self, convex, points):
self._convex = convex
self._points = points
self.SetExecuteMethod(self.method)
def method(self):
output = self.GetUnstructuredGridOutput()
output.Allocate(1, 1)
        output.InsertNextCell(
            self._convex.GetCellType(), self._convex.GetPointIds())
output.SetPoints(self._points)
def add_compatiblity_methods(obj):
"""
Add missing methods in previous VTK versions.
"""
if hasattr(obj, 'SetInput'):
obj.SetInputData = obj.SetInput
if hasattr(obj, 'AddInput'):
obj.AddInputData = obj.AddInput
transforms = dict()
transformers = dict()
data_connectors_v = dict()
data_connectors_t = dict()
data_connectors_d = dict()
big_data_source = vtk.vtkMultiBlockDataGroupFilter()
add_compatiblity_methods(big_data_source)
big_data_writer = vtk.vtkXMLMultiBlockDataWriter()
add_compatiblity_methods(big_data_writer)
contactors = dict()
offsets = dict()
vtkmath = vtk.vtkMath()
class Quaternion():
def __init__(self, *args):
self._data = vtk.vtkQuaternion[float](*args)
def __mul__(self, q):
r = Quaternion()
vtkmath.MultiplyQuaternion(self._data, q._data, r._data)
return r
def __getitem__(self, i):
return self._data[i]
def conjugate(self):
r = Quaternion((self[0], self[1], self[2], self[3]))
r._data.Conjugate()
return r
def rotate(self, v):
pv = Quaternion((0, v[0], v[1], v[2]))
rv = self * pv * self.conjugate()
# assert(rv[0] == 0)
return [rv[1], rv[2], rv[3]]
def axisAngle(self):
r = [0, 0, 0]
a = self._data.GetRotationAngleAndAxis(r)
return r, a
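# Illustrative sketch (not part of the original script): how the Quaternion
# helper above is intended to be used. It assumes VTK's (w, x, y, z) component
# order; the values are arbitrary and nothing in this script calls it.
def _example_quaternion_usage():
    # 90 degree rotation about the z axis.
    q = Quaternion((0.7071067811865476, 0.0, 0.0, 0.7071067811865476))
    v = q.rotate([1.0, 0.0, 0.0])  # approximately [0.0, 1.0, 0.0]
    axis, angle = q.axisAngle()    # angle in radians; see set_position below
    return v, axis, angle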
def set_position(instance, q0, q1, q2, q3, q4, q5, q6):
q = Quaternion((q3, q4, q5, q6))
for transform, offset in zip(transforms[instance], offsets[instance]):
p = q.rotate(offset[0])
r = q * Quaternion(offset[1])
transform.Identity()
transform.Translate(q0 + p[0], q1 + p[1], q2 + p[2])
axis, angle = r.axisAngle()
transform.RotateWXYZ(angle * 180. / pi,
axis[0],
axis[1],
axis[2])
set_positionv = numpy.vectorize(set_position)
def build_set_velocity(dico):
def set_velocity(instance, v0, v1, v2, v3, v4, v5):
if instance in dico:
dico[instance]._data[:] = [v0, v1, v2, v3, v4, v5]
dico[instance]._connector.Update()
set_velocityv = numpy.vectorize(set_velocity)
return set_velocityv
def build_set_translation(dico):
def set_translation(instance, x0, x1, x2 ):
if instance in dico:
dico[instance]._data[:] = [x0, x1, x2]
dico[instance]._connector.Update()
set_translationv = numpy.vectorize(set_translation)
return set_translationv
def build_set_displacement(dico):
def set_displacement(instance, x0, x1, x2 ):
if instance in dico:
dico[instance]._data[:] = [x0, x1, x2]
dico[instance]._connector.Update()
set_displacementv = numpy.vectorize(set_displacement)
return set_displacementv
def step_reader(step_string):
    """Build a vtkSTLReader for a STEP shape.
    Note: ``step_string`` is currently unused; this function relies on the
    module-level ``io`` (the MechanicsHdf5 file opened below) and
    ``shape_name`` being set when it is called.
    """
from OCC.StlAPI import StlAPI_Writer
from OCC.STEPControl import STEPControl_Reader
from OCC.BRep import BRep_Builder
from OCC.TopoDS import TopoDS_Compound
from OCC.IFSelect import IFSelect_RetDone, IFSelect_ItemsByEntity
builder = BRep_Builder()
comp = TopoDS_Compound()
builder.MakeCompound(comp)
stl_writer = StlAPI_Writer()
stl_writer.SetASCIIMode(True)
with io.tmpfile(contents=io.shapes()[shape_name][:][0]) as tmpfile:
step_reader = STEPControl_Reader()
status = step_reader.ReadFile(tmpfile[1])
if status == IFSelect_RetDone: # check status
failsonly = False
step_reader.PrintCheckLoad(failsonly, IFSelect_ItemsByEntity)
step_reader.PrintCheckTransfer(failsonly, IFSelect_ItemsByEntity)
ok = step_reader.TransferRoot(1)
nbs = step_reader.NbShapes()
l = []
for i in range(1, nbs + 1):
shape = step_reader.Shape(i)
builder.Add(comp, shape)
with io.tmpfile(suffix='.stl') as tmpf:
stl_writer.Write(comp, tmpf[1])
tmpf[0].flush()
reader = vtk.vtkSTLReader()
reader.SetFileName(tmpf[1])
reader.Update()
return reader
def brep_reader(brep_string, indx):
from OCC.StlAPI import StlAPI_Writer
from OCC.BRepTools import BRepTools_ShapeSet
shape_set = BRepTools_ShapeSet()
shape_set.ReadFromString(brep_string)
shape = shape_set.Shape(shape_set.NbShapes())
location = shape_set.Locations().Location(indx)
shape.Location(location)
stl_writer = StlAPI_Writer()
with io.tmpfile(suffix='.stl') as tmpf:
stl_writer.Write(shape, tmpf[1])
tmpf[0].flush()
reader = vtk.vtkSTLReader()
reader.SetFileName(tmpf[1])
reader.Update()
return reader
refs = []
refs_attrs = []
shape = dict()
pos = dict()
instances = dict()
with MechanicsHdf5(io_filename=io_filename, mode='r') as io:
def load():
ispos_data = io.static_data()
idpos_data = io.dynamic_data()
ivelo_data = io.velocities_data()
icf_data = io.contact_forces_data()[:]
isolv_data = io.solver_data()
return ispos_data, idpos_data, ivelo_data, icf_data, isolv_data
spos_data, dpos_data, velo_data, cf_data, solv_data = load()
class DataConnector():
def __init__(self, instance, data_name='velocity', data_size=6):
self._instance = instance
self._data_name = data_name
self._data_size = data_size
self._connector = vtk.vtkProgrammableFilter()
self._connector.SetExecuteMethod(self.method)
self._data = numpy.zeros(data_size)
self._vtk_data = vtk.vtkFloatArray()
self._vtk_data.SetName(data_name)
self._vtk_data.SetNumberOfComponents(data_size)
self._vtk_data.SetNumberOfTuples(1)
def method(self):
input = self._connector.GetInput()
output = self._connector.GetOutput()
output.ShallowCopy(input)
if output.GetFieldData().GetArray(self._data_name) is None:
output.GetFieldData().AddArray(self._vtk_data)
data = self._data
data_t = tuple(data[0:self._data_size])
output.GetFieldData().GetArray(self._data_name).SetTuple(
0, data_t)
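    # Illustrative sketch (not part of the original script): a DataConnector
    # just attaches a named, fixed-size field-data array to whatever passes
    # through its programmable filter. The instance id 0 is arbitrary and
    # nothing in this script calls this function.
    def _example_data_connector(source_port):
        connector = DataConnector(0, data_name='velocity', data_size=6)
        connector._connector.SetInputConnection(source_port)
        connector._data[:] = [0., 0., 0., 0., 0., 0.]
        connector._connector.Update()
        return connector._connector.GetOutputPort()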
# contact forces provider
class ContactInfoSource():
def __init__(self, data):
self._data = None
if data is not None:
if len(data) > 0:
self._data = data
else:
self._data = None
if self._data is not None:
self._time = min(self._data[:, 0])
else:
self._time = 0
self._contact_source_a = vtk.vtkProgrammableSource()
self._contact_source_b = vtk.vtkProgrammableSource()
self._contact_source_a.SetExecuteMethod(self.method)
self._contact_source_b.SetExecuteMethod(self.method)
def method(self):
# multiblock += contact points
output_a = self._contact_source_a.GetPolyDataOutput()
output_b = self._contact_source_b.GetPolyDataOutput()
id_f = numpy.where(
abs(self._data[:, 0] - self._time) < 1e-15)[0]
self.cpa_export = self._data[
id_f, 2:5].copy()
self.cpb_export = self._data[
id_f, 5:8].copy()
self.cn_export = self._data[
id_f, 8:11].copy()
self.cf_export = self._data[
id_f, 11:14].copy()
self.cpa_ = numpy_support.numpy_to_vtk(
self.cpa_export)
self.cpa_.SetName('contact_positions_A')
self.cpb_ = numpy_support.numpy_to_vtk(
self.cpb_export)
self.cpb_.SetName('contact_positions_B')
self.cn_ = numpy_support.numpy_to_vtk(
self.cn_export)
self.cn_.SetName('contact_normals')
self.cf_ = numpy_support.numpy_to_vtk(
self.cf_export)
self.cf_.SetName('contact_forces')
output_a.Allocate(len(self.cpa_export), 1)
cpa_points = vtk.vtkPoints()
cpa_points.SetNumberOfPoints(len(self.cpa_export))
cpa_points.SetData(self.cpa_)
output_a.SetPoints(cpa_points)
# normal and forces are attached to A points
output_a.GetPointData().AddArray(self.cn_)
output_a.GetPointData().AddArray(self.cf_)
output_b.Allocate(len(self.cpb_export), 1)
cpb_points = vtk.vtkPoints()
cpb_points.SetNumberOfPoints(len(self.cpb_export))
cpb_points.SetData(self.cpb_)
output_b.SetPoints(cpb_points)
# Step 2
#
#
readers = dict()
vtk_reader = {'vtp': vtk.vtkXMLPolyDataReader,
'stl': vtk.vtkSTLReader}
for shape_name in io.shapes():
shape_type = io.shapes()[shape_name].attrs['type']
if shape_type in ['vtp', 'stl']:
with io.tmpfile() as tmpf:
tmpf[0].write(str(io.shapes()[shape_name][:][0]))
tmpf[0].flush()
reader = vtk_reader[shape_type]()
reader.SetFileName(tmpf[1])
reader.Update()
readers[shape_name] = reader
elif shape_type in ['brep']:
# try to find an associated shape
if 'associated_shape' in io.shapes()[shape_name].attrs:
associated_shape = \
io.shapes()[shape_name].\
attrs['associated_shape']
else:
if 'brep' in io.shapes()[shape_name].attrs:
brep = io.shapes()[shape_name].attrs['brep']
else:
brep = shape_name
reader = brep_reader(str(io.shapes()[brep][:][0]),
io.shapes()[brep].attrs['occ_indx'])
readers[shape_name] = reader
elif shape_type in ['stp', 'step']:
# try to find an associated shape
if 'associated_shape' in io.shapes()[shape_name].attrs:
associated_shape = \
io.shapes()[shape_name].\
attrs['associated_shape']
# delayed
else:
reader = step_reader(str(io.shapes()[shape_name][:]))
readers[shape_name] = reader
elif shape_type == 'convex':
# a convex shape
points = vtk.vtkPoints()
convex = vtk.vtkConvexPointSet()
data = io.shapes()[shape_name][:]
convex.GetPointIds().SetNumberOfIds(data.shape[0])
for id_, vertice in enumerate(io.shapes()[shape_name][:]):
points.InsertNextPoint(vertice[0], vertice[1], vertice[2])
convex.GetPointIds().SetId(id_, id_)
readers[shape_name] = ConvexSource(convex, points)
else:
assert shape_type == 'primitive'
primitive = io.shapes()[shape_name].attrs['primitive']
attrs = io.shapes()[shape_name][:][0]
if primitive == 'Sphere':
source = vtk.vtkSphereSource()
source.SetRadius(attrs[0])
elif primitive == 'Cone':
source = vtk.vtkConeSource()
source.SetRadius(attrs[0])
source.SetHeight(attrs[1])
source.SetResolution(15)
source.SetDirection(0, 1, 0) # needed
elif primitive == 'Cylinder':
source = vtk.vtkCylinderSource()
source.SetResolution(15)
source.SetRadius(attrs[0])
source.SetHeight(attrs[1])
# source.SetDirection(0,1,0)
elif primitive == 'Box':
source = vtk.vtkCubeSource()
source.SetXLength(attrs[0])
source.SetYLength(attrs[1])
source.SetZLength(attrs[2])
elif primitive == 'Capsule':
sphere1 = vtk.vtkSphereSource()
sphere1.SetRadius(attrs[0])
sphere1.SetCenter(0, attrs[1] / 2, 0)
sphere1.SetThetaResolution(15)
sphere1.SetPhiResolution(15)
sphere1.Update()
sphere2 = vtk.vtkSphereSource()
sphere2.SetRadius(attrs[0])
sphere2.SetCenter(0, -attrs[1] / 2, 0)
sphere2.SetThetaResolution(15)
sphere2.SetPhiResolution(15)
sphere2.Update()
cylinder = vtk.vtkCylinderSource()
cylinder.SetRadius(attrs[0])
cylinder.SetHeight(attrs[1])
cylinder.SetResolution(15)
cylinder.Update()
data = vtk.vtkMultiBlockDataSet()
data.SetNumberOfBlocks(3)
data.SetBlock(0, sphere1.GetOutput())
data.SetBlock(1, sphere2.GetOutput())
data.SetBlock(2, cylinder.GetOutput())
source = vtk.vtkMultiBlockDataGroupFilter()
add_compatiblity_methods(source)
source.AddInputData(data)
readers[shape_name] = source
for instance_name in io.instances():
instance = int(io.instances()[instance_name].attrs['id'])
contactors[instance] = []
transforms[instance] = []
offsets[instance] = []
for contactor_instance_name in io.instances()[instance_name]:
contactor_name = io.instances()[instance_name][
contactor_instance_name].attrs['name']
contactors[instance].append(contactor_name)
transform = vtk.vtkTransform()
transformer = vtk.vtkTransformFilter()
if contactor_name in readers:
transformer.SetInputConnection(
readers[contactor_name].GetOutputPort())
else:
print ('WARNING: cannot find a shape source for instance:',
instance)
transformer.SetTransform(transform)
transformers[contactor_name] = transformer
data_connectors_v[instance] = DataConnector(instance)
data_connectors_v[instance]._connector.SetInputConnection(
transformer.GetOutputPort())
data_connectors_v[instance]._connector.Update()
big_data_source.AddInputConnection(
data_connectors_v[instance]._connector.GetOutputPort())
data_connectors_t[instance] = DataConnector(instance, data_name='translation', data_size=3)
data_connectors_t[instance]._connector.SetInputConnection(
transformer.GetOutputPort())
data_connectors_t[instance]._connector.Update()
big_data_source.AddInputConnection(
data_connectors_t[instance]._connector.GetOutputPort())
data_connectors_d[instance] = DataConnector(instance, data_name='displacement', data_size=3)
data_connectors_d[instance]._connector.SetInputConnection(
transformer.GetOutputPort())
data_connectors_d[instance]._connector.Update()
big_data_source.AddInputConnection(
data_connectors_d[instance]._connector.GetOutputPort())
transforms[instance].append(transform)
offsets[instance].append(
(io.instances()[
instance_name][
contactor_instance_name].attrs['translation'],
io.instances()[instance_name][contactor_instance_name].attrs['orientation']))
pos_data = dpos_data[:].copy()
spos_data = spos_data[:].copy()
velo_data = velo_data[:].copy()
set_velocityv = build_set_velocity(data_connectors_v)
set_translationv = build_set_translation(data_connectors_t)
set_displacementv = build_set_displacement(data_connectors_d)
times = list(set(dpos_data[:, 0]))
times.sort()
contact_info_source = ContactInfoSource(cf_data)
pveloa = DataConnector(0)
pvelob = DataConnector(0)
pveloa._connector.SetInputConnection(
contact_info_source._contact_source_a.GetOutputPort())
    pvelob._connector.SetInputConnection(
        contact_info_source._contact_source_b.GetOutputPort())
big_data_source.AddInputConnection(
pveloa._connector.GetOutputPort())
big_data_source.AddInputConnection(
pvelob._connector.GetOutputPort())
big_data_writer.SetInputConnection(big_data_source.GetOutputPort())
ntime = len(times)
    k = 0
    packet = int(ntime / 100) + 1
    for time in times:
        k += 1
        if k % packet == 0:
            sys.stdout.write('.')
index = bisect.bisect_left(times, time)
index = max(0, index)
index = min(index, len(times) - 1)
contact_info_source._time = times[index]
# fix: should be called by contact_source?
contact_info_source.method()
id_t = numpy.where(pos_data[:, 0] == times[index])
if numpy.shape(spos_data)[0] > 0:
set_positionv(spos_data[:, 1], spos_data[:, 2],
spos_data[:, 3],
spos_data[:, 4], spos_data[:, 5],
spos_data[:, 6],
spos_data[:, 7], spos_data[:, 8])
set_positionv(
pos_data[id_t, 1], pos_data[id_t, 2], pos_data[id_t, 3],
pos_data[id_t, 4], pos_data[id_t, 5], pos_data[id_t, 6],
pos_data[id_t, 7], pos_data[id_t, 8])
id_tv = numpy.where(velo_data[:, 0] == times[index])
set_velocityv(
velo_data[id_tv, 1],
velo_data[id_tv, 2],
velo_data[id_tv, 3],
velo_data[id_tv, 4],
velo_data[id_tv, 5],
velo_data[id_tv, 6],
velo_data[id_tv, 7])
set_translationv(
pos_data[id_t, 1],
pos_data[id_t, 2],
pos_data[id_t, 3],
pos_data[id_t, 4],
)
# set_displacementv(
# pos_data[id_t, 1],
# pos_data[id_t, 2]- pos_data[0, 2],
# pos_data[id_t, 3]- pos_data[0, 3],
# pos_data[id_t, 4]- pos_data[0, 4]
# ) # should be w.r.t initial position
big_data_writer.SetFileName('{0}-{1}.{2}'.format(os.path.splitext(
os.path.basename(io_filename))[0],
index, big_data_writer.GetDefaultFileExtension()))
big_data_writer.SetTimeStep(times[index])
big_data_source.Update()
if ascii_mode:
big_data_writer.SetDataModeToAscii()
big_data_writer.Write()
print(' ')
|
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Builder class, a minimal prototype class to build more chart
types on top of it.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import numpy as np
from six import string_types
from .attributes import AttrSpec, ColorAttr, CatAttr
from .chart import Chart
from .data_source import ChartDataSource
from .models import CompositeGlyph
from .properties import Dimension, ColumnLabel
from .utils import collect_attribute_columns, label_from_index_dict, build_hover_tooltips
from .data_source import OrderedAssigner
from ..models.ranges import Range, Range1d, FactorRange
from ..models.sources import ColumnDataSource
from ..core.properties import (HasProps, Instance, List, String, Dict,
Color, Bool, Tuple, Either, Enum)
from ..core.enums import SortDirection
from ..util.deprecation import deprecated
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def create_and_build(builder_class, *data, **kws):
"""A factory function for handling Chart and Builder generation.
Returns:
:class:`Chart`
"""
if getattr(builder_class, 'dimensions') is None:
raise NotImplementedError('Each builder must specify its dimensions, %s does not.' % builder_class.__name__)
if getattr(builder_class, 'default_attributes') is None:
raise NotImplementedError('Each builder must specify its default_attributes, %s does not.' % builder_class.__name__)
builder_props = set(builder_class.properties()) | \
set(getattr(builder_class, "__deprecated_attributes__", []))
# append dimensions to the builder props
for dim in builder_class.dimensions:
builder_props.add(dim)
# append attributes to the builder props
for attr_name in builder_class.default_attributes.keys():
builder_props.add(attr_name)
# create the new builder
builder_kws = {k: v for k, v in kws.items() if k in builder_props}
builder = builder_class(*data, **builder_kws)
# create a chart to return, since there isn't one already
chart_kws = {k: v for k, v in kws.items() if k not in builder_props}
chart = Chart(**chart_kws)
chart.add_builder(builder)
chart.start_plot()
return chart
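# Illustrative sketch (not part of the original module): chart entry points are
# expected to wrap ``create_and_build`` roughly like this, where
# ``ScatterBuilder`` is a hypothetical Builder subclass defining ``dimensions``
# and ``default_attributes``:
#
#     def Scatter(data=None, x=None, y=None, **kws):
#         kws['x'] = x
#         kws['y'] = y
#         return create_and_build(ScatterBuilder, data, **kws)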
class Builder(HasProps):
""" A prototype class to inherit each new chart Builder type.
It provides useful methods to be used by the inherited builder classes,
in order to automate most of the charts creation tasks and leave the
core customization to specialized builder classes. In that pattern
inherited builders just need to provide the following methods:
Required:
* :meth:`~bokeh.charts.builder.Builder.yield_renderers`: yields the glyphs to be
rendered into the plot. Here you should call the
:meth:`~bokeh.charts.builder.Builder.add_glyph` method so that the builder can
setup the legend for you.
* :meth:`~bokeh.charts.builder.Builder.set_ranges`: setup the ranges for the
glyphs. This is called after glyph creation, so you are able to inspect the
comp_glyphs for their minimum and maximum values. See the
:meth:`~bokeh.charts.builder.Builder.create` method for more information on
when this is called and how the builder provides the ranges to the containing
:class:`Chart` using the :meth:`Chart.add_ranges` method.
Optional:
* :meth:`~bokeh.charts.builder.Builder.setup`: provides an area
where subclasses of builder can introspect properties, setup attributes, or change
property values. This is called before
:meth:`~bokeh.charts.builder.Builder.process_data`.
* :meth:`~bokeh.charts.builder.Builder.process_data`: provides an area
where subclasses of builder can manipulate the source data before renderers are
created.
"""
# Optional Inputs
x_range = Instance(Range)
y_range = Instance(Range)
xlabel = String()
ylabel = String()
xscale = String()
yscale = String()
palette = List(Color, help="""Optional input to override the default palette used
by any color attribute.
""")
# Dimension Configuration
"""
The dimension labels that drive the position of the
glyphs. Subclasses should implement this so that the Builder
base class knows which dimensions it needs to operate on.
An example for a builder working with cartesian x and y
coordinates would be dimensions = ['x', 'y']. You should
then instantiate the x and y dimensions as attributes of the
subclass of builder using the :class:`Dimension
<bokeh.charts.properties.Dimension>` class. One for x, as x
= Dimension(...), and one as y = Dimension(...).
"""
dimensions = None # None because it MUST be overridden
"""
The dimension labels that must exist to produce the
glyphs. This specifies what are the valid configurations for
the chart, with the option of specifying the type of the
columns. The
:class:`~bokeh.charts.data_source.ChartDataSource` will
inspect this property of your subclass of Builder and use
this to fill in any required dimensions if no keyword
arguments are used.
"""
req_dimensions = []
# Attribute Configuration
attributes = Dict(String, Instance(AttrSpec), help="""
The attribute specs used to group data. This is a mapping between the role of
the attribute spec (e.g. 'color') and the
:class:`~bokeh.charts.attributes.AttrSpec` class (e.g.,
:class:`~bokeh.charts.attributes.ColorAttr`). The Builder will use this
attributes property during runtime, which will consist of any attribute specs
that are passed into the chart creation function (e.g.,
:class:`~bokeh.charts.Bar`), ones that are created for the user from simple
input types (e.g. `Bar(..., color='red')` or `Bar(..., color=<column_name>)`),
or lastly, the attribute spec found in the default_attributes configured for
the subclass of :class:`~bokeh.charts.builder.Builder`.
""")
"""
The default attribute specs used to group data. This is
where the subclass of Builder should specify what the
default attributes are that will yield attribute values to
each group of data, and any specific configuration. For
example, the :class:`ColorAttr` utilizes a default palette
for assigning color based on groups of data. If the user
doesn't assign a column of the data to the associated
attribute spec, then the default attrspec is used, which
will yield a constant color value for each group of
data. This is by default the first color in the default
palette, but can be customized by setting the default color
in the ColorAttr.
"""
default_attributes = None # None because it MUST be overridden
# Derived properties (created by Builder at runtime)
attribute_columns = List(ColumnLabel, help="""
All columns used for specifying attributes for the Chart. The Builder will set
this value on creation so that the subclasses can know the distinct set of columns
that are being used to assign attributes.
""")
comp_glyphs = List(Instance(CompositeGlyph), help="""
A list of composite glyphs, where each represents a unique subset of data. The
composite glyph is a helper class that encapsulates all low level
:class:`~bokeh.models.glyphs.Glyph`, that represent a higher level group of
data. For example, the :class:`BoxGlyph` is a single class that yields
each :class:`GlyphRenderer` needed to produce a Box on a :class:`BoxPlot`. The
single Box represents a full array of values that are aggregated, and is made
up of multiple :class:`~bokeh.models.glyphs.Rect` and
:class:`~bokeh.models.glyphs.Segment` glyphs.
""")
labels = List(String, help="""Represents the unique labels to be used for legends.""")
"""List of attributes to use for legends."""
label_attributes = []
"""
Used to assign columns to dimensions when no selections have been provided. The
default behavior is provided by the :class:`OrderedAssigner`, which assigns
a single column to each dimension available in the `Builder`'s `dims` property.
"""
column_selector = OrderedAssigner
comp_glyph_types = List(Instance(CompositeGlyph))
sort_dim = Dict(String, Bool, default={})
sort_legend = List(Tuple(String, Bool), help="""
List of tuples to use for sorting the legend, in order that they should be
used for sorting. This sorting can be different than the sorting used for the
rest of the chart. For example, you might want to sort only on the column
assigned to the color attribute, or sort it descending. The order of each tuple
is (Column, Ascending).
""")
legend_sort_field = String(help="""
Attribute that should be used to sort the legend, for example: color,
        dash, marker, etc. Valid values for this property depend on the type
of chart.
""")
legend_sort_direction = Enum(SortDirection, help="""
Sort direction to apply to :attr:`~bokeh.charts.builder.Builder.sort_legend`.
Valid values are: `ascending` or `descending`.
""")
source = Instance(ColumnDataSource)
tooltips = Either(List(Tuple(String, String)), List(String), Bool, default=None,
help="""
Tells the builder to add tooltips to the chart by either using the columns
specified to the chart attributes (True), or by generating tooltips for each
column specified (list(str)), or by explicit specification of the tooltips
using the valid input for the `HoverTool` tooltips kwarg.
""")
__deprecated_attributes__ = ('sort_legend',)
def __init__(self, *args, **kws):
"""Common arguments to be used by all the inherited classes.
Args:
data (:ref:`userguide_charts_data_types`): source data for the chart
legend (str, bool): the legend of your plot. The legend content is
                inferred from incoming input. It can be ``top_left``,
                ``top_right``, ``bottom_left``, ``bottom_right``.
                It is ``top_right`` if you set it as True.
Attributes:
source (obj): datasource object for your plot,
initialized as a dummy None.
            x_range (obj): x-associated datarange object for your plot,
                initialized as a dummy None.
            y_range (obj): y-associated datarange object for your plot,
                initialized as a dummy None.
groups (list): to be filled with the incoming groups of data.
Useful for legend construction.
data (dict): to be filled with the incoming data and be passed
to the ChartDataSource for each Builder class.
attr (list(AttrSpec)): to be filled with the new attributes created after
loading the data dict.
"""
data = None
if len(args) != 0 or len(kws) != 0:
# chart dimensions can be literal dimensions or attributes
attrs = list(self.default_attributes.keys())
dims = self.dimensions + attrs
# pop the dimension inputs from kwargs
data_args = {}
for dim in dims:
if dim in kws.keys():
data_args[dim] = kws[dim]
# build chart data source from inputs, given the dimension configuration
data_args['dims'] = tuple(dims)
data_args['required_dims'] = tuple(self.req_dimensions)
data_args['attrs'] = attrs
data_args['column_assigner'] = self.column_selector
data = ChartDataSource.from_data(*args, **data_args)
# make sure that the builder dimensions have access to the chart data source
for dim in self.dimensions:
getattr(getattr(self, dim), 'set_data')(data)
# handle input attrs and ensure attrs have access to data
attributes = self._setup_attrs(data, kws)
# remove inputs handled by dimensions and chart attributes
for dim in dims:
kws.pop(dim, None)
else:
attributes = dict()
kws['attributes'] = attributes
super(Builder, self).__init__(**kws)
# collect unique columns used for attributes
self.attribute_columns = collect_attribute_columns(**self.attributes)
for k in self.__deprecated_attributes__:
if k in kws:
setattr(self, k, kws[k])
self._data = data
self._legends = []
def _setup_attrs(self, data, kws):
"""Handle overridden attributes and initialize them with data.
Makes sure that all attributes have access to the data
source, which is used for mapping attributes to groups
of data.
Returns:
None
"""
source = ColumnDataSource(data.df)
attr_names = self.default_attributes.keys()
custom_palette = kws.get('palette')
attributes = dict()
for attr_name in attr_names:
attr = kws.pop(attr_name, None)
# if given an attribute use it
if isinstance(attr, AttrSpec):
attributes[attr_name] = attr
# if we are given columns, use those
elif isinstance(attr, (str, list)):
attributes[attr_name] = self.default_attributes[attr_name]._clone()
# override palette if available
if isinstance(attributes[attr_name], ColorAttr):
if custom_palette is not None:
attributes[attr_name].iterable = custom_palette
attributes[attr_name].setup(data=source, columns=attr)
else:
# override palette if available
if (isinstance(self.default_attributes[attr_name], ColorAttr) and
custom_palette is not None):
attributes[attr_name] = self.default_attributes[attr_name]._clone()
attributes[attr_name].iterable = custom_palette
else:
attributes[attr_name] = self.default_attributes[attr_name]._clone()
# make sure all have access to data source
for attr_name in attr_names:
attributes[attr_name].update_data(data=source)
return attributes
def setup(self):
"""Perform any initial pre-processing, attribute config.
Returns:
None
"""
pass
def process_data(self):
"""Make any global data manipulations before grouping.
        It has to be implemented by any of the inherited classes
representing each different chart type. It is the place
where we make specific calculations for each chart.
Returns:
None
"""
pass
def yield_renderers(self):
""" Generator that yields the glyphs to be draw on the plot
It has to be implemented by any of the inherited class
representing each different chart type.
Yields:
:class:`GlyphRenderer`
"""
raise NotImplementedError('Subclasses of %s must implement _yield_renderers.' %
self.__class__.__name__)
def set_ranges(self):
"""Calculate and set the x and y ranges.
It has to be implemented by any of the subclasses of builder
representing each different chart type, and is called after
:meth:`yield_renderers`.
Returns:
None
"""
raise NotImplementedError('Subclasses of %s must implement _set_ranges.' %
self.__class__.__name__)
def get_dim_extents(self):
"""Helper method to retrieve maximum extents of all the renderers.
Returns:
a dict mapping between dimension and value for x_max, y_max, x_min, y_min
"""
return {'x_max': max([renderer.x_max for renderer in self.comp_glyphs]),
'y_max': max([renderer.y_max for renderer in self.comp_glyphs]),
'x_min': min([renderer.x_min for renderer in self.comp_glyphs]),
'y_min': min([renderer.y_min for renderer in self.comp_glyphs])
}
def add_glyph(self, group, glyph):
"""Add a composite glyph.
Manages the legend, since the builder might not want all attribute types
used for the legend.
Args:
group (:class:`DataGroup`): the data the `glyph` is associated with
glyph (:class:`CompositeGlyph`): the glyph associated with the `group`
Returns:
None
"""
if isinstance(glyph, list):
for sub_glyph in glyph:
self.comp_glyphs.append(sub_glyph)
else:
self.comp_glyphs.append(glyph)
# handle cases where builders have specified which attributes to use for labels
label = None
if len(self.label_attributes) > 0:
for attr in self.label_attributes:
# this will get the last attribute group label for now
if self.attributes[attr].columns is not None:
label = self._get_group_label(group, attr=attr)
# if no special case for labeling, just use the group label
if label is None:
label = self._get_group_label(group, attr='label')
# add to legend if new and unique label
if str(label) not in self.labels and label is not None:
self._legends.append((label, glyph.renderers))
self.labels.append(label)
def _get_group_label(self, group, attr='label'):
"""Get the label of the group by the attribute name.
Args:
group (:attr:`DataGroup`: the group of data
attr (str, optional): the attribute name containing the label, defaults to
'label'.
Returns:
str: the label for the group
"""
        if attr == 'label':
label = group.label
else:
label = group[attr]
if isinstance(label, dict):
label = tuple(label.values())
return self._get_label(label)
@staticmethod
def _get_label(raw_label):
"""Converts a label by string or tuple to a string representation.
Args:
raw_label (str or tuple(any, any)): a unique identifier for the data group
Returns:
str: a label that is usable in charts
"""
# don't convert None type to string so we can test for it later
if raw_label is None:
return None
if (isinstance(raw_label, tuple) or isinstance(raw_label, list)) and \
len(raw_label) == 1:
raw_label = raw_label[0]
elif isinstance(raw_label, dict):
raw_label = label_from_index_dict(raw_label)
return str(raw_label)
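    # Illustrative examples (not in the original source) of how ``_get_label``
    # normalizes raw group labels:
    #   Builder._get_label(None)       -> None
    #   Builder._get_label(('a',))     -> 'a'
    #   Builder._get_label(['x', 'y']) -> "['x', 'y']"
    #   Builder._get_label(3)          -> '3'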
def collect_attr_kwargs(self):
if hasattr(super(self.__class__, self), 'default_attributes'):
attrs = set(self.default_attributes.keys()) - set(
(super(self.__class__, self).default_attributes or {}).keys())
else:
attrs = set()
return attrs
def get_group_kwargs(self, group, attrs):
return {attr: group[attr] for attr in attrs}
def create(self, chart=None):
"""Builds the renderers, adding them and other components to the chart.
Args:
chart (:class:`Chart`, optional): the chart that will contain the glyph
renderers that the `Builder` produces.
Returns:
:class:`Chart`
"""
# call methods that allow customized setup by subclasses
self.setup()
self.process_data()
# create and add renderers to chart
renderers = self.yield_renderers()
if chart is None:
chart = Chart()
chart.add_renderers(self, renderers)
# handle ranges after renders, since ranges depend on aggregations
# ToDo: should reconsider where this occurs
self.set_ranges()
chart.add_ranges('x', self.x_range)
chart.add_ranges('y', self.y_range)
# sort the legend if we are told to
self._legends = self._sort_legend(
self.legend_sort_field, self.legend_sort_direction,
self._legends, self.attributes)
# always contribute legends, let Chart sort it out
chart.add_legend(self._legends)
chart.add_labels('x', self.xlabel)
chart.add_labels('y', self.ylabel)
chart.add_scales('x', self.xscale)
chart.add_scales('y', self.yscale)
if self.tooltips is not None:
tooltips = build_hover_tooltips(hover_spec=self.tooltips,
chart_cols=self.attribute_columns)
chart.add_tooltips(tooltips)
return chart
@classmethod
def generate_help(cls):
help_str = ''
for comp_glyph in cls.comp_glyph_types:
help_str += str(comp_glyph.glyph_properties())
return help_str
@staticmethod
def _sort_legend(legend_sort_field, legend_sort_direction, legends, attributes):
"""Sort legends sorted by looping though sort_legend items (
see :attr:`Builder.sort_legend` for more details)
"""
if legend_sort_field:
if len(attributes[legend_sort_field].columns) > 0:
# TODO(fpliger): attributes should be consistent and not
# need any type checking but for
# the moment it is not, specially when going
# though a process like binning or when data
# is built for HeatMap, Scatter, etc...
item_order = [x[0] if isinstance(x, tuple) else x
for x in attributes[legend_sort_field].items]
item_order = [str(x) if not isinstance(x, string_types)
else x for x in item_order]
                def sort_key(leg):
                    return item_order.index(leg[0])
                reverse = legend_sort_direction == 'descending'
                return list(sorted(legends, key=sort_key, reverse=reverse))
return legends
@property
def sort_legend(self):
deprecated((0, 12, 0), 'Chart.sort_legend', 'Chart.legend_sort_field')
return [(self.legend_sort_field, self.legend_sort_direction)]
@sort_legend.setter
def sort_legend(self, value):
deprecated((0, 12, 0), 'Chart.sort_legend', 'Chart.legend_sort_field')
self.legend_sort_field, direction = value[0]
if direction:
self.legend_sort_direction = "ascending"
else:
self.legend_sort_direction = "descending"
class XYBuilder(Builder):
"""Implements common functionality for XY Builders."""
x = Dimension('x')
y = Dimension('y')
dimensions = ['x', 'y']
req_dimensions = [['x'],
['y'],
['x', 'y']]
default_attributes = {'color': ColorAttr()}
def set_ranges(self):
"""Calculate and set the x and y ranges."""
# ToDo: handle when only single dimension is provided
extents = self.get_dim_extents()
endx = extents['x_max']
startx = extents['x_min']
self.x_range = self._get_range('x', startx, endx)
endy = extents['y_max']
starty = extents['y_min']
self.y_range = self._get_range('y', starty, endy)
if self.xlabel is None:
if self.x.selection is not None:
select = self.x.selection
if not isinstance(select, list):
select = [select]
else:
select = ['']
self.xlabel = ', '.join(select)
if self.ylabel is None:
if self.y.selection is not None:
select = self.y.selection
if not isinstance(select, list):
select = [select]
else:
select = ['']
self.ylabel = ', '.join(select)
def _get_range(self, dim, start, end):
"""Create a :class:`Range` for the :class:`Chart`.
Args:
dim (str): the name of the dimension, which is an attribute of the builder
start: the starting value of the range
end: the ending value of the range
Returns:
:class:`Range`
"""
dim_ref = getattr(self, dim)
values = dim_ref.data
dtype = dim_ref.dtype
sort = self.sort_dim.get(dim)
# object data or single value
if dtype.name == 'object':
factors = values.drop_duplicates()
if sort:
# TODO (fpliger): this handles pandas API change so users do not experience
# the related annoying deprecation warning. This is probably worth
# removing when pandas deprecated version (0.16) is "old" enough
try:
factors.sort_values(inplace=True)
except AttributeError:
factors.sort(inplace=True)
setattr(self, dim + 'scale', 'categorical')
return FactorRange(factors=factors.tolist())
elif np.issubdtype(dtype, np.datetime64):
setattr(self, dim + 'scale', 'datetime')
return Range1d(start=start, end=end)
else:
if end == 'None' or (end - start) == 0:
setattr(self, dim + 'scale', 'categorical')
return FactorRange(factors=['None'])
else:
diff = end - start
setattr(self, dim + 'scale', 'linear')
return Range1d(start=start - 0.1 * diff, end=end + 0.1 * diff)
class AggregateBuilder(Builder):
"""A base class for deriving specific builders performing aggregation with stats.
The typical AggregateBuilder takes a single dimension of values.
"""
values = Dimension('values')
default_attributes = {'label': CatAttr(),
'color': ColorAttr()}
|
|
"""
`pylibrabbitmq` backend for carrot, exposing the same interface as the
`amqplib`_ backend.
.. _`amqplib`: http://barryp.org/software/py-amqplib/
"""
import pylibrabbitmq as amqp
from pylibrabbitmq import ChannelError, ConnectionError
from carrot.backends.base import BaseMessage, BaseBackend
from itertools import count
import warnings
import weakref
DEFAULT_PORT = 5672
class Message(BaseMessage):
"""A message received by the broker.
    Usually you don't instantiate message objects yourself, but receive
them using a :class:`carrot.messaging.Consumer`.
:param backend: see :attr:`backend`.
:param amqp_message: see :attr:`_amqp_message`.
.. attribute:: body
The message body.
.. attribute:: delivery_tag
The message delivery tag, uniquely identifying this message.
.. attribute:: backend
The message backend used.
A subclass of :class:`carrot.backends.base.BaseBackend`.
.. attribute:: _amqp_message
        A :class:`pylibrabbitmq.Message` instance.
This is a private attribute and should not be accessed by
production code.
"""
def __init__(self, backend, amqp_message, **kwargs):
self._amqp_message = amqp_message
self.backend = backend
kwargs["body"] = amqp_message.body
properties = amqp_message.properties
kwargs["content_type"] = properties["content_type"]
kwargs["content_encoding"] = properties["content_encoding"]
kwargs["delivery_info"] = amqp_message.delivery_info
kwargs["delivery_tag"] = amqp_message.delivery_info["delivery_tag"]
super(Message, self).__init__(backend, **kwargs)
class Backend(BaseBackend):
"""amqplib backend
:param connection: see :attr:`connection`.
.. attribute:: connection
A :class:`carrot.connection.BrokerConnection` instance. An established
connection to the broker.
"""
default_port = DEFAULT_PORT
Message = Message
def __init__(self, connection, **kwargs):
self.connection = connection
self.default_port = kwargs.get("default_port", self.default_port)
self._channel_ref = None
@property
def _channel(self):
return callable(self._channel_ref) and self._channel_ref()
@property
def channel(self):
"""If no channel exists, a new one is requested."""
if not self._channel:
self._channel_ref = weakref.ref(self.connection.get_channel())
return self._channel
def establish_connection(self):
"""Establish connection to the AMQP broker."""
conninfo = self.connection
if not conninfo.hostname:
raise KeyError("Missing hostname for AMQP connection.")
if conninfo.userid is None:
raise KeyError("Missing user id for AMQP connection.")
if conninfo.password is None:
raise KeyError("Missing password for AMQP connection.")
if not conninfo.port:
conninfo.port = self.default_port
conn = amqp.Connection(host=conninfo.hostname,
port=conninfo.port,
userid=conninfo.userid,
password=conninfo.password,
virtual_host=conninfo.virtual_host)
return conn
def close_connection(self, connection):
"""Close the AMQP broker connection."""
connection.close()
def queue_exists(self, queue):
return True
def queue_delete(self, queue, if_unused=False, if_empty=False):
"""Delete queue by name."""
pass
def queue_purge(self, queue, **kwargs):
"""Discard all messages in the queue. This will delete the messages
and results in an empty queue."""
return self.channel.queue_purge(queue=queue)
def queue_declare(self, queue, durable, exclusive, auto_delete,
warn_if_exists=False):
"""Declare a named queue."""
return self.channel.queue_declare(queue=queue,
durable=durable,
exclusive=exclusive,
auto_delete=auto_delete)
def exchange_declare(self, exchange, type, durable, auto_delete):
"""Declare an named exchange."""
return self.channel.exchange_declare(exchange=exchange,
type=type,
durable=durable,
auto_delete=auto_delete)
def queue_bind(self, queue, exchange, routing_key, arguments=None):
"""Bind queue to an exchange using a routing key."""
return self.channel.queue_bind(queue=queue,
exchange=exchange,
routing_key=routing_key,
arguments=arguments)
def message_to_python(self, raw_message):
"""Convert encoded message body back to a Python value."""
return self.Message(backend=self, amqp_message=raw_message)
def get(self, queue, no_ack=False):
"""Receive a message from a declared queue by name.
:returns: A :class:`Message` object if a message was received,
        ``None`` otherwise. If ``None`` was returned, it probably means
        there were no messages waiting on the queue.
"""
raw_message = self.channel.basic_get(queue, no_ack=no_ack)
if not raw_message:
return None
return self.message_to_python(raw_message)
def declare_consumer(self, queue, no_ack, callback, consumer_tag,
nowait=False):
"""Declare a consumer."""
return self.channel.basic_consume(queue=queue,
no_ack=no_ack,
callback=callback,
consumer_tag=consumer_tag)
def consume(self, limit=None):
"""Returns an iterator that waits for one message at a time."""
        for total_message_count in count():
            if limit and total_message_count >= limit:
                return
            if not self.channel.is_open:
                return
self.channel.conn.drain_events()
yield True
def cancel(self, consumer_tag):
"""Cancel a channel by consumer tag."""
if not self.channel.conn:
return
self.channel.basic_cancel(consumer_tag)
def close(self):
"""Close the channel if open."""
if self._channel and self._channel.is_open:
self._channel.close()
self._channel_ref = None
def ack(self, delivery_tag):
"""Acknowledge a message by delivery tag."""
return self.channel.basic_ack(delivery_tag)
def reject(self, delivery_tag):
"""Reject a message by deliver tag."""
return self.channel.basic_reject(delivery_tag, requeue=False)
def requeue(self, delivery_tag):
"""Reject and requeue a message by delivery tag."""
return self.channel.basic_reject(delivery_tag, requeue=True)
def prepare_message(self, message_data, delivery_mode, priority=None,
content_type=None, content_encoding=None):
"""Encapsulate data into a AMQP message."""
return amqp.Message(message_data, properties={
"delivery_mode": delivery_mode,
"priority": priority,
"content_type": content_type,
"content_encoding": content_encoding})
def publish(self, message, exchange, routing_key, mandatory=None,
immediate=None, headers=None):
"""Publish a message to a named exchange."""
if headers:
message.properties["headers"] = headers
ret = self.channel.basic_publish(message, exchange=exchange,
routing_key=routing_key,
mandatory=mandatory,
immediate=immediate)
if mandatory or immediate:
self.close()
def qos(self, prefetch_size, prefetch_count, apply_global=False):
"""Request specific Quality of Service."""
pass
#self.channel.basic_qos(prefetch_size, prefetch_count,
# apply_global)
def flow(self, active):
"""Enable/disable flow from peer."""
pass
#self.channel.flow(active)
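# Illustrative sketch (not part of the original module): a declare/publish/get
# round trip using only the methods defined above. The exchange, queue and
# routing key names are arbitrary placeholders; nothing in this module calls it.
def _example_round_trip(connection):
    backend = Backend(connection)
    backend.exchange_declare("example.exchange", "direct",
                             durable=False, auto_delete=True)
    backend.queue_declare("example.queue", durable=False,
                          exclusive=False, auto_delete=True)
    backend.queue_bind("example.queue", "example.exchange", "example.key")
    message = backend.prepare_message("hello", delivery_mode=1,
                                      content_type="text/plain",
                                      content_encoding="utf-8")
    backend.publish(message, exchange="example.exchange",
                    routing_key="example.key")
    received = backend.get("example.queue")
    if received is not None:
        backend.ack(received.delivery_tag)
    return received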
|
|
# Copyright 2020 The UniqueRandomizer Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Estimating means by sampling Gumbels in hindsight."""
import collections
import functools
from absl import logging
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.special
from unique_randomizer import unique_randomizer as ur
def gumbel_log_survival(x):
"""Returns log P(g > x) for a standard Gumbel g.
log P(g > x) = log(1 - P(g < x)) = log(1 - exp(-exp(-x))). The implementation
is more numerically robust than a naive implementation of that formula.
Args:
x: The cutoff Gumbel value.
"""
# Adapted from
# https://gist.github.com/wouterkool/a3bb2aae8d6a80f985daae95252a8aa8.
y = np.exp(-x)
return np.where(x >= 10,
-x - y / 2 + y ** 2 / 24 - y ** 4 / 2880,
np.log(-np.expm1(-np.exp(-x))))
def truncated_gumbel(log_probability, upper_bound):
"""Samples a Gumbel for a log_probability, given an upper bound."""
# Adapted from https://cmaddis.github.io/gumbel-machinery.
if log_probability == -float('inf'):
return -float('inf')
gumbel = np.random.gumbel(loc=log_probability)
return -scipy.special.logsumexp([-gumbel, -upper_bound])
def hindsight_gumbels(log_probabilities):
"""Returns Gumbels that could have produced the samples with probabilities.
The returned list will have one more element than the input probabilities,
the last one being the maximum Gumbel for the remaining unsampled items. If
the samples are exhaustive (probabilities sum to 1), then the last Gumbel is
-inf.
Args:
log_probabilities: The log probabilities of sampled items, in the order that
they were sampled from a probability proportional to size without
replacement (PPSWOR) scheme.
"""
gumbels = []
unsampled_log_probability = 0.0
# Sample the maximum Gumbel for all items.
max_gumbel = np.random.gumbel(loc=unsampled_log_probability)
for item_log_probability in log_probabilities:
# The Gumbel for the next sampled item is exactly the maximum Gumbel across
# all remaining items.
gumbels.append(max_gumbel)
# Update the unsampled probability, now that we've sampled the next item.
unsampled_log_probability = ur.log_subtract(unsampled_log_probability,
item_log_probability)
# Sample a maximum Gumbel for the remaining unsampled items. This must be at
# most the previous maximum Gumbel.
max_gumbel = truncated_gumbel(unsampled_log_probability, max_gumbel)
# Append the maximum Gumbel for the remaining (truly-unsampled) items.
gumbels.append(max_gumbel)
assert len(gumbels) == 1 + len(log_probabilities)
# Allow a tiny amount of error in case of numerical instability.
if not all(g1 >= g2 - 1e-5 for g1, g2 in zip(gumbels, gumbels[1:])):
message = ('Issue in hindsight_gumbels.\n'
'log_probabilities = {}\n'
'gumbels = {}').format(
log_probabilities, gumbels)
    logging.warning(message)
print(message)
return gumbels
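# Illustrative sketch (not part of the original module): hindsight Gumbels for a
# tiny toy distribution. The probabilities are arbitrary and do not sum to 1, so
# the extra final entry is the (finite) max Gumbel of the unsampled remainder.
# Nothing in this module calls this function.
def _example_hindsight_gumbels():
  log_probabilities = np.log([0.5, 0.3, 0.1])
  gumbels = hindsight_gumbels(log_probabilities)
  assert len(gumbels) == len(log_probabilities) + 1
  return gumbels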
def setup_universe(universe_size):
"""Returns the universe of items, probabilities, and values."""
universe = list(range(universe_size))
probabilities = np.random.exponential(size=universe_size)
probabilities = probabilities ** 3 # Skew the probabilities.
probabilities /= np.sum(probabilities)
# Skew the values: items with larger probability likely have larger values.
values = np.random.normal(loc=np.log(probabilities), scale=0.5)
# Shift values so the minimum is zero.
values -= np.min(values)
return universe, probabilities, values
def ppswor_samples(universe, probabilities, num_samples):
"""Samples some items from the universe, using a PPSWOR scheme."""
results = []
not_sampled = list(universe)
for _ in range(num_samples):
unsampled_probs = probabilities[not_sampled]
normalized_probs = unsampled_probs / np.sum(unsampled_probs)
index = np.random.choice(np.arange(len(not_sampled)), p=normalized_probs)
sample = not_sampled[index]
results.append((sample, probabilities[sample], normalized_probs[index]))
not_sampled.remove(sample)
# This is a list of triples (sample, initial prob., conditional prob.).
return results
def hindsight_gumbel_estimation(
universe, probabilities, values, num_samples, normalize, all_samples=None):
"""Hindsight Gumbel Estimation."""
# Allow repeated_hindsight_gumbel_estimation.
if all_samples is None:
results = ppswor_samples(universe, probabilities, num_samples)
all_samples = [result[0] for result in results]
# Item probabilities and values, in the order that they were sampled.
ordered_probabilities = probabilities[all_samples]
ordered_values = values[all_samples]
num_samples = len(all_samples)
estimations = [] # One estimate for every k = 1, ..., num_samples.
gumbels = hindsight_gumbels(np.log(ordered_probabilities))
for k in range(1, num_samples + 1):
# Use the first k samples for estimation. The threshold Gumbel comes from
# the (k+1)-th sample, or equivalently the "remaining" probability mass
# (we don't actually need a concrete (k+1)-th sample).
threshold_gumbel = gumbels[k]
p = ordered_probabilities[:k]
if k == len(universe):
# Otherwise we'll get a warning, if gumbels[k] == -float('inf').
q = 1
else:
q = np.exp(gumbel_log_survival(threshold_gumbel - np.log(p)))
weight = p / q
if normalize:
weight /= np.sum(weight)
estimate = np.dot(weight, ordered_values[:k])
estimations.append(estimate)
return estimations
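# Illustrative sketch (not part of the original module): one HGE run on a small
# synthetic universe, compared against the exact mean. The sizes and the seed
# are arbitrary; nothing in this module calls this function.
def _example_hindsight_gumbel_estimation(seed=0):
  np.random.seed(seed)
  universe, probabilities, values = setup_universe(20)
  estimates = hindsight_gumbel_estimation(
      universe, probabilities, values, num_samples=10, normalize=True)
  exact = np.dot(probabilities, values)
  return estimates, exact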
def repeated_hindsight_gumbel_estimation(
universe, probabilities, values, num_samples, normalize, repetitions):
"""Uses Hindsight Gumbel Estimation multiple times with different Gumbels."""
# Use the same samples for each repetition!
results = ppswor_samples(universe, probabilities, num_samples)
all_samples = [result[0] for result in results]
estimations_list = []
for _ in range(repetitions):
estimations = hindsight_gumbel_estimation(
universe, probabilities, values, num_samples, normalize,
all_samples=all_samples) # Provide consistent samples.
estimations_list.append(estimations)
return np.mean(estimations_list, axis=0)
def ppswor_priority_sampling(
universe, probabilities, values, num_samples, normalize):
"""Priority Sampling using a PPSWOR sampling scheme."""
# Adapted from
# https://github.com/timvieira/blog/blob/master/content/notebook/Priority%20Sampling.ipynb.
universe_size = len(universe)
p = probabilities
f = values
u = np.random.uniform(0, 1, size=universe_size)
key = -np.log(u) / p # ~ Exp(p[i])
# key = np.random.exponential(scale=1/p) # Equivalent to the line above.
order = np.argsort(key) # Item indices in the order they're chosen.
ordered_keys = key[order]
estimations = np.zeros(num_samples)
for k in range(1, num_samples + 1):
t = ordered_keys[k] if k < universe_size else np.inf # Threshold.
s = order[:k] # First k sampled items.
q = 1 - np.exp(-p*t) # = p(i in s | t).
weights_s = p[s] / q[s]
if normalize:
weights_s /= np.sum(weights_s)
estimations[k-1] = f[s].dot(weights_s)
return estimations
def monte_carlo_sampling(universe, probabilities, values, num_samples):
"""Traditional Monte Carlo sampling with replacement."""
# Adapted from
# https://github.com/timvieira/blog/blob/master/content/notebook/Priority%20Sampling.ipynb.
samples = np.random.choice(universe, size=num_samples, p=probabilities,
replace=True)
return np.cumsum(values[samples]) / (1 + np.arange(num_samples))
def create_plots(filename, seed=123):
"""Creates plots for the paper."""
np.random.seed(seed)
universe_size = 100
num_samples = 100
estimation_repetitions = 2000
universe, probabilities, original_values = setup_universe(universe_size)
# Manipulate values here.
values = original_values
exact = np.dot(probabilities, values)
print('Exact value: {}'.format(exact))
estimation_methods = [
('HGE',
functools.partial(hindsight_gumbel_estimation, normalize=False),
'#4285F4'), # Google blue.
('HGE, norm.',
functools.partial(hindsight_gumbel_estimation, normalize=True),
'#0F9D58'), # Google green.
('Repeated HGE (x10)',
functools.partial(repeated_hindsight_gumbel_estimation,
repetitions=10,
normalize=False),
'#F4B400'), # Google yellow.
('Repeated HGE (x10), norm.',
functools.partial(repeated_hindsight_gumbel_estimation,
repetitions=10,
normalize=True),
'#DB4437'), # Google red.
# ('PPSWOR Priority Sampling',
# functools.partial(ppswor_priority_sampling, normalize=False),
# 'red'),
# ('PPSWOR Priority Sampling, Normalized',
# functools.partial(ppswor_priority_sampling, normalize=True),
# 'darkorange'),
('Monte Carlo sampling', monte_carlo_sampling, '#9E9E9E') # Google gray.
]
estimations_k = list(range(1, num_samples + 1))
all_estimators_data = collections.defaultdict(list)
for _ in range(estimation_repetitions):
for name, method, _ in estimation_methods:
estimations = method(universe, probabilities, values, num_samples)
all_estimators_data[name].append(estimations)
matplotlib.rcParams.update({'font.size': 12})
plt.figure(facecolor='w', edgecolor='k', figsize=[6.4, 4.8])
for name, _, color in estimation_methods:
data = all_estimators_data[name]
# Cut off first point to reduce noise in the plot.
cut_data = [x[1:] for x in data]
cut_estimations_k = estimations_k[1:]
plt.plot(cut_estimations_k, np.percentile(cut_data, 95, axis=0),
color=color, linestyle=':', alpha=0.5)
plt.plot(cut_estimations_k, np.percentile(cut_data, 5, axis=0),
color=color, linestyle=':', alpha=0.5)
plt.plot(cut_estimations_k, np.percentile(cut_data, 25, axis=0),
color=color, linestyle='-', label=name)
plt.plot(cut_estimations_k, np.percentile(cut_data, 75, axis=0),
color=color, linestyle='-')
plt.title('HGE Variations on Synthetic Data')
plt.axhline(y=exact, color='k', linestyle='--', label='Exact value')
plt.ylim(exact - 1, exact + 1)
plt.ylabel('Estimate')
plt.xlim(0, num_samples)
plt.xlabel('Number of Samples')
plt.legend(loc='upper right', fontsize=10)
print('Saving plot to {}'.format(filename))
plt.savefig(filename)
|
|
"""
homeassistant.components.device_tracker.tplink
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Device tracker platform that supports scanning a TP-Link router for device
presence.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.tplink/
"""
import base64
import logging
import re
import threading
from datetime import timedelta
import requests
from homeassistant.components.device_tracker import DOMAIN
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import validate_config
from homeassistant.util import Throttle
# Return cached results if last scan was less than this time ago
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=5)
_LOGGER = logging.getLogger(__name__)
def get_scanner(hass, config):
""" Validates config and returns a TP-Link scanner. """
if not validate_config(config,
{DOMAIN: [CONF_HOST, CONF_USERNAME, CONF_PASSWORD]},
_LOGGER):
return None
scanner = Tplink3DeviceScanner(config[DOMAIN])
if not scanner.success_init:
scanner = Tplink2DeviceScanner(config[DOMAIN])
if not scanner.success_init:
scanner = TplinkDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
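# Example configuration.yaml entry (illustrative; host and credentials are
# placeholders):
#
# device_tracker:
#   platform: tplink
#   host: 192.168.1.1
#   username: admin
#   password: YOUR_PASSWORD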
class TplinkDeviceScanner(object):
"""
This class queries a wireless router running TP-Link firmware
for connected devices.
"""
def __init__(self, config):
host = config[CONF_HOST]
username, password = config[CONF_USERNAME], config[CONF_PASSWORD]
self.parse_macs = re.compile('[0-9A-F]{2}-[0-9A-F]{2}-[0-9A-F]{2}-' +
'[0-9A-F]{2}-[0-9A-F]{2}-[0-9A-F]{2}')
self.host = host
self.username = username
self.password = password
self.last_results = {}
self.lock = threading.Lock()
self.success_init = self._update_info()
def scan_devices(self):
"""
        Scan for new devices and return a list containing found device ids.
"""
self._update_info()
return self.last_results
# pylint: disable=no-self-use
def get_device_name(self, device):
"""
The TP-Link firmware doesn't save the name of the wireless device.
"""
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""
        Ensure the information from the TP-Link router is up to date.
        Return a boolean indicating whether the scan was successful.
"""
with self.lock:
_LOGGER.info("Loading wireless clients...")
url = 'http://{}/userRpm/WlanStationRpm.htm'.format(self.host)
referer = 'http://{}'.format(self.host)
page = requests.get(url, auth=(self.username, self.password),
headers={'referer': referer})
result = self.parse_macs.findall(page.text)
if result:
self.last_results = [mac.replace("-", ":") for mac in result]
return True
return False
class Tplink2DeviceScanner(TplinkDeviceScanner):
"""
This class queries a wireless router running newer version of TP-Link
firmware for connected devices.
"""
def scan_devices(self):
"""
        Scan for new devices and return a list containing found device ids.
"""
self._update_info()
return self.last_results.keys()
# pylint: disable=no-self-use
def get_device_name(self, device):
"""
        Return the name of the given device or None if it is unknown.
"""
return self.last_results.get(device)
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""
        Ensure the information from the TP-Link router is up to date.
        Return a boolean indicating whether the scan was successful.
"""
with self.lock:
_LOGGER.info("Loading wireless clients...")
url = 'http://{}/data/map_access_wireless_client_grid.json' \
.format(self.host)
referer = 'http://{}'.format(self.host)
# Router uses Authorization cookie instead of header
# Let's create the cookie
username_password = '{}:{}'.format(self.username, self.password)
b64_encoded_username_password = base64.b64encode(
username_password.encode('ascii')
).decode('ascii')
cookie = 'Authorization=Basic {}' \
.format(b64_encoded_username_password)
response = requests.post(url, headers={'referer': referer,
'cookie': cookie})
try:
result = response.json().get('data')
except ValueError:
_LOGGER.error("Router didn't respond with JSON. "
"Check if credentials are correct.")
return False
if result:
self.last_results = {
device['mac_addr'].replace('-', ':'): device['name']
for device in result
}
return True
return False
class Tplink3DeviceScanner(TplinkDeviceScanner):
"""
This class queries the Archer C9 router running version 150811 or higher
of TP-Link firmware for connected devices.
"""
def __init__(self, config):
self.stok = ''
self.sysauth = ''
super(Tplink3DeviceScanner, self).__init__(config)
def scan_devices(self):
"""
        Scan for new devices and return a list containing found device ids.
"""
self._update_info()
return self.last_results.keys()
# pylint: disable=no-self-use
def get_device_name(self, device):
"""
The TP-Link firmware doesn't save the name of the wireless device.
We are forced to use the MAC address as name here.
"""
return self.last_results.get(device)
def _get_auth_tokens(self):
"""
Retrieves auth tokens from the router.
"""
_LOGGER.info("Retrieving auth tokens...")
url = 'http://{}/cgi-bin/luci/;stok=/login?form=login' \
.format(self.host)
referer = 'http://{}/webpages/login.html'.format(self.host)
# if possible implement rsa encryption of password here
response = requests.post(url,
params={'operation': 'login',
'username': self.username,
'password': self.password},
headers={'referer': referer})
try:
self.stok = response.json().get('data').get('stok')
_LOGGER.info(self.stok)
regex_result = re.search('sysauth=(.*);',
response.headers['set-cookie'])
self.sysauth = regex_result.group(1)
_LOGGER.info(self.sysauth)
return True
except ValueError:
_LOGGER.error("Couldn't fetch auth tokens!")
return False
@Throttle(MIN_TIME_BETWEEN_SCANS)
def _update_info(self):
"""
        Ensure the information from the TP-Link router is up to date.
        Return a boolean indicating whether the scan was successful.
"""
with self.lock:
if (self.stok == '') or (self.sysauth == ''):
self._get_auth_tokens()
_LOGGER.info("Loading wireless clients...")
url = ('http://{}/cgi-bin/luci/;stok={}/admin/wireless?'
'form=statistics').format(self.host, self.stok)
referer = 'http://{}/webpages/index.html'.format(self.host)
response = requests.post(url,
params={'operation': 'load'},
headers={'referer': referer},
cookies={'sysauth': self.sysauth})
try:
json_response = response.json()
if json_response.get('success'):
result = response.json().get('data')
else:
if json_response.get('errorcode') == 'timeout':
_LOGGER.info("Token timed out. "
"Relogging on next scan.")
self.stok = ''
self.sysauth = ''
return False
else:
_LOGGER.error("An unknown error happened "
"while fetching data.")
return False
except ValueError:
_LOGGER.error("Router didn't respond with JSON. "
"Check if credentials are correct.")
return False
if result:
self.last_results = {
device['mac'].replace('-', ':'): device['mac']
for device in result
}
return True
return False
|
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils as json
from sahara.plugins import provisioning as p
from sahara.utils import files as f
CDH5_UBUNTU_REPO = ('deb [arch=amd64] http://archive.cloudera.com/cdh5'
'/ubuntu/precise/amd64/cdh precise-cdh5.0.0 contrib'
'\ndeb-src http://archive.cloudera.com/cdh5/ubuntu'
'/precise/amd64/cdh precise-cdh5.0.0 contrib')
DEFAULT_CDH5_UBUNTU_REPO_KEY_URL = ('http://archive.cloudera.com/cdh5/ubuntu'
'/precise/amd64/cdh/archive.key')
CM5_UBUNTU_REPO = ('deb [arch=amd64] http://archive.cloudera.com/cm5'
'/ubuntu/precise/amd64/cm precise-cm5.0.0 contrib'
'\ndeb-src http://archive.cloudera.com/cm5/ubuntu'
'/precise/amd64/cm precise-cm5.0.0 contrib')
DEFAULT_CM5_UBUNTU_REPO_KEY_URL = ('http://archive.cloudera.com/cm5/ubuntu'
'/precise/amd64/cm/archive.key')
CDH5_CENTOS_REPO = ('[cloudera-cdh5]'
'\nname=Cloudera\'s Distribution for Hadoop, Version 5'
'\nbaseurl=http://archive.cloudera.com/cdh5/redhat/6'
'/x86_64/cdh/5.0.0/'
'\ngpgkey = http://archive.cloudera.com/cdh5/redhat/6'
'/x86_64/cdh/RPM-GPG-KEY-cloudera'
'\ngpgcheck = 1')
CM5_CENTOS_REPO = ('[cloudera-manager]'
'\nname=Cloudera Manager'
'\nbaseurl=http://archive.cloudera.com/cm5/redhat/6'
'/x86_64/cm/5.0.0/'
'\ngpgkey = http://archive.cloudera.com/cm5/redhat/6'
'/x86_64/cm/RPM-GPG-KEY-cloudera'
'\ngpgcheck = 1')
DEFAULT_SWIFT_LIB_URL = ('https://repository.cloudera.com/artifactory/repo/org'
'/apache/hadoop/hadoop-openstack/2.3.0-cdh5.0.0'
'/hadoop-openstack-2.3.0-cdh5.0.0.jar')
DEFAULT_EXTJS_LIB_URL = 'http://extjs.com/deploy/ext-2.2.zip'
CDH5_REPO_URL = p.Config(
'CDH5 repo list URL', 'general', 'cluster', priority=1,
default_value="")
CDH5_REPO_KEY_URL = p.Config(
'CDH5 repo key URL (for debian-based only)', 'general', 'cluster',
priority=1, default_value="")
CM5_REPO_URL = p.Config(
'CM5 repo list URL', 'general', 'cluster', priority=1,
default_value="")
CM5_REPO_KEY_URL = p.Config(
'CM5 repo key URL (for debian-based only)', 'general', 'cluster',
priority=1, default_value="")
ENABLE_SWIFT = p.Config('Enable Swift', 'general', 'cluster',
config_type='bool', priority=1,
default_value=True)
ENABLE_HBASE_COMMON_LIB = p.Config('Enable HBase Common Lib',
'general', 'cluster', config_type='bool',
priority=1, default_value=True)
SWIFT_LIB_URL = p.Config(
'Hadoop OpenStack library URL', 'general', 'cluster', priority=1,
default_value=DEFAULT_SWIFT_LIB_URL,
description=("Library that adds Swift support to CDH. The file will be "
"downloaded from VM."))
EXTJS_LIB_URL = p.Config(
"ExtJS library URL", 'general', 'cluster', priority=1,
default_value=DEFAULT_EXTJS_LIB_URL,
description=("Ext 2.2 library is required for Oozie Web Console. "
"The file will be downloaded from VM with oozie."))
AWAIT_AGENTS_TIMEOUT = p.Config(
'Await Cloudera agents timeout', 'general', 'cluster', config_type='int',
priority=1, default_value=300, is_optional=True,
description="Timeout for Cloudera agents connecting to Coudera Manager, "
"in seconds")
AWAIT_MANAGER_STARTING_TIMEOUT = p.Config(
'Timeout for Cloudera Manager starting', 'general', 'cluster',
config_type='int', priority=1, default_value=300, is_optional=True,
description='Timeout for Cloudera Manager starting, in seconds')
def _get_cluster_plugin_configs():
return [CDH5_REPO_URL, CDH5_REPO_KEY_URL, CM5_REPO_URL, CM5_REPO_KEY_URL,
ENABLE_SWIFT, ENABLE_HBASE_COMMON_LIB, SWIFT_LIB_URL,
EXTJS_LIB_URL, AWAIT_MANAGER_STARTING_TIMEOUT,
AWAIT_AGENTS_TIMEOUT]
# ng wide configs
def _load_json(path_to_file):
data = f.get_file_text(path_to_file)
return json.loads(data)
path_to_config = 'plugins/cdh/v5/resources/'
hdfs_confs = _load_json(path_to_config + 'hdfs-service.json')
namenode_confs = _load_json(path_to_config + 'hdfs-namenode.json')
datanode_confs = _load_json(path_to_config + 'hdfs-datanode.json')
secnamenode_confs = _load_json(path_to_config + 'hdfs-secondarynamenode.json')
yarn_confs = _load_json(path_to_config + 'yarn-service.json')
resourcemanager_confs = _load_json(
path_to_config + 'yarn-resourcemanager.json')
nodemanager_confs = _load_json(path_to_config + 'yarn-nodemanager.json')
jobhistory_confs = _load_json(path_to_config + 'yarn-jobhistory.json')
oozie_service_confs = _load_json(path_to_config + 'oozie-service.json')
oozie_role_confs = _load_json(path_to_config + 'oozie-oozie.json')
hive_service_confs = _load_json(path_to_config + 'hive-service.json')
hive_metastore_confs = _load_json(path_to_config + 'hive-metastore.json')
hive_hiveserver_confs = _load_json(path_to_config + 'hive-hiveserver2.json')
hive_webhcat_confs = _load_json(path_to_config + 'hive-webhcat.json')
hue_service_confs = _load_json(path_to_config + 'hue-service.json')
hue_role_confs = _load_json(path_to_config + 'hue-hue.json')
spark_service_confs = _load_json(path_to_config + 'spark-service.json')
spark_role_confs = _load_json(path_to_config + 'spark-history.json')
zookeeper_server_confs = _load_json(path_to_config + 'zookeeper-server.json')
zookeeper_service_confs = _load_json(path_to_config + 'zookeeper-service.json')
hbase_confs = _load_json(path_to_config + 'hbase-service.json')
master_confs = _load_json(path_to_config + 'hbase-master.json')
regionserver_confs = _load_json(path_to_config + 'hbase-regionserver.json')
priority_one_confs = _load_json(path_to_config + 'priority-one-confs.json')
def _prepare_value(value):
if not value:
return ""
return value.replace('\n', ' ')
def _init_configs(confs, app_target, scope):
cfgs = []
for cfg in confs:
priority = 1 if cfg['name'] in priority_one_confs else 2
c = p.Config(cfg['name'], app_target, scope, priority=priority,
default_value=_prepare_value(cfg['value']),
description=cfg['desc'], is_optional=True)
cfgs.append(c)
return cfgs
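# Each resources/*.json file loaded above is assumed to hold a list of entries
# shaped like the following (illustrative values only):
#
#     [{"name": "dfs_replication",
#       "value": "3",
#       "desc": "Default block replication."}]
#
# _init_configs() turns every entry into a sahara Config, giving it priority 1
# when its name appears in priority-one-confs.json and priority 2 otherwise.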
def _get_ng_plugin_configs():
cfg = []
cfg += _init_configs(hdfs_confs, 'HDFS', 'cluster')
cfg += _init_configs(namenode_confs, 'NAMENODE', 'node')
cfg += _init_configs(datanode_confs, 'DATANODE', 'node')
cfg += _init_configs(secnamenode_confs, 'SECONDARYNAMENODE', 'node')
cfg += _init_configs(yarn_confs, 'YARN', 'cluster')
cfg += _init_configs(resourcemanager_confs, 'RESOURCEMANAGER', 'node')
cfg += _init_configs(nodemanager_confs, 'NODEMANAGER', 'node')
cfg += _init_configs(jobhistory_confs, 'JOBHISTORY', 'node')
cfg += _init_configs(oozie_service_confs, 'OOZIE', 'cluster')
cfg += _init_configs(oozie_role_confs, 'OOZIE', 'node')
cfg += _init_configs(hive_service_confs, 'HIVE', 'cluster')
cfg += _init_configs(hive_metastore_confs, 'HIVEMETASTORE', 'node')
cfg += _init_configs(hive_hiveserver_confs, 'HIVESERVER', 'node')
cfg += _init_configs(hive_webhcat_confs, 'WEBHCAT', 'node')
cfg += _init_configs(hue_service_confs, 'HUE', 'cluster')
cfg += _init_configs(hue_role_confs, 'HUE', 'node')
cfg += _init_configs(spark_service_confs, 'SPARK_ON_YARN', 'cluster')
cfg += _init_configs(spark_role_confs, 'SPARK_ON_YARN', 'node')
cfg += _init_configs(zookeeper_service_confs, 'ZOOKEEPER', 'cluster')
cfg += _init_configs(zookeeper_server_confs, 'ZOOKEEPER', 'node')
cfg += _init_configs(hbase_confs, 'HBASE', 'cluster')
cfg += _init_configs(master_confs, 'MASTER', 'node')
cfg += _init_configs(regionserver_confs, 'REGIONSERVER', 'node')
return cfg
def get_plugin_configs():
cluster_wide = _get_cluster_plugin_configs()
ng_wide = _get_ng_plugin_configs()
return cluster_wide + ng_wide
def _get_config_value(cluster, key):
return cluster.cluster_configs.get(
'general', {}).get(key.name, key.default_value)
def get_cdh5_repo_url(cluster):
return _get_config_value(cluster, CDH5_REPO_URL)
def get_cdh5_key_url(cluster):
return _get_config_value(cluster, CDH5_REPO_KEY_URL)
def get_cm5_repo_url(cluster):
return _get_config_value(cluster, CM5_REPO_URL)
def get_cm5_key_url(cluster):
return _get_config_value(cluster, CM5_REPO_KEY_URL)
def is_swift_enabled(cluster):
return _get_config_value(cluster, ENABLE_SWIFT)
def is_hbase_common_lib_enabled(cluster):
return _get_config_value(cluster, ENABLE_HBASE_COMMON_LIB)
def get_swift_lib_url(cluster):
return _get_config_value(cluster, SWIFT_LIB_URL)
def get_extjs_lib_url(cluster):
return _get_config_value(cluster, EXTJS_LIB_URL)
|
|
from decimal import Decimal
from django.utils.translation import ugettext_lazy as _
from payment.modules.base import BasePaymentProcessor, ProcessorResult
from satchmo_utils.numbers import trunc_decimal
from payflowpro.client import PayflowProClient
from payflowpro.classes import (CreditCard, Amount, Address, ShippingAddress,
CustomerInfo)
class PaymentProcessor(BasePaymentProcessor):
"""
PayflowPro payment processing module
You must have an account with PayPal in order to use this module.
"""
def __init__(self, settings):
super(PaymentProcessor, self).__init__('payflowpro', settings)
partner = self.settings.PARTNER.value
vendor = self.settings.VENDOR.value
username = self.settings.USER.value
password = self.settings.PASSWORD.value
testing = not self.settings.LIVE.value
if testing:
url_base = PayflowProClient.URL_BASE_TEST
else:
url_base = PayflowProClient.URL_BASE_LIVE
self.payflow = PayflowProClient(partner=partner, vendor=vendor,
username=username, password=password,
url_base=url_base)
def get_charge_data(self, amount=None):
"""
Build the dictionary needed to process a credit card charge.
Return: a dictionary with the following key-values:
        * log_string: the transaction data without the sensitive
buyer data. Suitable for logs.
* credit_card, amount, address, ship_address, customer_info :
the payflowpro.classes.* instances to be passed to
self.payflow
"""
order = self.order
if amount is None:
amount = order.balance
balance = trunc_decimal(amount, 2)
ret = {
'credit_card': CreditCard(
acct=order.credit_card.decryptedCC,
expdate="%02d%02d" % (order.credit_card.expire_month,
order.credit_card.expire_year % 100),
cvv2=order.credit_card.ccv,
),
'amount': Amount(amt=balance,),
'address': Address(
street=order.full_bill_street,
zip=order.bill_postal_code,
city=order.bill_city,
state=order.bill_state,
country=order.bill_country,
),
'ship_address': ShippingAddress(
shiptostreet=order.full_ship_street,
shiptocity=order.ship_city,
shiptofirstname=order.ship_first_name,
shiptolastname=order.ship_last_name,
shiptostate=order.ship_state,
shiptocountry=order.ship_country,
shiptozip=order.ship_postal_code,
),
'customer_info': CustomerInfo(
firstname=order.bill_first_name,
lastname=order.bill_last_name,
),
}
redacted_data = ret.copy()
redacted_data['credit_card'] = {
'acct': order.credit_card.display_cc,
'expdate': "%d%d" % (order.credit_card.expire_year,
order.credit_card.expire_month),
'cvv2': "REDACTED",
}
dicts = [getattr(d, 'data', d) for d in redacted_data.values()]
ret['log_string'] = "\n".join("%s: %s" % (k, v) for d in dicts
for k, v in d.items())
return ret
def _handle_unconsumed(self, unconsumed_data):
"""
Handler for when we've got unconsumed data from the response
"""
if unconsumed_data:
self.log.warn("Something went wrong with python-payflowpro. "
"We got some unconsumed data: %s" %
str(unconsumed_data))
def _log_responses(self, responses):
"""
Log the responses from PayflowPro for debugging
"""
self.log_extra("Response variables from payflowpro:")
for response in responses:
self.log_extra("%(classname)s: %(response_fields)s" % {
'classname': response.__class__.__name__,
'response_fields': "%s" % response.data })
def authorize_payment(self, order=None, amount=None, testing=False):
"""
Authorize a single payment.
Returns: ProcessorResult
"""
if order:
self.prepare_data(order)
else:
order = self.order
if order.paid_in_full:
self.log_extra('%s is paid in full, no authorization attempted.',
order)
result = ProcessorResult(self.key, True,
_("No charge needed, paid in full."))
else:
self.log_extra('Authorizing payment of %s for %s', amount, order)
data = self.get_charge_data(amount=amount)
data['extras'] = [data['address'], data['ship_address'],
data['customer_info'],]
result = self.send_post(data=data, testing=testing,
post_func=self.send_authorize_post,)
return result
def can_authorize(self):
return True
#def can_recur_bill(self):
# return True
def capture_authorized_payment(self, authorization, testing=False,
order=None, amount=None):
"""
Capture a single payment
"""
if order:
self.prepare_data(order)
else:
order = self.order
if order.authorized_remaining == Decimal('0.00'):
self.log_extra('No remaining authorizations on %s', order)
return ProcessorResult(self.key, True, _("Already complete"))
self.log_extra('Capturing Authorization #%i for %s',
authorization.id, order)
data = self.get_charge_data()
data['authorization_id'] = authorization.transaction_id
result = self.send_post(data=data, testing=testing,
post_func=self.send_capture_post,)
return result
def capture_payment(self, testing=False, order=None, amount=None):
"""
Process payments without an authorization step.
"""
if order:
self.prepare_data(order)
else:
order = self.order
if order.paid_in_full:
self.log_extra('%s is paid in full, no capture attempted.', order)
result = ProcessorResult(self.key, True, _("No charge needed, "
"paid in full."))
self.record_payment()
else:
self.log_extra('Capturing payment for %s', order)
data = self.get_charge_data(amount=amount)
data['extras'] = [data['address'], data['ship_address'],
data['customer_info'],]
result = self.send_post(data=data, post_func=self.send_sale_post,
testing=testing,)
return result
def release_authorized_payment(self, order=None, auth=None, testing=False):
"""Release a previously authorized payment."""
if order:
self.prepare_data(order)
else:
order = self.order
self.log_extra('Releasing Authorization #%i for %s', auth.id, order)
data = self.get_charge_data()
data['authorization_id'] = auth.transaction_id
result = self.send_post(data=data, post_func=self.send_release_post,
testing=testing)
if result.success:
auth.complete = True
auth.save()
return result
def send_authorize_post(self, data):
"""
Authorize sell with PayflowPro
"""
responses, unconsumed_data = self.payflow.authorization(
credit_card=data['credit_card'], amount=data['amount'],
extras=data['extras'])
return responses, unconsumed_data, self.record_authorization
def send_capture_post(self, data):
"""
Capture previously authorized sale
"""
responses, unconsumed_data = self.payflow.capture(
data['authorization_id'])
return responses, unconsumed_data, self.record_payment
def send_release_post(self, data):
"""
Release previously authorized sale
"""
responses, unconsumed_data = self.payflow.void(
data['authorization_id'])
def nothing(*args, **kwargs):
return None
return responses, unconsumed_data, nothing
def send_sale_post(self, data):
"""
Immediately charge a credit card
"""
responses, unconsumed_data = self.payflow.sale(
credit_card=data['credit_card'], amount=data['amount'],
extras=data['extras'])
return responses, unconsumed_data, self.record_payment
def send_post(self, data, post_func, testing=False):
"""
Execute the post to PayflowPro.
Params:
- data: the argument expected by `post_func`. Usually a dict which this
function knows how to use
- post_func: a function that takes `data` as argument, and sends the
actual request to the PayflowPro Gateway. It should return
a 3-tuple (responses, unconsumed_data, record_* function)
- testing: if true, then don't record the payment
Returns:
- ProcessorResult
"""
self.log_extra("About to send PayflowPro a request: %s",
data['log_string'])
if 'amount' in data:
amount = data['amount'].amt
else:
amount = self.order.balance
responses, unconsumed_data, record_function = post_func(data)
self._handle_unconsumed(unconsumed_data)
self._log_responses(responses)
response = responses[0]
success = response.result == '0'
transaction_id = response.pnref
response_text = response.respmsg
        reason_code = response.result
        # Ensure `payment` is defined even when testing=True skips recording it.
        payment = None
if success:
# success!
self.log.info("successful %s for order #%d",
post_func.__name__, self.order.id)
if not testing:
self.log_extra("Success, calling %s", record_function.__name__)
payment = record_function(
order=self.order, amount=amount,
transaction_id=transaction_id, reason_code=reason_code)
else:
# failure =(
self.log.info("failed %s for order #%d",
post_func.__name__, self.order.id)
if not testing:
payment = self.record_failure(
amount=amount, transaction_id=transaction_id,
reason_code=reason_code, details=response_text)
self.log_extra("Returning success=%s, reason=%s, response_text=%s",
success, reason_code, response_text)
return ProcessorResult(self.key, success, response_text,
payment=payment)
if __name__ == "__main__":
"""
This is for testing - enabling you to run from the command line and make
sure everything is ok
"""
import os
from livesettings import config_get_group
# Set up some dummy classes to mimic classes being passed through Satchmo
class testContact(object):
pass
class testCC(object):
pass
class testOrder(object):
def __init__(self):
self.contact = testContact()
self.credit_card = testCC()
def order_success(self):
pass
    if "DJANGO_SETTINGS_MODULE" not in os.environ:
os.environ["DJANGO_SETTINGS_MODULE"] = "satchmo_store.settings"
settings_module = os.environ['DJANGO_SETTINGS_MODULE']
settingsl = settings_module.split('.')
settings = __import__(settings_module, {}, {}, settingsl[-1])
sampleOrder = testOrder()
sampleOrder.contact.first_name = 'Chris'
sampleOrder.contact.last_name = 'Smith'
sampleOrder.contact.primary_phone = '801-555-9242'
sampleOrder.full_bill_street = '123 Main Street'
sampleOrder.bill_postal_code = '12345'
sampleOrder.bill_state = 'TN'
sampleOrder.bill_city = 'Some City'
sampleOrder.bill_country = 'US'
sampleOrder.total = "27.01"
sampleOrder.balance = "27.01"
sampleOrder.credit_card.decryptedCC = '6011000000000012'
sampleOrder.credit_card.expirationDate = "10/11"
sampleOrder.credit_card.ccv = "144"
authorize_settings = config_get_group('PAYMENT_PAYFLOWPRO')
if authorize_settings.LIVE.value:
print ("Warning. You are submitting a live order. PAYFLOWPRO system "
"is set LIVE.")
processor = PaymentProcessor(authorize_settings)
processor.prepare_data(sampleOrder)
results = processor.process(testing=True)
print results
|
|
import sys
import os
import json
import re
import io
import urllib
import hashlib
import requests
import logging
import shutil
import time
import traceback
from requests_toolbelt import MultipartEncoder
from multiprocessing import Pool
#from functools import partial
import subprocess
from zipfile import ZipFile
from os import listdir
from os.path import isfile, join
try:
from biokbase.HandleService.Client import HandleService
except:
from biokbase.AbstractHandle.Client import AbstractHandle as HandleService
from biokbase.workspace.client import Workspace
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()
def stderrlogger(name, level=logging.INFO):
"""
Return a standard python logger with a stderr handler attached and using a prefix
format that will make logging consistent between scripts.
"""
logger = logging.getLogger(name)
logger.setLevel(level)
# send messages to sys.stderr
streamHandler = logging.StreamHandler(sys.stderr)
formatter = logging.Formatter("%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s")
formatter.converter = time.gmtime
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
return logger
def stdoutlogger(name, level=logging.INFO):
"""
Return a standard python logger with a stdout handler attached and using a prefix
format that will make logging consistent between scripts.
"""
logger = logging.getLogger(name)
logger.setLevel(level)
    # send messages to sys.stdout
streamHandler = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter("%(asctime)s - %(filename)s - %(lineno)d - %(levelname)s - %(message)s")
formatter.converter = time.gmtime
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
return logger
def zip_files(logger, src_path, output_fn):
"""
Compress all index files (not directory) into an output zip file on disk.
"""
files = [ f for f in listdir(src_path) if isfile(join(src_path,f)) ]
with ZipFile(output_fn, 'w') as izip:
for f in files:
izip.write(join(src_path,f),f)
def unzip_files(logger, src_fn, dst_path):
"""
    Extract all files from a zip archive into a destination directory on disk.
"""
with ZipFile(src_fn, 'r') as ozip:
ozip.extractall(dst_path)
def move_files(logger, src, dest):
"""
    Copy all regular files from one folder to another (the sources are left in place).
"""
src_files = os.listdir(src)
for file_name in src_files:
full_file_name = os.path.join(src, file_name)
if (os.path.isfile(full_file_name)):
shutil.copy(full_file_name, dest)
def download_file_from_shock(logger,
shock_service_url = None,
shock_id = None,
filename = None,
directory = None,
filesize= None,
token = None):
"""
Given a SHOCK instance URL and a SHOCK node id, download the contents of that node
to a file on disk.
"""
header = dict()
header["Authorization"] = "Oauth {0}".format(token)
logger.info("Downloading shock node {0}/node/{1}".format(shock_service_url,shock_id))
metadata_response = requests.get("{0}/node/{1}?verbosity=metadata".format(shock_service_url, shock_id), headers=header, stream=True, verify=True)
shock_metadata = metadata_response.json()['data']
print "shock metadata is {0}".format(shock_metadata)
if shock_metadata is not None:
shockFileName = shock_metadata['file']['name']
shockFileSize = shock_metadata['file']['size']
metadata_response.close()
download_url = "{0}/node/{1}?download_raw".format(shock_service_url, shock_id)
print "download_url is {0}".format(download_url)
try:
data = requests.get(download_url, headers=header, stream=True, verify=True)
except Exception,e:
print(traceback.format_exc())
if filename is not None:
shockFileName = filename
if directory is not None:
filePath = os.path.join(directory, shockFileName)
else:
filePath = shockFileName
if filesize is not None:
shockFileSize = filesize
chunkSize = shockFileSize/4
maxChunkSize = 2**30
if chunkSize > maxChunkSize:
chunkSize = maxChunkSize
f = io.open(filePath, 'wb')
try:
for chunk in data.iter_content(chunkSize):
f.write(chunk)
finally:
data.close()
f.close()
def query_shock_node(logger,
shock_service_url = None,
condition = None,
token = None):
"""
    Given a SHOCK instance URL and a query condition, return the data of the
    SHOCK nodes that match the query.
"""
header = dict()
header["Authorization"] = "Oauth {0}".format(token)
query_str = urllib.urlencode(condition)
print query_str
logger.info("Querying shock node {0}/node/?query&{1}".format(shock_service_url,query_str))
query_response = requests.get("{0}/node/?query&{1}".format(shock_service_url, query_str), headers=header, stream=True, verify=True)
query_rst = query_response.json()['data']
query_response.close()
return query_rst
def upload_file_to_shock(logger,
shock_service_url = None,
filePath = None,
attributes = '{}',
ssl_verify = True,
token = None):
"""
Use HTTP multi-part POST to save a file to a SHOCK instance.
"""
if token is None:
raise Exception("Authentication token required!")
#build the header
header = dict()
header["Authorization"] = "Oauth {0}".format(token)
if filePath is None:
raise Exception("No file given for upload to SHOCK!")
    dataFile = open(os.path.abspath(filePath), 'rb')
m = MultipartEncoder(fields={'attributes_str': json.dumps(attributes), 'upload': (os.path.split(filePath)[-1], dataFile)})
header['Content-Type'] = m.content_type
logger.info("Sending {0} to {1}".format(filePath,shock_service_url))
try:
response = requests.post(shock_service_url + "/node", headers=header, data=m, allow_redirects=True, verify=ssl_verify)
dataFile.close()
except:
dataFile.close()
raise
if not response.ok:
response.raise_for_status()
result = response.json()
if result['error']:
raise Exception(result['error'][0])
else:
return result["data"]
def getHandles(logger = None,
shock_service_url = None,
handle_service_url = None,
shock_ids = None,
handle_ids = None,
token = None):
"""
Retrieve KBase handles for a list of shock ids or a list of handle ids.
"""
if token is None:
raise Exception("Authentication token required!")
hs = HandleService(url=handle_service_url, token=token)
handles = list()
if shock_ids is not None:
header = dict()
header["Authorization"] = "Oauth {0}".format(token)
for sid in shock_ids:
info = None
try:
logger.info("Found shock id {0}, retrieving information about the data.".format(sid))
response = requests.get("{0}/node/{1}".format(shock_service_url, sid), headers=header, verify=True)
info = response.json()["data"]
except:
logger.error("There was an error retrieving information about the shock node id {0} from url {1}".format(sid, shock_service_url))
try:
logger.info("Retrieving a handle id for the data.")
handle = hs.persist_handle({"id" : sid,
"type" : "shock",
"url" : shock_service_url,
"file_name": info["file"]["name"],
"remote_md5": info["file"]["checksum"]["md5"]})
handles.append(handle)
except:
try:
handle_id = hs.ids_to_handles([sid])[0]["hid"]
single_handle = hs.hids_to_handles([handle_id])
assert len(single_handle) != 0
if info is not None:
single_handle[0]["file_name"] = info["file"]["name"]
single_handle[0]["remote_md5"] = info["file"]["checksum"]["md5"]
logger.debug(single_handle)
handles.append(single_handle[0])
except:
logger.error("The input shock node id {} is already registered or could not be registered".format(sid))
hs = HandleService(url=handle_service_url, token=token)
all_handles = hs.list_handles()
for x in all_handles:
if x[0] == sid:
logger.info("FOUND shock id as existing handle")
logger.info(x)
break
else:
logger.info("Unable to find a handle containing shock id")
logger.info("Trying again to get a handle id for the data.")
handle_id = hs.persist_handle({"id" : sid,
"type" : "shock",
"url" : shock_service_url,
"file_name": info["file"]["name"],
"remote_md5": info["file"]["checksum"]["md5"]})
handles.append(handle_id)
raise
elif handle_ids is not None:
for hid in handle_ids:
try:
single_handle = hs.hids_to_handles([hid])
assert len(single_handle) != 0
handles.append(single_handle[0])
except:
logger.error("Invalid handle id {0}".format(hid))
raise
return handles
def get_obj_info(logger,ws_url,objects,ws_id,token):
"""
function to get the workspace object id from a object name
"""
ret = []
ws_client=Workspace(url=ws_url, token=token)
for obj in objects:
try:
obj_infos = ws_client.get_object_info_new({"objects": [{'name': obj, 'workspace': ws_id}]})
print obj_infos
ret.append("{0}/{1}/{2}".format(obj_infos[0][6],obj_infos[0][0],obj_infos[0][4]))
print ret
except Exception, e:
logger.error("Couldn't retrieve %s:%s from the workspace , %s " %(ws_id,obj,e))
return ret
def whereis(program):
"""
returns path of program if it exists in your ``$PATH`` variable or ``None`` otherwise
"""
for path in os.environ.get('PATH', '').split(':'):
if os.path.exists(os.path.join(path, program)) and not os.path.isdir(os.path.join(path, program)):
return os.path.join(path, program)
return None
def runProgram(logger=None,
progName=None,
argStr=None,
script_dir=None,
working_dir=None):
"""
Convenience func to handle calling and monitoring output of external programs.
:param progName: name of system program command
:param argStr: string containing command line options for ``progName``
:returns: subprocess.communicate object
"""
# Ensure program is callable.
if script_dir is not None:
progPath= os.path.join(script_dir,progName)
else:
progPath = progName
# progPath = whereis(progName)
# if not progPath:
# raise RuntimeError(None,'{0} command not found in your PATH environmental variable. {1}'.format(progName,os.environ.get('PATH', '')))
# Construct shell command
cmdStr = "%s %s" % (progPath,argStr)
# Set up process obj
process = subprocess.Popen(cmdStr,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=working_dir)
# Get results
result,stderr = process.communicate()
# keep this until your code is stable for easier debugging
if result is not None and len(result) > 0:
logger.info(result)
if stderr is not None and len(stderr) > 0:
logger.info(stderr)
# Check returncode for success/failure
if process.returncode != 0:
        raise RuntimeError('Return Code : {0} , result {1} , progName {2}'.format(process.returncode,result,progName))
# Return result
return result
def hashfile(filepath):
sha1 = hashlib.sha1()
f = open(filepath, 'rb')
try:
sha1.update(f.read())
finally:
f.close()
return sha1.hexdigest()
def create_shock_handle(logger=None,
file_name=None,
shock_url=None,
handle_url=None,
obj_type=None,
token=None):
hs = HandleService(url=handle_url, token=token)
f_shock = upload_file_to_shock(logger,shock_url,file_name,'{}',True,token)
f_sha1 = hashfile(file_name)
hid = getHandles(logger,shock_url,handle_url,[f_shock['id']],None,token)[0]
handle = { 'hid' : hid ,
"file_name" : f_shock['file']['name'] ,
"id" : f_shock['id'] ,
"type" : obj_type ,
"url" : shock_url,
"remote_md5" : f_shock['file']['checksum']['md5'],
"remote_sha1" : f_sha1 }
return handle
def parallel_function(f):
def easy_parallize(f, sequence):
pool = Pool(processes=8)
# f is given sequence. guaranteed to be in order
result = pool.map(f, sequence)
        cleaned = [x for x in result if x is not None]
        from numpy import asarray  # local import: numpy is not imported at module level
        cleaned = asarray(cleaned)
# not optimal but safe
pool.close()
pool.join()
return cleaned
from functools import partial
# this assumes f has one argument, fairly easy with Python's global scope
return partial(easy_parallize, f)
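# Illustrative usage of parallel_function (the worker must be a module-level
# function so multiprocessing can pickle it; `square` is hypothetical):
#
#     def square(x):
#         return x * x
#
#     parallel_square = parallel_function(square)
#     results = parallel_square(range(100))  # mapped over a Pool of 8 workers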
|
|
#!/usr/bin/env python3
import base64
import errno
import http.client
import json
import logging
import os
import pickle # nosec
import socket
import ssl
import sys
import threading
from typing import Any, Type
import ctf_gameserver.lib.flag
from ctf_gameserver.lib.checkresult import CheckResult
_TIMEOUT_SECONDS = 10.0 # Default timeout for socket operations
_LOCAL_STATE_PATH_TMPL = '_{team:d}_state.json'
_LOCAL_STATE_PATH = None
_ctrl_in = None # pylint: disable=invalid-name
_ctrl_out = None # pylint: disable=invalid-name
_ctrl_out_lock = None # pylint: disable=invalid-name
def _setup():
global _ctrl_in, _ctrl_out, _ctrl_out_lock # pylint: disable=invalid-name
if 'CTF_CHECKERSCRIPT' in os.environ:
# Launched by Checker Runner, we cannot just try to open the descriptors (and fallback if they don't
# exist) because execution environments like pytest might use them as well
_ctrl_in = os.fdopen(3, 'r')
_ctrl_out = os.fdopen(4, 'w')
else:
# Local execution without a Checker Runner
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
return
_ctrl_out_lock = threading.RLock()
class JsonHandler(logging.StreamHandler):
def __init__(self):
super().__init__(_ctrl_out)
def emit(self, record):
_ctrl_out_lock.acquire()
super().emit(record)
_ctrl_out_lock.release()
def format(self, record):
param = {
'message': super().format(record),
'levelno': record.levelno,
'pathname': record.pathname,
'lineno': record.lineno,
'funcName': record.funcName
}
json_message = {'action': 'LOG', 'param': param}
# Make sure that our JSON consists of just a single line
return json.dumps(json_message).replace('\n', '')
json_handler = JsonHandler()
logging.getLogger().addHandler(json_handler)
logging.getLogger().setLevel(logging.INFO)
socket.setdefaulttimeout(_TIMEOUT_SECONDS)
try:
import requests # pylint: disable=import-outside-toplevel
# Ugly monkey patch to set defaults for the timeouts in requests, because requests (resp. urllib3)
# always overwrites the default socket timeout
class TimeoutSoup(requests.adapters.TimeoutSauce):
def __init__(self, total=None, connect=None, read=None):
if total is None:
total = _TIMEOUT_SECONDS
if connect is None:
connect = _TIMEOUT_SECONDS
if read is None:
read = _TIMEOUT_SECONDS
super().__init__(total, connect, read)
requests.adapters.TimeoutSauce = TimeoutSoup
except ImportError:
pass
_setup()
class BaseChecker:
"""
Base class for individual Checker implementations. Checker Scripts must implement all methods.
Attributes:
ip: Vulnbox IP address of the team to be checked
team: Net number of the team to be checked
"""
def __init__(self, ip: str, team: int) -> None:
self.ip = ip
self.team = team
def place_flag(self, tick: int) -> CheckResult:
raise NotImplementedError('place_flag() must be implemented by the subclass')
def check_service(self) -> CheckResult:
raise NotImplementedError('check_service() must be implemented by the subclass')
def check_flag(self, tick: int) -> CheckResult:
raise NotImplementedError('check_flag() must be implemented by the subclass')
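# Minimal Checker Script sketch (illustrative; the import path of this module
# and all service details are assumptions):
#
#     from ctf_gameserver import checkerlib
#
#     class MyChecker(checkerlib.BaseChecker):
#         def place_flag(self, tick):
#             flag = checkerlib.get_flag(tick)
#             ...  # store the flag in the service at self.ip
#             return checkerlib.CheckResult.OK
#
#         def check_service(self):
#             return checkerlib.CheckResult.OK
#
#         def check_flag(self, tick):
#             ...  # retrieve the flag for `tick` and compare with get_flag(tick)
#             return checkerlib.CheckResult.OK
#
#     if __name__ == '__main__':
#         checkerlib.run_check(MyChecker)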
def get_flag(tick: int, payload: bytes = b'') -> str:
"""
May be called by Checker Scripts to get the flag for a given tick, for the team and service of the
current run. The returned flag can be used for both placement and checks.
"""
if _launched_without_runner():
try:
team = get_flag._team # pylint: disable=protected-access
except AttributeError:
raise Exception('get_flag() must be called through run_check()')
# Return dummy flag when launched locally
if payload == b'':
payload = None
return ctf_gameserver.lib.flag.generate(team, 42, b'TOPSECRET', payload=payload, timestamp=tick)
payload_b64 = base64.b64encode(payload).decode('ascii')
_send_ctrl_message({'action': 'FLAG', 'param': {'tick': tick, 'payload': payload_b64}})
result = _recv_ctrl_message()
return result['response']
def set_flagid(data: str) -> None:
"""
Stores the Flag ID for the current team and tick.
"""
if not _launched_without_runner():
_send_ctrl_message({'action': 'FLAGID', 'param': data})
# Wait for acknowledgement
_recv_ctrl_message()
else:
print('Storing Flag ID: {}'.format(data))
def store_state(key: str, data: Any) -> None:
"""
Allows a Checker Script to store arbitrary Python data persistently across runs. Data is stored per
service and team with the given key as an additional identifier.
"""
serialized_data = base64.b64encode(pickle.dumps(data)).decode('ascii')
if not _launched_without_runner():
message = {'key': key, 'data': serialized_data}
_send_ctrl_message({'action': 'STORE', 'param': message})
# Wait for acknowledgement
_recv_ctrl_message()
else:
try:
with open(_LOCAL_STATE_PATH, 'r') as f:
state = json.load(f)
except FileNotFoundError:
state = {}
state[key] = serialized_data
with open(_LOCAL_STATE_PATH, 'w') as f:
json.dump(state, f, indent=4)
def load_state(key: str) -> Any:
"""
Allows to retrieve data stored through store_state(). If no data exists for the given key (and the
current service and team), None is returned.
"""
if not _launched_without_runner():
_send_ctrl_message({'action': 'LOAD', 'param': key})
result = _recv_ctrl_message()
data = result['response']
if data is None:
return None
else:
try:
with open(_LOCAL_STATE_PATH, 'r') as f:
state = json.load(f)
except FileNotFoundError:
return None
try:
data = state[key]
except KeyError:
return None
return pickle.loads(base64.b64decode(data)) # nosec
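# Illustrative round trip of the state helpers (key and data are arbitrary):
#
#     store_state('credentials', {'user': 'alice', 'password': 's3cret'})
#     creds = load_state('credentials')  # -> same dict, or None if never stored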
def run_check(checker_cls: Type[BaseChecker]) -> None:
"""
Launch execution of the specified Checker implementation. Must be called by all Checker Scripts.
"""
if len(sys.argv) != 4:
raise Exception('Invalid arguments, usage: {} <ip> <team-net-no> <tick>'.format(sys.argv[0]))
ip = sys.argv[1]
team = int(sys.argv[2])
tick = int(sys.argv[3])
global _LOCAL_STATE_PATH
_LOCAL_STATE_PATH = _LOCAL_STATE_PATH_TMPL.format(team=team)
if _launched_without_runner():
# Hack because get_flag() only needs to know the team when launched locally
get_flag._team = team # pylint: disable=protected-access
checker = checker_cls(ip, team)
result = _run_check_steps(checker, tick)
if not _launched_without_runner():
_send_ctrl_message({'action': 'RESULT', 'param': result.value})
# Wait for acknowledgement
_recv_ctrl_message()
else:
print('Check result: {}'.format(result))
def _run_check_steps(checker, tick):
tick_lookback = 5
try:
logging.info('Placing flag')
result = checker.place_flag(tick)
logging.info('Flag placement result: %s', result)
if result != CheckResult.OK:
return result
logging.info('Checking service')
result = checker.check_service()
logging.info('Service check result: %s', result)
if result != CheckResult.OK:
return result
current_tick = tick
oldest_tick = max(tick-tick_lookback, 0)
recovering = False
while current_tick >= oldest_tick:
logging.info('Checking flag of tick %d', current_tick)
result = checker.check_flag(current_tick)
logging.info('Flag check result of tick %d: %s', current_tick, result)
if result != CheckResult.OK:
if current_tick != tick and result == CheckResult.FLAG_NOT_FOUND:
recovering = True
else:
return result
current_tick -= 1
if recovering:
return CheckResult.RECOVERING
else:
return CheckResult.OK
except Exception as e: # pylint: disable=broad-except
if _is_conn_error(e):
logging.warning('Connection error during check', exc_info=e)
return CheckResult.DOWN
else:
# Just let the Checker Script die, logging will be handled by the Runner
raise e
def _launched_without_runner():
"""
Returns True if the Checker Script has been launched locally (during development) and False if it has
been launched by the Checker Script Runner (during an actual competition).
"""
return _ctrl_in is None
def _recv_ctrl_message():
message_json = _ctrl_in.readline()
return json.loads(message_json)
def _send_ctrl_message(message):
# Make sure that our JSON consists of just a single line
message_json = json.dumps(message).replace('\n', '') + '\n'
_ctrl_out_lock.acquire()
_ctrl_out.write(message_json)
_ctrl_out.flush()
_ctrl_out_lock.release()
def _is_conn_error(exception):
"""
Checks if the given exception resembles an error in the network connection, e.g. a timeout or connection
abort.
"""
conn_exceptions = (
BrokenPipeError, # Raised on SIGPIPE
ConnectionAbortedError,
ConnectionResetError,
ConnectionRefusedError,
EOFError, # Raised by telnetlib on timeout
http.client.BadStatusLine,
http.client.ImproperConnectionState,
http.client.LineTooLong,
http.client.UnknownTransferEncoding,
socket.timeout,
ssl.SSLEOFError,
ssl.SSLWantReadError,
ssl.SSLWantWriteError,
ssl.SSLZeroReturnError
)
try:
import urllib3 # pylint: disable=import-outside-toplevel
conn_exceptions += (
urllib3.exceptions.ConnectionError,
urllib3.exceptions.DecodeError,
urllib3.exceptions.IncompleteRead,
urllib3.exceptions.ProtocolError,
urllib3.exceptions.SSLError,
urllib3.exceptions.TimeoutError
)
except ImportError:
pass
try:
import requests # pylint: disable=import-outside-toplevel
conn_exceptions += (
requests.Timeout,
requests.ConnectionError,
requests.packages.urllib3.exceptions.ConnectionError,
requests.packages.urllib3.exceptions.DecodeError,
requests.packages.urllib3.exceptions.IncompleteRead,
requests.packages.urllib3.exceptions.ProtocolError,
requests.packages.urllib3.exceptions.SSLError,
requests.packages.urllib3.exceptions.TimeoutError
)
except ImportError:
pass
try:
import nclib # pylint: disable=import-outside-toplevel
conn_exceptions += (nclib.NetcatError,)
except ImportError:
pass
if isinstance(exception, conn_exceptions):
return True
# (At least) urllib and urllib3 wrap other exceptions in a "reason" attribute
if hasattr(exception, 'reason') and isinstance(exception.reason, Exception):
return _is_conn_error(exception.reason)
if isinstance(exception, OSError):
return exception.errno in (
errno.EACCES,
errno.ECONNABORTED,
errno.ECONNREFUSED,
errno.ECONNRESET,
errno.EHOSTDOWN,
errno.EHOSTUNREACH,
errno.ENETDOWN,
errno.ENETRESET,
errno.ENETUNREACH,
errno.EPIPE,
errno.ETIMEDOUT
)
return False
|
|
"""The tests for the Group components."""
# pylint: disable=protected-access
from collections import OrderedDict
import homeassistant.components.group as group
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
EVENT_HOMEASSISTANT_START,
SERVICE_RELOAD,
STATE_HOME,
STATE_NOT_HOME,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
)
from homeassistant.core import CoreState
from homeassistant.helpers.event import TRACK_STATE_CHANGE_CALLBACKS
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import assert_setup_component
from tests.components.group import common
async def test_setup_group_with_mixed_groupable_states(hass):
"""Try to set up a group with mixed groupable states."""
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("device_tracker.Paulus", STATE_HOME)
assert await async_setup_component(hass, "group", {})
await group.Group.async_create_group(
hass, "person_and_light", ["light.Bowl", "device_tracker.Paulus"]
)
await hass.async_block_till_done()
assert STATE_ON == hass.states.get(f"{group.DOMAIN}.person_and_light").state
async def test_setup_group_with_a_non_existing_state(hass):
"""Try to set up a group with a non existing state."""
hass.states.async_set("light.Bowl", STATE_ON)
assert await async_setup_component(hass, "group", {})
grp = await group.Group.async_create_group(
hass, "light_and_nothing", ["light.Bowl", "non.existing"]
)
assert STATE_ON == grp.state
async def test_setup_group_with_non_groupable_states(hass):
"""Test setup with groups which are not groupable."""
hass.states.async_set("cast.living_room", "Plex")
hass.states.async_set("cast.bedroom", "Netflix")
assert await async_setup_component(hass, "group", {})
grp = await group.Group.async_create_group(
hass, "chromecasts", ["cast.living_room", "cast.bedroom"]
)
assert grp.state is None
async def test_setup_empty_group(hass):
"""Try to set up an empty group."""
grp = await group.Group.async_create_group(hass, "nothing", [])
assert grp.state is None
async def test_monitor_group(hass):
"""Test if the group keeps track of states."""
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("light.Ceiling", STATE_OFF)
assert await async_setup_component(hass, "group", {})
test_group = await group.Group.async_create_group(
hass, "init_group", ["light.Bowl", "light.Ceiling"], False
)
# Test if group setup in our init mode is ok
assert test_group.entity_id in hass.states.async_entity_ids()
group_state = hass.states.get(test_group.entity_id)
assert STATE_ON == group_state.state
assert group_state.attributes.get(group.ATTR_AUTO)
async def test_group_turns_off_if_all_off(hass):
"""Test if turn off if the last device that was on turns off."""
hass.states.async_set("light.Bowl", STATE_OFF)
hass.states.async_set("light.Ceiling", STATE_OFF)
assert await async_setup_component(hass, "group", {})
test_group = await group.Group.async_create_group(
hass, "init_group", ["light.Bowl", "light.Ceiling"], False
)
await hass.async_block_till_done()
group_state = hass.states.get(test_group.entity_id)
assert STATE_OFF == group_state.state
async def test_group_turns_on_if_all_are_off_and_one_turns_on(hass):
"""Test if turn on if all devices were turned off and one turns on."""
hass.states.async_set("light.Bowl", STATE_OFF)
hass.states.async_set("light.Ceiling", STATE_OFF)
assert await async_setup_component(hass, "group", {})
test_group = await group.Group.async_create_group(
hass, "init_group", ["light.Bowl", "light.Ceiling"], False
)
# Turn one on
hass.states.async_set("light.Ceiling", STATE_ON)
await hass.async_block_till_done()
group_state = hass.states.get(test_group.entity_id)
assert STATE_ON == group_state.state
async def test_allgroup_stays_off_if_all_are_off_and_one_turns_on(hass):
"""Group with all: true, stay off if one device turns on."""
hass.states.async_set("light.Bowl", STATE_OFF)
hass.states.async_set("light.Ceiling", STATE_OFF)
assert await async_setup_component(hass, "group", {})
test_group = await group.Group.async_create_group(
hass, "init_group", ["light.Bowl", "light.Ceiling"], False, mode=True
)
# Turn one on
hass.states.async_set("light.Ceiling", STATE_ON)
await hass.async_block_till_done()
group_state = hass.states.get(test_group.entity_id)
assert STATE_OFF == group_state.state
async def test_allgroup_turn_on_if_last_turns_on(hass):
"""Group with all: true, turn on if all devices are on."""
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("light.Ceiling", STATE_OFF)
assert await async_setup_component(hass, "group", {})
test_group = await group.Group.async_create_group(
hass, "init_group", ["light.Bowl", "light.Ceiling"], False, mode=True
)
# Turn one on
hass.states.async_set("light.Ceiling", STATE_ON)
await hass.async_block_till_done()
group_state = hass.states.get(test_group.entity_id)
assert STATE_ON == group_state.state
async def test_expand_entity_ids(hass):
"""Test expand_entity_ids method."""
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("light.Ceiling", STATE_OFF)
assert await async_setup_component(hass, "group", {})
test_group = await group.Group.async_create_group(
hass, "init_group", ["light.Bowl", "light.Ceiling"], False
)
assert sorted(["light.ceiling", "light.bowl"]) == sorted(
group.expand_entity_ids(hass, [test_group.entity_id])
)
async def test_expand_entity_ids_does_not_return_duplicates(hass):
"""Test that expand_entity_ids does not return duplicates."""
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("light.Ceiling", STATE_OFF)
assert await async_setup_component(hass, "group", {})
test_group = await group.Group.async_create_group(
hass, "init_group", ["light.Bowl", "light.Ceiling"], False
)
assert ["light.bowl", "light.ceiling"] == sorted(
group.expand_entity_ids(hass, [test_group.entity_id, "light.Ceiling"])
)
assert ["light.bowl", "light.ceiling"] == sorted(
group.expand_entity_ids(hass, ["light.bowl", test_group.entity_id])
)
async def test_expand_entity_ids_recursive(hass):
"""Test expand_entity_ids method with a group that contains itself."""
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("light.Ceiling", STATE_OFF)
assert await async_setup_component(hass, "group", {})
test_group = await group.Group.async_create_group(
hass,
"init_group",
["light.Bowl", "light.Ceiling", "group.init_group"],
False,
)
assert sorted(["light.ceiling", "light.bowl"]) == sorted(
group.expand_entity_ids(hass, [test_group.entity_id])
)
async def test_expand_entity_ids_ignores_non_strings(hass):
"""Test that non string elements in lists are ignored."""
assert [] == group.expand_entity_ids(hass, [5, True])
async def test_get_entity_ids(hass):
"""Test get_entity_ids method."""
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("light.Ceiling", STATE_OFF)
assert await async_setup_component(hass, "group", {})
test_group = await group.Group.async_create_group(
hass, "init_group", ["light.Bowl", "light.Ceiling"], False
)
assert ["light.bowl", "light.ceiling"] == sorted(
group.get_entity_ids(hass, test_group.entity_id)
)
async def test_get_entity_ids_with_domain_filter(hass):
"""Test if get_entity_ids works with a domain_filter."""
hass.states.async_set("switch.AC", STATE_OFF)
assert await async_setup_component(hass, "group", {})
mixed_group = await group.Group.async_create_group(
hass, "mixed_group", ["light.Bowl", "switch.AC"], False
)
assert ["switch.ac"] == group.get_entity_ids(
hass, mixed_group.entity_id, domain_filter="switch"
)
async def test_get_entity_ids_with_non_existing_group_name(hass):
"""Test get_entity_ids with a non existing group."""
assert [] == group.get_entity_ids(hass, "non_existing")
async def test_get_entity_ids_with_non_group_state(hass):
"""Test get_entity_ids with a non group state."""
assert [] == group.get_entity_ids(hass, "switch.AC")
async def test_group_being_init_before_first_tracked_state_is_set_to_on(hass):
"""Test if the groups turn on.
If no states existed and now a state it is tracking is being added
as ON.
"""
assert await async_setup_component(hass, "group", {})
test_group = await group.Group.async_create_group(
hass, "test group", ["light.not_there_1"]
)
hass.states.async_set("light.not_there_1", STATE_ON)
await hass.async_block_till_done()
group_state = hass.states.get(test_group.entity_id)
assert STATE_ON == group_state.state
async def test_group_being_init_before_first_tracked_state_is_set_to_off(hass):
"""Test if the group turns off.
If no states existed and now a state it is tracking is being added
as OFF.
"""
assert await async_setup_component(hass, "group", {})
test_group = await group.Group.async_create_group(
hass, "test group", ["light.not_there_1"]
)
hass.states.async_set("light.not_there_1", STATE_OFF)
await hass.async_block_till_done()
group_state = hass.states.get(test_group.entity_id)
assert STATE_OFF == group_state.state
async def test_groups_get_unique_names(hass):
"""Two groups with same name should both have a unique entity id."""
assert await async_setup_component(hass, "group", {})
grp1 = await group.Group.async_create_group(hass, "Je suis Charlie")
grp2 = await group.Group.async_create_group(hass, "Je suis Charlie")
assert grp1.entity_id != grp2.entity_id
async def test_expand_entity_ids_expands_nested_groups(hass):
"""Test if entity ids epands to nested groups."""
assert await async_setup_component(hass, "group", {})
await group.Group.async_create_group(
hass, "light", ["light.test_1", "light.test_2"]
)
await group.Group.async_create_group(
hass, "switch", ["switch.test_1", "switch.test_2"]
)
await group.Group.async_create_group(
hass, "group_of_groups", ["group.light", "group.switch"]
)
assert [
"light.test_1",
"light.test_2",
"switch.test_1",
"switch.test_2",
] == sorted(group.expand_entity_ids(hass, ["group.group_of_groups"]))
async def test_set_assumed_state_based_on_tracked(hass):
"""Test assumed state."""
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("light.Ceiling", STATE_OFF)
assert await async_setup_component(hass, "group", {})
test_group = await group.Group.async_create_group(
hass, "init_group", ["light.Bowl", "light.Ceiling", "sensor.no_exist"]
)
state = hass.states.get(test_group.entity_id)
assert not state.attributes.get(ATTR_ASSUMED_STATE)
hass.states.async_set("light.Bowl", STATE_ON, {ATTR_ASSUMED_STATE: True})
await hass.async_block_till_done()
state = hass.states.get(test_group.entity_id)
assert state.attributes.get(ATTR_ASSUMED_STATE)
hass.states.async_set("light.Bowl", STATE_ON)
await hass.async_block_till_done()
state = hass.states.get(test_group.entity_id)
assert not state.attributes.get(ATTR_ASSUMED_STATE)
async def test_group_updated_after_device_tracker_zone_change(hass):
"""Test group state when device tracker in group changes zone."""
hass.states.async_set("device_tracker.Adam", STATE_HOME)
hass.states.async_set("device_tracker.Eve", STATE_NOT_HOME)
await hass.async_block_till_done()
assert await async_setup_component(hass, "group", {})
assert await async_setup_component(hass, "device_tracker", {})
await group.Group.async_create_group(
hass, "peeps", ["device_tracker.Adam", "device_tracker.Eve"]
)
hass.states.async_set("device_tracker.Adam", "cool_state_not_home")
await hass.async_block_till_done()
assert STATE_NOT_HOME == hass.states.get(f"{group.DOMAIN}.peeps").state
async def test_is_on(hass):
"""Test is_on method."""
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("light.Ceiling", STATE_OFF)
assert group.is_on(hass, "group.none") is False
assert await async_setup_component(hass, "light", {})
assert await async_setup_component(hass, "group", {})
await hass.async_block_till_done()
test_group = await group.Group.async_create_group(
hass, "init_group", ["light.Bowl", "light.Ceiling"], False
)
await hass.async_block_till_done()
assert group.is_on(hass, test_group.entity_id) is True
hass.states.async_set("light.Bowl", STATE_OFF)
await hass.async_block_till_done()
assert group.is_on(hass, test_group.entity_id) is False
    # Try a non-existing state
assert not group.is_on(hass, "non.existing")
async def test_reloading_groups(hass):
"""Test reloading the group config."""
assert await async_setup_component(
hass,
"group",
{
"group": {
"second_group": {"entities": "light.Bowl", "icon": "mdi:work"},
"test_group": "hello.world,sensor.happy",
"empty_group": {"name": "Empty Group", "entities": None},
}
},
)
await hass.async_block_till_done()
await group.Group.async_create_group(
hass, "all tests", ["test.one", "test.two"], user_defined=False
)
await hass.async_block_till_done()
assert sorted(hass.states.async_entity_ids()) == [
"group.all_tests",
"group.empty_group",
"group.second_group",
"group.test_group",
]
assert hass.bus.async_listeners()["state_changed"] == 1
assert len(hass.data[TRACK_STATE_CHANGE_CALLBACKS]["hello.world"]) == 1
assert len(hass.data[TRACK_STATE_CHANGE_CALLBACKS]["light.bowl"]) == 1
assert len(hass.data[TRACK_STATE_CHANGE_CALLBACKS]["test.one"]) == 1
assert len(hass.data[TRACK_STATE_CHANGE_CALLBACKS]["test.two"]) == 1
with patch(
"homeassistant.config.load_yaml_config_file",
return_value={
"group": {"hello": {"entities": "light.Bowl", "icon": "mdi:work"}}
},
):
await hass.services.async_call(group.DOMAIN, SERVICE_RELOAD)
await hass.async_block_till_done()
assert sorted(hass.states.async_entity_ids()) == [
"group.all_tests",
"group.hello",
]
assert hass.bus.async_listeners()["state_changed"] == 1
assert len(hass.data[TRACK_STATE_CHANGE_CALLBACKS]["light.bowl"]) == 1
assert len(hass.data[TRACK_STATE_CHANGE_CALLBACKS]["test.one"]) == 1
assert len(hass.data[TRACK_STATE_CHANGE_CALLBACKS]["test.two"]) == 1
async def test_modify_group(hass):
"""Test modifying a group."""
group_conf = OrderedDict()
group_conf["modify_group"] = {
"name": "friendly_name",
"icon": "mdi:work",
"entities": None,
}
assert await async_setup_component(hass, "group", {"group": group_conf})
await hass.async_block_till_done()
assert hass.states.get(f"{group.DOMAIN}.modify_group")
# The old way would create a new group modify_group1 because
# internally it didn't know anything about those created in the config
common.async_set_group(hass, "modify_group", icon="mdi:play")
await hass.async_block_till_done()
group_state = hass.states.get(f"{group.DOMAIN}.modify_group")
assert group_state
assert hass.states.async_entity_ids() == ["group.modify_group"]
assert group_state.attributes.get(ATTR_ICON) == "mdi:play"
assert group_state.attributes.get(ATTR_FRIENDLY_NAME) == "friendly_name"
async def test_setup(hass):
"""Test setup method."""
hass.states.async_set("light.Bowl", STATE_ON)
hass.states.async_set("light.Ceiling", STATE_OFF)
group_conf = OrderedDict()
group_conf["test_group"] = "hello.world,sensor.happy"
group_conf["empty_group"] = {"name": "Empty Group", "entities": None}
assert await async_setup_component(hass, "light", {})
await hass.async_block_till_done()
assert await async_setup_component(hass, "group", {"group": group_conf})
await hass.async_block_till_done()
test_group = await group.Group.async_create_group(
hass, "init_group", ["light.Bowl", "light.Ceiling"], False
)
await group.Group.async_create_group(
hass,
"created_group",
["light.Bowl", f"{test_group.entity_id}"],
True,
"mdi:work",
)
await hass.async_block_till_done()
group_state = hass.states.get(f"{group.DOMAIN}.created_group")
assert STATE_ON == group_state.state
assert {test_group.entity_id, "light.bowl"} == set(
group_state.attributes["entity_id"]
)
assert group_state.attributes.get(group.ATTR_AUTO) is None
assert "mdi:work" == group_state.attributes.get(ATTR_ICON)
assert 3 == group_state.attributes.get(group.ATTR_ORDER)
group_state = hass.states.get(f"{group.DOMAIN}.test_group")
assert STATE_UNKNOWN == group_state.state
assert {"sensor.happy", "hello.world"} == set(group_state.attributes["entity_id"])
assert group_state.attributes.get(group.ATTR_AUTO) is None
assert group_state.attributes.get(ATTR_ICON) is None
assert 0 == group_state.attributes.get(group.ATTR_ORDER)
async def test_service_group_services(hass):
"""Check if service are available."""
with assert_setup_component(0, "group"):
await async_setup_component(hass, "group", {"group": {}})
assert hass.services.has_service("group", group.SERVICE_SET)
assert hass.services.has_service("group", group.SERVICE_REMOVE)
# pylint: disable=invalid-name
async def test_service_group_set_group_remove_group(hass):
"""Check if service are available."""
with assert_setup_component(0, "group"):
await async_setup_component(hass, "group", {"group": {}})
common.async_set_group(hass, "user_test_group", name="Test")
await hass.async_block_till_done()
group_state = hass.states.get("group.user_test_group")
assert group_state
assert group_state.attributes[group.ATTR_AUTO]
assert group_state.attributes["friendly_name"] == "Test"
common.async_set_group(hass, "user_test_group", entity_ids=["test.entity_bla1"])
await hass.async_block_till_done()
group_state = hass.states.get("group.user_test_group")
assert group_state
assert group_state.attributes[group.ATTR_AUTO]
assert group_state.attributes["friendly_name"] == "Test"
assert list(group_state.attributes["entity_id"]) == ["test.entity_bla1"]
common.async_set_group(
hass,
"user_test_group",
icon="mdi:camera",
name="Test2",
add=["test.entity_id2"],
)
await hass.async_block_till_done()
group_state = hass.states.get("group.user_test_group")
assert group_state
assert group_state.attributes[group.ATTR_AUTO]
assert group_state.attributes["friendly_name"] == "Test2"
assert group_state.attributes["icon"] == "mdi:camera"
assert sorted(list(group_state.attributes["entity_id"])) == sorted(
["test.entity_bla1", "test.entity_id2"]
)
common.async_remove(hass, "user_test_group")
await hass.async_block_till_done()
group_state = hass.states.get("group.user_test_group")
assert group_state is None
async def test_group_order(hass):
"""Test that order gets incremented when creating a new group."""
hass.states.async_set("light.bowl", STATE_ON)
assert await async_setup_component(hass, "light", {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {"entities": "light.Bowl", "icon": "mdi:work"},
"group_one": {"entities": "light.Bowl", "icon": "mdi:work"},
"group_two": {"entities": "light.Bowl", "icon": "mdi:work"},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").attributes["order"] == 0
assert hass.states.get("group.group_one").attributes["order"] == 1
assert hass.states.get("group.group_two").attributes["order"] == 2
async def test_group_order_with_dynamic_creation(hass):
"""Test that order gets incremented when creating a new group."""
hass.states.async_set("light.bowl", STATE_ON)
assert await async_setup_component(hass, "light", {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {"entities": "light.Bowl", "icon": "mdi:work"},
"group_one": {"entities": "light.Bowl", "icon": "mdi:work"},
"group_two": {"entities": "light.Bowl", "icon": "mdi:work"},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").attributes["order"] == 0
assert hass.states.get("group.group_one").attributes["order"] == 1
assert hass.states.get("group.group_two").attributes["order"] == 2
await hass.services.async_call(
group.DOMAIN,
group.SERVICE_SET,
{"object_id": "new_group", "name": "New Group", "entities": "light.bowl"},
)
await hass.async_block_till_done()
assert hass.states.get("group.new_group").attributes["order"] == 3
await hass.services.async_call(
group.DOMAIN,
group.SERVICE_REMOVE,
{
"object_id": "new_group",
},
)
await hass.async_block_till_done()
assert not hass.states.get("group.new_group")
await hass.services.async_call(
group.DOMAIN,
group.SERVICE_SET,
{"object_id": "new_group2", "name": "New Group 2", "entities": "light.bowl"},
)
await hass.async_block_till_done()
assert hass.states.get("group.new_group2").attributes["order"] == 4
async def test_group_persons(hass):
"""Test group of persons."""
hass.states.async_set("person.one", "Work")
hass.states.async_set("person.two", "Work")
hass.states.async_set("person.three", "home")
assert await async_setup_component(hass, "person", {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {"entities": "person.one, person.two, person.three"},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == "home"
async def test_group_persons_and_device_trackers(hass):
"""Test group of persons and device_tracker."""
hass.states.async_set("person.one", "Work")
hass.states.async_set("person.two", "Work")
hass.states.async_set("person.three", "Work")
hass.states.async_set("device_tracker.one", "home")
assert await async_setup_component(hass, "person", {})
assert await async_setup_component(hass, "device_tracker", {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {
"entities": "device_tracker.one, person.one, person.two, person.three"
},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == "home"
async def test_group_mixed_domains_on(hass):
"""Test group of mixed domains that is on."""
hass.states.async_set("lock.alexander_garage_exit_door", "locked")
hass.states.async_set("binary_sensor.alexander_garage_side_door_open", "on")
hass.states.async_set("cover.small_garage_door", "open")
for domain in ["lock", "binary_sensor", "cover"]:
assert await async_setup_component(hass, domain, {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {
"all": "true",
"entities": "lock.alexander_garage_exit_door, binary_sensor.alexander_garage_side_door_open, cover.small_garage_door",
},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == "on"
async def test_group_mixed_domains_off(hass):
"""Test group of mixed domains that is off."""
hass.states.async_set("lock.alexander_garage_exit_door", "unlocked")
hass.states.async_set("binary_sensor.alexander_garage_side_door_open", "off")
hass.states.async_set("cover.small_garage_door", "closed")
for domain in ["lock", "binary_sensor", "cover"]:
assert await async_setup_component(hass, domain, {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {
"all": "true",
"entities": "lock.alexander_garage_exit_door, binary_sensor.alexander_garage_side_door_open, cover.small_garage_door",
},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == "off"
async def test_group_locks(hass):
"""Test group of locks."""
hass.states.async_set("lock.one", "locked")
hass.states.async_set("lock.two", "locked")
hass.states.async_set("lock.three", "unlocked")
assert await async_setup_component(hass, "lock", {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {"entities": "lock.one, lock.two, lock.three"},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == "locked"
async def test_group_sensors(hass):
"""Test group of sensors."""
hass.states.async_set("sensor.one", "locked")
hass.states.async_set("sensor.two", "on")
hass.states.async_set("sensor.three", "closed")
assert await async_setup_component(hass, "sensor", {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {"entities": "sensor.one, sensor.two, sensor.three"},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == "unknown"
async def test_group_climate_mixed(hass):
"""Test group of climate with mixed states."""
hass.states.async_set("climate.one", "off")
hass.states.async_set("climate.two", "cool")
hass.states.async_set("climate.three", "heat")
assert await async_setup_component(hass, "climate", {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {"entities": "climate.one, climate.two, climate.three"},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == STATE_ON
async def test_group_climate_all_cool(hass):
"""Test group of climate all set to cool."""
hass.states.async_set("climate.one", "cool")
hass.states.async_set("climate.two", "cool")
hass.states.async_set("climate.three", "cool")
assert await async_setup_component(hass, "climate", {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {"entities": "climate.one, climate.two, climate.three"},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == STATE_ON
async def test_group_climate_all_off(hass):
"""Test group of climate all set to off."""
hass.states.async_set("climate.one", "off")
hass.states.async_set("climate.two", "off")
hass.states.async_set("climate.three", "off")
assert await async_setup_component(hass, "climate", {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {"entities": "climate.one, climate.two, climate.three"},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == STATE_OFF
async def test_group_alarm(hass):
"""Test group of alarm control panels."""
hass.states.async_set("alarm_control_panel.one", "armed_away")
hass.states.async_set("alarm_control_panel.two", "armed_home")
hass.states.async_set("alarm_control_panel.three", "armed_away")
hass.state = CoreState.stopped
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {
"entities": "alarm_control_panel.one, alarm_control_panel.two, alarm_control_panel.three"
},
}
},
)
assert await async_setup_component(hass, "alarm_control_panel", {})
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == STATE_ON
async def test_group_alarm_disarmed(hass):
"""Test group of alarm control panels disarmed."""
hass.states.async_set("alarm_control_panel.one", "disarmed")
hass.states.async_set("alarm_control_panel.two", "disarmed")
hass.states.async_set("alarm_control_panel.three", "disarmed")
assert await async_setup_component(hass, "alarm_control_panel", {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {
"entities": "alarm_control_panel.one, alarm_control_panel.two, alarm_control_panel.three"
},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == STATE_OFF
async def test_group_vacuum_off(hass):
"""Test group of vacuums."""
hass.states.async_set("vacuum.one", "docked")
hass.states.async_set("vacuum.two", "off")
hass.states.async_set("vacuum.three", "off")
hass.state = CoreState.stopped
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {"entities": "vacuum.one, vacuum.two, vacuum.three"},
}
},
)
assert await async_setup_component(hass, "vacuum", {})
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == STATE_OFF
async def test_group_vacuum_on(hass):
"""Test group of vacuums."""
hass.states.async_set("vacuum.one", "cleaning")
hass.states.async_set("vacuum.two", "off")
hass.states.async_set("vacuum.three", "off")
assert await async_setup_component(hass, "vacuum", {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {"entities": "vacuum.one, vacuum.two, vacuum.three"},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == STATE_ON
async def test_device_tracker_not_home(hass):
"""Test group of device_tracker not_home."""
hass.states.async_set("device_tracker.one", "not_home")
hass.states.async_set("device_tracker.two", "not_home")
hass.states.async_set("device_tracker.three", "not_home")
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {
"entities": "device_tracker.one, device_tracker.two, device_tracker.three"
},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == "not_home"
async def test_light_removed(hass):
"""Test group of lights when one is removed."""
hass.states.async_set("light.one", "off")
hass.states.async_set("light.two", "off")
hass.states.async_set("light.three", "on")
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {"entities": "light.one, light.two, light.three"},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == "on"
hass.states.async_remove("light.three")
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == "off"
async def test_switch_removed(hass):
"""Test group of switches when one is removed."""
hass.states.async_set("switch.one", "off")
hass.states.async_set("switch.two", "off")
hass.states.async_set("switch.three", "on")
hass.state = CoreState.stopped
assert await async_setup_component(
hass,
"group",
{
"group": {
"group_zero": {"entities": "switch.one, switch.two, switch.three"},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == "unknown"
assert await async_setup_component(hass, "switch", {})
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == "on"
hass.states.async_remove("switch.three")
await hass.async_block_till_done()
assert hass.states.get("group.group_zero").state == "off"
async def test_lights_added_after_group(hass):
"""Test lights added after group."""
entity_ids = [
"light.living_front_ri",
"light.living_back_lef",
"light.living_back_cen",
"light.living_front_le",
"light.living_front_ce",
"light.living_back_rig",
]
assert await async_setup_component(
hass,
"group",
{
"group": {
"living_room_downlights": {"entities": entity_ids},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.living_room_downlights").state == "unknown"
for entity_id in entity_ids:
hass.states.async_set(entity_id, "off")
await hass.async_block_till_done()
assert hass.states.get("group.living_room_downlights").state == "off"
async def test_lights_added_before_group(hass):
"""Test lights added before group."""
entity_ids = [
"light.living_front_ri",
"light.living_back_lef",
"light.living_back_cen",
"light.living_front_le",
"light.living_front_ce",
"light.living_back_rig",
]
for entity_id in entity_ids:
hass.states.async_set(entity_id, "off")
await hass.async_block_till_done()
assert await async_setup_component(
hass,
"group",
{
"group": {
"living_room_downlights": {"entities": entity_ids},
}
},
)
await hass.async_block_till_done()
assert hass.states.get("group.living_room_downlights").state == "off"
async def test_cover_added_after_group(hass):
"""Test cover added after group."""
entity_ids = [
"cover.upstairs",
"cover.downstairs",
]
assert await async_setup_component(hass, "cover", {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"shades": {"entities": entity_ids},
}
},
)
await hass.async_block_till_done()
for entity_id in entity_ids:
hass.states.async_set(entity_id, "open")
await hass.async_block_till_done()
await hass.async_block_till_done()
assert hass.states.get("group.shades").state == "open"
for entity_id in entity_ids:
hass.states.async_set(entity_id, "closed")
await hass.async_block_till_done()
assert hass.states.get("group.shades").state == "closed"
async def test_group_that_references_a_group_of_lights(hass):
"""Group that references a group of lights."""
entity_ids = [
"light.living_front_ri",
"light.living_back_lef",
]
hass.state = CoreState.stopped
for entity_id in entity_ids:
hass.states.async_set(entity_id, "off")
await hass.async_block_till_done()
assert await async_setup_component(
hass,
"group",
{
"group": {
"living_room_downlights": {"entities": entity_ids},
"grouped_group": {
"entities": ["group.living_room_downlights", *entity_ids]
},
}
},
)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert hass.states.get("group.living_room_downlights").state == "off"
assert hass.states.get("group.grouped_group").state == "off"
async def test_group_that_references_a_group_of_covers(hass):
"""Group that references a group of covers."""
entity_ids = [
"cover.living_front_ri",
"cover.living_back_lef",
]
hass.state = CoreState.stopped
for entity_id in entity_ids:
hass.states.async_set(entity_id, "closed")
await hass.async_block_till_done()
assert await async_setup_component(
hass,
"group",
{
"group": {
"living_room_downcover": {"entities": entity_ids},
"grouped_group": {
"entities": ["group.living_room_downlights", *entity_ids]
},
}
},
)
assert await async_setup_component(hass, "cover", {})
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert hass.states.get("group.living_room_downcover").state == "closed"
assert hass.states.get("group.grouped_group").state == "closed"
async def test_group_that_references_two_groups_of_covers(hass):
"""Group that references a group of covers."""
entity_ids = [
"cover.living_front_ri",
"cover.living_back_lef",
]
hass.state = CoreState.stopped
for entity_id in entity_ids:
hass.states.async_set(entity_id, "closed")
await hass.async_block_till_done()
assert await async_setup_component(hass, "cover", {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"living_room_downcover": {"entities": entity_ids},
"living_room_upcover": {"entities": entity_ids},
"grouped_group": {
"entities": [
"group.living_room_downlights",
"group.living_room_upcover",
]
},
}
},
)
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert hass.states.get("group.living_room_downcover").state == "closed"
assert hass.states.get("group.living_room_upcover").state == "closed"
assert hass.states.get("group.grouped_group").state == "closed"
async def test_group_that_references_two_types_of_groups(hass):
"""Group that references a group of covers and device_trackers."""
group_1_entity_ids = [
"cover.living_front_ri",
"cover.living_back_lef",
]
group_2_entity_ids = [
"device_tracker.living_front_ri",
"device_tracker.living_back_lef",
]
hass.state = CoreState.stopped
for entity_id in group_1_entity_ids:
hass.states.async_set(entity_id, "closed")
for entity_id in group_2_entity_ids:
hass.states.async_set(entity_id, "home")
await hass.async_block_till_done()
assert await async_setup_component(hass, "device_tracker", {})
assert await async_setup_component(
hass,
"group",
{
"group": {
"covers": {"entities": group_1_entity_ids},
"device_trackers": {"entities": group_2_entity_ids},
"grouped_group": {
"entities": ["group.covers", "group.device_trackers"]
},
}
},
)
assert await async_setup_component(hass, "cover", {})
await hass.async_block_till_done()
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert hass.states.get("group.covers").state == "closed"
assert hass.states.get("group.device_trackers").state == "home"
assert hass.states.get("group.grouped_group").state == "on"
async def test_plant_group(hass):
"""Test plant states can be grouped."""
entity_ids = [
"plant.upstairs",
"plant.downstairs",
]
assert await async_setup_component(
hass,
"plant",
{
"plant": {
"plantname": {
"sensors": {
"moisture": "sensor.mqtt_plant_moisture",
"battery": "sensor.mqtt_plant_battery",
"temperature": "sensor.mqtt_plant_temperature",
"conductivity": "sensor.mqtt_plant_conductivity",
"brightness": "sensor.mqtt_plant_brightness",
},
"min_moisture": 20,
"max_moisture": 60,
"min_battery": 17,
"min_conductivity": 500,
"min_temperature": 15,
"min_brightness": 500,
}
}
},
)
assert await async_setup_component(
hass,
"group",
{
"group": {
"plants": {"entities": entity_ids},
"plant_with_binary_sensors": {
"entities": [*entity_ids, "binary_sensor.planter"]
},
}
},
)
await hass.async_block_till_done()
hass.states.async_set("binary_sensor.planter", "off")
for entity_id in entity_ids:
hass.states.async_set(entity_id, "ok")
await hass.async_block_till_done()
await hass.async_block_till_done()
assert hass.states.get("group.plants").state == "ok"
assert hass.states.get("group.plant_with_binary_sensors").state == "off"
hass.states.async_set("binary_sensor.planter", "on")
for entity_id in entity_ids:
hass.states.async_set(entity_id, "problem")
await hass.async_block_till_done()
assert hass.states.get("group.plants").state == "problem"
assert hass.states.get("group.plant_with_binary_sensors").state == "on"
|
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cinder base exception handling.
Includes decorator for re-raising Cinder-type exceptions.
SHOULD include dedicated exception logging.
"""
import sys
from oslo_config import cfg
import six
import webob.exc
from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Make exception message format errors fatal.'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code=0, title="", explanation=""):
self.code = code
self.title = title
self.explanation = explanation
super(ConvertedException, self).__init__()
class Error(Exception):
pass
class CinderException(Exception):
"""Base Cinder Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
self.kwargs['message'] = message
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
        for k, v in six.iteritems(self.kwargs):
if isinstance(v, Exception):
self.kwargs[k] = six.text_type(v)
if self._should_format():
try:
message = self.message % kwargs
except Exception:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
                for name, value in six.iteritems(kwargs):
LOG.error("%s: %s" % (name, value))
if CONF.fatal_exception_format_errors:
                    six.reraise(*exc_info)
# at least get the core message out if something happened
message = self.message
elif isinstance(message, Exception):
message = six.text_type(message)
# NOTE(luisg): We put the actual message in 'msg' so that we can access
# it, because if we try to access the message via 'message' it will be
# overshadowed by the class' message attribute
self.msg = message
super(CinderException, self).__init__(message)
def _should_format(self):
return self.kwargs['message'] is None or '%(message)' in self.message
    def __unicode__(self):
        return six.text_type(self.msg)
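# Illustrative sketch, not part of the upstream Cinder exception set: a
# subclass only needs to define a printf-style 'message' template; keyword
# arguments passed to the constructor fill that template at raise time.
# The class name and template fields below are made up for this example.
class ExampleVolumeTimeout(CinderException):
    message = _("Volume %(volume_id)s timed out after %(seconds)d seconds.")
    code = 504
# Usage (hypothetical):
#     raise ExampleVolumeTimeout(volume_id='vol-0001', seconds=30)
# str(exc) and exc.msg then carry the fully formatted message.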
class VolumeBackendAPIException(CinderException):
message = _("Bad or unexpected response from the storage volume "
"backend API: %(data)s")
class VolumeDriverException(CinderException):
message = _("Volume driver reported an error: %(message)s")
class BackupDriverException(CinderException):
message = _("Backup driver reported an error: %(message)s")
class GlanceConnectionFailed(CinderException):
message = _("Connection to glance failed: %(reason)s")
class NotAuthorized(CinderException):
message = _("Not authorized.")
code = 403
class AdminRequired(NotAuthorized):
message = _("User does not have admin privileges")
class PolicyNotAuthorized(NotAuthorized):
message = _("Policy doesn't allow %(action)s to be performed.")
class ImageNotAuthorized(CinderException):
message = _("Not authorized for image %(image_id)s.")
class DriverNotInitialized(CinderException):
message = _("Volume driver not ready.")
class Invalid(CinderException):
message = _("Unacceptable parameters.")
code = 400
class InvalidSnapshot(Invalid):
message = _("Invalid snapshot: %(reason)s")
class InvalidVolumeAttachMode(Invalid):
message = _("Invalid attaching mode '%(mode)s' for "
"volume %(volume_id)s.")
class VolumeAttached(Invalid):
message = _("Volume %(volume_id)s is still attached, detach volume first.")
class SfJsonEncodeFailure(CinderException):
message = _("Failed to load data into json format")
class InvalidResults(Invalid):
message = _("The results are invalid.")
class InvalidInput(Invalid):
message = _("Invalid input received: %(reason)s")
class InvalidVolumeType(Invalid):
message = _("Invalid volume type: %(reason)s")
class InvalidVolume(Invalid):
message = _("Invalid volume: %(reason)s")
class InvalidContentType(Invalid):
message = _("Invalid content type %(content_type)s.")
class InvalidHost(Invalid):
message = _("Invalid host: %(reason)s")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
message = _("%(err)s")
class InvalidAuthKey(Invalid):
message = _("Invalid auth key: %(reason)s")
class InvalidConfigurationValue(Invalid):
message = _('Value "%(value)s" is not valid for '
'configuration option "%(option)s"')
class ServiceUnavailable(Invalid):
message = _("Service is unavailable at this time.")
class ImageUnacceptable(Invalid):
message = _("Image %(image_id)s is unacceptable: %(reason)s")
class DeviceUnavailable(Invalid):
message = _("The device in the path %(path)s is unavailable: %(reason)s")
class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid)s.")
class APIException(CinderException):
message = _("Error while requesting %(service)s API.")
def __init__(self, message=None, **kwargs):
if 'service' not in kwargs:
kwargs['service'] = 'unknown'
super(APIException, self).__init__(message, **kwargs)
class APITimeout(APIException):
message = _("Timeout while requesting %(service)s API.")
class NotFound(CinderException):
message = _("Resource could not be found.")
code = 404
safe = True
class VolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
class VolumeMetadataNotFound(NotFound):
message = _("Volume %(volume_id)s has no metadata with "
"key %(metadata_key)s.")
class VolumeAdminMetadataNotFound(NotFound):
message = _("Volume %(volume_id)s has no administration metadata with "
"key %(metadata_key)s.")
class InvalidVolumeMetadata(Invalid):
message = _("Invalid metadata: %(reason)s")
class InvalidVolumeMetadataSize(Invalid):
message = _("Invalid metadata size: %(reason)s")
class SnapshotMetadataNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s has no metadata with "
"key %(metadata_key)s.")
class VolumeTypeNotFound(NotFound):
message = _("Volume type %(volume_type_id)s could not be found.")
class VolumeTypeNotFoundByName(VolumeTypeNotFound):
message = _("Volume type with name %(volume_type_name)s "
"could not be found.")
class VolumeTypeAccessNotFound(NotFound):
message = _("Volume type access not found for %(volume_type_id)s / "
"%(project_id)s combination.")
class VolumeTypeExtraSpecsNotFound(NotFound):
message = _("Volume Type %(volume_type_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class VolumeTypeInUse(CinderException):
message = _("Volume Type %(volume_type_id)s deletion is not allowed with "
"volumes present with the type.")
class SnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
class ServerNotFound(NotFound):
message = _("Instance %(uuid)s could not be found.")
class VolumeIsBusy(CinderException):
message = _("deleting volume %(volume_name)s that has snapshot")
class SnapshotIsBusy(CinderException):
message = _("deleting snapshot %(snapshot_name)s that has "
"dependent volumes")
class ISCSITargetNotFoundForVolume(NotFound):
message = _("No target id found for volume %(volume_id)s.")
class InvalidImageRef(Invalid):
message = _("Invalid image href %(image_href)s.")
class ImageNotFound(NotFound):
message = _("Image %(image_id)s could not be found.")
class ServiceNotFound(NotFound):
message = _("Service %(service_id)s could not be found.")
class HostNotFound(NotFound):
message = _("Host %(host)s could not be found.")
class SchedulerHostFilterNotFound(NotFound):
message = _("Scheduler Host Filter %(filter_name)s could not be found.")
class SchedulerHostWeigherNotFound(NotFound):
message = _("Scheduler Host Weigher %(weigher_name)s could not be found.")
class HostBinaryNotFound(NotFound):
message = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
message = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
message = _("Change would make usage less than 0 for the following "
"resources: %(unders)s")
class QuotaNotFound(NotFound):
message = _("Quota could not be found")
class QuotaResourceUnknown(QuotaNotFound):
message = _("Unknown quota resources %(unknown)s.")
class ProjectQuotaNotFound(QuotaNotFound):
message = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
message = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
message = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
message = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(CinderException):
message = _("Quota exceeded for resources: %(overs)s")
class FileNotFound(NotFound):
message = _("File %(file_path)s could not be found.")
class Duplicate(CinderException):
pass
class VolumeTypeExists(Duplicate):
message = _("Volume Type %(id)s already exists.")
class VolumeTypeAccessExists(Duplicate):
message = _("Volume type access for %(volume_type_id)s / "
"%(project_id)s combination already exists.")
class VolumeTypeEncryptionExists(Invalid):
message = _("Volume type encryption for type %(type_id)s already exists.")
class VolumeTypeEncryptionNotFound(NotFound):
message = _("Volume type encryption for type %(type_id)s does not exist.")
class MalformedRequestBody(CinderException):
message = _("Malformed message body: %(reason)s")
class ConfigNotFound(NotFound):
message = _("Could not find config at %(path)s")
class ParameterNotFound(NotFound):
message = _("Could not find parameter %(param)s")
class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")
class NoValidHost(CinderException):
message = _("No valid host was found. %(reason)s")
class NoMoreTargets(CinderException):
"""No more available targets."""
pass
class QuotaError(CinderException):
message = _("Quota exceeded: code=%(code)s")
code = 413
headers = {'Retry-After': 0}
safe = True
class VolumeSizeExceedsAvailableQuota(QuotaError):
message = _("Requested volume or snapshot exceeds allowed gigabytes "
"quota. Requested %(requested)sG, quota is %(quota)sG and "
"%(consumed)sG has been consumed.")
class VolumeBackupSizeExceedsAvailableQuota(QuotaError):
message = _("Requested backup exceeds allowed Backup gigabytes "
"quota. Requested %(requested)sG, quota is %(quota)sG and "
"%(consumed)sG has been consumed.")
class VolumeLimitExceeded(QuotaError):
message = _("Maximum number of volumes allowed (%(allowed)d) exceeded")
class SnapshotLimitExceeded(QuotaError):
message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded")
class BackupLimitExceeded(QuotaError):
message = _("Maximum number of backups allowed (%(allowed)d) exceeded")
class DuplicateSfVolumeNames(Duplicate):
message = _("Detected more than one volume with name %(vol_name)s")
class VolumeTypeCreateFailed(CinderException):
message = _("Cannot create volume_type with "
"name %(name)s and specs %(extra_specs)s")
class VolumeTypeUpdateFailed(CinderException):
message = _("Cannot update volume_type %(id)s")
class UnknownCmd(VolumeDriverException):
message = _("Unknown or unsupported command %(cmd)s")
class MalformedResponse(VolumeDriverException):
message = _("Malformed response to command %(cmd)s: %(reason)s")
class FailedCmdWithDump(VolumeDriverException):
message = _("Operation failed with status=%(status)s. Full dump: %(data)s")
class InvalidConnectorException(VolumeDriverException):
message = _("Connector doesn't have required information: %(missing)s")
class GlanceMetadataExists(Invalid):
message = _("Glance metadata cannot be updated, key %(key)s"
" exists for volume id %(volume_id)s")
class GlanceMetadataNotFound(NotFound):
message = _("Glance metadata for volume/snapshot %(id)s cannot be found.")
class ExportFailure(Invalid):
message = _("Failed to export for volume: %(reason)s")
class RemoveExportException(VolumeDriverException):
message = _("Failed to remove export for volume %(volume)s: %(reason)s")
class MetadataCreateFailure(Invalid):
message = _("Failed to create metadata for volume: %(reason)s")
class MetadataUpdateFailure(Invalid):
message = _("Failed to update metadata for volume: %(reason)s")
class MetadataCopyFailure(Invalid):
message = _("Failed to copy metadata to volume: %(reason)s")
class ImageCopyFailure(Invalid):
message = _("Failed to copy image to volume: %(reason)s")
class BackupInvalidCephArgs(BackupDriverException):
message = _("Invalid Ceph args provided for backup rbd operation")
class BackupOperationError(Invalid):
message = _("An error has occurred during backup operation")
class BackupMetadataUnsupportedVersion(BackupDriverException):
message = _("Unsupported backup metadata version requested")
class BackupVerifyUnsupportedDriver(BackupDriverException):
message = _("Unsupported backup verify driver")
class VolumeMetadataBackupExists(BackupDriverException):
message = _("Metadata backup already exists for this volume")
class BackupRBDOperationFailed(BackupDriverException):
message = _("Backup RBD operation failed")
class EncryptedBackupOperationFailed(BackupDriverException):
message = _("Backup operation of an encrypted volume failed.")
class BackupNotFound(NotFound):
message = _("Backup %(backup_id)s could not be found.")
class BackupFailedToGetVolumeBackend(NotFound):
message = _("Failed to identify volume backend.")
class InvalidBackup(Invalid):
message = _("Invalid backup: %(reason)s")
class SwiftConnectionFailed(BackupDriverException):
message = _("Connection to swift failed: %(reason)s")
class TransferNotFound(NotFound):
message = _("Transfer %(transfer_id)s could not be found.")
class VolumeMigrationFailed(CinderException):
message = _("Volume migration failed: %(reason)s")
class SSHInjectionThreat(CinderException):
message = _("SSH command injection detected: %(command)s")
class QoSSpecsExists(Duplicate):
message = _("QoS Specs %(specs_id)s already exists.")
class QoSSpecsCreateFailed(CinderException):
message = _("Failed to create qos_specs: "
"%(name)s with specs %(qos_specs)s.")
class QoSSpecsUpdateFailed(CinderException):
message = _("Failed to update qos_specs: "
"%(specs_id)s with specs %(qos_specs)s.")
class QoSSpecsNotFound(NotFound):
message = _("No such QoS spec %(specs_id)s.")
class QoSSpecsAssociateFailed(CinderException):
message = _("Failed to associate qos_specs: "
"%(specs_id)s with type %(type_id)s.")
class QoSSpecsDisassociateFailed(CinderException):
message = _("Failed to disassociate qos_specs: "
"%(specs_id)s with type %(type_id)s.")
class QoSSpecsKeyNotFound(NotFound):
message = _("QoS spec %(specs_id)s has no spec with "
"key %(specs_key)s.")
class InvalidQoSSpecs(Invalid):
message = _("Invalid qos specs: %(reason)s")
class QoSSpecsInUse(CinderException):
message = _("QoS Specs %(specs_id)s is still associated with entities.")
class KeyManagerError(CinderException):
msg_fmt = _("key manager error: %(reason)s")
class ManageExistingInvalidReference(CinderException):
message = _("Manage existing volume failed due to invalid backend "
"reference %(existing_ref)s: %(reason)s")
class ReplicationError(CinderException):
message = _("Volume %(volume_id)s replication "
"error: %(reason)s")
class ReplicationNotFound(NotFound):
message = _("Volume replication for %(volume_id)s "
"could not be found.")
class ManageExistingVolumeTypeMismatch(CinderException):
message = _("Manage existing volume failed due to volume type mismatch: "
"%(reason)s")
class ExtendVolumeError(CinderException):
message = _("Error extending volume: %(reason)s")
class EvaluatorParseException(Exception):
message = _("Error during evaluator parsing: %(reason)s")
class ObjectActionError(CinderException):
msg_fmt = _('Object action %(action)s failed because: %(reason)s')
class ObjectFieldInvalid(CinderException):
msg_fmt = _('Field %(field)s of %(objname)s is not an instance of Field')
class UnsupportedObjectError(CinderException):
msg_fmt = _('Unsupported object type %(objtype)s')
class OrphanedObjectError(CinderException):
msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
class IncompatibleObjectVersion(CinderException):
msg_fmt = _('Version %(objver)s of %(objname)s is not supported')
class ReadOnlyFieldError(CinderException):
msg_fmt = _('Cannot modify readonly field %(field)s')
# Driver specific exceptions
# Coraid
class CoraidException(VolumeDriverException):
message = _('Coraid Cinder Driver exception.')
class CoraidJsonEncodeFailure(CoraidException):
message = _('Failed to encode json data.')
class CoraidESMBadCredentials(CoraidException):
message = _('Login on ESM failed.')
class CoraidESMReloginFailed(CoraidException):
message = _('Relogin on ESM failed.')
class CoraidESMBadGroup(CoraidException):
message = _('Group with name "%(group_name)s" not found.')
class CoraidESMConfigureError(CoraidException):
message = _('ESM configure request failed: %(reason)s')
class CoraidESMNotAvailable(CoraidException):
message = _('Coraid ESM not available with reason: %(reason)s')
# Pure Storage
class PureDriverException(VolumeDriverException):
message = _("Pure Storage Cinder driver failure: %(reason)s")
# Zadara
class ZadaraException(VolumeDriverException):
message = _('Zadara Cinder Driver exception.')
class ZadaraServerCreateFailure(ZadaraException):
message = _("Unable to create server object for initiator %(name)s")
class ZadaraServerNotFound(ZadaraException):
message = _("Unable to find server object for initiator %(name)s")
class ZadaraVPSANoActiveController(ZadaraException):
message = _("Unable to find any active VPSA controller")
class ZadaraAttachmentsNotFound(ZadaraException):
message = _("Failed to retrieve attachments for volume %(name)s")
class ZadaraInvalidAttachmentInfo(ZadaraException):
message = _("Invalid attachment info for volume %(name)s: %(reason)s")
class BadHTTPResponseStatus(ZadaraException):
message = _("Bad HTTP response status %(status)s")
# SolidFire
class SolidFireAPIException(VolumeBackendAPIException):
message = _("Bad response from SolidFire API")
class SolidFireDriverException(VolumeDriverException):
message = _("SolidFire Cinder Driver exception")
class SolidFireAPIDataException(SolidFireAPIException):
message = _("Error in SolidFire API response: data=%(data)s")
class SolidFireAccountNotFound(SolidFireDriverException):
message = _("Unable to locate account %(account_name)s on "
"Solidfire device")
class SolidFireRetryableException(VolumeBackendAPIException):
message = _("Retryable SolidFire Exception encountered")
# HP 3Par
class Invalid3PARDomain(VolumeDriverException):
message = _("Invalid 3PAR Domain: %(err)s")
# RemoteFS drivers
class RemoteFSException(VolumeDriverException):
message = _("Unknown RemoteFS exception")
class RemoteFSNoSharesMounted(RemoteFSException):
message = _("No mounted shares found")
class RemoteFSNoSuitableShareFound(RemoteFSException):
message = _("There is no share which can host %(volume_size)sG")
# NFS driver
class NfsException(RemoteFSException):
message = _("Unknown NFS exception")
class NfsNoSharesMounted(RemoteFSNoSharesMounted):
message = _("No mounted NFS shares found")
class NfsNoSuitableShareFound(RemoteFSNoSuitableShareFound):
message = _("There is no share which can host %(volume_size)sG")
# Smbfs driver
class SmbfsException(RemoteFSException):
message = _("Unknown SMBFS exception.")
class SmbfsNoSharesMounted(RemoteFSNoSharesMounted):
message = _("No mounted SMBFS shares found.")
class SmbfsNoSuitableShareFound(RemoteFSNoSuitableShareFound):
message = _("There is no share which can host %(volume_size)sG.")
# Gluster driver
class GlusterfsException(RemoteFSException):
message = _("Unknown Gluster exception")
class GlusterfsNoSharesMounted(RemoteFSNoSharesMounted):
message = _("No mounted Gluster shares found")
class GlusterfsNoSuitableShareFound(RemoteFSNoSuitableShareFound):
message = _("There is no share which can host %(volume_size)sG")
# HP MSA
class HPMSAVolumeDriverException(VolumeDriverException):
message = _("HP MSA Volume Driver exception")
class HPMSAInvalidVDisk(HPMSAVolumeDriverException):
message = _("VDisk doesn't exist (%(vdisk)s)")
class HPMSAConnectionError(HPMSAVolumeDriverException):
message = _("Unable to connect to MSA array")
class HPMSANotEnoughSpace(HPMSAVolumeDriverException):
message = _("Not enough space on VDisk (%(vdisk)s)")
# Fibre Channel Zone Manager
class ZoneManagerException(CinderException):
message = _("Fibre Channel connection control failure: %(reason)s")
class FCZoneDriverException(CinderException):
message = _("Fibre Channel Zone operation failed: %(reason)s")
class FCSanLookupServiceException(CinderException):
message = _("Fibre Channel SAN Lookup failure: %(reason)s")
class BrocadeZoningCliException(CinderException):
message = _("Fibre Channel Zoning CLI error: %(reason)s")
class CiscoZoningCliException(CinderException):
message = _("Fibre Channel Zoning CLI error: %(reason)s")
class NetAppDriverException(VolumeDriverException):
message = _("NetApp Cinder Driver exception.")
class EMCVnxCLICmdError(VolumeBackendAPIException):
message = _("EMC VNX Cinder Driver CLI exception: %(cmd)s "
"(Return Code: %(rc)s) (Output: %(out)s).")
# ConsistencyGroup
class ConsistencyGroupNotFound(NotFound):
message = _("ConsistencyGroup %(consistencygroup_id)s could not be found.")
class InvalidConsistencyGroup(Invalid):
message = _("Invalid ConsistencyGroup: %(reason)s")
# CgSnapshot
class CgSnapshotNotFound(NotFound):
message = _("CgSnapshot %(cgsnapshot_id)s could not be found.")
class InvalidCgSnapshot(Invalid):
message = _("Invalid CgSnapshot: %(reason)s")
# Hitachi Block Storage Driver
class HBSDError(CinderException):
message = _("HBSD error occurs.")
class HBSDCmdError(HBSDError):
def __init__(self, message=None, ret=None, err=None):
self.ret = ret
self.stderr = err
super(HBSDCmdError, self).__init__(message=message)
class HBSDBusy(HBSDError):
message = "Device or resource is busy."
class HBSDNotFound(NotFound):
message = _("Storage resource could not be found.")
class HBSDVolumeIsBusy(VolumeIsBusy):
message = _("Volume %(volume_name)s is busy.")
# Datera driver
class DateraAPIException(VolumeBackendAPIException):
message = _("Bad response from Datera API")
# Target drivers
class ISCSITargetCreateFailed(CinderException):
message = _("Failed to create iscsi target for volume %(volume_id)s.")
class ISCSITargetRemoveFailed(CinderException):
message = _("Failed to remove iscsi target for volume %(volume_id)s.")
class ISCSITargetAttachFailed(CinderException):
message = _("Failed to attach iSCSI target for volume %(volume_id)s.")
class ISCSITargetDetachFailed(CinderException):
message = _("Failed to detach iSCSI target for volume %(volume_id)s.")
class ISCSITargetHelperCommandFailed(CinderException):
message = _("%(error_message)s")
# X-IO driver exception.
class XIODriverException(VolumeDriverException):
message = _("X-IO Volume Driver exception!")
# Violin Memory drivers
class ViolinInvalidBackendConfig(CinderException):
message = _("Volume backend config is invalid: %(reason)s")
class ViolinRequestRetryTimeout(CinderException):
message = _("Backend service retry timeout hit: %(timeout)s sec")
class ViolinBackendErr(CinderException):
message = _("Backend reports: %(message)s")
class ViolinBackendErrExists(CinderException):
message = _("Backend reports: item already exists")
class ViolinBackendErrNotFound(CinderException):
message = _("Backend reports: item not found")
# ZFSSA NFS driver exception.
class WebDAVClientError(CinderException):
message = _("The WebDAV request failed. Reason: %(msg)s, "
"Return code/reason: %(code)s, Source Volume: %(src)s, "
"Destination Volume: %(dst)s, Method: %(method)s.")
# XtremIO Drivers
class XtremIOAlreadyMappedError(CinderException):
message = _("Volume to Initiator Group mapping already exists")
|
|
# -*- coding: utf-8 -*-
"""
checks.py
Each function in here should
- Take a DataFrame as its first argument, maybe optional arguments
- Makes its assert on the result
- Return the original DataFrame
"""
import numpy as np
import pandas as pd
from engarde import generic
from engarde.generic import verify, verify_all, verify_any
def none_missing(df, columns=None):
"""
Asserts that there are no missing values (NaNs) in the DataFrame.
Parameters
----------
df : DataFrame
columns : list
list of columns to restrict the check to
Returns
-------
df : DataFrame
same as the original
"""
if columns is None:
columns = df.columns
try:
assert not df[columns].isnull().any().any()
except AssertionError as e:
missing = df[columns].isnull()
msg = generic.bad_locations(missing)
e.args = msg
raise
return df
def is_monotonic(df, items=None, increasing=None, strict=False):
"""
Asserts that the DataFrame is monotonic.
Parameters
==========
df : Series or DataFrame
items : dict
mapping columns to conditions (increasing, strict)
increasing : None or bool
        None means either increasing or decreasing is accepted.
    strict : bool
        whether the comparison should be strict
Returns
=======
df : DataFrame
"""
if items is None:
items = {k: (increasing, strict) for k in df}
for col, (increasing, strict) in items.items():
s = pd.Index(df[col])
if increasing:
            good = s.is_monotonic_increasing
        elif increasing is None:
            good = s.is_monotonic_increasing | s.is_monotonic_decreasing
        else:
            good = s.is_monotonic_decreasing
if strict:
if increasing:
good = good & (s.to_series().diff().dropna() > 0).all()
elif increasing is None:
good = good & ((s.to_series().diff().dropna() > 0).all() |
(s.to_series().diff().dropna() < 0).all())
else:
good = good & (s.to_series().diff().dropna() < 0).all()
if not good:
raise AssertionError
return df
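# Example usage (column names made up for illustration): require column 'a'
# to be strictly increasing and column 'b' to be monotonic in either
# direction, not necessarily strictly:
#     is_monotonic(df, items={'a': (True, True), 'b': (None, False)})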
def is_shape(df, shape):
"""
Asserts that the DataFrame is of a known shape.
Parameters
==========
df : DataFrame
shape : tuple
(n_rows, n_columns). Use None or -1 if you don't care
about a dimension.
Returns
=======
df : DataFrame
"""
try:
check = np.all(np.equal(df.shape, shape) | (np.equal(shape, [-1, -1]) |
np.equal(shape, [None, None])))
assert check
except AssertionError as e:
msg = ("Expected shape: {}\n"
"\t\tActual shape: {}".format(shape, df.shape))
e.args = (msg,)
raise
return df
def unique_index(df):
"""
Assert that the index is unique
Parameters
==========
df : DataFrame
Returns
=======
df : DataFrame
"""
try:
assert df.index.is_unique
except AssertionError as e:
        e.args = (df.index[df.index.duplicated()].unique(),)
raise
return df
def within_set(df, items=None):
"""
    Assert that each given column only contains values from its set in ``items``.
Parameters
==========
df : DataFrame
items : dict
mapping of columns (k) to array-like of values (v) that
``df[k]`` is expected to be a subset of
Returns
=======
df : DataFrame
"""
for k, v in items.items():
if not df[k].isin(v).all():
bad = df.loc[~df[k].isin(v), k]
raise AssertionError('Not in set', bad)
return df
def within_range(df, items=None):
"""
    Assert that the values of each given column fall within a range.
    Parameters
    ==========
    df : DataFrame
items : dict
mapping of columns (k) to a (low, high) tuple (v)
that ``df[k]`` is expected to be between.
Returns
=======
df : DataFrame
"""
for k, (lower, upper) in items.items():
if (lower > df[k]).any() or (upper < df[k]).any():
bad = (lower > df[k]) | (upper < df[k])
raise AssertionError("Outside range", bad)
return df
def within_n_std(df, n=3):
"""
Assert that every value is within ``n`` standard
deviations of its column's mean.
Parameters
==========
    df : DataFrame
    n : int
        number of standard deviations from the mean
    Returns
    =======
    df : DataFrame
"""
means = df.mean()
stds = df.std()
inliers = (np.abs(df - means) < n * stds)
if not np.all(inliers):
msg = generic.bad_locations(~inliers)
raise AssertionError(msg)
return df
def has_dtypes(df, items):
"""
Assert that a DataFrame has ``dtypes``
Parameters
==========
df: DataFrame
items: dict
mapping of columns to dtype.
Returns
=======
df : DataFrame
"""
dtypes = df.dtypes
for k, v in items.items():
if not dtypes[k] == v:
raise AssertionError("{} has the wrong dtype ({})".format(k, v))
return df
def one_to_many(df, unitcol, manycol):
"""
Assert that a many-to-one relationship is preserved between two
    columns. For example, a retail store will have distinct
departments, each with several employees. If each employee may
only work in a single department, then the relationship of the
department to the employees is one to many.
Parameters
==========
df : DataFrame
unitcol : str
        The column that encapsulates the groups in ``manycol``.
manycol : str
        The column that must remain unique in the distinct pairs
        between ``manycol`` and ``unitcol``.
Returns
=======
df : DataFrame
"""
subset = df[[manycol, unitcol]].drop_duplicates()
for many in subset[manycol].unique():
if subset[subset[manycol] == many].shape[0] > 1:
msg = "{} in {} has multiple values for {}".format(many, manycol, unitcol)
raise AssertionError(msg)
return df
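# A minimal sketch (illustrative only; the column names and bounds below are
# made up): because every check returns the original DataFrame, checks chain
# cleanly through ``DataFrame.pipe``.
def _example_check_pipeline(df):
    return (df.pipe(none_missing)
              .pipe(unique_index)
              .pipe(within_range, items={'price': (0, 1000)})
              .pipe(has_dtypes, items={'price': np.dtype('float64')}))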
__all__ = ['is_monotonic', 'is_shape', 'none_missing', 'unique_index', 'within_n_std',
           'within_range', 'within_set', 'has_dtypes', 'one_to_many',
           'verify', 'verify_all', 'verify_any']
|
|
'''
Implements the RTS ALUA Target Port Group class.
This file is part of RTSLib.
Copyright (c) 2016 by Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
'''
from .node import CFSNode
from .utils import RTSLibError, RTSLibALUANotSupported, fread, fwrite
import six
alua_rw_params = ['alua_access_state', 'alua_access_status',
'alua_write_metadata', 'alua_access_type', 'preferred',
'nonop_delay_msecs', 'trans_delay_msecs',
'implicit_trans_secs', 'alua_support_offline',
'alua_support_standby', 'alua_support_transitioning',
'alua_support_active_nonoptimized',
'alua_support_unavailable', 'alua_support_active_optimized']
alua_ro_params = ['tg_pt_gp_id', 'members', 'alua_support_lba_dependent']
alua_types = ['None', 'Implicit', 'Explicit', 'Implicit and Explicit']
alua_statuses = ['None', 'Altered by Explicit STPG', 'Altered by Implicit ALUA']
class ALUATargetPortGroup(CFSNode):
"""
ALUA Target Port Group interface
"""
def __repr__(self):
return "<ALUA TPG %s>" % self.name
def __init__(self, storage_object, name, tag=None):
"""
@param storage_object: backstore storage object to create ALUA group for
@param name: name of ALUA group
@param tag: target port group id. If not passed in, try to look
up existing ALUA TPG with the same name
"""
if storage_object.alua_supported is False:
raise RTSLibALUANotSupported("Backend does not support ALUA setup")
# default_tg_pt_gp takes tag 1
if tag is not None and (tag > 65535 or tag < 1):
raise RTSLibError("The TPG Tag must be between 1 and 65535")
super(ALUATargetPortGroup, self).__init__()
self.name = name
self.storage_object = storage_object
self._path = "%s/alua/%s" % (storage_object.path, name)
if tag is not None:
try:
self._create_in_cfs_ine('create')
except OSError as msg:
raise RTSLibError(msg)
try:
fwrite("%s/tg_pt_gp_id" % self._path, tag)
except IOError as msg:
self.delete()
raise RTSLibError("Cannot set id to %d: %s" % (tag, str(msg)))
else:
try:
self._create_in_cfs_ine('lookup')
except OSError as msg:
raise RTSLibError(msg)
# Public
def delete(self):
"""
Delete ALUA TPG and unmap from LUNs
"""
self._check_self()
# default_tg_pt_gp created by the kernel and cannot be deleted
if self.name == "default_tg_pt_gp":
raise RTSLibError("Can not delete default_tg_pt_gp")
# This will reset the ALUA tpg to default_tg_pt_gp
super(ALUATargetPortGroup, self).delete()
def _get_alua_access_state(self):
self._check_self()
path = "%s/alua_access_state" % self.path
return int(fread(path))
def _set_alua_access_state(self, newstate):
self._check_self()
path = "%s/alua_access_state" % self.path
try:
fwrite(path, str(int(newstate)))
except IOError as e:
raise RTSLibError("Cannot change ALUA state: %s" % e)
def _get_alua_access_status(self):
self._check_self()
path = "%s/alua_access_status" % self.path
status = fread(path)
return alua_statuses.index(status)
def _set_alua_access_status(self, newstatus):
self._check_self()
path = "%s/alua_access_status" % self.path
try:
fwrite(path, str(int(newstatus)))
except IOError as e:
raise RTSLibError("Cannot change ALUA status: %s" % e)
def _get_alua_access_type(self):
self._check_self()
path = "%s/alua_access_type" % self.path
alua_type = fread(path)
return alua_types.index(alua_type)
def _set_alua_access_type(self, access_type):
self._check_self()
path = "%s/alua_access_type" % self.path
try:
fwrite(path, str(int(access_type)))
except IOError as e:
raise RTSLibError("Cannot change ALUA access type: %s" % e)
def _get_preferred(self):
self._check_self()
path = "%s/preferred" % self.path
return int(fread(path))
def _set_preferred(self, pref):
self._check_self()
path = "%s/preferred" % self.path
try:
fwrite(path, str(int(pref)))
except IOError as e:
raise RTSLibError("Cannot set preferred: %s" % e)
def _get_alua_write_metadata(self):
self._check_self()
path = "%s/alua_write_metadata" % self.path
return int(fread(path))
def _set_alua_write_metadata(self, pref):
self._check_self()
path = "%s/alua_write_metadata" % self.path
try:
fwrite(path, str(int(pref)))
except IOError as e:
raise RTSLibError("Cannot set alua_write_metadata: %s" % e)
def _get_alua_support_active_nonoptimized(self):
self._check_self()
path = "%s/alua_support_active_nonoptimized" % self.path
return int(fread(path))
def _set_alua_support_active_nonoptimized(self, enabled):
self._check_self()
path = "%s/alua_support_active_nonoptimized" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_active_nonoptimized: %s" % e)
def _get_alua_support_active_optimized(self):
self._check_self()
path = "%s/alua_support_active_optimized" % self.path
return int(fread(path))
def _set_alua_support_active_optimized(self, enabled):
self._check_self()
path = "%s/alua_support_active_optimized" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_active_optimized: %s" % e)
def _get_alua_support_offline(self):
self._check_self()
path = "%s/alua_support_offline" % self.path
return int(fread(path))
def _set_alua_support_offline(self, enabled):
self._check_self()
path = "%s/alua_support_offline" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_offline: %s" % e)
def _get_alua_support_unavailable(self):
self._check_self()
path = "%s/alua_support_unavailable" % self.path
return int(fread(path))
def _set_alua_support_unavailable(self, enabled):
self._check_self()
path = "%s/alua_support_unavailable" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_unavailable: %s" % e)
def _get_alua_support_standby(self):
self._check_self()
path = "%s/alua_support_standby" % self.path
return int(fread(path))
def _set_alua_support_standby(self, enabled):
self._check_self()
path = "%s/alua_support_standby" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_standby: %s" % e)
def _get_alua_support_transitioning(self):
self._check_self()
path = "%s/alua_support_transitioning" % self.path
return int(fread(path))
def _set_alua_support_transitioning(self, enabled):
self._check_self()
path = "%s/alua_support_transitioning" % self.path
try:
fwrite(path, str(int(enabled)))
except IOError as e:
raise RTSLibError("Cannot set alua_support_transitioning: %s" % e)
def _get_alua_support_lba_dependent(self):
self._check_self()
path = "%s/alua_support_lba_dependent" % self.path
return int(fread(path))
def _get_members(self):
self._check_self()
path = "%s/members" % self.path
member_list = []
for member in fread(path).splitlines():
lun_path = member.split("/")
if len(lun_path) != 4:
continue
member_list.append({ 'driver': lun_path[0], 'target': lun_path[1],
'tpgt': int(lun_path[2].split("_", 1)[1]),
'lun': int(lun_path[3].split("_", 1)[1]) })
return member_list
def _get_tg_pt_gp_id(self):
self._check_self()
path = "%s/tg_pt_gp_id" % self.path
return int(fread(path))
def _get_trans_delay_msecs(self):
self._check_self()
path = "%s/trans_delay_msecs" % self.path
return int(fread(path))
def _set_trans_delay_msecs(self, secs):
self._check_self()
path = "%s/trans_delay_msecs" % self.path
try:
fwrite(path, str(int(secs)))
except IOError as e:
raise RTSLibError("Cannot set trans_delay_msecs: %s" % e)
def _get_implicit_trans_secs(self):
self._check_self()
path = "%s/implicit_trans_secs" % self.path
return int(fread(path))
def _set_implicit_trans_secs(self, secs):
self._check_self()
path = "%s/implicit_trans_secs" % self.path
try:
fwrite(path, str(int(secs)))
except IOError as e:
raise RTSLibError("Cannot set implicit_trans_secs: %s" % e)
def _get_nonop_delay_msecs(self):
self._check_self()
path = "%s/nonop_delay_msecs" % self.path
return int(fread(path))
def _set_nonop_delay_msecs(self, delay):
self._check_self()
path = "%s/nonop_delay_msecs" % self.path
try:
fwrite(path, str(int(delay)))
except IOError as e:
raise RTSLibError("Cannot set nonop_delay_msecs: %s" % e)
def dump(self):
d = super(ALUATargetPortGroup, self).dump()
d['name'] = self.name
d['tg_pt_gp_id'] = self.tg_pt_gp_id
for param in alua_rw_params:
d[param] = getattr(self, param, None)
return d
alua_access_state = property(_get_alua_access_state, _set_alua_access_state,
doc="Get or set ALUA state. "
"0 = Active/optimized, "
"1 = Active/non-optimized, "
"2 = Standby, "
"3 = Unavailable, "
"4 = LBA Dependent, "
"14 = Offline, "
"15 = Transitioning")
alua_access_type = property(_get_alua_access_type, _set_alua_access_type,
doc="Get or set ALUA access type. "
"1 = Implicit, 2 = Explicit, 3 = Both")
alua_access_status = property(_get_alua_access_status,
_set_alua_access_status,
doc="Get or set ALUA access status. "
"0 = None, "
"1 = Altered by Explicit STPG, "
"2 = Altered by Implicit ALUA")
preferred = property(_get_preferred, _set_preferred,
doc="Get or set preferred bit. 1 = Pref, 0 Not-Pre")
alua_write_metadata = property(_get_alua_write_metadata,
_set_alua_write_metadata,
doc="Get or set alua_write_metadata flag. "
"enable (1) or disable (0)")
tg_pt_gp_id = property(_get_tg_pt_gp_id, doc="Get ALUA Target Port Group ID")
members = property(_get_members, doc="Get LUNs in Target Port Group")
alua_support_active_nonoptimized = property(_get_alua_support_active_nonoptimized,
_set_alua_support_active_nonoptimized,
doc="Enable (1) or disable (0) "
"Active/non-optimized support")
alua_support_active_optimized = property(_get_alua_support_active_optimized,
_set_alua_support_active_optimized,
doc="Enable (1) or disable (0) "
"Active/optimized support")
alua_support_offline = property(_get_alua_support_offline,
_set_alua_support_offline,
doc="Enable (1) or disable (0) "
"offline support")
alua_support_unavailable = property(_get_alua_support_unavailable,
_set_alua_support_unavailable,
doc="enable (1) or disable (0) "
"unavailable support")
alua_support_standby = property(_get_alua_support_standby,
_set_alua_support_standby,
doc="enable (1) or disable (0) "
"standby support")
alua_support_lba_dependent = property(_get_alua_support_lba_dependent,
doc="show lba_dependent support "
"enabled (1) or disabled (0)")
alua_support_transitioning = property(_get_alua_support_transitioning,
_set_alua_support_transitioning,
doc="enable (1) or disable (0) "
"transitioning support")
trans_delay_msecs = property(_get_trans_delay_msecs,
_set_trans_delay_msecs,
doc="msecs to delay state transition")
implicit_trans_secs = property(_get_implicit_trans_secs,
_set_implicit_trans_secs,
doc="implicit transition time limit")
nonop_delay_msecs = property(_get_nonop_delay_msecs, _set_nonop_delay_msecs,
doc="msecs to delay IO when non-optimized")
@classmethod
def setup(cls, storage_obj, alua_tpg, err_func):
name = alua_tpg['name']
if name == 'default_tg_pt_gp':
return
alua_tpg_obj = cls(storage_obj, name, alua_tpg['tg_pt_gp_id'])
for param, value in six.iteritems(alua_tpg):
if param != 'name' and param != 'tg_pt_gp_id':
try:
setattr(alua_tpg_obj, param, value)
                except Exception:
raise RTSLibError("Could not set attribute '%s' for alua tpg '%s'"
% (param, alua_tpg['name']))
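# Illustrative sketch only (not part of rtslib): a typical way this class is
# driven. ``so`` stands in for an existing backstore storage object with ALUA
# support; the group name and tag below are made up.
def _example_alua_tpg_usage(so):
    # Create an ALUA target port group with tag 2 (tag 1 is default_tg_pt_gp).
    tpg = ALUATargetPortGroup(so, "lun_group_a", tag=2)
    # 0 = Active/optimized; see the alua_access_state property above.
    tpg.alua_access_state = 0
    # Mark this group as preferred for path selection.
    tpg.preferred = 1
    # Members are the LUNs currently mapped into this group.
    return tpg.members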
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron.common import exceptions as n_exc
from neutron import context
from neutron import manager
from neutron.objects import base as base_object
from neutron.objects.qos import policy as policy_object
from neutron.objects.qos import rule as rule_object
from neutron.plugins.common import constants
from neutron.services.qos import qos_consts
from neutron.tests.unit.services.qos import base
DB_PLUGIN_KLASS = 'neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
class TestQosPlugin(base.BaseQosTestCase):
def setUp(self):
super(TestQosPlugin, self).setUp()
self.setup_coreplugin()
mock.patch('neutron.objects.db.api.create_object').start()
mock.patch('neutron.objects.db.api.update_object').start()
mock.patch('neutron.objects.db.api.delete_object').start()
mock.patch('neutron.objects.db.api.get_object').start()
mock.patch(
'neutron.objects.qos.policy.QosPolicy.obj_load_attr').start()
        # We don't use real models, as per the mocks above. We also need to
        # mock out methods that work with real data types.
mock.patch(
'neutron.objects.base.NeutronDbObject.modify_fields_from_db'
).start()
cfg.CONF.set_override("core_plugin", DB_PLUGIN_KLASS)
cfg.CONF.set_override("service_plugins", ["qos"])
mgr = manager.NeutronManager.get_instance()
self.qos_plugin = mgr.get_service_plugins().get(
constants.QOS)
self.qos_plugin.notification_driver_manager = mock.Mock()
self.ctxt = context.Context('fake_user', 'fake_tenant')
mock.patch.object(self.ctxt.session, 'refresh').start()
mock.patch.object(self.ctxt.session, 'expunge').start()
self.policy_data = {
'policy': {'id': uuidutils.generate_uuid(),
'tenant_id': uuidutils.generate_uuid(),
'name': 'test-policy',
'description': 'Test policy description',
'shared': True}}
self.rule_data = {
'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(),
'max_kbps': 100,
'max_burst_kbps': 150},
'dscp_marking_rule': {'id': uuidutils.generate_uuid(),
'dscp_mark': 16}}
self.policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
self.rule = rule_object.QosBandwidthLimitRule(
self.ctxt, **self.rule_data['bandwidth_limit_rule'])
self.dscp_rule = rule_object.QosDscpMarkingRule(
self.ctxt, **self.rule_data['dscp_marking_rule'])
def _validate_notif_driver_params(self, method_name):
method = getattr(self.qos_plugin.notification_driver_manager,
method_name)
self.assertTrue(method.called)
self.assertIsInstance(
method.call_args[0][1], policy_object.QosPolicy)
@mock.patch(
'neutron.objects.rbac_db.RbacNeutronDbObjectMixin'
'.create_rbac_policy')
def test_add_policy(self, *mocks):
self.qos_plugin.create_policy(self.ctxt, self.policy_data)
self._validate_notif_driver_params('create_policy')
@mock.patch(
'neutron.objects.rbac_db.RbacNeutronDbObjectMixin'
'.create_rbac_policy')
def test_update_policy(self, *mocks):
fields = base_object.get_updatable_fields(
policy_object.QosPolicy, self.policy_data['policy'])
self.qos_plugin.update_policy(
self.ctxt, self.policy.id, {'policy': fields})
self._validate_notif_driver_params('update_policy')
@mock.patch('neutron.objects.db.api.get_object', return_value=None)
def test_delete_policy(self, *mocks):
self.qos_plugin.delete_policy(self.ctxt, self.policy.id)
self._validate_notif_driver_params('delete_policy')
def test_create_policy_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
self.qos_plugin.create_policy_bandwidth_limit_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_notif_driver_params('update_policy')
def test_update_policy_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.rule])
self.qos_plugin.update_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
self._validate_notif_driver_params('update_policy')
def test_update_policy_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
n_exc.QosRuleNotFound,
self.qos_plugin.update_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id,
self.rule_data)
def test_delete_policy_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.rule])
self.qos_plugin.delete_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, _policy.id)
self._validate_notif_driver_params('update_policy')
def test_delete_policy_rule_bad_policy(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [])
self.assertRaises(
n_exc.QosRuleNotFound,
self.qos_plugin.delete_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, _policy.id)
def test_get_policy_bandwidth_limit_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosBandwidthLimitRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_bandwidth_limit_rule(
self.ctxt, self.rule.id, self.policy.id)
get_object_mock.assert_called_once_with(self.ctxt,
id=self.rule.id)
def test_get_policy_bandwidth_limit_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosBandwidthLimitRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_bandwidth_limit_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_bandwidth_limit_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosBandwidthLimitRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_bandwidth_limit_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.get_policy,
self.ctxt, self.policy.id)
def test_get_policy_bandwidth_limit_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id)
def test_get_policy_bandwidth_limit_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_bandwidth_limit_rules,
self.ctxt, self.policy.id)
def test_create_policy_dscp_marking_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.dscp_rule])
self.qos_plugin.create_policy_dscp_marking_rule(
self.ctxt, self.policy.id, self.rule_data)
self._validate_notif_driver_params('update_policy')
def test_update_policy_dscp_marking_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.dscp_rule])
self.qos_plugin.update_policy_dscp_marking_rule(
self.ctxt, self.dscp_rule.id, self.policy.id, self.rule_data)
self._validate_notif_driver_params('update_policy')
def test_delete_policy_dscp_marking_rule(self):
_policy = policy_object.QosPolicy(
self.ctxt, **self.policy_data['policy'])
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=_policy):
setattr(_policy, "rules", [self.dscp_rule])
self.qos_plugin.delete_policy_dscp_marking_rule(
self.ctxt, self.dscp_rule.id, self.policy.id)
self._validate_notif_driver_params('update_policy')
def test_get_policy_dscp_marking_rules(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosDscpMarkingRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_dscp_marking_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_dscp_marking_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosDscpMarkingRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_dscp_marking_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, qos_policy_id=self.policy.id,
_pager=mock.ANY, filter='filter_id')
def test_get_policy_dscp_marking_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_dscp_marking_rule,
self.ctxt, self.dscp_rule.id, self.policy.id)
def test_get_policy_dscp_marking_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_dscp_marking_rules,
self.ctxt, self.policy.id)
def test_get_policy_minimum_bandwidth_rule(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_object') as get_object_mock:
self.qos_plugin.get_policy_minimum_bandwidth_rule(
self.ctxt, self.rule.id, self.policy.id)
get_object_mock.assert_called_once_with(self.ctxt,
id=self.rule.id)
def test_get_policy_minimum_bandwidth_rules_for_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_objects') as get_objects_mock:
self.qos_plugin.get_policy_minimum_bandwidth_rules(
self.ctxt, self.policy.id)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY, qos_policy_id=self.policy.id)
def test_get_policy_minimum_bandwidth_rules_for_policy_with_filters(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=self.policy):
with mock.patch('neutron.objects.qos.rule.'
'QosMinimumBandwidthRule.'
'get_objects') as get_objects_mock:
filters = {'filter': 'filter_id'}
self.qos_plugin.get_policy_minimum_bandwidth_rules(
self.ctxt, self.policy.id, filters=filters)
get_objects_mock.assert_called_once_with(
self.ctxt, _pager=mock.ANY,
qos_policy_id=self.policy.id,
filter='filter_id')
def test_get_policy_minimum_bandwidth_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_bandwidth_rule,
self.ctxt, self.rule.id, self.policy.id)
def test_get_policy_minimum_bandwidth_rules_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.get_policy_minimum_bandwidth_rules,
self.ctxt, self.policy.id)
def test_create_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.create_policy_bandwidth_limit_rule,
self.ctxt, self.policy.id, self.rule_data)
def test_update_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.update_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id, self.rule_data)
def test_delete_policy_rule_for_nonexistent_policy(self):
with mock.patch('neutron.objects.qos.policy.QosPolicy.get_object',
return_value=None):
self.assertRaises(
n_exc.QosPolicyNotFound,
self.qos_plugin.delete_policy_bandwidth_limit_rule,
self.ctxt, self.rule.id, self.policy.id)
def test_verify_bad_method_call(self):
self.assertRaises(AttributeError, getattr, self.qos_plugin,
'create_policy_bandwidth_limit_rules')
def test_get_rule_types(self):
core_plugin = manager.NeutronManager.get_plugin()
rule_types_mock = mock.PropertyMock(
return_value=qos_consts.VALID_RULE_TYPES)
filters = {'type': 'type_id'}
with mock.patch.object(core_plugin, 'supported_qos_rule_types',
new_callable=rule_types_mock,
create=True):
types = self.qos_plugin.get_rule_types(self.ctxt, filters=filters)
self.assertEqual(sorted(qos_consts.VALID_RULE_TYPES),
sorted(type_['type'] for type_ in types))
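# Minimal, self-contained sketch (illustrative only) of the property-mocking
# idiom behind test_get_rule_types above: patching with
# ``new_callable=mock.PropertyMock`` and ``create=True`` lets a test fake a
# property that the patched class may not define at all. ``_FakePlugin`` is
# made up for the example.
def _property_mock_sketch():
    class _FakePlugin(object):
        pass
    plugin = _FakePlugin()
    with mock.patch.object(type(plugin), 'supported_qos_rule_types',
                           new_callable=mock.PropertyMock,
                           create=True) as rule_types:
        rule_types.return_value = ['bandwidth_limit']
        assert plugin.supported_qos_rule_types == ['bandwidth_limit']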
|
|
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from iptest.assert_util import *
from iptest.warning_util import warning_trapper
import sys
if not is_silverlight:
from iptest.process_util import *
year = 2005
month = 3
day = 16
hour = 14
minute = 53
second = 24
if 1900 < year < 2100 and 1 <= month <= 12 \
and 1 <= day <= 31 and 0 <= hour < 24 \
and 0 <= minute < 60 and 0 <= second < 60: # Looks like a valid date
pass
# Testing the (expr) support
x = 10
AreEqual(x, 10)
del x
try: y = x
except NameError: pass
else: Fail("x not deleted")
(x) = 20
AreEqual((x), 20)
del (x)
try: y = x
except NameError: pass
else: Fail("x not deleted")
# this is comment \
a=10
AreEqual(a, 10)
x = "Th\
\
e \
qu\
ick\
br\
ow\
\
n \
fo\
\
x\
ju\
mp\
s \
ove\
\
r \
th\
e l\
az\
\
y d\
og\
.\
\
\
\
12\
34\
567\
89\
0"
y="\
The\
q\
ui\
\
c\
k b\
\
r\
o\
w\
n\
\
fo\
x\
\
jum\
ps\
ov\
er \
t\
he\
la\
\
\
zy\
\
\
d\
og\
. 1\
2\
\
3\
\
\
\
\
4\
567\
\
8\
\
90\
"
AreEqual(x, y)
AreEqual("\101", "A")
x='\a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\\u\v\w\y\z'
y='\u0007\u0008\\c\\d\\e\u000C\\g\\h\\i\\j\\k\\l\\m\u000A\\o\\p\\q\u000D\\s\u0009\\u\u000B\\w\\y\\z'
Assert(x == y)
AreEqual(x, y)
for a,b in zip(x,y):
AreEqual(a,b)
Assert((10==20)==(20==10))
AreEqual(10==20, 20==10)
AreEqual(4e4-4, 4e4 - 4)
c = compile("071 + 1", "Error", "eval")
AssertError(SyntaxError, compile, "088 + 1", "Error", "eval")
AssertError(SyntaxError, compile, "099 + 1", "Error", "eval")
AssertError(SyntaxError, compile, """
try:
pass
""", "Error", "single")
AssertError(SyntaxError, compile, "x=10\ny=x.", "Error", "exec")
def run_compile_test(code, msg, lineno, skipCpy):
if skipCpy and not is_cli:
return
filename = "the file name"
try:
compile(code, filename, "exec")
except SyntaxError as e:
AreEqual(e.msg, msg)
AreEqual(e.lineno, lineno)
AreEqual(e.filename, filename)
else:
Assert(False, "Expected exception, got none")
if is_ironpython:
_yield_msg = "can't assign to yield expression"
else:
_yield_msg = "assignment to yield expression not possible"
compile_tests = [
("for x notin []:\n pass", "unexpected token 'notin'", 1, True),
("global 1", "unexpected token '1'", 1, True),
("x=10\nyield x\n", "'yield' outside function", 2, False),
("return\n", "'return' outside function", 1, False),
#("print >> 1 ,\n", "unexpected token '<eof>'", 1, False),
("def f(x=10, y):\n pass", "default value must be specified here", 1, True),
("def f(for):\n pass", "unexpected token 'for'", 1, True),
("f(3 = )", "expected name", 1, True),
("dict(a=1,a=2)", "duplicate keyword argument", 1, True),
("def f(a,a): pass", "duplicate argument 'a' in function definition", 1, False),
("def f((a,b),(c,b)): pass", "duplicate argument 'b' in function definition", 1, False),
("x = 10\nx = x[]", "unexpected token ']'", 2, True),
("break", "'break' outside loop", 1, False),
("if 1:\n\tbreak", "'break' outside loop", 2, False),
("if 1:\n\tx+y=22", "can't assign to operator", 2, False),
("if 1:\n\tdel f()", "can't delete function call", 2, False),
("def a(x):\n def b():\n print x\n del x", "can not delete variable 'x' referenced in nested scope", 2, True),
("if 1:\nfoo()\n", "expected an indented block", 2, False),
("'abc'.1", "invalid syntax", 1, True),
("'abc'.1L", "invalid syntax", 1, False),
("'abc'.1j", "invalid syntax", 1, True),
("'abc'.0xFFFF", "invalid syntax", 1, False),
("'abc' 1L", "invalid syntax", 1, True),
("'abc' 1.0", "invalid syntax", 1, True),
("'abc' 0j", "invalid syntax", 1, True),
("x = 'abc'\nx.1", "invalid syntax", 2, False),
("x = 'abc'\nx 1L", "invalid syntax", 2, False),
("x = 'abc'\nx 1.0", "invalid syntax", 2, False),
("x = 'abc'\nx 0j", "invalid syntax", 2, False),
('def f():\n del (yield 5)\n', "can't delete yield expression", 2, False),
('a,b,c += 1,2,3', "illegal expression for augmented assignment", 1, False),
('def f():\n a = yield 3 = yield 4', _yield_msg, 2, False),
('((yield a), 2,3) = (2,3,4)', "can't assign to yield expression", 1, False),
('(2,3) = (3,4)', "can't assign to literal", 1, False),
("def e():\n break", "'break' outside loop", 2, False),
("def g():\n for x in range(10):\n print x\n break\n", "'break' outside loop", 4, False),
("def g():\n for x in range(10):\n print x\n if True:\n break\n", "'break' outside loop", 5, False),
("def z():\n if True:\n break\n", "'break' outside loop", 3, False),
('from import abc', "invalid syntax", 1, False),
('() = 1', "can't assign to ()", 1, False),
("""for x in range(100):\n"""
""" try:\n"""
""" [1,2][3]\n"""
""" except IndexError:\n"""
""" pass\n"""
""" finally:\n"""
""" continue\n""", "'continue' not supported inside 'finally' clause", 7, False)
#CodePlex 15428
#("'abc'.", "invalid syntax", 1),
]
compile_tests.append(("None = 2", "cannot assign to None", 1, False))
# different error messages, ok
for test in compile_tests:
run_compile_test(*test)
AreEqual(float(repr(2.5)), 2.5)
AreEqual(eval("1, 2, 3,"), (1, 2, 3))
# eval validates end of input
AssertError(SyntaxError, compile, "1+2 1", "Error", "eval")
# empty test list in for expression
AssertError(SyntaxError, compile, "for x in : print x", "Error", "exec")
AssertError(SyntaxError, compile, "for x in : print x", "Error", "eval")
AssertError(SyntaxError, compile, "for x in : print x", "Error", "single")
# empty backquote
AssertError(SyntaxError, compile, "``", "Error", "exec")
AssertError(SyntaxError, compile, "``", "Error", "eval")
AssertError(SyntaxError, compile, "``", "Error", "single")
# empty assignment expressions
AssertError(SyntaxError, compile, "x = ", "Error", "exec")
AssertError(SyntaxError, compile, "x = ", "Error", "eval")
AssertError(SyntaxError, compile, "x = ", "Error", "single")
AssertError(SyntaxError, compile, "x = y = ", "Error", "exec")
AssertError(SyntaxError, compile, "x = y = ", "Error", "eval")
AssertError(SyntaxError, compile, "x = y = ", "Error", "single")
AssertError(SyntaxError, compile, " = ", "Error", "exec")
AssertError(SyntaxError, compile, " = ", "Error", "eval")
AssertError(SyntaxError, compile, " = ", "Error", "single")
AssertError(SyntaxError, compile, " = 4", "Error", "exec")
AssertError(SyntaxError, compile, " = 4", "Error", "eval")
AssertError(SyntaxError, compile, " = 4", "Error", "single")
AssertError(SyntaxError, compile, "x <= ", "Error", "exec")
AssertError(SyntaxError, compile, "x <= ", "Error", "eval")
AssertError(SyntaxError, compile, "x <= ", "Error", "single")
#indentation errors - BUG 864
AssertError(IndentationError, compile, "class C:\nx=2\n", "Error", "exec")
AssertError(IndentationError, compile, "class C:\n\n", "Error", "single")
#allow \f
compile('\f\f\f\f\fclass C:\f\f\f pass', 'ok', 'exec')
compile('\f\f\f\f\fclass C:\n\f\f\f print "hello"\n\f\f\f\f\f\f\f\f\f\f print "goodbye"', 'ok', 'exec')
compile('class C:\n\f\f\f print "hello"\n\f\f\f\f\f\f\f\f\f\f print "goodbye"', 'ok', 'exec')
compile('class \f\f\f\fC:\n\f print "hello"\n\f\f\f\f\f\f\f\f\f\f print "goodbye"', 'ok', 'exec')
# multiline expression passed to exec (positive test)
s = """
title = "The Cat"
Assert(title.istitle())
x = 2 + 5
AreEqual(x, 7)
"""
exec(s)
if is_cpython:
# this seems to be a CPython bug, Guido says:
# I usually append some extra newlines before passing a string to compile(). That's the usual work-around.
# There's probably a subtle bug in the tokenizer when reading from a string -- if you find it,
# please upload a patch to the tracker!
# http://mail.python.org/pipermail/python-dev/2009-May/089793.html
AssertError(SyntaxError, compile, "def f(a):\n\treturn a\n\t", "", "single")
AssertError(SyntaxError, compile, "def f(a):\n\treturn a\n\t", "", "single", 0x200)
# should work
s = "def f():\n\treturn 3"
compile(s, "<string>", "single")
AssertError(SyntaxError, compile, s, "<string>", "single", 0x200)
# Assignment to None and constant
def NoneAssign():
exec('None = 2')
def LiteralAssign():
exec("'2' = '3'")
AssertError(SyntaxError, NoneAssign)
AssertError(SyntaxError, LiteralAssign)
# beginning of the file handling
c = compile(" # some comment here \nprint 10", "", "exec")
c = compile(" \n# some comment\n \nprint 10", "", "exec")
AssertError(SyntaxError, compile, " x = 10\n\n", "", "exec")
AssertError(SyntaxError, compile, " \n #comment\n x = 10\n\n", "", "exec")
if sys.platform == 'cli':
    c = compile("\u0391 = 10\nif \u0391 != 10: 1/0", "", "exec")
exec(c)
# from __future__ tests
AssertError(SyntaxError, compile, "def f():\n from __future__ import division", "", "exec")
AssertError(SyntaxError, compile, "'doc'\n'doc2'\nfrom __future__ import division", "", "exec")
# del x
AssertError(SyntaxError, compile, "def f():\n del x\n def g():\n return x\n", "", "exec")
AssertError(SyntaxError, compile, "def f():\n def g():\n return x\n del x\n", "", "exec")
AssertError(SyntaxError, compile, "def f():\n class g:\n def h(self):\n print x\n pass\n del x\n", "", "exec")
# add global to the picture
c = compile("def f():\n x=10\n del x\n def g():\n global x\n return x\n return g\nf()()\n", "", "exec")
AssertError(NameError, eval, c)
c = compile("def f():\n global x\n x=10\n del x\n def g():\n return x\n return g\nf()()\n", "", "exec")
AssertError(NameError, eval, c)
# global following definition test
# affected by bug# 1145
c = compile("def f():\n global a\n global a\n a = 1\n", "", "exec")
# unqualified exec in nested function
AssertError(SyntaxError, compile, "def f():\n x = 1\n def g():\n exec 'pass'\n print x", "", "exec")
# correct case - qualified exec in nested function
c = compile("def f():\n x = 10\n def g():\n exec 'pass' in {}\n print x\n", "", "exec")
# private names test
class C:
__x = 10
class ___:
__y = 20
class D:
__z = 30
AreEqual(C._C__x, 10)
AreEqual(C.___.__y, 20)
AreEqual(C.D._D__z, 30)
class B(object):
def method(self, __a):
return __a
AreEqual(B().method("__a passed in"), "__a passed in")
class B(object):
def method(self, xxx_todo_changeme):
(__a, ) = xxx_todo_changeme
return __a
AreEqual(B().method(("__a passed in", )), "__a passed in")
class B(object):
def __f(self):
pass
Assert('_B__f' in dir(B))
class B(object):
class __C(object): pass
Assert('_B__C' in dir(B))
class B(object):
x = lambda self, __a : __a
AreEqual(B.x(B(), _B__a='value'), 'value')
#Hit negative case of 'sublist' in http://www.python.org/doc/2.5.1/ref/grammar.txt.
AssertError(SyntaxError, compile, "def f((1)): pass", "", "exec")
#
# Make sure that augmented assignment also binds in the given scope
#
augassign_code = """
x = 10
def f():
x %s 10
f()
"""
def test_augassign_binding():
for op in ["+=", "-=", "**=", "*=", "//=", "/=", "%=", "<<=", ">>=", "&=", "|=", "^="]:
code = augassign_code % op
try:
exec(code, {}, {})
except:
pass
else:
Assert(False, "augassign binding test didn't raise exception")
return True
Assert(test_augassign_binding())
# tests for multiline compound statements
class MyException(Exception): pass
def test_multiline_compound_stmts():
tests = [
"if False: print 'In IF'\nelse: x = 2; raise MyException('expected')",
"if False: print 'In IF'\nelif True: x = 2;raise MyException('expected')\nelse: print 'In ELSE'",
"for i in (1,2): x = i\nelse: x = 5; raise MyException('expected')",
"while 5 in (1,2): print i\nelse:x = 2;raise MyException('expected')",
"try: x = 2\nexcept: print 'In EXCEPT'\nelse: x=20;raise MyException('expected')",
]
for test in tests:
try:
c = compile(test,"","exec")
exec(c)
except MyException:
pass
else:
Assert(False, "multiline_compound stmt test did not raise exception. test = " + test)
test_multiline_compound_stmts()
# Generators cannot have return statements with values in them. SyntaxError is thrown in those cases.
def test_generator_with_nonempty_return():
tests = [
"def f():\n return 42\n yield 3",
"def f():\n yield 42\n return 3",
"def f():\n yield 42\n return None",
"def f():\n if True:\n return 42\n yield 42",
"def f():\n try:\n return 42\n finally:\n yield 23"
]
for test in tests:
#Merlin 148614 - Change it to AssertErrorWithMessage once bug is fixed.
AssertErrorWithPartialMessage(SyntaxError, "'return' with argument inside generator", compile, test, "", "exec")
#Verify that when there is no return value error is not thrown.
def f():
yield 42
return
test_generator_with_nonempty_return()
# compile function which returns from finally, but does not yield from finally.
c = compile("def f():\n try:\n pass\n finally:\n return 1", "", "exec")
def ret_from_finally():
try:
pass
finally:
return 1
return 2
AreEqual(ret_from_finally(), 1)
def ret_from_finally2(x):
if x:
try:
pass
finally:
return 1
else:
return 2
AreEqual(ret_from_finally2(True), 1)
AreEqual(ret_from_finally2(False), 2)
def ret_from_finally_x(x):
try:
1/0
finally:
return x
AreEqual(ret_from_finally_x("Hi"), "Hi")
def ret_from_finally_x2():
try:
1/0
finally:
raise AssertionError("This one")
try:
ret_from_finally_x2()
except AssertionError as e:
AreEqual(e.args[0], "This one")
else:
Fail("Expected AssertionError, got none")
try:
pass
finally:
pass
# The try block can only have one default except clause, and it must be last
try_syntax_error_tests = [
"""
try:
pass
except:
pass
except Exception, e:
pass
""",
"""
try:
pass
except Exception, e:
pass
except:
pass
except:
pass
""",
"""
try:
pass
except:
pass
except:
pass
"""
]
for code in try_syntax_error_tests:
AssertError(SyntaxError, compile, code, "code", "exec")
def test_break_in_else_clause():
def f():
exec ('''
while i >= 0:
pass
else:
break''')
AssertError(SyntaxError, f)
#Just make sure these don't throw
print("^L")
temp = 7
print(temp)
print("No ^L's...")
# keep this at the end of the file, do not insert anything below this line
def endoffile():
return "Hi" # and some comment here
def test_syntaxerror_text():
method_missing_colon = (" def MethodTwo(self)\n", """
class HasASyntaxException:
def MethodOne(self):
print 'hello'
print 'world'
print 'again'
def MethodTwo(self)
print 'world'""")
if is_cpython: #http://ironpython.codeplex.com/workitem/28380
function_missing_colon1 = ("def f()\n", "def f()")
else:
function_missing_colon1 = ("def f()", "def f()")
function_missing_colon2 = ("def f()\n", "def f()\n")
if is_cpython: #http://ironpython.codeplex.com/workitem/28380
function_missing_colon3 = ("def f()\n", "def f()\r\n")
function_missing_colon4 = ("def f()\n", "def f()\r")
else:
function_missing_colon3 = ("def f()\r\n", "def f()\r\n")
function_missing_colon4 = ("def f()\r", "def f()\r")
function_missing_colon2a = ("def f()\n", "print 1\ndef f()\nprint 3")
if is_cpython: #http://ironpython.codeplex.com/workitem/28380
function_missing_colon3a = ("def f()\n", "print 1\ndef f()\r\nprint 3")
function_missing_colon4a = ("def f()\n", "print 1\ndef f()\rprint 3")
else:
function_missing_colon3a = ("def f()\r\n", "print 1\ndef f()\r\nprint 3")
function_missing_colon4a = ("def f()\rprint 3", "print 1\ndef f()\rprint 3")
tests = (
method_missing_colon,
#function_missing_body,
function_missing_colon1,
function_missing_colon2,
function_missing_colon3,
function_missing_colon4,
function_missing_colon2a,
function_missing_colon3a,
function_missing_colon4a,
)
for expectedText, testCase in tests:
try:
exec(testCase)
except SyntaxError as e:
AreEqual(e.text, expectedText)
def test_error_parameters():
tests = [#("if 1:", 0x200, ('unexpected EOF while parsing', ('dummy', 1, 5, 'if 1:')) ),
("if 1:\n", 0x200, ('unexpected EOF while parsing', ('dummy', 1, 6, 'if 1:\n')) ),
#("if 1:", 0x000, ('unexpected EOF while parsing', ('dummy', 1, 5, 'if 1:')) ),
("if 1:\n", 0x000, ('unexpected EOF while parsing', ('dummy', 1, 6, 'if 1:\n')) ),
("if 1:\n\n", 0x200, ('expected an indented block', ('dummy', 2, 1, '\n')) ),
("if 1:\n\n", 0x000, ('expected an indented block', ('dummy', 2, 1, '\n')) ),
#("if 1:\n if 1:", 0x200, ('expected an indented block', ('dummy', 2, 7, ' if 1:')) ),
("if 1:\n if 1:\n", 0x200, ('expected an indented block', ('dummy', 2, 8, ' if 1:\n')) ),
#("if 1:\n if 1:", 0x000, ('expected an indented block', ('dummy', 2, 7, ' if 1:')) ),
("if 1:\n if 1:\n", 0x000, ('expected an indented block', ('dummy', 2, 8, ' if 1:\n')) ),
("if 1:\n if 1:\n\n", 0x200, ('expected an indented block', ('dummy', 3, 1, '\n')) ),
("if 1:\n if 1:\n\n", 0x000, ('expected an indented block', ('dummy', 3, 1, '\n')) ),
("class MyClass(object):\n\tabc = 42\n\tdef __new__(cls):\n", 0x200, ('expected an indented block', ('dummy', 3, 19, '\tdef __new__(cls):\n')) ),
("class MyClass(object):\n\tabc = 42\n\tdef __new__(cls):\n", 0x000, ('expected an indented block', ('dummy', 3, 19, '\tdef __new__(cls):\n')) ),
("def Foo():\n\n # comment\n\n Something = -1\n\n\n\n ", 0x000, ('unindent does not match any outer indentation level', ('dummy', 9, 2, ' '))),
("def Foo():\n\n # comment\n\n Something = -1\n\n\n\n ", 0x200, ('unindent does not match any outer indentation level', ('dummy', 9, 2, ' '))),
("def Foo():\n\n # comment\n\n Something = -1\n\n\n\n ", 0x000, ('unindent does not match any outer indentation level', ('dummy', 9, 3, ' '))),
("def Foo():\n\n # comment\n\n Something = -1\n\n\n\n ", 0x200, ('unindent does not match any outer indentation level', ('dummy', 9, 3, ' '))),
]
for input, flags, res in tests:
#print repr(input), flags
try:
code3 = compile(input, "dummy", "single", flags, 1)
AssertUnreachable()
except SyntaxError as err:
AreEqual(err.args, res)
try:
exec("""
def f():
x = 3
y = 5""")
AssertUnreachable()
except IndentationError as e:
AreEqual(e.lineno, 2)
@skip("win32", "silverlight") # no encoding.Default
def test_parser_recovery():
# bunch of test infrastructure...
import clr
clr.AddReference('IronPython')
clr.AddReference('Microsoft.Scripting')
clr.AddReference('Microsoft.Dynamic')
from Microsoft.Scripting import (
TextContentProvider, SourceCodeKind, SourceUnit, ErrorSink,
SourceCodeReader
)
from Microsoft.Scripting.Runtime import CompilerContext
from IronPython import PythonOptions
from IronPython.Compiler import Parser, Tokenizer, PythonCompilerOptions, Ast
from System.IO import StringReader
from System.Text import Encoding
class MyErrorSink(ErrorSink):
def __init__(self):
self.Errors = []
def Add(self, *args):
if type(args[0]) is str:
self.AddWithPath(*args)
else:
self.AddWithSourceUnit(*args)
def AddWithPath(self, message, path, code, line, span, error, severity):
err = (
message,
path,
span,
error
)
self.Errors.append(err)
def AddWithSourceUnit(self, source, message, span, errorCode, severity):
err = (
message,
source.Path,
span,
errorCode
)
self.Errors.append(err)
class MyTextContentProvider(TextContentProvider):
def __init__(self, text):
self.text = text
def GetReader(self):
return SourceCodeReader(StringReader(self.text), Encoding.Default)
def parse_text(text):
errorSink = MyErrorSink()
sourceUnit = SourceUnit(
clr.GetCurrentRuntime().GetLanguageByName('python'),
MyTextContentProvider(text),
'foo',
SourceCodeKind.File
)
parser = Parser.CreateParser(
CompilerContext(sourceUnit, PythonCompilerOptions(), errorSink),
PythonOptions()
)
parser.ParseFile(True)
return errorSink
def TestErrors(text, errors):
res = parse_text(text)
AreEqual(len(res.Errors), len(errors))
for curErr, expectedMsg in zip(res.Errors, errors):
AreEqual(curErr[0], expectedMsg)
def PrintErrors(text):
"""helper for creating new tests"""
errors = parse_text(text)
print()
for err in errors.Errors:
print(err)
TestErrors("""class
def x(self):
pass""", ["unexpected token '<newline>'"])
TestErrors("""class x
def x(self):
pass
""", ["unexpected token '<newline>'"])
TestErrors("""class x(
def x(self):
pass""", ["unexpected token 'def'"])
TestErrors("""class X:
if x:
def x(): pass""", ['expected an indented block'])
TestErrors("""class X:
if x is None:
x =
def x(self): pass""", ["unexpected token '<newline>'"])
TestErrors("""class X:
def f(
def g(self): pass""", ["unexpected token 'def'"])
TestErrors("""class X:
def f(*
def g(self): pass""", ["unexpected token 'def'"])
TestErrors("""class X:
def f(**
def g(self): pass""", ["unexpected token 'def'"])
TestErrors("""class X:
def f(*a, **
def g(self): pass""", ["unexpected token 'def'"])
TestErrors("""f() += 1""", ["illegal expression for augmented assignment"])
def test_syntax_warnings():
# syntax error warnings are outputted using warnings.showwarning. our own warning trapper therefore
# doesn't see them. So we trap stderr here instead. We could use CPython's warning trapper if we
# checked for the presence of the stdlib.
with stderr_trapper() as trapper:
compile("def f():\n a = 1\n global a\n", "", "exec")
AreEqual(trapper.messages, [":3: SyntaxWarning: name 'a' is assigned to before global declaration"])
with stderr_trapper() as trapper:
compile("def f():\n def a(): pass\n global a\n", "", "exec")
AreEqual(trapper.messages, [":3: SyntaxWarning: name 'a' is assigned to before global declaration"])
with stderr_trapper() as trapper:
compile("def f():\n for a in []: pass\n global a\n", "", "exec")
AreEqual(trapper.messages, [":3: SyntaxWarning: name 'a' is assigned to before global declaration"])
with stderr_trapper() as trapper:
compile("def f():\n global a\n a = 1\n global a\n", "", "exec")
AreEqual(trapper.messages, [":4: SyntaxWarning: name 'a' is assigned to before global declaration"])
with stderr_trapper() as trapper:
compile("def f():\n print a\n global a\n", "", "exec")
AreEqual(trapper.messages, [":3: SyntaxWarning: name 'a' is used prior to global declaration"])
with stderr_trapper() as trapper:
compile("def f():\n a = 1\n global a\n global a\n a = 1", "", "exec")
AreEqual(trapper.messages,
[":3: SyntaxWarning: name 'a' is assigned to before global declaration",
":4: SyntaxWarning: name 'a' is assigned to before global declaration"])
with stderr_trapper() as trapper:
compile("x = 10\nglobal x\n", "", "exec")
AreEqual(trapper.messages, [":2: SyntaxWarning: name 'x' is assigned to before global declaration"])
#--MAIN------------------------------------------------------------------------
run_test(__name__)
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.common.types import InvalidCredsError
from libcloud.container.base import (Container, ContainerDriver,
ContainerImage, ContainerCluster)
from libcloud.container.providers import Provider
from libcloud.container.types import ContainerState
VALID_RESPONSE_CODES = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
httplib.NO_CONTENT]
ROOT_URL = '/api/'
class KubernetesResponse(JsonResponse):
valid_response_codes = [httplib.OK, httplib.ACCEPTED, httplib.CREATED,
httplib.NO_CONTENT]
def parse_error(self):
if self.status == 401:
raise InvalidCredsError('Invalid credentials')
return self.body
def success(self):
return self.status in self.valid_response_codes
class KubernetesException(Exception):
def __init__(self, code, message):
self.code = code
self.message = message
self.args = (code, message)
def __str__(self):
return "%s %s" % (self.code, self.message)
def __repr__(self):
return "KubernetesException %s %s" % (self.code, self.message)
class KubernetesConnection(ConnectionUserAndKey):
responseCls = KubernetesResponse
timeout = 60
def add_default_headers(self, headers):
"""
Add parameters that are necessary for every request
If user and password are specified, include a base http auth
header
"""
headers['Content-Type'] = 'application/json'
if self.key and self.secret:
user_b64 = base64.b64encode(b('%s:%s' % (self.key, self.secret)))
headers['Authorization'] = 'Basic %s' % (user_b64.decode('utf-8'))
return headers
class KubernetesPod(object):
def __init__(self, name, containers, namespace):
"""
A Kubernetes pod
"""
self.name = name
self.containers = containers
self.namespace = namespace
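# Minimal sketch (illustrative only) of the headers that
# KubernetesConnection.add_default_headers above produces when both key and
# secret are set: plain HTTP basic auth over the key/secret pair.
def _example_basic_auth_headers(key, secret):
    user_b64 = base64.b64encode(b('%s:%s' % (key, secret)))
    return {'Content-Type': 'application/json',
            'Authorization': 'Basic %s' % user_b64.decode('utf-8')}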
class KubernetesContainerDriver(ContainerDriver):
type = Provider.KUBERNETES
name = 'Kubernetes'
website = 'http://kubernetes.io'
connectionCls = KubernetesConnection
supports_clusters = True
def __init__(self, key=None, secret=None, secure=False, host='localhost',
port=4243):
"""
:param key: API key or username to used (required)
:type key: ``str``
:param secret: Secret password to be used (required)
:type secret: ``str``
        :param secure: Whether to use HTTPS or HTTP. Defaults to HTTP;
            HTTPS is switched on automatically when ``host`` starts with
            ``https://``.
:type secure: ``bool``
:param host: Override hostname used for connections.
:type host: ``str``
:param port: Override port used for connections.
:type port: ``int``
:return: ``None``
"""
super(KubernetesContainerDriver, self).__init__(key=key, secret=secret,
secure=secure,
host=host,
port=port)
if host is not None:
if host.startswith('https://'):
secure = True
# strip the prefix
prefixes = ['http://', 'https://']
for prefix in prefixes:
if host.startswith(prefix):
                    host = host[len(prefix):]
self.connection.host = host
self.connection.port = port
self.connection.secure = secure
self.connection.key = key
self.connection.secret = secret
def list_containers(self, image=None, all=True):
"""
List the deployed container images
:param image: Filter to containers with a certain image
:type image: :class:`libcloud.container.base.ContainerImage`
        :param all: Show all containers (including stopped ones)
:type all: ``bool``
:rtype: ``list`` of :class:`libcloud.container.base.Container`
"""
try:
result = self.connection.request(
ROOT_URL + "v1/pods").object
except Exception as exc:
errno = getattr(exc, 'errno', None)
if errno == 111:
raise KubernetesException(
errno,
                    'Make sure kube host is accessible '
                    'and the API port is correct')
raise
pods = [self._to_pod(value) for value in result['items']]
containers = []
for pod in pods:
containers.extend(pod.containers)
return containers
def get_container(self, id):
"""
Get a container by ID
:param id: The ID of the container to get
:type id: ``str``
:rtype: :class:`libcloud.container.base.Container`
"""
containers = self.list_containers()
match = [container for container in containers if container.id == id]
return match[0]
def list_clusters(self):
"""
        Get a list of namespaces that pods can be deployed into
        :rtype: ``list`` of :class:`libcloud.container.base.ContainerCluster`
"""
try:
result = self.connection.request(
ROOT_URL + "v1/namespaces/").object
except Exception as exc:
errno = getattr(exc, 'errno', None)
if errno == 111:
raise KubernetesException(
errno,
                    'Make sure kube host is accessible '
                    'and the API port is correct')
raise
clusters = [self._to_cluster(value) for value in result['items']]
return clusters
def get_cluster(self, id):
"""
Get a cluster by ID
:param id: The ID of the cluster to get
:type id: ``str``
:rtype: :class:`libcloud.container.base.ContainerCluster`
"""
result = self.connection.request(ROOT_URL + "v1/namespaces/%s" %
id).object
return self._to_cluster(result)
def destroy_cluster(self, cluster):
"""
Delete a cluster (namespace)
:return: ``True`` if the destroy was successful, otherwise ``False``.
:rtype: ``bool``
"""
self.connection.request(ROOT_URL + "v1/namespaces/%s" %
cluster.id, method='DELETE').object
return True
def create_cluster(self, name, location=None):
"""
Create a container cluster (a namespace)
:param name: The name of the cluster
:type name: ``str``
:param location: The location to create the cluster in
:type location: :class:`.ClusterLocation`
:rtype: :class:`.ContainerCluster`
"""
request = {
'metadata': {
'name': name
}
}
result = self.connection.request(ROOT_URL + "v1/namespaces",
method='POST',
data=json.dumps(request)).object
return self._to_cluster(result)
def deploy_container(self, name, image, cluster=None,
parameters=None, start=True):
"""
Deploy an installed container image.
In kubernetes this deploys a single container Pod.
https://cloud.google.com/container-engine/docs/pods/single-container
:param name: The name of the new container
:type name: ``str``
:param image: The container image to deploy
:type image: :class:`.ContainerImage`
:param cluster: The cluster to deploy to, None is default
:type cluster: :class:`.ContainerCluster`
:param parameters: Container Image parameters
:type parameters: ``str``
:param start: Start the container on deployment
:type start: ``bool``
:rtype: :class:`.Container`
"""
if cluster is None:
namespace = 'default'
else:
namespace = cluster.id
request = {
"metadata": {
"name": name
},
"spec": {
"containers": [
{
"name": name,
"image": image.name
}
]
}
}
result = self.connection.request(ROOT_URL + "v1/namespaces/%s/pods"
% namespace,
method='POST',
data=json.dumps(request)).object
return self._to_cluster(result)
def destroy_container(self, container):
"""
Destroy a deployed container. Because the containers are single
container pods, this will delete the pod.
:param container: The container to destroy
:type container: :class:`.Container`
:rtype: ``bool``
"""
return self.ex_destroy_pod(container.extra['namespace'],
container.extra['pod'])
def ex_list_pods(self):
"""
List available Pods
:rtype: ``list`` of :class:`.KubernetesPod`
"""
result = self.connection.request(ROOT_URL + "v1/pods").object
return [self._to_pod(value) for value in result['items']]
def ex_destroy_pod(self, namespace, pod_name):
"""
Delete a pod and the containers within it.
"""
self.connection.request(
ROOT_URL + "v1/namespaces/%s/pods/%s" % (
namespace, pod_name),
method='DELETE').object
return True
def _to_pod(self, data):
"""
Convert an API response to a Pod object
"""
container_statuses = data['status']['containerStatuses']
containers = []
# response contains the status of the containers in a separate field
for container in data['spec']['containers']:
spec = list(filter(lambda i: i['name'] == container['name'],
container_statuses))[0]
containers.append(
self._to_container(container, spec, data)
)
return KubernetesPod(
name=data['metadata']['name'],
namespace=data['metadata']['namespace'],
containers=containers)
def _to_container(self, data, container_status, pod_data):
"""
Convert container in Container instances
"""
return Container(
id=container_status['containerID'],
name=data['name'],
image=ContainerImage(
id=container_status['imageID'],
name=data['image'],
path=None,
version=None,
driver=self.connection.driver),
ip_addresses=None,
state=ContainerState.RUNNING,
driver=self.connection.driver,
extra={
'pod': pod_data['metadata']['name'],
'namespace': pod_data['metadata']['namespace']
})
def _to_cluster(self, data):
"""
Convert namespace to a cluster
"""
metadata = data['metadata']
status = data['status']
return ContainerCluster(
id=metadata['name'],
name=metadata['name'],
driver=self.connection.driver,
extra={'phase': status['phase']})
def ts_to_str(timestamp):
"""
    Return a timestamp as a nicely formatted datetime string.
"""
date = datetime.datetime.fromtimestamp(timestamp)
date_string = date.strftime("%d/%m/%Y %H:%M %Z")
return date_string
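# Illustrative usage sketch only (not part of the driver): connect to an
# assumed API server on localhost:8080 with basic auth and deploy a
# single-container pod. The credentials, cluster name and image below are
# placeholders.
def _example_kubernetes_usage():
    driver = KubernetesContainerDriver(key='user', secret='pass',
                                       host='localhost', port=8080)
    image = ContainerImage(id=None, name='nginx', path=None, version=None,
                           driver=driver.connection.driver)
    cluster = driver.create_cluster(name='demo')
    driver.deploy_container('demo-nginx', image, cluster=cluster)
    return driver.ex_list_pods()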
|
|
from __future__ import absolute_import, unicode_literals
import idna
from .tasks import (
check_domain,
check_bulk_domain,
create_registrant,
create_registry_contact,
update_domain_registrant,
update_domain_registry_contact,
create_domain,
connect_domain,
check_host,
create_host,
connect_host,
update_domain,
local_update_domain,
init_update_domain,
)
from application.settings import get_logzio_sender
from .models import (
AccountDetail,
DefaultAccountTemplate,
DefaultAccountContact,
Registrant,
Contact,
)
import logging
log = logging.getLogger(__name__)
class Workflow(object):
"""
Steps needed for registry operations.
"""
def __init__(self):
"""
Initialise workflow object.
"""
self.workflow = []
self.registry = None
def append(self, callback):
"""
Append a callback to the internal workflow
:callback: function
"""
self.workflow.append(callback)
def check_domains(self, fqdn_list):
"""
Create chained workflows for set of domains.
:fqdn_list: list of fqdns
:returns: chain object for celery
"""
return check_bulk_domain.si(fqdn_list)
def fetch_registrant(self, data, user):
"""
Return the account detail id for either the default registrant or
whatever the user specified.
:user: request user
:data: request data
:returns: int id of registrant
"""
if "registrant" in data:
return data["registrant"]
default_registrant_set = AccountDetail.objects.filter(
default_registrant=True,
user=user
)
if default_registrant_set.exists():
return default_registrant_set.first().id
default_registrant = DefaultAccountTemplate.objects.get(
provider__slug=self.registry,
user=user
)
return default_registrant.account_template.id
def create_registrant_workflow(self, epp, data, user):
"""
Reuse an active registrant if one exists; otherwise queue a
create_registrant task.
:epp: dict with EPP data
:data: request data
:user: request user
:returns: the updated epp dict, or None if a create task was queued
"""
account_detail_id = self.fetch_registrant(data, user)
by_registry = Registrant.objects.filter(
provider__slug=self.registry,
user=user.id
)
if by_registry.exists():
# Check to see if the registrant submitted is actually
# the registry id. It will need to match with the user
# and registry.
registrant_set = by_registry.filter(
registry_id=account_detail_id,
)
if not registrant_set.exists():
registrant_set = by_registry.filter(
account_template=account_detail_id
)
if registrant_set.exists():
existing_registrant = registrant_set.first()
epp["registrant"] = existing_registrant.registry_id
return epp
self.append(
create_registrant.si(
epp,
person_id=account_detail_id,
registry=self.registry,
user=user.id
)
)
return None
def append_contacts_to_epp(self, epp, contacts):
"""
Append existing contact handles to the EPP "contact" list.
:epp: dict with EPP data
:contacts: list of {contact_type: registry_id} dicts
"""
epp_contacts = epp.get("contact", [])
for contact in contacts:
log.info("Adding contact {!r}".format(contact))
epp_contacts.append(contact)
epp["contact"] = epp_contacts
def append_contact_workflow(self, epp, contacts, user_id):
"""
Append create-contact tasks for contacts that do not exist yet and
collect the handles of those that already do.
:epp: dict with EPP data
:contacts: list of {contact_type: id} dicts
:user_id: id of the requesting user
"""
existing_contacts = []
by_registry = Contact.objects.filter(
provider__slug=self.registry,
)
for contact in contacts:
(contact_type, person_id), = contact.items()
log.info("Checking if %s contact exists for %s" % (contact_type,
person_id))
# Check if user sent registry_id as contact id field.
by_registry_id = by_registry.filter(registry_id=person_id).distinct()
by_account_template = by_registry.filter(
account_template__id=person_id
).distinct()
if by_registry_id.exists():
contact_obj = by_registry_id.get(registry_id=person_id)
contact_dict = {contact_type: contact_obj.registry_id}
existing_contacts.append(contact_dict)
elif by_account_template.exists():
# Otherwise assume it was an account_id
contact_obj = by_account_template.first()
contact_dict = {contact_type: contact_obj.registry_id}
existing_contacts.append(contact_dict)
else:
log.info(
"Adding workflow to create %s contact: %s" % (contact_type,
person_id)
)
if len(self.workflow) == 1:
self.append(
create_registry_contact.si(
epp,
person_id=person_id,
registry=self.registry,
contact_type=contact_type,
user=user_id
)
)
else:
self.append(
create_registry_contact.s(
person_id=person_id,
registry=self.registry,
contact_type=contact_type,
user=user_id
)
)
if len(existing_contacts) > 0:
self.append_contacts_to_epp(epp, existing_contacts)
def create_contact_workflow(self, epp, data, user):
"""
Append create contact commands to workflow. Preferentially append
mandatory default contacts if there are any, followed by contacts
sent in create domain request and finally default non-mandatory
contacts.
:data: EPP datastructure
"""
default_contacts = DefaultAccountContact.objects.filter(
provider__slug=self.registry
)
mandatory_contacts = default_contacts.filter(mandatory=True)
contacts = []
if mandatory_contacts.exists():
log.info("Using mandatory contacts for %s registration" % self.registry)
contacts = [{i.contact_type.name: i.account_template.id}
for i in mandatory_contacts]
elif "contacts" in data:
log.info("Using submitted contacts for %s registration" % self.registry)
contacts = data["contacts"]
elif default_contacts.exists():
log.info("Using default contacts for %s registration" % self.registry)
contacts = [{i.contact_type.name: i.account_template.id}
for i in default_contacts]
self.append_contact_workflow(epp, contacts, user.id)
def create_domain(self, data, user):
"""
Set up workflow for creating a domain
:data: dict for an epp create domain request
:returns: chain object for celery
"""
epp = {
"name": data["domain"]
}
if "nameservers" in data:
epp["ns"] = data["nameservers"]
if "period" in data:
epp["period"] = data["period"]
self.append(check_domain.s(data["domain"]))
self.create_registrant_workflow(epp, data, user)
self.create_contact_workflow(epp, data, user)
if len(self.workflow) == 1:
self.append(create_domain.si(epp, self.registry))
else:
self.append(create_domain.s(self.registry))
self.append(connect_domain.s(self.registry))
return self.workflow
def check_add_contacts(self, contact_set, current_contacts, epp, user):
"""
Check set of contacts attached to domain to see what is being added.
:contact_set: Set of contacts in request
:current_contacts: contacts currently attached to domain
"""
for contact in contact_set:
self.process_add_contact(contact, current_contacts, epp, user)
def check_remove_contacts(self, contact_set, current_contacts, epp, user):
"""
Check set of contacts attached to domain to see what is being removed.
:contact_set: list of contacts to evaluate
:current_contacts: Query Set object (django db)
:epp: dict with raw EPP structure
:user: id of user
"""
for contact in current_contacts.all():
registry_id = contact.contact.registry_id
account_template_id = contact.contact.account_template.id
contact_type = contact.contact_type.name
check_acct_id = {contact_type: account_template_id}
is_acct_id = check_acct_id in contact_set
check_registry_id = {contact_type: registry_id}
is_reg_id = check_registry_id in contact_set
if not any([is_acct_id, is_reg_id]):
rem = epp.get("rem", {})
contact = rem.get("contact", [])
contact.append(check_registry_id)
rem["contact"] = contact
epp["rem"] = rem
def check_update_domain_contacts(self, contact_set, epp, domain, user):
"""
Compare submitted contacts with current set attached to domain.
:contact_set: list of dict objects with new contact handlers
:epp: dict EPP request structure
:domain: RegisteredDomain object
:user: HTTP request user
"""
current_contacts = domain.contacts.filter(
active=True
)
self.check_add_contacts(contact_set, current_contacts, epp, user)
self.check_remove_contacts(contact_set, current_contacts, epp, user)
def _is_account_detail(self, test_id, user):
"""
Test whether or not an id is an account detail
:test_id: str id
:returns: AccountDetail object
"""
# Intentionally causes an error if the contact_id isn't an integer.
account_template_id = int(test_id)
# Cause an error if the submitted account template id
# does not belong to requesting user.
return AccountDetail.objects.get(
pk=account_template_id,
user=user
)
def process_add_contact(self, contact, current_contacts, epp, user):
"""
Prepare to create a new contact to be added
"""
(contact_type, contact_id), = contact.items()
# TODO: this code might introduce a loophole around "mandatory"
# contacts.
try:
account_template = self._is_account_detail(contact_id, user)
contact_in_set = current_contacts.filter(
contact_type__name=contact_type,
contact__account_template=account_template,
)
if not contact_in_set.exists():
# No instance of {<contact_type>: <account_template_id>}
# exists. We must create a new contact to update this domain.
self.append(
update_domain_registry_contact.s(
person_id=account_template.id,
registry=self.registry,
contact_type=contact_type,
user=user.id
)
)
return
except AccountDetail.DoesNotExist as e:
log.error(str(e))
return
except ValueError:
log.warning(
"%s not an account detail id; checking registryid" % contact_id
)
log.debug("Checking for contact " + contact_id)
add = epp.get("add", {})
contacts = add.get("contact", [])
query_set = Contact.objects.filter(
provider__slug=self.registry,
)
if not user.groups.filter(name='admin').exists():
query_set = query_set.filter(user=user)
contact_query_set = query_set.filter(registry_id=contact_id)
# This contact exists
if contact_query_set.exists():
# Contact is not already a current contact
current_contact_instance = current_contacts.filter(
contact__registry_id=contact_id,
contact_type__name=contact_type
)
if not current_contact_instance.exists():
new_contact = {}
new_contact[contact_type] = contact_id
contacts.append(new_contact)
add["contact"] = contacts
epp["add"] = add
return
log.warning(
"Contact %s already exists for this domain" % contact_id
)
return
log.warning("Contact %s does not exist for user " % user.id)
return
def check_update_domain_registrant(self, new_registrant, epp, domain, user):
"""
Given a domain and registrant id or template id, figure out if we're
creating a new registrant or reusing an existing one.
:new_registrant: string or int representing an account_detail or
registrant
:epp: Datastructure of EPP command
:domain: registered domain object
:user: User object from http request
"""
current_registrant = domain.registrant
current_account_detail = current_registrant.account_template.id
try:
account_detail = self._is_account_detail(new_registrant, user)
if current_account_detail != account_detail.id:
# Add create contact command to workflow
self.append(
update_domain_registrant.s(
person_id=account_detail.id,
registry=self.registry,
user=user.id
)
)
return
except AccountDetail.DoesNotExist:
log.warning("Account detail does not exist for this user")
get_logzio_sender().append(
{
"message": "Account detail does not exist for this user",
"registrant": new_registrant,
"user": user,
"domain": str(domain)
}
)
except ValueError:
log.warning(
"%s not an account detail id;"
"checking registryid" % new_registrant
)
# Look for an existing registrant that belongs to this request user.
# This assumes that the new registrant id is a "registry id".
query_set = Registrant.objects.all()
if not user.groups.filter(name='admin').exists():
query_set = Registrant.objects.filter(user=user)
registrant_query_set = query_set.filter(
registry_id=new_registrant
)
# Selected registrant exists and belongs to request.user
if registrant_query_set.exists():
# Should throw an error if new registrant does not belong to user.
found_registrant = registrant_query_set.get(
registry_id=new_registrant
)
# The two are not equal so make change
if current_registrant != found_registrant:
if "chg" not in epp:
epp["chg"] = {}
epp["chg"]["registrant"] = new_registrant
def check_update_domain_nameservers(self, ns, epp, domain, user):
"""
Given a domain and a set of nameservers, determine if some are to be
added or removed.
:ns: list of nameservers
:epp: dict EPP data structure
:domain: RegisteredDomain object
:user: HTTP Request user
"""
current_nameservers = domain.nameservers
if current_nameservers is None:
current_nameservers = []
for ns_host in ns:
idn = idna.encode(ns_host, uts46=True).decode('ascii')
if idn not in current_nameservers:
add = epp.get("add", {})
add_ns = add.get("ns", [])
add_ns.append(idn)
add["ns"] = add_ns
epp["add"] = add
else:
log.debug("%s is a current nameserver" % idn)
for ns_host in current_nameservers:
idn = idna.encode(ns_host, uts46=True).decode('ascii')
idn_included = idn in ns
host_included = ns_host in ns
if not any([idn_included, host_included]):
rem = epp.get("rem", {})
rem_ns = rem.get("ns", [])
rem_ns.append(idn)
rem["ns"] = rem_ns
epp["rem"] = rem
else:
log.debug("Not removing %s" % idn)
def update_domain(self, data, domain, user):
"""
Set up workflow for updating a domain
:data: dict for an epp update domain request
:domain: registered domain object
:returns: response
"""
epp = {
"name": data["domain"]
}
if "registrant" in data:
self.check_update_domain_registrant(data["registrant"],
epp,
domain,
user)
if "contacts" in data:
self.check_update_domain_contacts(data["contacts"],
epp,
domain,
user)
if "nameservers" in data:
self.check_update_domain_nameservers(data["nameservers"],
epp,
domain,
user)
if "status" in data:
self.check_update_domain_change_status(data["status"],
epp,
domain,
user)
fields = ["add", "rem", "chg"]
if len(self.workflow) > 0 or any(k in epp for k in fields):
self.workflow.insert(0, init_update_domain.si(epp))
self.append(update_domain.s(registry=self.registry))
self.append(local_update_domain.s())
return self.workflow
log.warning("Nothing to update.")
return None
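# A hypothetical example of the epp structure assembled above (values are illustrative):
#   {"name": "example.tld",
#    "add": {"ns": ["ns1.example.net"], "contact": [{"tech": "reg-123"}]},
#    "rem": {"contact": [{"admin": "reg-045"}]},
#    "chg": {"registrant": "reg-999"}}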
def process_host_addresses(self, addresses):
"""
Preprocess address set for hosts to make compatible with
nodepp.
Because "type" shadows a Python builtin, I've chosen to accept the
field as "addr_type" in API requests and rename it to "type" here.
:addresses: list of addresses submitted via API
:returns: list of addresses with "addr_type" -> "type"
"""
result = []
for addr in addresses:
address = {"ip": addr["ip"]}
if "addr_type" in addr:
address["type"] = addr["addr_type"]
result.append(address)
return result
def create_host(self, data, user):
"""
Set up workflow for creating a host
:data: dict create host data
:user: user object
:returns: dict response returned by registry
"""
self.append(check_host.s(data["idn_host"]))
self.append(create_host.si(data))
self.append(connect_host.si(data, user.id))
return self.workflow
def check_update_domain_change_status(self, data, epp, domain, user):
"""
Evaluate the status sent with the request and determine if it should
go into the "chg" field
:data: TODO
:epp: TODO
:domain: TODO
:user: TODO
:returns: TODO
"""
pass
class CentralNic(Workflow):
"""
Registry operations specific for CentralNic
"""
def __init__(self):
super().__init__()
self.registry = 'centralnic-test'
class CoccaTest(Workflow):
"""
Registry operations specific for Cocca
"""
def __init__(self):
super().__init__()
self.registry = 'cocca-test'
class NzrsTest(Workflow):
"""
Registry operations specific for NZRS
"""
def __init__(self):
super().__init__()
self.registry = 'nzrs-test'
workflow_registries = {
"centralnic-test": CentralNic,
"cocca-test": CoccaTest,
"nzrs-test": NzrsTest,
}
def workflow_factory(registry):
"""
Return workflow manager for a given registry.
:registry: str or DomainProvider object
:returns: A subclass of Workflow
"""
return workflow_registries[registry]
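# A hedged usage sketch (not part of this module): build a create-domain workflow and
# dispatch it. Running the task list through celery.chain() is an assumption based on
# how the .s()/.si() signatures are assembled above; the request data is illustrative.
#
#   from celery import chain
#
#   manager = workflow_factory("centralnic-test")()
#   tasks = manager.create_domain(
#       {"domain": "example.tld", "nameservers": ["ns1.example.net"]},
#       request.user,
#   )
#   async_result = chain(*tasks).apply_async()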
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Test single point logfiles in cclib."""
import os
import unittest
import numpy
from common import get_minimum_carbon_separation
from skip import skipForParser
from skip import skipForLogfile
__filedir__ = os.path.realpath(os.path.dirname(__file__))
class GenericSPTest(unittest.TestCase):
"""Generic restricted single point unittest"""
# Molecular mass of DVB in mD, and expected precision.
molecularmass = 130078.25
mass_precision = 0.10
# In STO-3G, H has 1, C has 5 (1 S and 4 SP).
nbasisdict = {1:1, 6:5}
# Approximate B3LYP energy of dvb after SCF in STO-3G.
b3lyp_energy = -10365
# Overlap first two atomic orbitals.
overlap01 = 0.24
def testnatom(self):
"""Is the number of atoms equal to 20?"""
self.assertEquals(self.data.natom, 20)
def testatomnos(self):
"""Are the atomnos correct?"""
# The nuclear charges should be integer values in a NumPy array.
self.failUnless(numpy.alltrue([numpy.issubdtype(atomno, int) for atomno in self.data.atomnos]))
self.assertEquals(self.data.atomnos.dtype.char, 'i')
self.assertEquals(self.data.atomnos.shape, (20,) )
self.assertEquals(sum(self.data.atomnos == 6) + sum(self.data.atomnos == 1), 20)
@skipForParser('DALTON', 'DALTON has a very low accuracy for the printed values of all populations (2 decimals rounded in a weird way), so let it slide for now')
@skipForLogfile('Jaguar/basicJaguar7', 'We did not print the atomic partial charges in the unit tests for this version')
@skipForLogfile('Molpro/basicMolpro2006', "These tests were run a long time ago and since we don't have access to Molpro 2006 anymore, we can skip this test (it is tested in 2012)")
@skipForLogfile('Psi/basicPsi3', 'Psi3 did not print partial atomic charges')
def testatomcharges(self):
"""Are atomcharges (at least Mulliken) consistent with natom and sum to zero?"""
for type in set(['mulliken'] + list(self.data.atomcharges.keys())):
charges = self.data.atomcharges[type]
self.assertEquals(len(charges), self.data.natom)
self.assertAlmostEquals(sum(charges), 0.0, delta=0.001)
def testatomcoords(self):
"""Are the dimensions of atomcoords 1 x natom x 3?"""
expected_shape = (1, self.data.natom, 3)
self.assertEquals(self.data.atomcoords.shape, expected_shape)
def testatomcoords_units(self):
"""Are atomcoords consistent with Angstroms?"""
min_carbon_dist = get_minimum_carbon_separation(self.data)
dev = abs(min_carbon_dist - 1.34)
self.assertTrue(dev < 0.03, "Minimum carbon dist is %.2f (not 1.34)" % min_carbon_dist)
def testcharge_and_mult(self):
"""Are the charge and multiplicity correct?"""
self.assertEquals(self.data.charge, 0)
self.assertEquals(self.data.mult, 1)
def testnbasis(self):
"""Is the number of basis set functions correct?"""
count = sum([self.nbasisdict[n] for n in self.data.atomnos])
self.assertEquals(self.data.nbasis, count)
@skipForParser('ADF', 'ADF parser does not extract atombasis')
@skipForLogfile('Jaguar/basicJaguar7', 'Data file does not contain enough information. Can we make a new one?')
def testatombasis(self):
"""Are the indices in atombasis the right amount and unique?"""
all = []
for i, atom in enumerate(self.data.atombasis):
self.assertEquals(len(atom), self.nbasisdict[self.data.atomnos[i]])
all += atom
# Test if there are as many indices as atomic orbitals.
self.assertEquals(len(all), self.data.nbasis)
# Check if all are different (every orbital indexed once).
self.assertEquals(len(set(all)), len(all))
@skipForParser('GAMESS', 'atommasses not implemented yet')
@skipForParser('GAMESSUK', 'atommasses not implemented yet')
@skipForParser('Jaguar', 'atommasses not implemented yet')
@skipForParser('Molpro', 'atommasses not implemented yet')
@skipForParser('NWChem', 'atommasses not implemented yet')
@skipForLogfile('Psi/basicPsi3', 'atommasses not implemented yet')
@skipForLogfile('Psi/basicPsi4.0b5', 'atommasses not implemented yet')
@skipForParser('QChem', 'atommasses not implemented yet')
def testatommasses(self):
"""Do the atom masses sum up to the molecular mass?"""
mm = 1000*sum(self.data.atommasses)
msg = "Molecule mass: %f not %f +- %fmD" % (mm, self.molecularmass, self.mass_precision)
self.assertAlmostEquals(mm, self.molecularmass, delta=self.mass_precision, msg=msg)
def testcoreelectrons(self):
"""Are the coreelectrons all 0?"""
ans = numpy.zeros(self.data.natom, 'i')
numpy.testing.assert_array_equal(self.data.coreelectrons, ans)
def testnormalisesym(self):
"""Did this subclass overwrite normalisesym?"""
self.assertNotEquals(self.logfile.normalisesym("A"), "ERROR: This should be overwritten by this subclass")
@skipForParser('Molpro', '?')
@skipForParser('ORCA', 'ORCA has no support for symmetry yet')
def testsymlabels(self):
"""Are all the symmetry labels either Ag/u or Bg/u?"""
sumwronglabels = sum([x not in ['Ag', 'Bu', 'Au', 'Bg'] for x in self.data.mosyms[0]])
self.assertEquals(sumwronglabels, 0)
def testhomos(self):
"""Is the index of the HOMO equal to 34?"""
numpy.testing.assert_array_equal(self.data.homos, numpy.array([34],"i"), "%s != array([34],'i')" % numpy.array_repr(self.data.homos))
def testscfvaluetype(self):
"""Are scfvalues and its elements the right type??"""
self.assertEquals(type(self.data.scfvalues),type([]))
self.assertEquals(type(self.data.scfvalues[0]),type(numpy.array([])))
def testscfenergy(self):
"""Is the SCF energy within the target?"""
self.assertAlmostEquals(self.data.scfenergies[-1], self.b3lyp_energy, delta=40, msg="Final scf energy: %f not %i +- 40eV" %(self.data.scfenergies[-1], self.b3lyp_energy))
def testscftargetdim(self):
"""Do the scf targets have the right dimensions?"""
self.assertEquals(self.data.scftargets.shape, (len(self.data.scfvalues), len(self.data.scfvalues[0][0])))
def testlengthmoenergies(self):
"""Is the number of evalues equal to nmo?"""
self.assertEquals(len(self.data.moenergies[0]), self.data.nmo)
def testtypemoenergies(self):
"""Is moenergies a list containing one numpy array?"""
self.assertEquals(type(self.data.moenergies), type([]))
self.assertEquals(type(self.data.moenergies[0]), type(numpy.array([])))
@skipForParser('DALTON', 'mocoeffs not implemented yet')
@skipForLogfile('Jaguar/basicJaguar7', 'Data file does not contain enough information. Can we make a new one?')
@skipForLogfile('Psi/basicPsi3', 'MO coefficients are printed separately for each SALC')
def testdimmocoeffs(self):
"""Are the dimensions of mocoeffs equal to 1 x nmo x nbasis?"""
self.assertEquals(type(self.data.mocoeffs), type([]))
self.assertEquals(len(self.data.mocoeffs), 1)
self.assertEquals(self.data.mocoeffs[0].shape,
(self.data.nmo, self.data.nbasis))
@skipForParser('DALTON', 'To print: **INTEGRALS\n.PROPRI')
@skipForParser('Psi', 'Psi does not currently have the option to print the overlap matrix')
@skipForParser('QChem', 'QChem cannot print the overlap matrix')
def testaooverlaps(self):
"""Are the dims and values of the overlap matrix correct?"""
self.assertEquals(self.data.aooverlaps.shape, (self.data.nbasis, self.data.nbasis))
# The matrix is symmetric.
row = self.data.aooverlaps[0,:]
col = self.data.aooverlaps[:,0]
self.assertEquals(sum(col - row), 0.0)
# All values on the diagonal should be exactly one.
for i in range(self.data.nbasis):
self.assertEquals(self.data.aooverlaps[i,i], 1.0)
# Check some additional values that don't seem to move around between programs.
self.assertAlmostEquals(self.data.aooverlaps[0, 1], self.overlap01, delta=0.01)
self.assertAlmostEquals(self.data.aooverlaps[1, 0], self.overlap01, delta=0.01)
self.assertEquals(self.data.aooverlaps[3,0], 0.0)
self.assertEquals(self.data.aooverlaps[0,3], 0.0)
def testoptdone(self):
"""There should be no optdone attribute set."""
self.assertFalse(hasattr(self.data, 'optdone'))
@skipForParser('Gaussian', 'Logfile needs to be updated')
@skipForParser('Jaguar', 'No dipole moments in the logfile')
def testmoments(self):
"""Does the dipole and possible higher molecular moments look reasonable?"""
# The reference point is always a vector, but not necessarily the
# origin or center of mass. In this case, however, the center of mass
# is at the origin, so we know what to expect.
reference = self.data.moments[0]
self.assertEquals(len(reference), 3)
for x in reference:
self.assertEquals(x, 0.0)
# Length and value of dipole moment should always be correct (zero for this test).
dipole = self.data.moments[1]
self.assertEquals(len(dipole), 3)
for d in dipole:
self.assertAlmostEquals(d, 0.0, places=7)
# If the quadrupole is there, we can expect roughly -50B for the XX moment,
# -50B for the YY moment and -60B for the ZZ moment.
if len(self.data.moments) > 2:
quadrupole = self.data.moments[2]
self.assertEquals(len(quadrupole), 6)
self.assertAlmostEquals(quadrupole[0], -50, delta=2.5)
self.assertAlmostEquals(quadrupole[3], -50, delta=2.5)
self.assertAlmostEquals(quadrupole[5], -60, delta=3)
# If the octupole is there, it should have 10 components and be zero.
if len(self.data.moments) > 3:
octupole = self.data.moments[3]
self.assertEquals(len(octupole), 10)
for m in octupole:
self.assertAlmostEquals(m, 0.0, delta=0.001)
# The hexadecapole should have 15 elements, an XXXX component of around -1900 Debye*ang^2,
# a YYYY component of -330B and a ZZZZ component of -50B.
if len(self.data.moments) > 4:
hexadecapole = self.data.moments[4]
self.assertEquals(len(hexadecapole), 15)
self.assertAlmostEquals(hexadecapole[0], -1900, delta=90)
self.assertAlmostEquals(hexadecapole[10], -330, delta=11)
self.assertAlmostEquals(hexadecapole[14], -50, delta=2.5)
# There are 21 unique 32-pole moments, and all are zero in this test case.
if len(self.data.moments) > 5:
moment32 = self.data.moments[5]
self.assertEquals(len(moment32), 21)
for m in moment32:
self.assertEquals(m, 0.0)
@skipForParser('ADF', 'Does not support metadata yet')
@skipForParser('GAMESS', 'Does not support metadata yet')
@skipForParser('GAMESSUK', 'Does not support metadata yet')
@skipForParser('Gaussian', 'Does not support metadata yet')
@skipForParser('Jaguar', 'Does not support metadata yet')
@skipForParser('Molpro', 'Does not support metadata yet')
@skipForParser('NWChem', 'Does not support metadata yet')
@skipForParser('ORCA', 'Does not support metadata yet')
@skipForParser('Psi', 'Does not support metadata yet')
@skipForParser('QChem', 'Does not support metadata yet')
def testmetadata(self):
"""Does metadata have expected keys and values?"""
self.assertTrue(hasattr(self.data, "metadata"))
self.assertIn("basis_set", self.data.metadata)
self.assertIn("methods", self.data.metadata)
self.assertIn("package", self.data.metadata)
self.assertIn("package_version", self.data.metadata)
class ADFSPTest(GenericSPTest):
"""Customized restricted single point unittest"""
# ADF only prints up to 0.1mD per atom, so the precision here is worse than 0.1mD.
mass_precision = 0.3
foverlap00 = 1.00003
foverlap11 = 1.02672
foverlap22 = 1.03585
b3lyp_energy = -140
def testfoverlaps(self):
"""Are the dims and values of the fragment orbital overlap matrix correct?"""
self.assertEquals(self.data.fooverlaps.shape, (self.data.nbasis, self.data.nbasis))
# The matrix is symmetric.
row = self.data.fooverlaps[0,:]
col = self.data.fooverlaps[:,0]
self.assertEquals(sum(col - row), 0.0)
# Although the diagonal elements are close to one, the SFOs
# are generally not normalized, so test for a few specific values.
self.assertAlmostEquals(self.data.fooverlaps[0, 0], self.foverlap00, delta=0.0001)
self.assertAlmostEquals(self.data.fooverlaps[1, 1], self.foverlap11, delta=0.0001)
self.assertAlmostEquals(self.data.fooverlaps[2, 2], self.foverlap22, delta=0.0001)
class Jaguar7SPTest(GenericSPTest):
"""Customized restricted single point unittest"""
# Jaguar prints only 10 virtual MOs by default. Can we re-run with full output?
def testlengthmoenergies(self):
"""Is the number of evalues equal to the number of occ. MOs + 10?"""
self.assertEquals(len(self.data.moenergies[0]), self.data.homos[0]+11)
class Psi3SPTest(GenericSPTest):
"""Customized restricted single point HF/KS unittest"""
# The final energy is also a bit higher here, I think due to the fact
# that a SALC calculation is done instead of a full LCAO.
b3lyp_energy = -10300
class OrcaSPTest(GenericSPTest):
"""Customized restricted single point unittest"""
# Orca has different weights for the masses
molecularmass = 130190
if __name__=="__main__":
import sys
sys.path.append(os.path.join(__filedir__, ".."))
from test_data import DataSuite
suite = DataSuite(['SP'])
suite.testall()
|
|
#!/usr/bin/env python
# rgb2colorname.py
# by [email protected], [email protected], https://github.com/paarthneekhara
# Usage:
# Explained in https://github.com/jetbloom/rgb2colorname/blob/master/README.md
# KDTree Implementation details http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.html#scipy.spatial.KDTree
# This will be upgraded to 3 dimensional arrays
import numpy as np
from scipy import spatial
# define function:
#def find_nearest_vector(array, value):
# # http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.norm.html
# idx = np.array([np.linalg.norm(x+y) for (x,y,z) in array-value]).argmin()
# return array[idx]
#Naive Solution- incomplete
#def find_index_of_nearest_xyz(x_array,y_array, z_array, x_point, y_point,z_point):
#write loop
# distance = (x_array-x_point)**2 + (y_array-y_point)**2 +(z_array-z_point)**2
# idx,idy,idz = numpy.where(distance==distance.min())
# return idx[0],idy[0],idz[0]
#combined_x_y_arrays = numpy.dstack([y_array.ravel(),x_array.ravel()])[0]
#points_list = list(points.transpose())
#def do_kdtree(RGB,points):
# mytree = scipy.spatial.cKDTree(RGB)
# dist, indexes = mytree.query(points)
# return indexes
n=571 # based on program output.
#A=np.empty((n,n,n))
#A = np.random.random((10,2))*100
#### Paste in contents of rgb_combined_v01.csv.txt below: ###
#TODO: Remove array and use dataframe to import from csv
A = np.array([ \
[240,248,255] \
,[250,235,215] \
,[255,239,219] \
,[238,223,204] \
,[205,192,176] \
,[139,131,120] \
,[0,255,255] \
,[127,255,212] \
,[127,255,212] \
,[118,238,198] \
,[102,205,170] \
,[69,139,116] \
,[240,255,255] \
,[240,255,255] \
,[224,238,238] \
,[193,205,205] \
,[131,139,139] \
,[245,245,220] \
,[255,228,196] \
,[255,228,196] \
,[238,213,183] \
,[205,183,158] \
,[139,125,107] \
,[0,0,0] \
,[255,235,205] \
,[0,0,255] \
,[0,0,255] \
,[0,0,238] \
,[0,0,205] \
,[0,0,139] \
,[138,43,226] \
,[165,42,42] \
,[255,64,64] \
,[238,59,59] \
,[205,51,51] \
,[139,35,35] \
,[222,184,135] \
,[255,211,155] \
,[238,197,145] \
,[205,170,125] \
,[139,115,85] \
,[95,158,160] \
,[152,245,255] \
,[142,229,238] \
,[122,197,205] \
,[83,134,139] \
,[127,255,0] \
,[127,255,0] \
,[118,238,0] \
,[102,205,0] \
,[69,139,0] \
,[210,105,30] \
,[255,127,36] \
,[238,118,33] \
,[205,102,29] \
,[139,69,19] \
,[255,127,80] \
,[255,114,86] \
,[238,106,80] \
,[205,91,69] \
,[139,62,47] \
,[100,149,237] \
,[255,248,220] \
,[255,248,220] \
,[238,232,205] \
,[205,200,177] \
,[139,136,120] \
,[220,20,60] \
,[0,255,255] \
,[0,255,255] \
,[0,238,238] \
,[0,205,205] \
,[0,139,139] \
,[0,0,139] \
,[0,139,139] \
,[184,134,11] \
,[255,185,15] \
,[238,173,14] \
,[205,149,12] \
,[139,101,8] \
,[169,169,169] \
,[0,100,0] \
,[189,183,107] \
,[139,0,139] \
,[85,107,47] \
,[202,255,112] \
,[188,238,104] \
,[162,205,90] \
,[110,139,61] \
,[255,140,0] \
,[255,127,0] \
,[238,118,0] \
,[205,102,0] \
,[139,69,0] \
,[153,50,204] \
,[191,62,255] \
,[178,58,238] \
,[154,50,205] \
,[104,34,139] \
,[139,0,0] \
,[233,150,122] \
,[143,188,143] \
,[193,255,193] \
,[180,238,180] \
,[155,205,155] \
,[105,139,105] \
,[72,61,139] \
,[47,79,79] \
,[151,255,255] \
,[141,238,238] \
,[121,205,205] \
,[82,139,139] \
,[0,206,209] \
,[148,0,211] \
,[255,20,147] \
,[255,20,147] \
,[238,18,137] \
,[205,16,118] \
,[139,10,80] \
,[0,191,255] \
,[0,191,255] \
,[0,178,238] \
,[0,154,205] \
,[0,104,139] \
,[105,105,105] \
,[30,144,255] \
,[30,144,255] \
,[28,134,238] \
,[24,116,205] \
,[16,78,139] \
,[178,34,34] \
,[255,48,48] \
,[238,44,44] \
,[205,38,38] \
,[139,26,26] \
,[255,250,240] \
,[255,250,240] \
,[34,139,34] \
,[255,0,255] \
,[220,220,220] \
,[248,248,255] \
,[255,215,0] \
,[255,215,0] \
,[238,201,0] \
,[205,173,0] \
,[139,117,0] \
,[218,165,32] \
,[255,193,37] \
,[238,180,34] \
,[205,155,29] \
,[139,105,20] \
,[128,128,128] \
,[190,190,190] \
,[0,0,0] \
,[3,3,3] \
,[26,26,26] \
,[255,255,255] \
,[28,28,28] \
,[31,31,31] \
,[33,33,33] \
,[36,36,36] \
,[38,38,38] \
,[41,41,41] \
,[43,43,43] \
,[46,46,46] \
,[48,48,48] \
,[5,5,5] \
,[51,51,51] \
,[54,54,54] \
,[56,56,56] \
,[59,59,59] \
,[61,61,61] \
,[64,64,64] \
,[66,66,66] \
,[69,69,69] \
,[71,71,71] \
,[74,74,74] \
,[8,8,8] \
,[77,77,77] \
,[79,79,79] \
,[82,82,82] \
,[84,84,84] \
,[87,87,87] \
,[89,89,89] \
,[92,92,92] \
,[94,94,94] \
,[97,97,97] \
,[99,99,99] \
,[10,10,10] \
,[102,102,102] \
,[105,105,105] \
,[107,107,107] \
,[110,110,110] \
,[112,112,112] \
,[115,115,115] \
,[117,117,117] \
,[120,120,120] \
,[122,122,122] \
,[125,125,125] \
,[13,13,13] \
,[127,127,127] \
,[130,130,130] \
,[133,133,133] \
,[135,135,135] \
,[138,138,138] \
,[140,140,140] \
,[143,143,143] \
,[145,145,145] \
,[148,148,148] \
,[150,150,150] \
,[15,15,15] \
,[153,153,153] \
,[156,156,156] \
,[158,158,158] \
,[161,161,161] \
,[163,163,163] \
,[166,166,166] \
,[168,168,168] \
,[171,171,171] \
,[173,173,173] \
,[176,176,176] \
,[18,18,18] \
,[179,179,179] \
,[181,181,181] \
,[184,184,184] \
,[186,186,186] \
,[189,189,189] \
,[191,191,191] \
,[194,194,194] \
,[196,196,196] \
,[199,199,199] \
,[201,201,201] \
,[20,20,20] \
,[204,204,204] \
,[207,207,207] \
,[209,209,209] \
,[212,212,212] \
,[214,214,214] \
,[217,217,217] \
,[219,219,219] \
,[222,222,222] \
,[224,224,224] \
,[227,227,227] \
,[23,23,23] \
,[229,229,229] \
,[232,232,232] \
,[235,235,235] \
,[237,237,237] \
,[240,240,240] \
,[242,242,242] \
,[245,245,245] \
,[247,247,247] \
,[250,250,250] \
,[252,252,252] \
,[0,128,0] \
,[0,255,0] \
,[0,255,0] \
,[0,238,0] \
,[0,205,0] \
,[0,139,0] \
,[173,255,47] \
,[240,255,240] \
,[240,255,240] \
,[224,238,224] \
,[193,205,193] \
,[131,139,131] \
,[255,105,180] \
,[255,110,180] \
,[238,106,167] \
,[205,96,144] \
,[139,58,98] \
,[205,92,92] \
,[255,106,106] \
,[238,99,99] \
,[205,85,85] \
,[139,58,58] \
,[75,0,130] \
,[255,255,240] \
,[255,255,240] \
,[238,238,224] \
,[205,205,193] \
,[139,139,131] \
,[240,230,140] \
,[255,246,143] \
,[238,230,133] \
,[205,198,115] \
,[139,134,78] \
,[230,230,250] \
,[255,240,245] \
,[255,240,245] \
,[238,224,229] \
,[205,193,197] \
,[139,131,134] \
,[124,252,0] \
,[255,250,205] \
,[255,250,205] \
,[238,233,191] \
,[205,201,165] \
,[139,137,112] \
,[173,216,230] \
,[191,239,255] \
,[178,223,238] \
,[154,192,205] \
,[104,131,139] \
,[240,128,128] \
,[224,255,255] \
,[224,255,255] \
,[209,238,238] \
,[180,205,205] \
,[122,139,139] \
,[238,221,130] \
,[255,236,139] \
,[238,220,130] \
,[205,190,112] \
,[139,129,76] \
,[250,250,210] \
,[211,211,211] \
,[144,238,144] \
,[255,182,193] \
,[255,174,185] \
,[238,162,173] \
,[205,140,149] \
,[139,95,101] \
,[255,160,122] \
,[255,160,122] \
,[238,149,114] \
,[205,129,98] \
,[139,87,66] \
,[32,178,170] \
,[135,206,250] \
,[176,226,255] \
,[164,211,238] \
,[141,182,205] \
,[96,123,139] \
,[132,112,255] \
,[119,136,153] \
,[176,196,222] \
,[202,225,255] \
,[188,210,238] \
,[162,181,205] \
,[110,123,139] \
,[255,255,224] \
,[255,255,224] \
,[238,238,209] \
,[205,205,180] \
,[139,139,122] \
,[0,255,0] \
,[50,205,50] \
,[250,240,230] \
,[255,0,255] \
,[255,0,255] \
,[238,0,238] \
,[205,0,205] \
,[139,0,139] \
,[128,0,0] \
,[176,48,96] \
,[255,52,179] \
,[238,48,167] \
,[205,41,144] \
,[139,28,98] \
,[102,205,170] \
,[0,0,205] \
,[186,85,211] \
,[224,102,255] \
,[209,95,238] \
,[180,82,205] \
,[122,55,139] \
,[147,112,219] \
,[171,130,255] \
,[159,121,238] \
,[137,104,205] \
,[93,71,139] \
,[60,179,113] \
,[123,104,238] \
,[0,250,154] \
,[72,209,204] \
,[199,21,133] \
,[25,25,112] \
,[245,255,250] \
,[255,228,225] \
,[255,228,225] \
,[238,213,210] \
,[205,183,181] \
,[139,125,123] \
,[255,228,181] \
,[255,222,173] \
,[255,222,173] \
,[238,207,161] \
,[205,179,139] \
,[139,121,94] \
,[0,0,128] \
,[0,0,128] \
,[253,245,230] \
,[128,128,0] \
,[107,142,35] \
,[192,255,62] \
,[179,238,58] \
,[154,205,50] \
,[105,139,34] \
,[255,165,0] \
,[255,165,0] \
,[238,154,0] \
,[205,133,0] \
,[139,90,0] \
,[255,69,0] \
,[255,69,0] \
,[238,64,0] \
,[205,55,0] \
,[139,37,0] \
,[218,112,214] \
,[255,131,250] \
,[238,122,233] \
,[205,105,201] \
,[139,71,137] \
,[238,232,170] \
,[152,251,152] \
,[154,255,154] \
,[144,238,144] \
,[124,205,124] \
,[84,139,84] \
,[175,238,238] \
,[187,255,255] \
,[174,238,238] \
,[150,205,205] \
,[102,139,139] \
,[219,112,147] \
,[255,130,171] \
,[238,121,159] \
,[205,104,137] \
,[139,71,93] \
,[255,239,213] \
,[255,218,185] \
,[255,218,185] \
,[238,203,173] \
,[205,175,149] \
,[139,119,101] \
,[205,133,63] \
,[255,192,203] \
,[255,181,197] \
,[238,169,184] \
,[205,145,158] \
,[139,99,108] \
,[221,160,221] \
,[255,187,255] \
,[238,174,238] \
,[205,150,205] \
,[139,102,139] \
,[176,224,230] \
,[128,0,128] \
,[160,32,240] \
,[155,48,255] \
,[145,44,238] \
,[125,38,205] \
,[85,26,139] \
,[102,51,153] \
,[255,0,0] \
,[255,0,0] \
,[238,0,0] \
,[205,0,0] \
,[139,0,0] \
,[188,143,143] \
,[255,193,193] \
,[238,180,180] \
,[205,155,155] \
,[139,105,105] \
,[65,105,225] \
,[72,118,255] \
,[67,110,238] \
,[58,95,205] \
,[39,64,139] \
,[139,69,19] \
,[250,128,114] \
,[255,140,105] \
,[238,130,98] \
,[205,112,84] \
,[139,76,57] \
,[244,164,96] \
,[46,139,87] \
,[84,255,159] \
,[78,238,148] \
,[67,205,128] \
,[46,139,87] \
,[255,245,238] \
,[255,245,238] \
,[238,229,222] \
,[205,197,191] \
,[139,134,130] \
,[160,82,45] \
,[255,130,71] \
,[238,121,66] \
,[205,104,57] \
,[139,71,38] \
,[192,192,192] \
,[135,206,235] \
,[135,206,255] \
,[126,192,238] \
,[108,166,205] \
,[74,112,139] \
,[106,90,205] \
,[131,111,255] \
,[122,103,238] \
,[105,89,205] \
,[71,60,139] \
,[112,128,144] \
,[198,226,255] \
,[185,211,238] \
,[159,182,205] \
,[108,123,139] \
,[255,250,250] \
,[255,250,250] \
,[238,233,233] \
,[205,201,201] \
,[139,137,137] \
,[0,255,127] \
,[0,255,127] \
,[0,238,118] \
,[0,205,102] \
,[0,139,69] \
,[70,130,180] \
,[99,184,255] \
,[92,172,238] \
,[79,148,205] \
,[54,100,139] \
,[210,180,140] \
,[255,165,79] \
,[238,154,73] \
,[205,133,63] \
,[139,90,43] \
,[0,128,128] \
,[216,191,216] \
,[255,225,255] \
,[238,210,238] \
,[205,181,205] \
,[139,123,139] \
,[255,99,71] \
,[255,99,71] \
,[238,92,66] \
,[205,79,57] \
,[139,54,38] \
,[64,224,208] \
,[0,245,255] \
,[0,229,238] \
,[0,197,205] \
,[0,134,139] \
,[238,130,238] \
,[208,32,144] \
,[255,62,150] \
,[238,58,140] \
,[205,50,120] \
,[139,34,82] \
,[128,128,128] \
,[0,128,0] \
,[128,0,0] \
,[128,0,128] \
,[245,222,179] \
,[255,231,186] \
,[238,216,174] \
,[205,186,150] \
,[139,126,102] \
,[255,255,255] \
,[245,245,245] \
,[190,190,190] \
,[0,255,0] \
,[176,48,96] \
,[160,32,240] \
,[255,255,0] \
,[255,255,0] \
,[238,238,0] \
,[205,205,0] \
,[139,139,0] \
,[154,205,50] \
])
### End of paste ###
B = np.array([ \
["#000000","black"] \
,["#000000","gray0"] \
,["#000080","navy"] \
,["#000080","NavyBlue"] \
,["#00008B","blue4"] \
,["#00008B","DarkBlue"] \
,["#0000CD","MediumBlue"] \
,["#0000CD","blue3"] \
,["#0000EE","blue2"] \
,["#0000FF","blue"] \
,["#0000FF","blue1"] \
,["#006400","DarkGreen"] \
,["#00688B","DeepSkyBlue4"] \
,["#008000","WebGreen"] \
,["#008000","green"] \
,["#008080","teal"] \
,["#00868B","turquoise4"] \
,["#008B00","green4"] \
,["#008B45","SpringGreen4"] \
,["#008B8B","cyan4"] \
,["#008B8B","DarkCyan"] \
,["#009ACD","DeepSkyBlue3"] \
,["#00B2EE","DeepSkyBlue2"] \
,["#00BFFF","DeepSkyBlue"] \
,["#00BFFF","DeepSkyBlue1"] \
,["#00C5CD","turquoise3"] \
,["#00CD00","green3"] \
,["#00CD66","SpringGreen3"] \
,["#00CDCD","cyan3"] \
,["#00CED1","DarkTurquoise"] \
,["#00E5EE","turquoise2"] \
,["#00EE00","green2"] \
,["#00EE76","SpringGreen2"] \
,["#00EEEE","cyan2"] \
,["#00F5FF","turquoise1"] \
,["#00FA9A","MediumSpringGreen"] \
,["#00FF00","lime"] \
,["#00FF00","green"] \
,["#00FF00","green1"] \
,["#00FF00","X11Green"] \
,["#00FF7F","SpringGreen"] \
,["#00FF7F","SpringGreen1"] \
,["#00FFFF","cyan1"] \
,["#00FFFF","aqua"] \
,["#00FFFF","cyan"] \
,["#026666","gray40"] \
,["#030303","gray1"] \
,["#050505","gray2"] \
,["#080808","gray3"] \
,["#0A0A0A","gray4"] \
,["#0D0D0D","gray5"] \
,["#0F0F0F","gray6"] \
,["#104E8B","DodgerBlue4"] \
,["#121212","gray7"] \
,["#141414","gray8"] \
,["#171717","gray9"] \
,["#1874CD","DodgerBlue3"] \
,["#191970","MidnightBlue"] \
,["#1A1A1A","gray10"] \
,["#1C1C1C","gray11"] \
,["#1C86EE","DodgerBlue2"] \
,["#1E90FF","DodgerBlue"] \
,["#1E90FF","DodgerBlue1"] \
,["#1F1F1F","gray12"] \
,["#20B2AA","LightSeaGreen"] \
,["#212121","gray13"] \
,["#228B22","ForestGreen"] \
,["#242424","gray14"] \
,["#262626","gray15"] \
,["#27408B","RoyalBlue4"] \
,["#292929","gray16"] \
,["#2B2B2B","gray17"] \
,["#2E2E2E","gray18"] \
,["#2E8B57","SeaGreen"] \
,["#2E8B57","SeaGreen4"] \
,["#2F4F4F","DarkSlateGray"] \
,["#303030","gray19"] \
,["#32CD32","LimeGreen"] \
,["#333333","gray20"] \
,["#363636","gray21"] \
,["#36648B","SteelBlue4"] \
,["#383838","gray22"] \
,["#3A5FCD","RoyalBlue3"] \
,["#3B3B3B","gray23"] \
,["#3CB371","MediumSeaGreen"] \
,["#3D3D3D","gray24"] \
,["#404040","gray25"] \
,["#40E0D0","turquoise"] \
,["#4169E1","RoyalBlue"] \
,["#424242","gray26"] \
,["#436EEE","RoyalBlue2"] \
,["#43CD80","SeaGreen3"] \
,["#454545","gray27"] \
,["#458B00","chartreuse4"] \
,["#458B74","aquamarine4"] \
,["#4682B4","SteelBlue"] \
,["#473C8B","SlateBlue4"] \
,["#474747","gray28"] \
,["#483D8B","DarkSlateBlue"] \
,["#4876FF","RoyalBlue1"] \
,["#48D1CC","MediumTurquoise"] \
,["#4A4A4A","gray29"] \
,["#4A708B","SkyBlue4"] \
,["#4B0082","indigo"] \
,["#4D4D4D","gray30"] \
,["#4EEE94","SeaGreen2"] \
,["#4F4F4F","gray31"] \
,["#4F94CD","SteelBlue3"] \
,["#525252","gray32"] \
,["#528B8B","DarkSlateGray4"] \
,["#53868B","CadetBlue4"] \
,["#545454","gray33"] \
,["#548B54","PaleGreen4"] \
,["#54FF9F","SeaGreen1"] \
,["#551A8B","purple4"] \
,["#556B2F","DarkOliveGreen"] \
,["#575757","gray34"] \
,["#595959","gray35"] \
,["#5C5C5C","gray36"] \
,["#5CACEE","SteelBlue2"] \
,["#5D478B","MediumPurple4"] \
,["#5E5E5E","gray37"] \
,["#5F9EA0","CadetBlue"] \
,["#607B8B","LightSkyBlue4"] \
,["#616161","gray38"] \
,["#636363","gray39"] \
,["#63B8FF","SteelBlue1"] \
,["#6495ED","CornflowerBlue"] \
,["#663399","RebeccaPurple"] \
,["#668B8B","PaleTurquoise4"] \
,["#66CD00","chartreuse3"] \
,["#66CDAA","MediumAquamarine"] \
,["#66CDAA","aquamarine3"] \
,["#68228B","DarkOrchid4"] \
,["#68838B","LightBlue4"] \
,["#6959CD","SlateBlue3"] \
,["#696969","DimGray"] \
,["#696969","gray41"] \
,["#698B22","OliveDrab4"] \
,["#698B69","DarkSeaGreen4"] \
,["#6A5ACD","SlateBlue"] \
,["#6B6B6B","gray42"] \
,["#6B8E23","OliveDrab"] \
,["#6C7B8B","SlateGray4"] \
,["#6CA6CD","SkyBlue3"] \
,["#6E6E6E","gray43"] \
,["#6E7B8B","LightSteelBlue4"] \
,["#6E8B3D","DarkOliveGreen4"] \
,["#707070","gray44"] \
,["#708090","SlateGray"] \
,["#737373","gray45"] \
,["#757575","gray46"] \
,["#76EE00","chartreuse2"] \
,["#76EEC6","aquamarine2"] \
,["#778899","LightSlateGray"] \
,["#787878","gray47"] \
,["#79CDCD","DarkSlateGray3"] \
,["#7A378B","MediumOrchid4"] \
,["#7A67EE","SlateBlue2"] \
,["#7A7A7A","gray48"] \
,["#7A8B8B","LightCyan4"] \
,["#7AC5CD","CadetBlue3"] \
,["#7B68EE","MediumSlateBlue"] \
,["#7CCD7C","PaleGreen3"] \
,["#7CFC00","LawnGreen"] \
,["#7D26CD","purple3"] \
,["#7D7D7D","gray49"] \
,["#7EC0EE","SkyBlue2"] \
,["#7F7F7F","gray50"] \
,["#7FFF00","chartreuse"] \
,["#7FFF00","chartreuse1"] \
,["#7FFFD4","aquamarine"] \
,["#7FFFD4","aquamarine1"] \
,["#800000","WebMaroon"] \
,["#800000","maroon"] \
,["#800080","WebPurple"] \
,["#800080","purple"] \
,["#808000","olive"] \
,["#808080","WebGray"] \
,["#808080","gray"] \
,["#828282","gray51"] \
,["#836FFF","SlateBlue1"] \
,["#838B83","honeydew4"] \
,["#838B8B","azure4"] \
,["#8470FF","LightSlateBlue"] \
,["#858585","gray52"] \
,["#878787","gray53"] \
,["#87CEEB","SkyBlue"] \
,["#87CEFA","LightSkyBlue"] \
,["#87CEFF","SkyBlue1"] \
,["#8968CD","MediumPurple3"] \
,["#8A2BE2","BlueViolet"] \
,["#8A8A8A","gray54"] \
,["#8B0000","DarkRed"] \
,["#8B0000","red4"] \
,["#8B008B","DarkMagenta"] \
,["#8B008B","magenta4"] \
,["#8B0A50","DeepPink4"] \
,["#8B1A1A","firebrick4"] \
,["#8B1C62","maroon4"] \
,["#8B2252","VioletRed4"] \
,["#8B2323","brown4"] \
,["#8B2500","OrangeRed4"] \
,["#8B3626","tomato4"] \
,["#8B3A3A","IndianRed4"] \
,["#8B3A62","HotPink4"] \
,["#8B3E2F","coral4"] \
,["#8B4500","DarkOrange4"] \
,["#8B4513","SaddleBrown"] \
,["#8B4513","chocolate4"] \
,["#8B4726","sienna4"] \
,["#8B475D","PaleVioletRed4"] \
,["#8B4789","orchid4"] \
,["#8B4C39","salmon4"] \
,["#8B5742","LightSalmon4"] \
,["#8B5A00","orange4"] \
,["#8B5A2B","tan4"] \
,["#8B5F65","LightPink4"] \
,["#8B636C","pink4"] \
,["#8B6508","DarkGoldenrod4"] \
,["#8B668B","plum4"] \
,["#8B6914","goldenrod4"] \
,["#8B6969","RosyBrown4"] \
,["#8B7355","burlywood4"] \
,["#8B7500","gold4"] \
,["#8B7765","PeachPuff4"] \
,["#8B795E","NavajoWhite4"] \
,["#8B7B8B","thistle4"] \
,["#8B7D6B","bisque4"] \
,["#8B7D7B","MistyRose4"] \
,["#8B7E66","wheat4"] \
,["#8B814C","LightGoldenrod4"] \
,["#8B8378","AntiqueWhite4"] \
,["#8B8386","LavenderBlush4"] \
,["#8B864E","khaki4"] \
,["#8B8682","seashell4"] \
,["#8B8878","cornsilk4"] \
,["#8B8970","LemonChiffon4"] \
,["#8B8989","snow4"] \
,["#8B8B00","yellow4"] \
,["#8B8B7A","LightYellow4"] \
,["#8B8B83","ivory4"] \
,["#8C8C8C","gray55"] \
,["#8DB6CD","LightSkyBlue3"] \
,["#8DEEEE","DarkSlateGray2"] \
,["#8EE5EE","CadetBlue2"] \
,["#8F8F8F","gray56"] \
,["#8FBC8F","DarkSeaGreen"] \
,["#90EE90","LightGreen"] \
,["#90EE90","PaleGreen2"] \
,["#912CEE","purple2"] \
,["#919191","gray57"] \
,["#9370DB","MediumPurple"] \
,["#9400D3","DarkViolet"] \
,["#949494","gray58"] \
,["#969696","gray59"] \
,["#96CDCD","PaleTurquoise3"] \
,["#97FFFF","DarkSlateGray1"] \
,["#98F5FF","CadetBlue1"] \
,["#98FB98","PaleGreen"] \
,["#9932CC","DarkOrchid"] \
,["#999999","gray60"] \
,["#9A32CD","DarkOrchid3"] \
,["#9AC0CD","LightBlue3"] \
,["#9ACD32","YellowGreen"] \
,["#9ACD32","OliveDrab3"] \
,["#9AFF9A","PaleGreen1"] \
,["#9B30FF","purple1"] \
,["#9BCD9B","DarkSeaGreen3"] \
,["#9C9C9C","gray61"] \
,["#9E9E9E","gray62"] \
,["#9F79EE","MediumPurple2"] \
,["#9FB6CD","SlateGray3"] \
,["#A020F0","purple"] \
,["#A020F0","X11Purple"] \
,["#A0522D","sienna"] \
,["#A1A1A1","gray63"] \
,["#A2B5CD","LightSteelBlue3"] \
,["#A2CD5A","DarkOliveGreen3"] \
,["#A3A3A3","gray64"] \
,["#A4D3EE","LightSkyBlue2"] \
,["#A52A2A","brown"] \
,["#A6A6A6","gray65"] \
,["#A8A8A8","gray66"] \
,["#A9A9A9","DarkGray"] \
,["#AB82FF","MediumPurple1"] \
,["#ABABAB","gray67"] \
,["#ADADAD","gray68"] \
,["#ADD8E6","LightBlue"] \
,["#ADFF2F","GreenYellow"] \
,["#AEEEEE","PaleTurquoise2"] \
,["#AFEEEE","PaleTurquoise"] \
,["#B03060","maroon"] \
,["#B03060","X11Maroon"] \
,["#B0B0B0","gray69"] \
,["#B0C4DE","LightSteelBlue"] \
,["#B0E0E6","PowderBlue"] \
,["#B0E2FF","LightSkyBlue1"] \
,["#B22222","firebrick"] \
,["#B23AEE","DarkOrchid2"] \
,["#B2DFEE","LightBlue2"] \
,["#B3B3B3","gray70"] \
,["#B3EE3A","OliveDrab2"] \
,["#B452CD","MediumOrchid3"] \
,["#B4CDCD","LightCyan3"] \
,["#B4EEB4","DarkSeaGreen2"] \
,["#B5B5B5","gray71"] \
,["#B8860B","DarkGoldenrod"] \
,["#B8B8B8","gray72"] \
,["#B9D3EE","SlateGray2"] \
,["#BA55D3","MediumOrchid"] \
,["#BABABA","gray73"] \
,["#BBFFFF","PaleTurquoise1"] \
,["#BC8F8F","RosyBrown"] \
,["#BCD2EE","LightSteelBlue2"] \
,["#BCEE68","DarkOliveGreen2"] \
,["#BDB76B","DarkKhaki"] \
,["#BDBDBD","gray74"] \
,["#BEBEBE","gray"] \
,["#BEBEBE","X11Gray"] \
,["#BF3EFF","DarkOrchid1"] \
,["#BFBFBF","gray75"] \
,["#BFEFFF","LightBlue1"] \
,["#C0C0C0","silver"] \
,["#C0FF3E","OliveDrab1"] \
,["#C1CDC1","honeydew3"] \
,["#C1CDCD","azure3"] \
,["#C1FFC1","DarkSeaGreen1"] \
,["#C2C2C2","gray76"] \
,["#C4C4C4","gray77"] \
,["#C6E2FF","SlateGray1"] \
,["#C71585","MediumVioletRed"] \
,["#C7C7C7","gray78"] \
,["#C9C9C9","gray79"] \
,["#CAE1FF","LightSteelBlue1"] \
,["#CAFF70","DarkOliveGreen1"] \
,["#CCCCCC","gray80"] \
,["#CD0000","red3"] \
,["#CD00CD","magenta3"] \
,["#CD1076","DeepPink3"] \
,["#CD2626","firebrick3"] \
,["#CD2990","maroon3"] \
,["#CD3278","VioletRed3"] \
,["#CD3333","brown3"] \
,["#CD3700","OrangeRed3"] \
,["#CD4F39","tomato3"] \
,["#CD5555","IndianRed3"] \
,["#CD5B45","coral3"] \
,["#CD5C5C","IndianRed"] \
,["#CD6090","HotPink3"] \
,["#CD6600","DarkOrange3"] \
,["#CD661D","chocolate3"] \
,["#CD6839","sienna3"] \
,["#CD6889","PaleVioletRed3"] \
,["#CD69C9","orchid3"] \
,["#CD7054","salmon3"] \
,["#CD8162","LightSalmon3"] \
,["#CD8500","orange3"] \
,["#CD853F","peru"] \
,["#CD853F","tan3"] \
,["#CD8C95","LightPink3"] \
,["#CD919E","pink3"] \
,["#CD950C","DarkGoldenrod3"] \
,["#CD96CD","plum3"] \
,["#CD9B1D","goldenrod3"] \
,["#CD9B9B","RosyBrown3"] \
,["#CDAA7D","burlywood3"] \
,["#CDAD00","gold3"] \
,["#CDAF95","PeachPuff3"] \
,["#CDB38B","NavajoWhite3"] \
,["#CDB5CD","thistle3"] \
,["#CDB79E","bisque3"] \
,["#CDB7B5","MistyRose3"] \
,["#CDBA96","wheat3"] \
,["#CDBE70","LightGoldenrod3"] \
,["#CDC0B0","AntiqueWhite3"] \
,["#CDC1C5","LavenderBlush3"] \
,["#CDC5BF","seashell3"] \
,["#CDC673","khaki3"] \
,["#CDC8B1","cornsilk3"] \
,["#CDC9A5","LemonChiffon3"] \
,["#CDC9C9","snow3"] \
,["#CDCD00","yellow3"] \
,["#CDCDB4","LightYellow3"] \
,["#CDCDC1","ivory3"] \
,["#CFCFCF","gray81"] \
,["#D02090","VioletRed"] \
,["#D15FEE","MediumOrchid2"] \
,["#D1D1D1","gray82"] \
,["#D1EEEE","LightCyan2"] \
,["#D2691E","chocolate"] \
,["#D2B48C","tan"] \
,["#D3D3D3","LightGray"] \
,["#D4D4D4","gray83"] \
,["#D6D6D6","gray84"] \
,["#D8BFD8","thistle"] \
,["#D9D9D9","gray85"] \
,["#DA70D6","orchid"] \
,["#DAA520","goldenrod"] \
,["#DB7093","PaleVioletRed"] \
,["#DBDBDB","gray86"] \
,["#DC143C","crimson"] \
,["#DCDCDC","gainsboro"] \
,["#DDA0DD","plum"] \
,["#DEB887","burlywood"] \
,["#DEDEDE","gray87"] \
,["#E066FF","MediumOrchid1"] \
,["#E0E0E0","gray88"] \
,["#E0EEE0","honeydew2"] \
,["#E0EEEE","azure2"] \
,["#E0FFFF","LightCyan"] \
,["#E0FFFF","LightCyan1"] \
,["#E3E3E3","gray89"] \
,["#E5E5E5","gray90"] \
,["#E6E6FA","lavender"] \
,["#E8E8E8","gray91"] \
,["#E9967A","DarkSalmon"] \
,["#EBEBEB","gray92"] \
,["#EDEDED","gray93"] \
,["#EE0000","red2"] \
,["#EE00EE","magenta2"] \
,["#EE1289","DeepPink2"] \
,["#EE2C2C","firebrick2"] \
,["#EE30A7","maroon2"] \
,["#EE3A8C","VioletRed2"] \
,["#EE3B3B","brown2"] \
,["#EE4000","OrangeRed2"] \
,["#EE5C42","tomato2"] \
,["#EE6363","IndianRed2"] \
,["#EE6A50","coral2"] \
,["#EE6AA7","HotPink2"] \
,["#EE7600","DarkOrange2"] \
,["#EE7621","chocolate2"] \
,["#EE7942","sienna2"] \
,["#EE799F","PaleVioletRed2"] \
,["#EE7AE9","orchid2"] \
,["#EE8262","salmon2"] \
,["#EE82EE","violet"] \
,["#EE9572","LightSalmon2"] \
,["#EE9A00","orange2"] \
,["#EE9A49","tan2"] \
,["#EEA2AD","LightPink2"] \
,["#EEA9B8","pink2"] \
,["#EEAD0E","DarkGoldenrod2"] \
,["#EEAEEE","plum2"] \
,["#EEB422","goldenrod2"] \
,["#EEB4B4","RosyBrown2"] \
,["#EEC591","burlywood2"] \
,["#EEC900","gold2"] \
,["#EECBAD","PeachPuff2"] \
,["#EECFA1","NavajoWhite2"] \
,["#EED2EE","thistle2"] \
,["#EED5B7","bisque2"] \
,["#EED5D2","MistyRose2"] \
,["#EED8AE","wheat2"] \
,["#EEDC82","LightGoldenrod2"] \
,["#EEDD82","LightGoldenrod"] \
,["#EEDFCC","AntiqueWhite2"] \
,["#EEE0E5","LavenderBlush2"] \
,["#EEE5DE","seashell2"] \
,["#EEE685","khaki2"] \
,["#EEE8AA","PaleGoldenrod"] \
,["#EEE8CD","cornsilk2"] \
,["#EEE9BF","LemonChiffon2"] \
,["#EEE9E9","snow2"] \
,["#EEEE00","yellow2"] \
,["#EEEED1","LightYellow2"] \
,["#EEEEE0","ivory2"] \
,["#F08080","LightCoral"] \
,["#F0E68C","khaki"] \
,["#F0F0F0","gray94"] \
,["#F0F8FF","AliceBlue"] \
,["#F0FFF0","honeydew"] \
,["#F0FFF0","honeydew1"] \
,["#F0FFFF","azure"] \
,["#F0FFFF","azure1"] \
,["#F2F2F2","gray95"] \
,["#F4A460","SandyBrown"] \
,["#F5DEB3","wheat"] \
,["#F5F5DC","beige"] \
,["#F5F5F5","WhiteSmoke"] \
,["#F5F5F5","gray96"] \
,["#F5FFFA","MintCream"] \
,["#F7F7F7","gray97"] \
,["#F8F8FF","GhostWhite"] \
,["#FA8072","salmon"] \
,["#FAEBD7","AntiqueWhite"] \
,["#FAF0E6","linen"] \
,["#FAFAD2","LightGoldenrodYellow"] \
,["#FAFAFA","gray98"] \
,["#FCFCFC","gray99"] \
,["#FDF5E6","OldLace"] \
,["#FF0000","red"] \
,["#FF0000","red1"] \
,["#FF00FF","magenta1"] \
,["#FF00FF","fuchsia"] \
,["#FF00FF","magenta"] \
,["#FF1493","DeepPink"] \
,["#FF1493","DeepPink1"] \
,["#FF3030","firebrick1"] \
,["#FF34B3","maroon1"] \
,["#FF3E96","VioletRed1"] \
,["#FF4040","brown1"] \
,["#FF4500","OrangeRed"] \
,["#FF4500","OrangeRed1"] \
,["#FF6347","tomato"] \
,["#FF6347","tomato1"] \
,["#FF69B4","HotPink"] \
,["#FF6A6A","IndianRed1"] \
,["#FF6EB4","HotPink1"] \
,["#FF7256","coral1"] \
,["#FF7F00","DarkOrange1"] \
,["#FF7F24","chocolate1"] \
,["#FF7F50","coral"] \
,["#FF8247","sienna1"] \
,["#FF82AB","PaleVioletRed1"] \
,["#FF83FA","orchid1"] \
,["#FF8C00","DarkOrange"] \
,["#FF8C69","salmon1"] \
,["#FFA07A","LightSalmon"] \
,["#FFA07A","LightSalmon1"] \
,["#FFA500","orange"] \
,["#FFA500","orange1"] \
,["#FFA54F","tan1"] \
,["#FFAEB9","LightPink1"] \
,["#FFB5C5","pink1"] \
,["#FFB6C1","LightPink"] \
,["#FFB90F","DarkGoldenrod1"] \
,["#FFBBFF","plum1"] \
,["#FFC0CB","pink"] \
,["#FFC125","goldenrod1"] \
,["#FFC1C1","RosyBrown1"] \
,["#FFD39B","burlywood1"] \
,["#FFD700","gold"] \
,["#FFD700","gold1"] \
,["#FFDAB9","PeachPuff"] \
,["#FFDAB9","PeachPuff1"] \
,["#FFDEAD","NavajoWhite"] \
,["#FFDEAD","NavajoWhite1"] \
,["#FFE1FF","thistle1"] \
,["#FFE4B5","moccasin"] \
,["#FFE4C4","bisque"] \
,["#FFE4C4","bisque1"] \
,["#FFE4E1","MistyRose"] \
,["#FFE4E1","MistyRose1"] \
,["#FFE7BA","wheat1"] \
,["#FFEBCD","BlanchedAlmond"] \
,["#FFEC8B","LightGoldenrod1"] \
,["#FFEFD5","PapayaWhip"] \
,["#FFEFDB","AntiqueWhite1"] \
,["#FFF0F5","LavenderBlush"] \
,["#FFF0F5","LavenderBlush1"] \
,["#FFF5EE","seashell"] \
,["#FFF5EE","seashell1"] \
,["#FFF68F","khaki1"] \
,["#FFF8DC","cornsilk"] \
,["#FFF8DC","cornsilk1"] \
,["#FFFACD","LemonChiffon"] \
,["#FFFACD","LemonChiffon1"] \
,["#FFFAF0","FloralWhite"] \
,["#FFFAFA","snow"] \
,["#FFFAFA","snow1"] \
,["#FFFF00","yellow"] \
,["#FFFF00","yellow1"] \
,["#FFFFE0","LightYellow"] \
,["#FFFFE0","LightYellow1"] \
,["#FFFFF0","ivory"] \
,["#FFFFF0","ivory1"] \
,["#FFFFFF","white"] \
,["#FFFFFF","gray100"] \
])
# RGB= np.delete(A, np.s_[3:5], axis=1) # remove columns 3 to 5.
pt = [154,215,50] # <-- the point to find
print (A[spatial.KDTree(A).query(pt)[1]]) # <-- the nearest point
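# A hedged extension sketch (not part of the original script): map the nearest RGB row
# back to an X11 colour name by re-encoding it as the hex string used in B. This assumes
# every RGB triple in A has at least one matching hex entry in B.
def nearest_color_name(rgb_point):
    # Index of the nearest RGB triple in A.
    _, idx = spatial.KDTree(A).query(rgb_point)
    nearest_rgb = A[idx]
    # Re-encode as an upper-case hex string, e.g. [154, 205, 50] -> "#9ACD32".
    hex_code = "#%02X%02X%02X" % tuple(int(v) for v in nearest_rgb)
    # Collect every name sharing that hex value (B lists several aliases per colour).
    names = [name for code, name in B if code == hex_code]
    return hex_code, names
# print(nearest_color_name([154, 215, 50]))  # expected: ("#9ACD32", ["YellowGreen", "OliveDrab3"])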
|
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# aiohttp documentation build configuration file, created by
# sphinx-quickstart on Wed Mar 5 12:35:35 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import codecs
import re
_docs_path = os.path.dirname(__file__)
_version_path = os.path.abspath(os.path.join(_docs_path,
'..', 'aiohttp', '__init__.py'))
with codecs.open(_version_path, 'r', 'latin1') as fp:
try:
_version_info = re.search(r"^__version__ = '"
r"(?P<major>\d+)"
r"\.(?P<minor>\d+)"
r"\.(?P<patch>\d+)"
r"(?P<tag>.*)?'$",
fp.read(), re.M).groupdict()
except (AttributeError, IndexError):  # no match: re.search() returns None
raise RuntimeError('Unable to determine version.')
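# For illustration (hypothetical value): a line such as  __version__ = '2.0.7'
# in aiohttp/__init__.py yields
#   _version_info == {'major': '2', 'minor': '0', 'patch': '7', 'tag': ''}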
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
# import alabaster
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.intersphinx',
'alabaster',
'sphinxcontrib.asyncio',
'sphinxcontrib.newsfeed',
]
try:
import sphinxcontrib.spelling # noqa
extensions.append('sphinxcontrib.spelling')
except ImportError:
pass
intersphinx_mapping = {
'python': ('http://docs.python.org/3', None),
'multidict':
('https://multidict.readthedocs.io/en/stable/', None),
'yarl':
('https://yarl.readthedocs.io/en/stable/', None),
'aiohttpjinja2':
('https://aiohttp-jinja2.readthedocs.io/en/stable/', None),
'aiohttpsession':
('https://aiohttp-session.readthedocs.io/en/stable/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'aiohttp'
copyright = '2013-2017, Aiohttp contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '{major}.{minor}'.format(**_version_info)
# The full version, including alpha/beta/rc tags.
release = '{major}.{minor}.{patch}-{tag}'.format(**_version_info)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# The default language to highlight source code in.
highlight_language = 'python3'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo': 'aiohttp-icon-128x128.png',
'description': 'http client/server for asyncio',
'github_user': 'aio-libs',
'github_repo': 'aiohttp',
'github_button': True,
'github_type': 'star',
'github_banner': True,
'travis_button': True,
'codecov_button': True,
'pre_bg': '#FFF6E5',
'note_bg': '#E5ECD1',
'note_border': '#BFCF8C',
'body_text': '#482C0A',
'sidebar_text': '#49443E',
'sidebar_header': '#4B4032',
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = [alabaster.get_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = 'aiohttp-icon.svg'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'aiohttp-icon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': [
'about.html', 'navigation.html', 'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'aiohttpdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'aiohttp.tex', 'aiohttp Documentation',
'aiohttp contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'aiohttp', 'aiohttp Documentation',
['aiohttp'], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'aiohttp', 'aiohttp Documentation',
'Aiohttp contributors', 'aiohttp', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
disqus_shortname = 'aiohttp'
|
|
# Copyright 2014 Uri Laserson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import subprocess
from ulutil import seqtools
class ExonerateCommand(object):
"""Build command for exonerate"""
options_list = [
'query',
'target',
'querytype',
'targettype',
'querychunkid',
'querychunktotal',
'targetchunkid',
'targetchunktotal',
'verbose',
'exhaustive',
'bigseq',
'forcescan',
'saturatethreshold',
'customserver',
'fastasuffix',
'model',
'score',
'percent',
'showalignment',
'showsugar',
'showcigar',
'showvulgar',
'showquerygff',
'showtargetgff',
# 'ryo', NOTE: this is left out as it requires special handling
'bestn',
'subopt',
'gappedextension',
'refine',
'refineboundary',
'dpmemory',
'compiled',
'terminalrangeint',
'terminalrangeext',
'joinrangeint',
'joinrangeext',
'spanrangeint',
'spanrangeext',
'extensionthreshold',
'singlepass',
'joinfilter',
'annotation',
'softmaskquery',
'softmasktarget',
'dnasubmat',
'proteinsubmat',
'fsmmemory',
'forcefsm',
'wordjump',
'gapopen',
'gapextend',
'codongapopen',
'codongapextend',
'minner',
'maxner',
'neropen',
'minintron',
'maxintron',
'intronpenalty',
'frameshift',
'useaatla',
'geneticcode',
'hspfilter',
'useworddropoff',
'seedrepeat',
'dnawordlen',
'proteinwordlen',
'codonnwordlen',
'dnahspdropoff',
'proteinhspdropoff',
'codonhspdropoff',
'dnahspthreshold',
'proteinhspthreshold',
'codonhspthreshold',
'dnawordlimit',
'proteinwordlimit',
'codonwordlimit',
'geneseed',
'geneseedrepeat',
'alignmentwidth',
'forwardcoordinates',
'quality',
'splice3',
'splice5',
'forcegtag']
def __init__(self, *args, **kw):
# register preset handlers
self.register = {
'affine:local' : self.preset_affinelocal,
'affine:global' : self.preset_affineglobal,
'findend' : self.preset_findend,
'parsable' : self.preset_parsable,
'pretty' : self.preset_pretty,
'bestonly' : self.preset_bestonly,
'ungapped' : self.preset_ungapped
}
        # these attributes must be handled specially and set manually at the start
self.options = {}
self.ryo = None
# first execute any registered functions
for a in args:
self.register[a]()
# check for ryo output and save it (needs special handling)
        if 'ryo' in kw: self.ryo = kw.pop('ryo')
# then set all the manual options supplied
self.options.update(kw)
# set standard options in case they weren't given initially
# they can still be overwritten
self.softset_default()
# return self
def __setattr__(self,name,value):
"""Allows setting of options by acting on object attributes.
For example:
cmd = ExonerateCommand()
cmd.querytype = 'dna'
Catches the special cases of ryo and options.
ryo needs to be set manually
options shouldn't be overwritten, but lets you...
"""
if name in ExonerateCommand.options_list:
self.options[name] = value
else:
object.__setattr__(self,name,value)
def __getattr__(self,name):
if name in ExonerateCommand.options_list:
return self.options[name]
else:
raise AttributeError
def build_command(self):
self.cmd = 'exonerate'
for (option,value) in self.options.iteritems():
self.cmd += ' --%s %s' % (option,value)
# handle ryo output using raw string
if self.ryo is not None:
self.cmd += r' --%s "%s"' % ('ryo',self.ryo)
return self.cmd
def softset_default(self):
"""Conditionally override options to a reasonable default."""
        if 'model' not in self.options:
            self.model = 'affine:local'
        if 'querytype' not in self.options:
            self.querytype = 'dna'
        if 'targettype' not in self.options:
            self.targettype = 'dna'
def hardset_preset(self,*args):
for a in args:
            self.register[a]()
def preset_affinelocal(self):
self.model = 'affine:local'
def preset_affineglobal(self):
self.model = 'affine:global'
self.exhaustive = True
def preset_ungapped(self):
self.model = 'ungapped'
self.exhaustive = True
def preset_findend(self):
self.model = 'affine:overlap'
self.exhaustive = True
def preset_parsable(self):
self.verbose = 0
# self.showalignment = False
# self.showvulgar = False
self.ryo = r'aln_summary: %qi %ql %qab %qae %qS %ti %tl %tab %tae %tS %s %et %ei %pi\n'
def preset_pretty(self):
self.showalignment = True
self.showvulgar = True
self.showsugar = True
def preset_bestonly(self):
self.bestn = 1
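# --- Illustrative usage sketch (not part of the original module) ---
# Shows how a command line is assembled from presets, keyword options and
# attribute-style options; the FASTA paths and the 90% cutoff are
# hypothetical values chosen for the example.
def _example_build_command():
    cmd = ExonerateCommand('parsable', 'bestonly',
                           query='query.fasta', target='target.fasta')
    cmd.percent = 90  # any name in options_list can be set as an attribute
    return cmd.build_command()  # e.g. 'exonerate --query query.fasta ... --ryo "..."'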
def run_exonerate(cmd,query=None,target=None):
"""Run exonerate using given ExonerateCommand object
query and target must refer to files
"""
# check query and target are set properly
if query is not None: cmd.query = query
if target is not None: cmd.target = target
try:
cmd.query
cmd.target
except KeyError:
print "cmd.query or cmd.target is not set"
raise
# submit process
p = subprocess.Popen(cmd.build_command(),shell=True,stdout=subprocess.PIPE)
aln = p.stdout.read()
p.wait()
return aln
def run_exonerate2(cmd,query,target,queryname='query',targetname='target',debug=False):
"""Perform pairwise alignment using cmd ExonerateCommand object
query and target are sequences
"""
# TODO: see if this can be implemented without writing to temporary files
# write seqs to tempfiles
(fdq,queryfile) = tempfile.mkstemp()
(fdt,targetfile) = tempfile.mkstemp()
iopq = open(queryfile,'w')
iopt = open(targetfile,'w')
print >>iopq, ">%s\n%s\n" % (queryname,query)
print >>iopt, ">%s\n%s\n" % (targetname,target)
iopq.close()
iopt.close()
os.close(fdq)
os.close(fdt)
try:
# perform alignment
cmd.query = queryfile
cmd.target = targetfile
aln = run_exonerate(cmd)
finally:
# clean up
os.remove(queryfile)
os.remove(targetfile)
if debug: print aln
return aln
def iter_alnsummary(rawaln):
"""Return alnsummary line from rawaln."""
for line in rawaln.split('\n'):
if line.startswith('aln_summary'):
yield line
def extract_alnsummary(rawaln):
"""Return alnsummary line from rawaln."""
return iter_alnsummary(rawaln).next()
def iter_vulgar(rawaln):
"""Return vulgar line from rawaln."""
for line in rawaln.split('\n'):
if line.startswith('vulgar'):
yield line
def extract_vulgar(rawaln):
"""Return vulgar line from rawaln."""
return iter_vulgar(rawaln).next()
def iter_alnsummary_vulgar(rawaln):
for (alnsummary,vulgar_commands) in zip(iter_alnsummary(rawaln),iter_vulgar(rawaln)):
yield (alnsummary,vulgar_commands)
def parse_alnsummary(rawalnsummary):
"""Parse alnsummary line from exonerate using 'parsable' preset.
Takes an alnsummary line from an alignment that was generated from an ryo
'parsable' preset.
"""
# 'aln_summary: %qi %ql %qab %qae %qS %ti %tl %tab %tae %tS %s %et %ei %pi\n'
data = rawalnsummary.split()
aln = {}
aln['query_id'] = data[1]
aln['query_len'] = int(data[2])
aln['query_aln_begin'] = int(data[3])
aln['query_aln_end'] = int(data[4])
aln['query_strand'] = data[5]
aln['target_id'] = data[6]
aln['target_len'] = int(data[7])
aln['target_aln_begin'] = int(data[8])
aln['target_aln_end'] = int(data[9])
aln['target_strand'] = data[10]
aln['score'] = int(data[11])
aln['equiv_total'] = int(data[12])
aln['equiv_id'] = int(data[13])
aln['percent_id'] = float(data[14])
return aln
def parse_aln(rawaln):
"""Parse raw alignment from exonerate using 'parsable' preset.
Takes a raw alignment and searches for an alnsummary line (generated from
an ryo 'parsable' preset) and parses it.
"""
for line in rawaln.split('\n'):
if line.strip().startswith('aln_summary'):
rawalnsummary = line.strip()
break
else:
raise ValueError, "aln_summary line not found in raw aln:\n%s" % rawaln
return parse_alnsummary(rawalnsummary)
def parse_vulgar(rawvulgar):
"""Parse vulgar line
Takes vulgar line from alignment output
returns only the non-sugar part that allows you to build the aln
"""
data = rawvulgar.split()[10:]
cmds = []
for i in range(0,len(data),3):
        cmds.append( (data[i],int(data[i+1]),int(data[i+2])) )
return cmds
def build_aln(alnsummary,vulgar_commands,queryseq,targetseq):
"""Build full alignment from exonerate using 'parsable' preset and vulgar output"""
queryname = alnsummary['query_id']
targetname = alnsummary['target_id']
# process strands. the position vars below will always progress
# from 0->len(seq), so the seqs must be revcomped accordingly
queryposition = alnsummary['query_aln_begin']
targetposition = alnsummary['target_aln_begin']
if alnsummary['query_strand'] == '-':
queryseq = seqtools.reverse_complement(queryseq)
queryposition = len(queryseq) - queryposition
if alnsummary['target_strand'] == '-':
targetseq = seqtools.reverse_complement(targetseq)
targetposition = len(targetseq) - targetposition
pad = abs(queryposition - targetposition)
# build alignment
queryaln = ''
targetaln = ''
# process necessary padding
if queryposition > targetposition:
targetaln = ' ' * pad
else:
queryaln = ' ' * pad
# add pre-aln sequence
queryaln += queryseq[0:queryposition]
targetaln += targetseq[0:targetposition]
# walk through alignment (from vulgar output)
for cmd in vulgar_commands:
if cmd[0] == 'M':
assert(cmd[1]==cmd[2])
queryaln += queryseq[queryposition:queryposition+cmd[1]]
targetaln += targetseq[targetposition:targetposition+cmd[2]]
queryposition += cmd[1]
targetposition += cmd[2]
elif cmd[0] == 'G':
            assert( (cmd[1]==0) != (cmd[2]==0) ) # xor: exactly one side is a gap
if cmd[1] == 0:
queryaddendum = '-' * cmd[2]
targetaddendum = targetseq[targetposition:targetposition+cmd[2]]
elif cmd[2] == 0:
queryaddendum = queryseq[queryposition:queryposition+cmd[1]]
targetaddendum = '-' * cmd[1]
queryaln += queryaddendum
targetaln += targetaddendum
queryposition += cmd[1]
targetposition += cmd[2]
else:
raise ValueError, "I do not understand the vulgar command %s" % cmd[0]
# add any post-aln sequence
queryaln += queryseq[queryposition:]
targetaln += targetseq[targetposition:]
return (queryaln,targetaln)
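# --- Illustrative end-to-end sketch (not part of the original module) ---
# Ties the helpers above together: align two in-memory sequences with the
# 'parsable' preset, parse the aln_summary and vulgar output, and rebuild
# the padded alignment strings. Requires the exonerate binary on PATH; the
# sequences below are hypothetical placeholders.
def _example_align(queryseq='ACGTACGTACGT', targetseq='ACGTACGTACGA'):
    cmd = ExonerateCommand('parsable', 'bestonly')
    cmd.showvulgar = True  # vulgar lines are needed to rebuild the alignment
    rawaln = run_exonerate2(cmd, queryseq, targetseq)
    alnsummary = parse_aln(rawaln)
    vulgar_commands = parse_vulgar(extract_vulgar(rawaln))
    return build_aln(alnsummary, vulgar_commands, queryseq, targetseq)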
|
|
import curses
from MediaManager.util.numbers import humanize
class SyncList(dict):
def __init__(self, filepath):
self.__filepath = filepath
def load(self):
try:
for line in open(self.__filepath, 'rt'):
line = line.split('#', 1)[0]
line = line.rstrip()
if not line:
continue
cmd, title = line.split(' ', 1)
self[title] = cmd
except IOError:
pass
return self
def save(self):
with open(self.__filepath, 'wt') as file:
for name in sorted(self):
line = '%s %s\n' % (self[name], name)
file.write(line)
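# --- Illustrative sketch (not part of the original module) ---
# The sync list is persisted as "<cmd> <title>" lines, where <cmd> is '+'
# (copy the album to the destination) or '-' (remove it from the
# destination); '#' starts a comment and blank lines are ignored.
# A minimal round-trip using a hypothetical path:
def _example_sync_list(path='albums.sync'):
    sync_list = SyncList(path).load()   # load() tolerates a missing file
    sync_list['Some Album'] = '+'       # queue an album for copying
    sync_list.save()                    # rewritten as sorted "+ Some Album" lines
    return sync_list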
class SyncSelect(object):
def __init__(self, from_lib, to_lib, sync_list):
self.__sync_list = sync_list
self.__src = from_lib
self.__dst = to_lib
self.__dst_size = to_lib.available_space
def show_selection_window(self):
# Ratify actions
        for name, action in list(self.__sync_list.items()):  # copy; entries may be deleted below
if action == '+':
if not self.__src.has_album(name):
continue
if self.__dst.has_album(name):
del self.__sync_list[name]
elif action == '-':
if not self.__dst.has_album(name):
del self.__sync_list[name]
else:
continue
def get_color(name):
if self.__dst.has_album(name):
if name in self.__sync_list:
if self.__sync_list[name] == '-':
return 22
return 4
if self.__src.has_album(name):
if name in self.__sync_list:
if self.__sync_list[name] == '+':
return 21
return 1
return 3
src_names = [a.name for a in self.__src]
dst_names = [a.name for a in self.__dst]
action_names = [n for n in self.__sync_list]
names = set(src_names)
names.update(dst_names)
names.update(action_names)
items = []
for name in sorted(names):
items.append((get_color(name), name))
items.sort(key=lambda a: a[1].lower())
try:
w = None
color_map = {
}
pair_map = {
1: (curses.COLOR_WHITE, curses.COLOR_BLACK),
2: (curses.COLOR_BLUE, curses.COLOR_WHITE),
3: (curses.COLOR_RED, curses.COLOR_BLACK),
4: (curses.COLOR_CYAN, curses.COLOR_BLACK),
#10: (curses.COLOR_BLACK, curses.COLOR_WHITE),
#11: (curses.COLOR_GREEN, curses.COLOR_WHITE),
#12: (curses.COLOR_BLUE, curses.COLOR_WHITE),
21: (curses.COLOR_BLACK, curses.COLOR_GREEN),
22: (curses.COLOR_BLACK, curses.COLOR_RED),
}
w = Window(color_map, pair_map)
max_y, max_x = w.getmaxyx()
w.refresh()
title = TextBox((0, 0), max_x, ' * Album Selection * (W accept, ^C to exit) ', 2, 2)
t = ToggleList((2, 0, max_y - 3, max_x), items, 1)
match = TextBox((0, 50), 25, '<match>', 2, 11)
dst = TextBox((1, 0), max_x, ' Sync to: %s' % self.__dst.base_dir, 2, 2)
space = TextBox((max_y - 1, 0), max_x, '', 2, 2)
info = TextBox((max_y - 2, 0), max_x, '', 2, 2)
w.refresh()
def set_space():
used = 0
for album in self.__dst:
used += album.total_size
add = 0
remove = 0
for name, cmd in self.__sync_list.items():
if cmd == '+':
if self.__src.has_album(name):
add += self.__src.get_album(name).total_size
elif cmd == '-':
remove += self.__dst.get_album(name).total_size
avail = humanize(self.__dst_size)
final_usage = humanize(used + add - remove)
write = humanize(add)
usage = " Free: %s Usage: %s Write: %s" % (avail, final_usage, write)
space.set_value(usage)
def set_info():
line = t.current_line
name = items[line][1]
def trunc(s):
overflow = len(s) - (max_x - 6)
if overflow > 0:
s = '..%s' % s[overflow + 3:]
return s
try:
msg = ' Src: %s ' % trunc(self.__src.get_album(name)._AlbumInfo__base_path)
except KeyError:
msg = '???'
try:
info.set_value(msg)
except:
info.set_value('')
def state_toggle(line):
name = items[line][1]
if self.__dst.has_album(name):
if name in self.__sync_list:
del self.__sync_list[name]
self.__dst_size -= self.__dst.get_album(name).total_size
else:
self.__sync_list[name] = '-'
self.__dst_size += self.__dst.get_album(name).total_size
elif self.__src.has_album(name):
if name in self.__sync_list:
del self.__sync_list[name]
self.__dst_size += self.__src.get_album(name).total_size
else:
self.__sync_list[name] = '+'
self.__dst_size -= self.__src.get_album(name).total_size
else:
pass
set_space()
t.set_color(line, get_color(name))
def state_add(line):
name = items[line][1]
if self.__dst.has_album(name):
if name in self.__sync_list:
state_toggle(line)
elif self.__src.has_album(name):
if name not in self.__sync_list:
state_toggle(line)
def state_remove(line):
name = items[line][1]
if self.__dst.has_album(name):
if name not in self.__sync_list:
state_toggle(line)
elif self.__src.has_album(name):
if name in self.__sync_list:
state_toggle(line)
def getch():
char = w.getch()
# with open('log', 'at') as fh:
# fh.write('%s\n' % repr(char))
if char == curses.KEY_PPAGE: t.key_pgup()
elif char == curses.KEY_NPAGE: t.key_pgdn()
elif char == curses.KEY_LEFT: t.key_pgup()
elif char == curses.KEY_RIGHT: t.key_pgdn()
elif char == curses.KEY_UP: t.key_up()
elif char == curses.KEY_DOWN: t.key_dn()
elif char == ord(' '):
state_toggle(t.current_line)
elif char == ord('s'):
state_toggle(t.current_line)
t.key_dn()
elif char == ord('+'):
state_add(t.current_line)
t.key_dn()
elif char == ord('-'):
state_remove(t.current_line)
t.key_dn()
elif char == ord('W'):
return 'W'
else:
#raise Exception(repr(char))
return char
set_space()
set_info()
while True:
try:
ch = getch()
except KeyboardInterrupt:
return False
if ch == 27:
return False
if ch == 'W':
return True
if ch is not None:
match.putch(ch)
set_info()
finally:
if w:
w.close()
return False
class ToggleList(object):
__pad = None
    def __init__(self, tlbr, items, color=None):
        (t, l, b, r) = tlbr
        self.__tlbr = (t, l, b, r)
self.__items = items
page_size = b - t + 1
width = 0
for item in items:
            width = max(width, len(item[1]))
width += 8
width = max(width, r - l)
self.__pager = LinePager(len(items), page_size)
self.__pad = Pad((len(items), width), self.__tlbr, color)
for y, (color, string) in enumerate(self.__items):
self.__draw_line(y, string, color)
#self.__refresh()
self.__set_select()
def __refresh(self):
self.__pad.refresh((self.__pager.page_top, 0))
def __draw_line(self, y, string, color):
self.__pad.addstr((y, 1), "%d %s" % (y, string), color)
def key_up(self):
self.__clear_select()
self.__pager.move_cursor(-1)
self.__set_select()
def key_dn(self):
self.__clear_select()
self.__pager.move_cursor(1)
self.__set_select()
def key_pgup(self):
self.__clear_select()
self.__pager.move_page(-1)
self.__set_select()
def key_pgdn(self):
self.__clear_select()
self.__pager.move_page(1)
self.__set_select()
def set_color(self, y, color):
self.__draw_line(y, self.__items[y][1], color)
self.__refresh()
@property
def current_line(self):
return self.__pager.cursor_line
def __clear_select(self):
self.__pad.addstr((self.__pager.cursor_line, 0), ' ')
def __set_select(self):
self.__pad.addstr((self.__pager.cursor_line, 0), '*')
self.__refresh()
class TextBox(object):
    def __init__(self, pos, w, default_string=None, color=None, default_color=None):
        (t, l) = pos
self.__width = w
self.__default_string = default_string
self.__color = color
self.__default_color = default_color
assert self.__width > 0
assert t >= 0
assert l >= 0
self.__pad = Pad((1, self.__width), (t, l, t+1, l+self.__width), default_color)
self.__buffer = ''
self.__draw()
self.__refresh()
def __draw(self):
if not self.__buffer:
string = self.__default_string
color = self.__default_color
else:
string = self.__buffer
color = self.__color
string += ' ' * max(0, (self.__width - len(string) - 1))
self.__pad.addstr((0, 0), string, color)
def __refresh(self):
self.__pad.refresh((0, 0))
def putch(self, ch):
#raise Exception(type(ch))
if ch == 127: #curses.KEY_BACKSPACE:
self.__buffer = self.__buffer[:len(self.__buffer)-1]
elif ch >= ord('A') and ch <= ord('z') or ch == ord(' '):
if len(self.__buffer) == self.__width - 1:
return
self.__buffer += chr(ch)
else:
return
self.__draw()
self.__refresh()
def value(self):
return self.__buffer
def set_value(self, string):
self.__buffer = string
self.__draw()
self.__refresh()
class LinePager(object):
def __init__(self, size, page_size):
self.__size = size
self.__page_size = page_size
self.__page_top = 0
self.__cursor_line = 0
def move_cursor(self, distance):
self.__move_cursor_line(distance)
self.__page_track_cursor()
def move_page(self, pages):
distance = pages*self.__page_size
self.__move_page_top(distance)
self.__move_cursor_line(distance)
def __move_cursor_line(self, distance):
self.__cursor_line = min(max(0, self.__cursor_line + distance), self.__size-1)
def __move_page_top(self, distance):
self.__page_top = min(max(0, self.__page_top + distance), self.__size-self.__page_size)
def __page_track_cursor(self):
if self.__cursor_line < self.__page_top:
self.__move_page_top(-(self.__page_top - self.__cursor_line))
elif self.__cursor_line >= self.__page_top + self.__page_size:
self.__move_page_top(self.__cursor_line - self.__page_top - self.__page_size + 1)
@property
def page_top(self):
return self.__page_top
@property
def cursor_line(self):
return self.__cursor_line
class Window(object):
__stdscr = None
def __init__(self, color_map, pair_map):
self.__stdscr = curses.initscr()
self.__stdscr.keypad(1)
curses.noecho()
curses.cbreak()
curses.curs_set(0)
curses.start_color()
        for number, (r, g, b) in color_map.items():
            curses.init_color(number, r, g, b)
for number, (fg, bg) in pair_map.items():
curses.init_pair(number, fg, bg)
def getmaxyx(self):
return self.__stdscr.getmaxyx()
def close(self):
if not self.__stdscr:
return
self.__stdscr.keypad(0)
curses.nocbreak()
curses.echo()
curses.endwin()
self.__stdscr = None
def getch(self):
return self.__stdscr.getch()
    def addstr(self, pos, string, color):
        (y, x) = pos
meta = curses.color_pair(color)
self.__stdscr.addstr(y, x, string, meta)
def refresh(self):
self.__stdscr.refresh()
class Pad(object):
__tlbr = None
__pad = None
    def __init__(self, size, tlbr, color=None):
        (h, w) = size
        (t, l, b, r) = tlbr
self.__h, self.__w = (h, w)
self.__tlbr = (t, l, b, r)
self.__pad = curses.newpad(h, w)
#self.__w.close()
#print dir(self.__pad)
if color:
self.__pad.bkgd(curses.color_pair(color))
def close(self):
if self.__pad:
self.__pad.clear()
self.__pad.erase()
self.__pad = None
    def refresh(self, pos):
        (y, x) = pos
self.__pad.refresh(y, x, *self.__tlbr)
    def addstr(self, pos, string, color=None):
        (y, x) = pos
string = string.encode('ascii', 'ignore')
try:
if color:
meta = curses.color_pair(color)
self.__pad.addstr(y, x, string, meta)
else:
self.__pad.addstr(y, x, string)
except curses.error as e:
if y >= self.__h:
                raise curses.error('%s: %s' % (e, 'Y bounds exceeded'))
            if x + len(string) >= self.__w:
                raise curses.error('%s: %s' % (e, 'X bounds exceeded'))
|
|
"""
A simple JSON REST request abstraction layer that is used by the
``dropbox.client`` and ``dropbox.session`` modules. You shouldn't need to use this.
"""
import io
import socket
import ssl
import sys
import urllib
try:
import json
except ImportError:
import simplejson as json
try:
import urllib3
except ImportError:
raise ImportError('Dropbox python client requires urllib3.')
SDK_VERSION = "2.0.0"
try:
import pkg_resources
TRUSTED_CERT_FILE = pkg_resources.resource_filename(__name__, 'trusted-certs.crt')
except ImportError:
TRUSTED_CERT_FILE = 'dropbox/trusted-certs.crt'
class RESTResponse(io.IOBase):
"""
Responses to requests can come in the form of ``RESTResponse``. These are
thin wrappers around the socket file descriptor.
:meth:`read()` and :meth:`close()` are implemented.
It is important to call :meth:`close()` to return the connection
back to the connection pool to be reused. If a connection
is not closed by the caller it may leak memory. The object makes a
best-effort attempt upon destruction to call :meth:`close()`,
but it's still best to explicitly call :meth:`close()`.
"""
def __init__(self, resp):
# arg: A urllib3.HTTPResponse object
self.urllib3_response = resp
self.status = resp.status
self.version = resp.version
self.reason = resp.reason
self.strict = resp.strict
self.is_closed = False
def __del__(self):
# Attempt to close when ref-count goes to zero.
self.close()
def __exit__(self, typ, value, traceback):
# Allow this to be used in "with" blocks.
self.close()
# -----------------
# Important methods
# -----------------
def read(self, amt=None):
"""
Read data off the underlying socket.
Parameters
amt
Amount of data to read. Defaults to ``None``, indicating to read
everything.
Returns
Data off the socket. If ``amt`` is not ``None``, at most ``amt`` bytes are returned.
An empty string when the socket has no data.
Raises
``ValueError``
If the ``RESTResponse`` has already been closed.
"""
if self.is_closed:
raise ValueError('Response already closed')
return self.urllib3_response.read(amt)
BLOCKSIZE = 4 * 1024 * 1024 # 4MB at a time just because
def close(self):
"""Closes the underlying socket."""
# Double closing is harmless
if self.is_closed:
return
        # Read any remaining crap off the socket before releasing the
        # connection. Read it in chunks just in case it's huge.
while self.read(RESTResponse.BLOCKSIZE):
pass
# Mark as closed and release the connection (exactly once)
self.is_closed = True
self.urllib3_response.release_conn()
@property
def closed(self):
return self.is_closed
# ---------------------------------
# Backwards compat for HTTPResponse
# ---------------------------------
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
# Some compat functions showed up recently in urllib3
try:
urllib3.HTTPResponse.flush
urllib3.HTTPResponse.fileno
def fileno(self):
return self.urllib3_response.fileno()
def flush(self):
return self.urllib3_response.flush()
except AttributeError:
pass
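# --- Illustrative sketch (not part of the original SDK) ---
# The docstring above stresses giving the connection back to the pool via
# close(); a "with" block does that automatically, since io.IOBase supplies
# __enter__ and __exit__ above delegates to close(). A hedged example,
# assuming `resp` is a RESTResponse obtained with raw_response=True:
def _example_stream_response(resp, out_file):
    with resp:  # guarantees close() and therefore release_conn()
        chunk = resp.read(RESTResponse.BLOCKSIZE)
        while chunk:
            out_file.write(chunk)
            chunk = resp.read(RESTResponse.BLOCKSIZE)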
def create_connection(address):
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
sock.connect(sa)
return sock
except socket.error as e:
err = e
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
def json_loadb(data):
if sys.version_info >= (3,):
data = data.decode('utf8')
return json.loads(data)
class RESTClientObject(object):
def __init__(self, max_reusable_connections=8, mock_urlopen=None):
"""
Parameters
max_reusable_connections
max connections to keep alive in the pool
mock_urlopen
an optional alternate urlopen function for testing
This class uses ``urllib3`` to maintain a pool of connections. We attempt
to grab an existing idle connection from the pool, otherwise we spin
up a new connection. Once a connection is closed, it is reinserted
into the pool (unless the pool is full).
SSL settings:
- Certificates validated using Dropbox-approved trusted root certs
- TLS v1.0 (newer TLS versions are not supported by urllib3)
- Default ciphersuites. Choosing ciphersuites is not supported by urllib3
- Hostname verification is provided by urllib3
"""
self.mock_urlopen = mock_urlopen
self.pool_manager = urllib3.PoolManager(
num_pools=4, # only a handful of hosts. api.dropbox.com, api-content.dropbox.com
maxsize=max_reusable_connections,
block=False,
timeout=60.0, # long enough so datastores await doesn't get interrupted
            cert_reqs=ssl.CERT_REQUIRED,  # validate server certificates against ca_certs
            ca_certs=TRUSTED_CERT_FILE,
            ssl_version=ssl.PROTOCOL_TLSv1,
)
def request(self, method, url, post_params=None, body=None, headers=None, raw_response=False):
"""Performs a REST request. See :meth:`RESTClient.request()` for detailed description."""
post_params = post_params or {}
headers = headers or {}
headers['User-Agent'] = 'OfficialDropboxPythonSDK/' + SDK_VERSION
if post_params:
if body:
raise ValueError("body parameter cannot be used with post_params parameter")
body = urllib.urlencode(post_params)
headers["Content-type"] = "application/x-www-form-urlencoded"
# Handle StringIO instances, because urllib3 doesn't.
if hasattr(body, 'getvalue'):
body = str(body.getvalue())
headers["Content-Length"] = len(body)
# Reject any headers containing newlines; the error from the server isn't pretty.
for key, value in headers.items():
if isinstance(value, basestring) and '\n' in value:
raise ValueError("headers should not contain newlines (%s: %s)" %
(key, value))
try:
# Grab a connection from the pool to make the request.
            # We return it to the pool when the caller closes the response.
urlopen = self.mock_urlopen if self.mock_urlopen else self.pool_manager.urlopen
r = urlopen(
method=method,
url=url,
body=body,
headers=headers,
preload_content=False
)
r = RESTResponse(r) # wrap up the urllib3 response before proceeding
except socket.error as e:
raise RESTSocketError(url, e)
except urllib3.exceptions.SSLError as e:
raise RESTSocketError(url, "SSL certificate error: %s" % e)
if r.status != 200:
raise ErrorResponse(r, r.read())
return self.process_response(r, raw_response)
def process_response(self, r, raw_response):
if raw_response:
return r
else:
s = r.read()
try:
resp = json_loadb(s)
except ValueError:
raise ErrorResponse(r, s)
r.close()
return resp
def GET(self, url, headers=None, raw_response=False):
assert type(raw_response) == bool
return self.request("GET", url, headers=headers, raw_response=raw_response)
def POST(self, url, params=None, headers=None, raw_response=False):
assert type(raw_response) == bool
if params is None:
params = {}
return self.request("POST", url,
post_params=params, headers=headers, raw_response=raw_response)
def PUT(self, url, body, headers=None, raw_response=False):
assert type(raw_response) == bool
return self.request("PUT", url, body=body, headers=headers, raw_response=raw_response)
class RESTClient(object):
"""
A class with all static methods to perform JSON REST requests that is used internally
by the Dropbox Client API. It provides just enough gear to make requests
and get responses as JSON data (when applicable). All requests happen over SSL.
"""
IMPL = RESTClientObject()
@classmethod
def request(cls, *n, **kw):
"""Perform a REST request and parse the response.
Parameters
method
An HTTP method (e.g. ``'GET'`` or ``'POST'``).
url
The URL to make a request to.
post_params
A dictionary of parameters to put in the body of the request.
This option may not be used if the body parameter is given.
body
The body of the request. Typically, this value will be a string.
It may also be a file-like object. The body
parameter may not be used with the post_params parameter.
headers
A dictionary of headers to send with the request.
raw_response
Whether to return a :class:`RESTResponse` object. Default ``False``.
It's best enabled for requests that return large amounts of data that you
would want to ``.read()`` incrementally rather than loading into memory. Also
use this for calls where you need to read metadata like status or headers,
or if the body is not JSON.
Returns
The JSON-decoded data from the server, unless ``raw_response`` is
set, in which case a :class:`RESTResponse` object is returned instead.
Raises
:class:`ErrorResponse`
The returned HTTP status is not 200, or the body was
not parsed from JSON successfully.
:class:`RESTSocketError`
A ``socket.error`` was raised while contacting Dropbox.
"""
return cls.IMPL.request(*n, **kw)
@classmethod
def GET(cls, *n, **kw):
"""Perform a GET request using :meth:`RESTClient.request()`."""
return cls.IMPL.GET(*n, **kw)
@classmethod
def POST(cls, *n, **kw):
"""Perform a POST request using :meth:`RESTClient.request()`."""
return cls.IMPL.POST(*n, **kw)
@classmethod
def PUT(cls, *n, **kw):
"""Perform a PUT request using :meth:`RESTClient.request()`."""
return cls.IMPL.PUT(*n, **kw)
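# --- Illustrative note (not part of the original SDK) ---
# For tests, the class-level IMPL above can be swapped for an instance that
# uses the mock_urlopen hook documented on RESTClientObject; my_fake_urlopen
# is a hypothetical callable with the same signature as PoolManager.urlopen.
def _example_install_mock(my_fake_urlopen):
    RESTClient.IMPL = RESTClientObject(mock_urlopen=my_fake_urlopen)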
class RESTSocketError(socket.error):
"""A light wrapper for ``socket.error`` that adds some more information."""
def __init__(self, host, e):
msg = "Error connecting to \"%s\": %s" % (host, str(e))
socket.error.__init__(self, msg)
# Dummy class for docstrings, see doco.py.
class _ErrorResponse__doc__(Exception):
"""Exception raised when :class:`DropboxClient` exeriences a problem.
For example, this is raised when the server returns an unexpected
non-200 HTTP response.
"""
_status__doc__ = "HTTP response status (an int)."
_reason__doc__ = "HTTP response reason (a string)."
_body__doc__ = "HTTP response body (string or JSON dict)."
_headers__doc__ = "HTTP response headers (a list of (header, value) tuples)."
_error_msg__doc__ = "Error message for developer (optional)."
_user_error_msg__doc__ = "Error message for end user (optional)."
class ErrorResponse(Exception):
"""
Raised by :meth:`RESTClient.request()` for requests that:
- Return a non-200 HTTP response, or
- Have a non-JSON response body, or
- Have a malformed/missing header in the response.
Most errors that Dropbox returns will have an error field that is unpacked and
placed on the ErrorResponse exception. In some situations, a user_error field
will also come back. Messages under user_error are worth showing to an end-user
of your app, while other errors are likely only useful for you as the developer.
"""
def __init__(self, http_resp, body):
"""
Parameters
http_resp
The :class:`RESTResponse` which errored
body
Body of the :class:`RESTResponse`.
                The reason we can't simply call ``http_resp.read()`` to
                get the body is that ``read()`` is not idempotent: since
                it can't be called more than once, we have to pass the
                string body in separately.
"""
self.status = http_resp.status
self.reason = http_resp.reason
self.body = body
self.headers = http_resp.getheaders()
http_resp.close() # won't need this connection anymore
try:
self.body = json_loadb(self.body)
self.error_msg = self.body.get('error')
self.user_error_msg = self.body.get('user_error')
except ValueError:
self.error_msg = None
self.user_error_msg = None
def __str__(self):
if self.user_error_msg and self.user_error_msg != self.error_msg:
# one is translated and the other is English
msg = "%r (%r)" % (self.user_error_msg, self.error_msg)
elif self.error_msg:
msg = repr(self.error_msg)
elif not self.body:
msg = repr(self.reason)
else:
msg = "Error parsing response body or headers: " +\
"Body - %.100r Headers - %r" % (self.body, self.headers)
return "[%d] %s" % (self.status, msg)
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'CopyBuilding.room'
db.delete_column(u'build_copybuilding', 'room_id')
# Deleting field 'CopyBuilding.hallway'
db.delete_column(u'build_copybuilding', 'hallway_id')
# Deleting field 'CopyBuilding.wc'
db.delete_column(u'build_copybuilding', 'wc_id')
# Deleting field 'CopyBuilding.kitchen'
db.delete_column(u'build_copybuilding', 'kitchen_id')
# Deleting field 'Building.room'
db.delete_column(u'build_building', 'room_id')
# Deleting field 'Building.hallway'
db.delete_column(u'build_building', 'hallway_id')
# Deleting field 'Building.wc'
db.delete_column(u'build_building', 'wc_id')
# Deleting field 'Building.kitchen'
db.delete_column(u'build_building', 'kitchen_id')
# Deleting field 'Ground.room'
db.delete_column(u'build_ground', 'room_id')
# Deleting field 'Ground.hallway'
db.delete_column(u'build_ground', 'hallway_id')
# Deleting field 'Ground.wc'
db.delete_column(u'build_ground', 'wc_id')
# Deleting field 'Ground.kitchen'
db.delete_column(u'build_ground', 'kitchen_id')
def backwards(self, orm):
# Adding field 'CopyBuilding.room'
db.add_column(u'build_copybuilding', 'room',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Room'], null=True, blank=True),
keep_default=False)
# Adding field 'CopyBuilding.hallway'
db.add_column(u'build_copybuilding', 'hallway',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Hallway'], null=True, blank=True),
keep_default=False)
# Adding field 'CopyBuilding.wc'
db.add_column(u'build_copybuilding', 'wc',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.WC'], null=True, blank=True),
keep_default=False)
# Adding field 'CopyBuilding.kitchen'
db.add_column(u'build_copybuilding', 'kitchen',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Kitchen'], null=True, blank=True),
keep_default=False)
# Adding field 'Building.room'
db.add_column(u'build_building', 'room',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Room'], null=True, blank=True),
keep_default=False)
# Adding field 'Building.hallway'
db.add_column(u'build_building', 'hallway',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Hallway'], null=True, blank=True),
keep_default=False)
# Adding field 'Building.wc'
db.add_column(u'build_building', 'wc',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.WC'], null=True, blank=True),
keep_default=False)
# Adding field 'Building.kitchen'
db.add_column(u'build_building', 'kitchen',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Kitchen'], null=True, blank=True),
keep_default=False)
# Adding field 'Ground.room'
db.add_column(u'build_ground', 'room',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Room'], null=True, blank=True),
keep_default=False)
# Adding field 'Ground.hallway'
db.add_column(u'build_ground', 'hallway',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Hallway'], null=True, blank=True),
keep_default=False)
# Adding field 'Ground.wc'
db.add_column(u'build_ground', 'wc',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.WC'], null=True, blank=True),
keep_default=False)
# Adding field 'Ground.kitchen'
db.add_column(u'build_ground', 'kitchen',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Kitchen'], null=True, blank=True),
keep_default=False)
models = {
'build.building': {
'Meta': {'object_name': 'Building'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'build_state': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'build_year': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'cad_num': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'True'}),
'cad_passport': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cad_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flat_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'mo_fond_doc_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mo_fond_doc_num': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ownership_doc_num': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ownership_year': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planing_floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'build.contract': {
'Meta': {'object_name': 'Contract'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'creation_form': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'blank': 'True'}),
'docs': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.ContractDocuments']", 'null': 'True', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'has_trouble_docs': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'num': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'period_of_payment': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'summ_mo_money': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summ_without_mo_money': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summa': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summa_fed': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summa_reg': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'build.contractdocuments': {
'Meta': {'object_name': 'ContractDocuments'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mun_contracts': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'build.copybuilding': {
'Meta': {'object_name': 'CopyBuilding'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'build_state': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'build_year': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'cad_num': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'cad_passport': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cad_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flat_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']", 'null': 'True', 'blank': 'True'}),
'mo_fond_doc_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mo_fond_doc_num': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ownership_doc_num': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ownership_year': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planing_floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'build.copycontract': {
'Meta': {'object_name': 'CopyContract'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'creation_form': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'blank': 'True'}),
'docs': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.ContractDocuments']", 'null': 'True', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'has_trouble_docs': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'num': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'period_of_payment': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'summ_mo_money': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summ_without_mo_money': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summa': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summa_fed': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summa_reg': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'build.ground': {
'Meta': {'object_name': 'Ground'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'build_state': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'build_year': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'cad_num': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'True'}),
'cad_passport': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cad_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'mo_fond_doc_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mo_fond_doc_num': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ownership_doc_num': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ownership_year': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planing_floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.developer': {
'Meta': {'object_name': 'Developer'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'boss_position': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'face_list': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'})
},
'mo.mo': {
'Meta': {'object_name': 'MO'},
'common_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_economy': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_fed_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_percentage': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_reg_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_spent': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'creation_form': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'has_trouble': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'home_fed_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'home_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'home_reg_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048'}),
'planing_home_orphans': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['build']
|
|
import sys
import numpy as np
from PyQt4.QtGui import QIntValidator, QDoubleValidator, QApplication, QSizePolicy
from PyMca5.PyMcaIO import specfilewrapper as specfile
from orangewidget import gui
from orangewidget.settings import Setting
from oasys.widgets import widget
try:
from orangecontrib.xoppy.util.xoppy_calc import xoppy_doc
except ImportError:
print("Error importing: xoppy_doc")
raise
try:
    from orangecontrib.xoppy.util.xoppy_calc import xoppy_calc_xwiggler
except ImportError:
    print("Error importing: xoppy_calc_xwiggler")
    raise
class OWxwiggler(widget.OWWidget):
name = "xwiggler"
id = "orange.widgets.dataxwiggler"
description = "xoppy application to compute..."
icon = "icons/xoppy_xwiggler.png"
author = "create_widget.py"
maintainer_email = "[email protected]"
priority = 10
category = ""
keywords = ["xoppy", "xwiggler"]
outputs = [{"name": "xoppy_data",
"type": np.ndarray,
"doc": ""},
{"name": "xoppy_specfile",
"type": str,
"doc": ""}]
#inputs = [{"name": "Name",
# "type": type,
# "handler": None,
# "doc": ""}]
want_main_area = False
FIELD = Setting(0)
NPERIODS = Setting(12)
ULAMBDA = Setting(0.125)
K = Setting(14.0)
ENERGY = Setting(6.04)
PHOT_ENERGY_MIN = Setting(100.0)
PHOT_ENERGY_MAX = Setting(100100.0)
NPOINTS = Setting(100)
LOGPLOT = Setting(1)
NTRAJPOINTS = Setting(101)
CURRENT = Setting(200.0)
FILE = Setting("?")
def __init__(self):
super().__init__()
box0 = gui.widgetBox(self.controlArea, " ",orientation="horizontal")
#widget buttons: compute, set defaults, help
gui.button(box0, self, "Compute", callback=self.compute)
gui.button(box0, self, "Defaults", callback=self.defaults)
gui.button(box0, self, "Help", callback=self.help1)
self.process_showers()
box = gui.widgetBox(self.controlArea, " ",orientation="vertical")
idx = -1
#widget index 0
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "FIELD",
label=self.unitLabels()[idx], addSpace=True,
items=['Sinusoidal', 'B from file', 'B from harmonics'],
valueType=int, orientation="horizontal")
self.show_at(self.unitFlags()[idx], box1)
#widget index 1
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "NPERIODS",
label=self.unitLabels()[idx], addSpace=True,
valueType=int, validator=QIntValidator())
self.show_at(self.unitFlags()[idx], box1)
#widget index 2
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "ULAMBDA",
label=self.unitLabels()[idx], addSpace=True,
valueType=float, validator=QDoubleValidator())
self.show_at(self.unitFlags()[idx], box1)
#widget index 3
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "K",
label=self.unitLabels()[idx], addSpace=True,
valueType=float, validator=QDoubleValidator())
self.show_at(self.unitFlags()[idx], box1)
#widget index 4
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "ENERGY",
label=self.unitLabels()[idx], addSpace=True,
valueType=float, validator=QDoubleValidator())
self.show_at(self.unitFlags()[idx], box1)
#widget index 5
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "PHOT_ENERGY_MIN",
label=self.unitLabels()[idx], addSpace=True,
valueType=float, validator=QDoubleValidator())
self.show_at(self.unitFlags()[idx], box1)
#widget index 6
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "PHOT_ENERGY_MAX",
label=self.unitLabels()[idx], addSpace=True,
valueType=float, validator=QDoubleValidator())
self.show_at(self.unitFlags()[idx], box1)
#widget index 7
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "NPOINTS",
label=self.unitLabels()[idx], addSpace=True,
valueType=int, validator=QIntValidator())
self.show_at(self.unitFlags()[idx], box1)
#widget index 8
idx += 1
box1 = gui.widgetBox(box)
gui.comboBox(box1, self, "LOGPLOT",
label=self.unitLabels()[idx], addSpace=True,
items=['Lin', 'Log'],
valueType=int, orientation="horizontal")
self.show_at(self.unitFlags()[idx], box1)
#widget index 9
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "NTRAJPOINTS",
label=self.unitLabels()[idx], addSpace=True,
valueType=int, validator=QIntValidator())
self.show_at(self.unitFlags()[idx], box1)
#widget index 10
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "CURRENT",
label=self.unitLabels()[idx], addSpace=True,
valueType=float, validator=QDoubleValidator())
self.show_at(self.unitFlags()[idx], box1)
#widget index 11
idx += 1
box1 = gui.widgetBox(box)
gui.lineEdit(box1, self, "FILE",
label=self.unitLabels()[idx], addSpace=True)
self.show_at(self.unitFlags()[idx], box1)
gui.rubber(self.controlArea)
def unitLabels(self):
return ['Magnetic field: ','Number of periods','Wiggler period [m]','K value','Beam energy [GeV]','Min Photon Energy [eV]','Max Photon Energy [eV]','Number of energy points','Energy points spacing','Number of traj points per period','Electron Beam Current [mA]','File with Magnetic Field']
def unitFlags(self):
return ['True','True','self.FIELD != 1','self.FIELD == 0','True','True','True','True','True','self.FIELD != 1','True','self.FIELD != 0']
#def unitNames(self):
# return ['FIELD','NPERIODS','ULAMBDA','K','ENERGY','PHOT_ENERGY_MIN','PHOT_ENERGY_MAX','NPOINTS','LOGPLOT','NTRAJPOINTS','CURRENT','FILE']
def compute(self):
        fileName = xoppy_calc_xwiggler(
            FIELD=self.FIELD, NPERIODS=self.NPERIODS, ULAMBDA=self.ULAMBDA,
            K=self.K, ENERGY=self.ENERGY,
            PHOT_ENERGY_MIN=self.PHOT_ENERGY_MIN, PHOT_ENERGY_MAX=self.PHOT_ENERGY_MAX,
            NPOINTS=self.NPOINTS, LOGPLOT=self.LOGPLOT, NTRAJPOINTS=self.NTRAJPOINTS,
            CURRENT=self.CURRENT, FILE=self.FILE)
#send specfile
        if fileName is None:
print("Nothing to send")
else:
self.send("xoppy_specfile",fileName)
sf = specfile.Specfile(fileName)
if sf.scanno() == 1:
#load spec file with one scan, # is comment
print("Loading file: ",fileName)
out = np.loadtxt(fileName)
print("data shape: ",out.shape)
#get labels
                with open(fileName) as specf:
                    txt = specf.readlines()
                tmp = [line.find("#L") for line in txt]
                itmp = np.where(np.array(tmp) != (-1))
                labels = txt[int(itmp[0][0])].replace("#L ","").split(" ")
print("data labels: ",labels)
self.send("xoppy_data",out)
else:
print("File %s contains %d scans. Cannot send it as xoppy_table"%(fileName,sf.scanno()))
def defaults(self):
self.resetSettings()
self.compute()
return
def help1(self):
print("help pressed.")
xoppy_doc('xwiggler')
if __name__ == "__main__":
app = QApplication(sys.argv)
w = OWxwiggler()
w.show()
    app.exec_()
w.saveSettings()
|
|
#!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
DEFAULT_VERSION = "0.6c11"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
md5_data = {
'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
if egg_name in md5_data:
digest = md5(data).hexdigest()
if digest != md5_data[egg_name]:
print >>sys.stderr, (
"md5 validation of %s failed! (Possible download problem?)"
% egg_name
)
sys.exit(2)
return data
def use_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
download_delay=15
):
"""Automatically find/download setuptools and make it available on sys.path
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end with
a '/'). `to_dir` is the directory where setuptools will be downloaded, if
it is not already available. If `download_delay` is specified, it should
be the number of seconds that will be paused before initiating a download,
should one be required. If an older version of setuptools is installed,
this routine will print a message to ``sys.stderr`` and raise SystemExit in
an attempt to abort the calling script.
"""
was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
def do_download():
egg = download_setuptools(version, download_base, to_dir, download_delay)
sys.path.insert(0, egg)
import setuptools; setuptools.bootstrap_install_from = egg
try:
import pkg_resources
except ImportError:
return do_download()
try:
pkg_resources.require("setuptools>="+version); return
except pkg_resources.VersionConflict, e:
if was_imported:
print >>sys.stderr, (
"The required version of setuptools (>=%s) is not available, and\n"
"can't be installed while this script is running. Please install\n"
" a more recent version first, using 'easy_install -U setuptools'."
"\n\n(Currently using %r)"
) % (version, e.args[0])
sys.exit(2)
except pkg_resources.DistributionNotFound:
pass
del pkg_resources, sys.modules['pkg_resources'] # reload ok
return do_download()
def download_setuptools(
version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
delay = 15
):
"""Download setuptools from a specified location and return its filename
`version` should be a valid setuptools version number that is available
as an egg for download under the `download_base` URL (which should end
with a '/'). `to_dir` is the directory where the egg will be downloaded.
`delay` is the number of seconds to pause before an actual download attempt.
"""
import urllib2, shutil
egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
url = download_base + egg_name
saveto = os.path.join(to_dir, egg_name)
src = dst = None
if not os.path.exists(saveto): # Avoid repeated downloads
try:
from distutils import log
if delay:
log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
version, download_base, delay, url
); from time import sleep; sleep(delay)
log.warn("Downloading %s", url)
src = urllib2.urlopen(url)
# Read/write all in one block, so we don't create a corrupt file
# if the download is interrupted.
data = _validate_md5(egg_name, src.read())
dst = open(saveto,"wb"); dst.write(data)
finally:
if src: src.close()
if dst: dst.close()
return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
"""Install or upgrade setuptools and EasyInstall"""
try:
import setuptools
except ImportError:
egg = None
try:
egg = download_setuptools(version, delay=0)
sys.path.insert(0,egg)
from setuptools.command.easy_install import main
return main(list(argv)+[egg]) # we're done here
finally:
if egg and os.path.exists(egg):
os.unlink(egg)
else:
if setuptools.__version__ == '0.0.1':
print >>sys.stderr, (
"You have an obsolete version of setuptools installed. Please\n"
"remove it from your system entirely before rerunning this script."
)
sys.exit(2)
req = "setuptools>="+version
import pkg_resources
try:
pkg_resources.require(req)
except pkg_resources.VersionConflict:
try:
from setuptools.command.easy_install import main
except ImportError:
from easy_install import main
main(list(argv)+[download_setuptools(delay=0)])
sys.exit(0) # try to force an exit
else:
if argv:
from setuptools.command.easy_install import main
main(argv)
else:
print "Setuptools version",version,"or greater has been installed."
print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
"""Update our built-in md5 registry"""
import re
for name in filenames:
base = os.path.basename(name)
f = open(name,'rb')
md5_data[base] = md5(f.read()).hexdigest()
f.close()
data = [" %r: %r,\n" % it for it in md5_data.items()]
data.sort()
repl = "".join(data)
import inspect
srcfile = inspect.getsourcefile(sys.modules[__name__])
f = open(srcfile, 'rb'); src = f.read(); f.close()
match = re.search("\nmd5_data = {\n([^}]+)}", src)
if not match:
print >>sys.stderr, "Internal error!"
sys.exit(2)
src = src[:match.start(1)] + repl + src[match.end(1):]
f = open(srcfile,'w')
f.write(src)
f.close()
if __name__=='__main__':
if len(sys.argv)>2 and sys.argv[1]=='--md5update':
update_md5(sys.argv[2:])
else:
main(sys.argv[1:])
|
|
# encoding: utf-8
"""
attribute/__init__.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""
import collections
from struct import unpack
from exabgp.util.od import od
from exabgp.configuration.environment import environment
from exabgp.bgp.message.update.attribute.attribute import Attribute
from exabgp.bgp.message.update.attribute.origin import Origin
from exabgp.bgp.message.update.attribute.aspath import ASPath
from exabgp.bgp.message.update.attribute.localpref import LocalPreference
from exabgp.bgp.message.update.attribute.generic import GenericAttribute
from exabgp.bgp.message.notification import Notify
from exabgp.logger import Logger
from exabgp.logger import LazyFormat
class _NOTHING (object):
def pack (self,negotiated=None):
return ''
NOTHING = _NOTHING()
# ============================================================== MultiAttributes
#
# 0 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
# | Attr. Flags |Attr. Type Code|
# +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
class MultiAttributes (list):
def __init__ (self,attribute):
list.__init__(self)
self.ID = attribute.ID
self.FLAG = attribute.FLAG
self.MULTIPLE = True
self.append(attribute)
def pack (self,negotiated=None):
r = []
for attribute in self:
r.append(attribute.pack())
return ''.join(r)
def __len__ (self):
return len(self.pack())
def __str__ (self):
return '%s' % ' '.join(str(_) for _ in self)
# =================================================================== Attributes
#
class Attributes (dict):
# A cache of parsed attributes
cache = {}
# The previously parsed Attributes
cached = None
	# the raw data previously parsed, from which 'cached' was built
previous = ''
representation = {
# key: (how, default, name, presentation),
Attribute.ID.ORIGIN : ('string', '', 'origin', '%s'),
Attribute.ID.AS_PATH : ('multiple','', ('as-path','as-set'), '%s'),
Attribute.ID.NEXT_HOP : ('string', '', 'next-hop', '%s'),
Attribute.ID.MED : ('integer', '', 'med', '%s'),
Attribute.ID.LOCAL_PREF : ('integer', '', 'local-preference', '%s'),
Attribute.ID.ATOMIC_AGGREGATE : ('boolean', '', 'atomic-aggregate', '%s'),
Attribute.ID.AGGREGATOR : ('string', '', 'aggregator', '( %s )'),
Attribute.ID.AS4_AGGREGATOR : ('string', '', 'aggregator', '( %s )'),
Attribute.ID.COMMUNITY : ('list', '', 'community', '%s'),
Attribute.ID.ORIGINATOR_ID : ('inet', '', 'originator-id', '%s'),
Attribute.ID.CLUSTER_LIST : ('list', '', 'cluster-list', '%s'),
Attribute.ID.EXTENDED_COMMUNITY : ('list', '', 'extended-community', '%s'),
Attribute.ID.PMSI_TUNNEL : ('string', '', 'pmsi', '%s'),
Attribute.ID.AIGP : ('integer', '', 'aigp', '%s'),
}
def __init__ (self):
# cached representation of the object
self._str = ''
self._idx = ''
self._json = ''
# The parsed attributes have no mp routes and/or those are last
self.cacheable = True
# XXX: FIXME: surely not the best place for this
Attribute.caching = environment.settings().cache.attributes
def has (self,k):
return k in self
def add (self,attribute,data=None):
		# the unpack code returns None for attributes it must not generate; ignore those
if attribute is None:
return
self._str = ''
self._json = ''
if attribute.MULTIPLE:
if self.has(attribute.ID):
self[attribute.ID].append(attribute)
else:
self[attribute.ID] = MultiAttributes(attribute)
else:
if attribute.ID in self:
raise Notify(3,0,'multiple attribute for %s' % str(Attribute.ID(attribute.ID)))
self[attribute.ID] = attribute
def remove (self,attrid):
self.pop(attrid)
def watchdog (self):
if Attribute.ID.INTERNAL_WATCHDOG in self:
return self.pop(Attribute.ID.INTERNAL_WATCHDOG)
return None
def withdraw (self):
if Attribute.ID.INTERNAL_WITHDRAW in self:
self.pop(Attribute.ID.INTERNAL_WITHDRAW)
return True
return False
def pack (self,negotiated,with_default=True):
local_asn = negotiated.local_as
peer_asn = negotiated.peer_as
message = ''
default = {
Attribute.ID.ORIGIN: lambda l,r: Origin(Origin.IGP),
Attribute.ID.AS_PATH: lambda l,r: ASPath([],[]) if l == r else ASPath([local_asn,],[]),
Attribute.ID.LOCAL_PREF: lambda l,r: LocalPreference(100) if l == r else NOTHING,
}
check = {
Attribute.ID.NEXT_HOP: lambda l,r,nh: nh.ipv4() == True,
Attribute.ID.LOCAL_PREF: lambda l,r,nh: l == r,
}
if with_default:
keys = set(self.keys() + default.keys())
else:
keys = set(self.keys())
for code in sorted(keys):
if code in (Attribute.ID.INTERNAL_SPLIT, Attribute.ID.INTERNAL_WATCHDOG, Attribute.ID.INTERNAL_WITHDRAW):
continue
if code in self:
if code in check:
if check[code](local_asn,peer_asn,self[code]):
message += self[code].pack(negotiated)
continue
else:
message += self[code].pack(negotiated)
continue
else:
if code in default:
message += default[code](local_asn,peer_asn).pack(negotiated)
return message
def json (self):
if not self._json:
def generate (self):
for code in sorted(self.keys() + [Attribute.ID.ATOMIC_AGGREGATE,]):
					# remove the next-hop from the attribute as it is defined with the NLRI
if code in (Attribute.ID.NEXT_HOP, Attribute.ID.INTERNAL_SPLIT, Attribute.ID.INTERNAL_WATCHDOG, Attribute.ID.INTERNAL_WITHDRAW):
continue
if code in self.representation:
how, default, name, presentation = self.representation[code]
if how == 'boolean':
yield '"%s": %s' % (name, 'true' if self.has(code) else 'false')
elif how == 'string':
yield '"%s": "%s"' % (name, presentation % str(self[code]))
elif how == 'list':
yield '"%s": %s' % (name, presentation % self[code].json())
elif how == 'multiple':
for n in name:
value = self[code].json(n)
if value:
yield '"%s": %s' % (n, presentation % value)
elif how == 'inet':
yield '"%s": "%s"' % (name, presentation % str(self[code]))
						# Should never be run
else:
yield '"%s": %s' % (name, presentation % str(self[code]))
else:
yield '"attribute-0x%02X-0x%02X": "%s"' % (code,self[code].FLAG,str(self[code]))
self._json = ', '.join(generate(self))
return self._json
def __str__ (self):
if not self._str:
def generate (self):
for code in sorted(self.keys()):
# XXX: FIXME: really we should have a INTERNAL attribute in the classes
if code in (Attribute.ID.INTERNAL_SPLIT, Attribute.ID.INTERNAL_WATCHDOG, Attribute.ID.INTERNAL_WITHDRAW, Attribute.ID.NEXT_HOP):
continue
if code in self.representation:
how, default, name, presentation = self.representation[code]
if how == 'boolean':
yield ' %s' % name
elif how == 'multiple':
yield ' %s %s' % (name[0], presentation % str(self[code]))
else:
yield ' %s %s' % (name, presentation % str(self[code]))
else:
yield ' attribute [ 0x%02X 0x%02X %s ]' % (code,self[code].FLAG,str(self[code]))
# XXX: FIXME: remove this ' ' + ? should it be done by the caller ?
self._str = ''.join(generate(self))
return self._str
def index (self):
# XXX: FIXME: something a little bit smaller memory wise ?
if not self._idx:
self._idx = '%s next-hop %s' % (str(self), str(self[Attribute.ID.NEXT_HOP])) if Attribute.ID.NEXT_HOP in self else str(self)
return self._idx
@classmethod
def unpack (cls,data,negotiated):
try:
if cls.cached:
if data == cls.previous:
return Attributes.cached
elif cls.previous and data.startswith(cls.previous):
attributes = Attributes()
for key in Attributes.cached:
attributes[key] = Attributes.cached[key]
attributes.parse(data[len(cls.previous):],negotiated)
else:
attributes = cls().parse(data,negotiated)
else:
attributes = cls().parse(data,negotiated)
if Attribute.ID.AS_PATH in attributes and Attribute.ID.AS4_PATH in attributes:
attributes.merge_attributes()
if Attribute.ID.MP_REACH_NLRI not in attributes and Attribute.ID.MP_UNREACH_NLRI not in attributes:
cls.previous = data
cls.cached = attributes
else:
cls.previous = ''
				cls.cached = None
return attributes
except IndexError:
raise Notify(3,2,data)
@staticmethod
def flag_attribute_content (data):
flag = Attribute.Flag(ord(data[0]))
attr = Attribute.ID(ord(data[1]))
if flag & Attribute.Flag.EXTENDED_LENGTH:
length = unpack('!H',data[2:4])[0]
return flag, attr, data[4:length+4]
else:
length = ord(data[2])
return flag, attr , data[3:length+3]
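	# Illustrative sketch (not part of the original module): for a raw ORIGIN
	# attribute '\x40\x01\x01\x00' the header decodes as flag 0x40 (transitive),
	# type 1 (ORIGIN) and length 1, so
	# flag_attribute_content('\x40\x01\x01\x00') returns (0x40, 1, '\x00').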
def parse (self,data,negotiated):
if not data:
return self
		# We do not care whether the attributes are transitive or not as we do not redistribute
flag = Attribute.Flag(ord(data[0]))
aid = Attribute.ID(ord(data[1]))
if flag & Attribute.Flag.EXTENDED_LENGTH:
length = unpack('!H',data[2:4])[0]
offset = 4
else:
length = ord(data[2])
offset = 3
data = data[offset:]
next = data[length:]
attribute = data[:length]
logger = Logger()
logger.parser(LazyFormat("parsing flag %x type %02x (%s) len %02x %s" % (flag,int(aid),aid,length,'payload ' if length else ''),od,data[:length]))
		# remove the PARTIAL bit before comparison if the attribute is optional
		if aid in Attribute.attributes_optional:
			flag &= ~Attribute.Flag.PARTIAL & 0xFF
# handle the attribute if we know it
if Attribute.registered(aid,flag):
self.add(Attribute.unpack(aid,flag,attribute,negotiated))
return self.parse(next,negotiated)
# XXX: FIXME: we could use a fallback function here like capability
# if we know the attribute but the flag is not what the RFC says.
if aid in Attribute.attributes_known:
logger.parser('invalid flag for attribute %s (aid 0x%02X, flag 0x%02X)' % (Attribute.ID.names.get(aid,'unset'),aid,flag))
return self.parse(next,negotiated)
# it is an unknown transitive attribute we need to pass on
if flag & Attribute.Flag.TRANSITIVE:
logger.parser('unknown transitive attribute (aid 0x%02X, flag 0x%02X)' % (aid,flag))
self.add(GenericAttribute(aid,flag|Attribute.Flag.PARTIAL,attribute),attribute)
return self.parse(next,negotiated)
# it is an unknown non-transitive attribute we can ignore.
logger.parser('ignoring unknown non-transitive attribute (aid 0x%02X, flag 0x%02X)' % (aid,flag))
return self.parse(next,negotiated)
def merge_attributes (self):
as2path = self[Attribute.ID.AS_PATH]
as4path = self[Attribute.ID.AS4_PATH]
self.remove(Attribute.ID.AS_PATH)
self.remove(Attribute.ID.AS4_PATH)
		# this key is unique: each index is a two-byte header plus ASNs of 2 or 4 bytes,
		# so joining the two with ':' gives an unambiguous cache key
key = "%s:%s" % (as2path.index, as4path.index)
# found a cache copy
cached = Attribute.cache.get(Attribute.ID.AS_PATH,{}).get(key,None)
if cached:
self.add(cached,key)
return
# as_seq = []
# as_set = []
len2 = len(as2path.as_seq)
len4 = len(as4path.as_seq)
# RFC 4893 section 4.2.3
if len2 < len4:
as_seq = as2path.as_seq
else:
as_seq = as2path.as_seq[:-len4]
as_seq.extend(as4path.as_seq)
len2 = len(as2path.as_set)
len4 = len(as4path.as_set)
if len2 < len4:
as_set = as4path.as_set
else:
as_set = as2path.as_set[:-len4]
as_set.extend(as4path.as_set)
aspath = ASPath(as_seq,as_set)
self.add(aspath,key)
def __hash__(self):
# XXX: FIXME: not excellent... :-(
return hash(repr(self))
# Orange BAGPIPE code ..
# test that sets of attributes exactly match
# can't rely on __eq__ for this, because __eq__ relies on Attribute.__eq__ which does not look at attributes values
def sameValuesAs(self,other):
# we sort based on string representation since the items do not
# necessarily implement __cmp__
sorter = lambda x,y: cmp(repr(x), repr(y))
try:
for key in set(self.iterkeys()).union(set(other.iterkeys())):
if key == Attribute.ID.MP_REACH_NLRI:
continue
sval = self[key]
oval = other[key]
# In the case where the attribute is, for instance, a list
# we want to compare values independently of the order
if isinstance(sval, collections.Iterable):
if not isinstance(oval, collections.Iterable):
return False
					sval = sorted(sval,sorter)
					oval = sorted(oval,sorter)
if sval != oval:
return False
return True
except KeyError:
return False
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''Various generic helper methods'''
import xbmcgui
import xbmc
import xbmcvfs
import xbmcaddon
import sys
from traceback import format_exc
import requests
import arrow
from requests.packages.urllib3.util.retry import Retry
from requests.adapters import HTTPAdapter
import urllib
import unicodedata
import os
import datetime
import time
try:
import simplejson as json
except Exception:
import json
try:
from multiprocessing.pool import ThreadPool
SUPPORTS_POOL = True
except Exception:
SUPPORTS_POOL = False
ADDON_ID = "script.module.metadatautils"
KODI_LANGUAGE = xbmc.getLanguage(xbmc.ISO_639_1)
if not KODI_LANGUAGE:
KODI_LANGUAGE = "en"
KODI_VERSION = int(xbmc.getInfoLabel("System.BuildVersion").split(".")[0])
# setup requests with some additional options
requests.packages.urllib3.disable_warnings()
SESSION = requests.Session()
RETRIES = Retry(total=5, backoff_factor=5, status_forcelist=[500, 502, 503, 504])
SESSION.mount('http://', HTTPAdapter(max_retries=RETRIES))
SESSION.mount('https://', HTTPAdapter(max_retries=RETRIES))
FORCE_DEBUG_LOG = False
LIMIT_EXTRAFANART = 0
try:
ADDON = xbmcaddon.Addon(ADDON_ID)
FORCE_DEBUG_LOG = ADDON.getSetting('debug_log') == 'true'
LIMIT_EXTRAFANART = int(ADDON.getSetting('max_extrafanarts'))
del ADDON
except Exception:
pass
def log_msg(msg, loglevel=xbmc.LOGDEBUG):
'''log message to kodi logfile'''
if isinstance(msg, unicode):
msg = msg.encode('utf-8')
if loglevel == xbmc.LOGDEBUG and FORCE_DEBUG_LOG:
loglevel = xbmc.LOGNOTICE
xbmc.log("%s --> %s" % (ADDON_ID, msg), level=loglevel)
def log_exception(modulename, exceptiondetails):
'''helper to properly log an exception'''
log_msg(format_exc(sys.exc_info()), xbmc.LOGWARNING)
log_msg("ERROR in %s ! --> %s" % (modulename, exceptiondetails), xbmc.LOGERROR)
def rate_limiter(rl_params):
''' A very basic rate limiter which limits to 1 request per X seconds to the api'''
# Please respect the parties providing these free api's to us and do not modify this code.
# If I suspect any abuse I will revoke all api keys and require all users
# to have a personal api key for all services.
# Thank you
if not rl_params:
return
monitor = xbmc.Monitor()
win = xbmcgui.Window(10000)
rl_name = rl_params[0]
rl_delay = rl_params[1]
cur_timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
prev_timestamp = try_parse_int(win.getProperty("ratelimiter.%s" % rl_name))
if (prev_timestamp + rl_delay) > cur_timestamp:
sec_to_wait = (prev_timestamp + rl_delay) - cur_timestamp
log_msg(
"Rate limiter active for %s - delaying request with %s seconds - "
"Configure a personal API key in the settings to get rid of this message and the delay." %
(rl_name, sec_to_wait), xbmc.LOGNOTICE)
while sec_to_wait and not monitor.abortRequested():
monitor.waitForAbort(1)
# keep setting the timestamp to create some sort of queue
cur_timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
win.setProperty("ratelimiter.%s" % rl_name, "%s" % cur_timestamp)
sec_to_wait -= 1
# always set the timestamp
cur_timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
win.setProperty("ratelimiter.%s" % rl_name, "%s" % cur_timestamp)
del monitor
del win
def get_json(url, params=None, retries=0, ratelimit=None):
'''get info from a rest api'''
result = {}
if not params:
params = {}
# apply rate limiting if needed
rate_limiter(ratelimit)
try:
response = requests.get(url, params=params, timeout=20)
if response and response.content and response.status_code == 200:
result = json.loads(response.content.decode('utf-8', 'replace'))
if "results" in result:
result = result["results"]
elif "result" in result:
result = result["result"]
elif response.status_code in (429, 503, 504):
raise Exception('Read timed out')
except Exception as exc:
result = None
if "Read timed out" in str(exc) and retries < 5 and not ratelimit:
# retry on connection error or http server limiting
monitor = xbmc.Monitor()
if not monitor.waitForAbort(2):
result = get_json(url, params, retries + 1)
del monitor
else:
log_exception(__name__, exc)
# return result
return result
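# Example call (hypothetical endpoint): get_json("https://api.example.com/search",
# params={"query": "foo"}) returns the parsed "results"/"result" payload on
# success, or None when the request ultimately fails.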
def try_encode(text, encoding="utf-8"):
'''helper to encode a string to utf-8'''
try:
return text.encode(encoding, "ignore")
except Exception:
return text
def try_decode(text, encoding="utf-8"):
'''helper to decode a string to unicode'''
try:
return text.decode(encoding, "ignore")
except Exception:
return text
def urlencode(text):
    '''helper to properly urlencode a string'''
    # urlencode the value under a throwaway key, then strip the "dummy=" prefix again
    encoded = urllib.urlencode({'dummy': try_encode(text)})
    return encoded[6:]
def formatted_number(number):
'''try to format a number to formatted string with thousands'''
try:
number = int(number)
if number < 0:
return '-' + formatted_number(-number)
result = ''
while number >= 1000:
number, number2 = divmod(number, 1000)
result = ",%03d%s" % (number2, result)
return "%d%s" % (number, result)
except Exception:
return ""
def process_method_on_list(method_to_run, items):
'''helper method that processes a method on each listitem with pooling if the system supports it'''
all_items = []
if SUPPORTS_POOL:
pool = ThreadPool()
try:
all_items = pool.map(method_to_run, items)
except Exception:
# catch exception to prevent threadpool running forever
log_msg(format_exc(sys.exc_info()))
log_msg("Error in %s" % method_to_run)
pool.close()
pool.join()
else:
all_items = [method_to_run(item) for item in items]
all_items = filter(None, all_items)
return all_items
def get_clean_image(image):
'''helper to strip all kodi tags/formatting of an image path/url'''
if not image:
return ""
if "music@" in image:
# fix for embedded images
thumbcache = xbmc.getCacheThumbName(image).replace(".tbn", ".jpg")
thumbcache = "special://thumbnails/%s/%s" % (thumbcache[0], thumbcache)
if not xbmcvfs.exists(thumbcache):
xbmcvfs.copy(image, thumbcache)
image = thumbcache
if image and "image://" in image:
image = image.replace("image://", "")
image = urllib.unquote(image.encode("utf-8"))
if image.endswith("/"):
image = image[:-1]
if not isinstance(image, unicode):
image = image.decode("utf8")
return image
def get_duration(duration):
'''transform duration time in minutes to hours:minutes'''
if not duration:
return {}
if isinstance(duration, (unicode, str)):
        duration = duration.replace("min", "").replace(".", "").strip()
try:
total_minutes = int(duration)
if total_minutes < 60:
hours = 0
else:
hours = total_minutes / 60
minutes = total_minutes - (hours * 60)
formatted_time = "%s:%s" % (hours, str(minutes).zfill(2))
except Exception as exc:
log_exception(__name__, exc)
return {}
return {
"Duration": formatted_time,
"Duration.Hours": hours,
"Duration.Minutes": minutes,
"Runtime": total_minutes,
"RuntimeExtended": "%s %s" % (total_minutes, xbmc.getLocalizedString(12391)),
"DurationAndRuntime": "%s (%s min.)" % (formatted_time, total_minutes),
"DurationAndRuntimeExtended": "%s (%s %s)" % (formatted_time, total_minutes, xbmc.getLocalizedString(12391))
}
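# Worked example (illustrative): get_duration(125) yields Duration "2:05",
# Duration.Hours 2, Duration.Minutes 5 and Runtime 125.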
def int_with_commas(number):
'''helper to pretty format a number'''
try:
number = int(number)
if number < 0:
return '-' + int_with_commas(-number)
result = ''
while number >= 1000:
number, number2 = divmod(number, 1000)
result = ",%03d%s" % (number2, result)
return "%d%s" % (number, result)
except Exception:
return ""
def try_parse_int(string):
'''helper to parse int from string without erroring on empty or misformed string'''
try:
return int(string)
except Exception:
return 0
def extend_dict(org_dict, new_dict, allow_overwrite=None):
    '''Extend org_dict with new_dict's properties
    without overwriting existing values.'''
if not new_dict:
return org_dict
if not org_dict:
return new_dict
for key, value in new_dict.iteritems():
if value:
if not org_dict.get(key):
                # original dict doesn't have this key (or no value), just set it
org_dict[key] = value
else:
# original dict already has this key, append results
if isinstance(value, list):
# make sure that our original value also is a list
if isinstance(org_dict[key], list):
for item in value:
if item not in org_dict[key]:
org_dict[key].append(item)
# previous value was str, combine both in list
elif isinstance(org_dict[key], (str, unicode)):
org_dict[key] = org_dict[key].split(" / ")
for item in value:
if item not in org_dict[key]:
org_dict[key].append(item)
elif isinstance(value, dict):
org_dict[key] = extend_dict(org_dict[key], value, allow_overwrite)
elif allow_overwrite and key in allow_overwrite:
# value may be overwritten
org_dict[key] = value
else:
# conflict, leave alone
pass
return org_dict
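# Illustrative example of the merge behaviour:
#   extend_dict({"genre": ["Drama"]}, {"genre": ["Drama", "Thriller"], "year": "2009"})
#   -> {"genre": ["Drama", "Thriller"], "year": "2009"}
# existing list values are extended, missing keys are added, and conflicting
# scalar values are kept unless the key is listed in allow_overwrite.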
def localdate_from_utc_string(timestring):
'''helper to convert internal utc time (used in pvr) to local timezone'''
utc_datetime = arrow.get(timestring)
local_datetime = utc_datetime.to('local')
return local_datetime.format("YYYY-MM-DD HH:mm:ss")
def localized_date_time(timestring):
'''returns localized version of the timestring (used in pvr)'''
date_time = arrow.get(timestring)
local_date = date_time.strftime(xbmc.getRegion("dateshort"))
local_time = date_time.strftime(xbmc.getRegion("time").replace(":%S", ""))
return (local_date, local_time)
def normalize_string(text):
'''normalize string, strip all special chars'''
text = text.replace(":", "")
text = text.replace("/", "-")
text = text.replace("\\", "-")
text = text.replace("<", "")
text = text.replace(">", "")
text = text.replace("*", "")
text = text.replace("?", "")
text = text.replace('|', "")
text = text.replace('(', "")
text = text.replace(')', "")
text = text.replace("\"", "")
text = text.strip()
text = text.rstrip('.')
text = unicodedata.normalize('NFKD', try_decode(text))
return text
def get_compare_string(text):
'''strip all special chars in a string for better comparing of searchresults'''
if not isinstance(text, unicode):
        text = text.decode("utf-8")
text = text.lower()
text = ''.join(e for e in text if e.isalnum())
return text
def strip_newlines(text):
'''strip any newlines from a string'''
return text.replace('\n', ' ').replace('\r', '').rstrip()
def detect_plugin_content(plugin_path):
'''based on the properties of a vfspath we try to detect the content type'''
content_type = ""
if not plugin_path:
return ""
# detect content based on the path
if "listing" in plugin_path:
content_type = "folder"
elif "movie" in plugin_path.lower():
content_type = "movies"
elif "album" in plugin_path.lower():
content_type = "albums"
elif "show" in plugin_path.lower():
content_type = "tvshows"
elif "episode" in plugin_path.lower():
content_type = "episodes"
elif "song" in plugin_path.lower():
content_type = "songs"
elif "musicvideo" in plugin_path.lower():
content_type = "musicvideos"
elif "pvr" in plugin_path.lower():
content_type = "pvr"
elif "type=dynamic" in plugin_path.lower():
content_type = "movies"
elif "videos" in plugin_path.lower():
content_type = "movies"
elif "type=both" in plugin_path.lower():
content_type = "movies"
elif "media" in plugin_path.lower():
content_type = "movies"
elif "favourites" in plugin_path.lower():
content_type = "movies"
elif ("box" in plugin_path.lower() or "dvd" in plugin_path.lower() or
"rentals" in plugin_path.lower() or "incinemas" in plugin_path.lower() or
"comingsoon" in plugin_path.lower() or "upcoming" in plugin_path.lower() or
"opening" in plugin_path.lower() or "intheaters" in plugin_path.lower()):
content_type = "movies"
# if we didn't get the content based on the path, we need to probe the addon...
if not content_type and not xbmc.getCondVisibility("Window.IsMedia"): # safety check
from kodidb import KodiDb
media_array = KodiDb().files(plugin_path, limits=(0, 1))
for item in media_array:
if item.get("filetype", "") == "directory":
content_type = "folder"
break
elif item.get("type") and item["type"] != "unknown":
content_type = item["type"] + "s"
break
elif "showtitle" not in item and "artist" not in item:
# these properties are only returned in the json response if we're looking at actual file content...
# if it's missing it means this is a main directory listing and no need to
# scan the underlying listitems.
content_type = "files"
break
if "showtitle" not in item and "artist" in item:
# AUDIO ITEMS
if item["type"] == "artist":
content_type = "artists"
break
elif (isinstance(item["artist"], list) and len(item["artist"]) > 0 and
item["artist"][0] == item["title"]):
content_type = "artists"
break
elif item["type"] == "album" or item["album"] == item["title"]:
content_type = "albums"
break
elif ((item["type"] == "song" and "play_album" not in item["file"]) or
(item["artist"] and item["album"])):
content_type = "songs"
break
else:
# VIDEO ITEMS
if item["showtitle"] and not item.get("artist"):
# this is a tvshow, episode or season...
if item["type"] == "season" or (item["season"] > -1 and item["episode"] == -1):
content_type = "seasons"
break
elif item["type"] == "episode" or item["season"] > -1 and item["episode"] > -1:
content_type = "episodes"
break
else:
content_type = "tvshows"
break
elif item.get("artist"):
# this is a musicvideo!
content_type = "musicvideos"
break
elif (item["type"] == "movie" or item.get("imdbnumber") or item.get("mpaa") or
item.get("trailer") or item.get("studio")):
content_type = "movies"
break
log_msg("detect_plugin_path_content for: %s - result: %s" % (plugin_path, content_type))
return content_type
def download_artwork(folderpath, artwork):
'''download artwork to local folder'''
efa_path = ""
new_dict = {}
if not xbmcvfs.exists(folderpath):
xbmcvfs.mkdir(folderpath)
for key, value in artwork.iteritems():
if key == "fanart":
new_dict[key] = download_image(os.path.join(folderpath, "fanart.jpg"), value)
elif key == "thumb":
new_dict[key] = download_image(os.path.join(folderpath, "folder.jpg"), value)
elif key == "discart":
new_dict[key] = download_image(os.path.join(folderpath, "disc.png"), value)
elif key == "banner":
new_dict[key] = download_image(os.path.join(folderpath, "banner.jpg"), value)
elif key == "clearlogo":
new_dict[key] = download_image(os.path.join(folderpath, "logo.png"), value)
elif key == "clearart":
new_dict[key] = download_image(os.path.join(folderpath, "clearart.png"), value)
elif key == "characterart":
new_dict[key] = download_image(os.path.join(folderpath, "characterart.png"), value)
elif key == "poster":
new_dict[key] = download_image(os.path.join(folderpath, "poster.jpg"), value)
elif key == "landscape":
new_dict[key] = download_image(os.path.join(folderpath, "landscape.jpg"), value)
elif key == "thumbback":
new_dict[key] = download_image(os.path.join(folderpath, "thumbback.jpg"), value)
elif key == "spine":
new_dict[key] = download_image(os.path.join(folderpath, "spine.jpg"), value)
elif key == "fanarts" and value:
# copy extrafanarts only if the directory doesn't exist at all
delim = "\\" if "\\" in folderpath else "/"
efa_path = "%sextrafanart" % folderpath + delim
if not xbmcvfs.exists(efa_path):
xbmcvfs.mkdir(efa_path)
images = []
for count, image in enumerate(value):
image = download_image(os.path.join(efa_path, "fanart%s.jpg" % count), image)
images.append(image)
if LIMIT_EXTRAFANART and count == LIMIT_EXTRAFANART:
break
new_dict[key] = images
elif key == "posters" and value:
# copy extraposters only if the directory doesn't exist at all
delim = "\\" if "\\" in folderpath else "/"
efa_path = "%sextraposter" % folderpath + delim
if not xbmcvfs.exists(efa_path):
xbmcvfs.mkdir(efa_path)
images = []
for count, image in enumerate(value):
image = download_image(os.path.join(efa_path, "poster%s.jpg" % count), image)
images.append(image)
if LIMIT_EXTRAFANART and count == LIMIT_EXTRAFANART:
break
new_dict[key] = images
else:
new_dict[key] = value
if efa_path:
new_dict["extrafanart"] = efa_path
return new_dict
def download_image(filename, url):
'''download specific image to local folder'''
if not url:
return url
refresh_needed = False
if xbmcvfs.exists(filename) and filename == url:
# only overwrite if new image is different
return filename
else:
if xbmcvfs.exists(filename):
xbmcvfs.delete(filename)
refresh_needed = True
if xbmcvfs.copy(url, filename):
if refresh_needed:
refresh_image(filename)
return filename
return url
def refresh_image(imagepath):
'''tell kodi texture cache to refresh a particular image'''
import sqlite3
dbpath = xbmc.translatePath("special://database/Textures13.db").decode('utf-8')
connection = sqlite3.connect(dbpath, timeout=30, isolation_level=None)
try:
cache_image = connection.execute('SELECT cachedurl FROM texture WHERE url = ?', (imagepath,)).fetchone()
if cache_image and isinstance(cache_image, (unicode, str)):
if xbmcvfs.exists(cache_image):
xbmcvfs.delete("special://profile/Thumbnails/%s" % cache_image)
connection.execute('DELETE FROM texture WHERE url = ?', (imagepath,))
connection.close()
except Exception as exc:
log_exception(__name__, exc)
finally:
del connection
# pylint: disable-msg=too-many-local-variables
def manual_set_artwork(artwork, mediatype, header=None):
'''Allow user to manually select the artwork with a select dialog'''
changemade = False
if mediatype == "artist":
art_types = ["thumb", "poster", "fanart", "banner", "clearart", "clearlogo", "landscape"]
elif mediatype == "album":
art_types = ["thumb", "discart", "thumbback", "spine"]
else:
art_types = ["thumb", "poster", "fanart", "banner", "clearart",
"clearlogo", "discart", "landscape", "characterart"]
if not header:
header = xbmc.getLocalizedString(13511)
# show dialogselect with all artwork options
abort = False
while not abort:
listitems = []
for arttype in art_types:
img = artwork.get(arttype, "")
listitem = xbmcgui.ListItem(label=arttype, label2=img, iconImage=img)
listitem.setProperty("icon", img)
listitems.append(listitem)
dialog = DialogSelect("DialogSelect.xml", "", listing=listitems,
window_title=header, multiselect=False)
dialog.doModal()
selected_item = dialog.result
del dialog
if selected_item == -1:
abort = True
else:
# show results for selected art type
artoptions = []
selected_item = listitems[selected_item]
image = selected_item.getProperty("icon").decode("utf-8")
label = selected_item.getLabel().decode("utf-8")
subheader = "%s: %s" % (header, label)
if image:
# current image
listitem = xbmcgui.ListItem(label=xbmc.getLocalizedString(13512), iconImage=image, label2=image)
listitem.setProperty("icon", image)
artoptions.append(listitem)
# none option
listitem = xbmcgui.ListItem(label=xbmc.getLocalizedString(231), iconImage="DefaultAddonNone.png")
listitem.setProperty("icon", "DefaultAddonNone.png")
artoptions.append(listitem)
# browse option
listitem = xbmcgui.ListItem(label=xbmc.getLocalizedString(1024), iconImage="DefaultFolder.png")
listitem.setProperty("icon", "DefaultFolder.png")
artoptions.append(listitem)
# add remaining images as option
allarts = artwork.get(label + "s", [])
for item in allarts:
listitem = xbmcgui.ListItem(label=item, iconImage=item)
listitem.setProperty("icon", item)
artoptions.append(listitem)
dialog = DialogSelect("DialogSelect.xml", "", listing=artoptions, window_title=subheader)
dialog.doModal()
selected_item = dialog.result
del dialog
if image and selected_item == 1:
# set image to None
artwork[label] = ""
changemade = True
elif (image and selected_item > 2) or (not image and selected_item > 0):
# one of the optional images is selected as new default
artwork[label] = artoptions[selected_item].getProperty("icon")
changemade = True
elif (image and selected_item == 2) or (not image and selected_item == 0):
# manual browse...
dialog = xbmcgui.Dialog()
image = dialog.browse(2, xbmc.getLocalizedString(1030),
'files', mask='.gif|.png|.jpg').decode("utf-8")
del dialog
if image:
artwork[label] = image
changemade = True
# return endresult
return changemade, artwork
# pylint: enable-msg=too-many-local-variables
class DialogSelect(xbmcgui.WindowXMLDialog):
'''wrapper around Kodi dialogselect to present a list of items'''
list_control = None
def __init__(self, *args, **kwargs):
xbmcgui.WindowXMLDialog.__init__(self)
self.listing = kwargs.get("listing")
self.window_title = kwargs.get("window_title", "")
self.result = -1
def onInit(self):
'''called when the dialog is drawn'''
self.list_control = self.getControl(6)
self.getControl(1).setLabel(self.window_title)
self.getControl(3).setVisible(False)
try:
self.getControl(7).setLabel(xbmc.getLocalizedString(222))
except Exception:
pass
self.getControl(5).setVisible(False)
# add our items to the listing and focus the control
self.list_control.addItems(self.listing)
self.setFocus(self.list_control)
def onAction(self, action):
'''On kodi action'''
if action.getId() in (9, 10, 92, 216, 247, 257, 275, 61467, 61448, ):
self.result = -1
self.close()
def onClick(self, control_id):
'''Triggers if our dialog is clicked'''
if control_id in (6, 3,):
num = self.list_control.getSelectedPosition()
self.result = num
else:
self.result = -1
self.close()
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 25 18:22:00 2017
@author: hubj
"""
# %matplotlib inline
import numpy as np # linear algebra
# import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import dicom
import os
import scipy.ndimage
# import matplotlib.pyplot as plt
import pickle
# from skimage import measure, morphology
# from mpl_toolkits.mplot3d.art3d import Poly3DCollection
def load_scan(path):
slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]
slices.sort(key = lambda x: int(x.InstanceNumber))
try:
slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])
    except Exception:
slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)
for s in slices:
s.SliceThickness = slice_thickness
return slices
def get_pixels_hu(scans):
image = np.stack([s.pixel_array for s in scans])
    # Convert to int16 (from sometimes uint16),
# should be possible as values should always be low enough (<32k)
image = image.astype(np.int16)
# Set outside-of-scan pixels to 0
# The intercept is usually -1024, so air is approximately 0
image[image == -2000] = 0
# Convert to Hounsfield units (HU)
intercept = scans[0].RescaleIntercept
slope = scans[0].RescaleSlope
if slope != 1:
image = slope * image.astype(np.float64)
image = image.astype(np.int16)
image += np.int16(intercept)
return np.array(image, dtype=np.int16)
def resample(image, new_shape):
    current_shape = np.array(image.shape)
    # use float division so the zoom factors are not truncated by integer division
    resize_factor = np.array(new_shape, dtype=np.float64) / current_shape
    return scipy.ndimage.interpolation.zoom(image, resize_factor)
def plot_3d(image, threshold=-300, alpha=0.1):
    # plotting-only imports are kept local since the module-level ones are commented out
    import matplotlib.pyplot as plt
    from skimage import measure
    from mpl_toolkits.mplot3d.art3d import Poly3DCollection
    # Position the scan upright,
    # so the head of the patient would be at the top facing the camera
    p = image.transpose(2,1,0)
    p = p[:,:,::-1]
    verts, faces = measure.marching_cubes(p, threshold)
    fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, projection='3d')
# Fancy indexing: `verts[faces]` to generate a collection of triangles
mesh = Poly3DCollection(verts[faces], alpha=alpha)
face_color = [0.5, 0.5, 1]
mesh.set_facecolor(face_color)
ax.add_collection3d(mesh)
ax.set_xlim(0, p.shape[0])
ax.set_ylim(0, p.shape[1])
ax.set_zlim(0, p.shape[2])
plt.show()
MIN_BOUND = -1000.0
MAX_BOUND = 400.0
def normalize(image):
image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)
image[image>1] = 1.
image[image<0] = 0.
return image
PIXEL_MEAN = 0.25
def zero_center(image):
image = image - PIXEL_MEAN
return image
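# Worked example (illustrative): with MIN_BOUND=-1000 and MAX_BOUND=400,
# normalize() maps -1000 HU -> 0.0, -300 HU -> 0.5 and 400 HU -> 1.0;
# zero_center() then shifts by PIXEL_MEAN, e.g. 0.5 -> 0.25.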
INPUT_FOLDER = 'data/'
def crop_rip_cage(hu, threshold):
x0_mid = int(hu.shape[0]/2)
x1_min = None
x1_max = None
for x1 in range(hu.shape[1]):
# print(x0_mid)
# print(x1)
# print(hu[x0_mid:, x1, :].shape)
# print(hu[x0_mid:, x1, :])
# print(hu[x0_mid:, x1, :]>threshold)
if (hu[x0_mid:,x1,:]>threshold).any():
x1_min = x1
break
for x1 in range(hu.shape[1]-1,0,-1):
if (hu[x0_mid:,x1,:]>threshold).any():
x1_max = x1
break
assert(x1_min<x1_max)
x2_min = None
x2_max = None
for x2 in range(hu.shape[2]):
if (hu[x0_mid:,:,x2]>threshold).any():
x2_min = x2
break
for x2 in range(hu.shape[2]-1,0,-1):
if (hu[x0_mid:,:,x2]>threshold).any():
x2_max = x2
break
assert(x2_min<x2_max)
x0_min = None
x0_max = None
for x0 in range(hu.shape[0]):
if (hu[x0,:,:]>threshold).any():
x0_min = x0
break
for x0 in range(hu.shape[0]-1,0,-1):
if (hu[x0,:,:]>threshold).any():
x0_max = x0
break
assert(x0_min<x0_max)
# print('x0', x0_min, x0_max)
# print('x1', x1_min, x1_max)
# print('x2', x2_min, x2_max)
# x0_mid = (x0_max+x0_min)/2
# x1_mid = (x1_max+x1_min)/2
# x2_mid = (x2_max+x2_min)/2
# plt.figure()
# plt.imshow(hu[x0_min + (x0_max-x0_min)/3, x1_min:x1_max, x2_min:x2_max])
## plt.figure()
## plt.imshow(hu[x0_mid, x1_min:x1_max, x2_min:x2_max])
# plt.figure()
# plt.imshow(hu[x0_min + (2*(x0_max-x0_min))/3, x1_min:x1_max, x2_min:x2_max])
# plt.figure()
# plt.imshow(hu[x0_min:x0_max, x1_mid, x2_min:x2_max])
# plt.figure()
# plt.imshow(hu[x0_min:x0_max, x1_min:x1_max, x2_min + (x2_max-x2_min)/3])
## plt.figure()
## plt.imshow(hu[x0_min:x0_max, x1_min:x1_max, x2_mid])
# plt.figure()
# plt.imshow(hu[x0_min:x0_max, x1_min:x1_max, x2_min + ((x2_max - x2_min)*2)/3])
# plt.imshow(resample(hu, [123, 123, 123]))
return x0_max-x0_min, x1_max-x1_min,x2_max-x2_min
def preprocess(patId):
first_patient = load_scan(INPUT_FOLDER + patId)
first_patient_pixels = get_pixels_hu(first_patient)
cropped_dimensions = crop_rip_cage(first_patient_pixels, 600)
pix_resampled = resample(first_patient_pixels, [93, 218, 356])
return pix_resampled
# return cropped_dimensions
# Some constants
patients = os.listdir(INPUT_FOLDER)
# patients = [patients[0]]
patients.sort()
data = None
# min_x0 = min_x1 = min_x2 = 987656432
# max_x0 = max_x1 = max_x2 = 0
for i, patId in enumerate(patients):
print("processing patient {0:s} {1:d}/{2:d}".format(patId, i+1, len(patients)))
data = preprocess(patId)
with open("preprocessed/{0:s}.pickle".format(patId), "wb") as f:
pickle.dump(data, f)
# min_x0 = min(data[0], min_x0)
# min_x1 = min(data[1], min_x1)
# min_x2 = min(data[2], min_x2)
# max_x0 = max(data[0], max_x0)
# max_x1 = max(data[1], max_x1)
# max_x2 = max(data[2], max_x2)
# print('min_x0', min_x0)
# print('min_x1', min_x1)
# print('min_x2', min_x2)
# print('max_x0', max_x0)
# print('max_x1', max_x1)
# print('max_x2', max_x2)
# with open("{0:s}.pickle".format(patId), "w") as f:
# pickle.dump(data, f)
#sick_data = preprocess('0acbebb8d463b4b9ca88cf38431aac69')
#healthy1_data = main(patients[1])
|
|
# Copyright 2017 Square, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
class IDCodeRegisterBits(ctypes.LittleEndianStructure):
"""This class holds the different bit masks for the IDCode register.
Attributes:
valid: validity bit, should always be ``0``.
manufacturer: the JEDEC Manufacturer ID.
part_no: the part number defined by the manufacturer.
version_code: the version code.
"""
_fields_ = [
('valid', ctypes.c_uint32, 1),
('manufacturer', ctypes.c_uint32, 11),
('part_no', ctypes.c_uint32, 16),
('version_code', ctypes.c_uint32, 4)
]
class IDCodeRegisterFlags(ctypes.Union):
"""Mask for the IDCode register bits.
Attributes:
value: the value stored in the mask.
"""
_anonymous_ = ('bit',)
_fields_ = [
('bit', IDCodeRegisterBits),
('value', ctypes.c_uint32)
]
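# Illustrative sketch (not part of the original module): decoding a raw 32-bit
# IDCODE word via the union above -- write ``value``, then read the bit fields.
def _example_decode_idcode(raw_idcode):
    flags = IDCodeRegisterFlags()
    flags.value = raw_idcode
    return (flags.valid, flags.manufacturer, flags.part_no, flags.version_code)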
class AbortRegisterBits(ctypes.LittleEndianStructure):
"""This class holds the different bit mask for the Abort Register.
Attributes:
DAPABORT: write ``1`` to trigger a DAP abort.
STKCMPCLR: write ``1`` to clear the ``STICKYCMP`` sticky compare flag
(only supported on SW-DP).
STKERRCLR: write ``1`` to clear the ``STICKYERR`` sticky error flag
(only supported on SW-DP).
WDERRCLR: write ``1`` to clear the ``WDATAERR`` write data error flag
(only supported on SW-DP).
ORUNERRCLR: write ``1`` to clear the ``STICKYORUN`` overrun error flag
(only supported on SW-DP).
"""
_fields_ = [
('DAPABORT', ctypes.c_uint32, 1),
('STKCMPCLR', ctypes.c_uint32, 1),
('STKERRCLR', ctypes.c_uint32, 1),
('WDERRCLR', ctypes.c_uint32, 1),
('ORUNERRCLR', ctypes.c_uint32, 1),
('RESERVED', ctypes.c_uint32, 27),
]
class AbortRegisterFlags(ctypes.Union):
"""Mask for the abort register bits.
Attributes:
value: the value stored in the mask.
"""
_anonymous_ = ('bit',)
_fields_ = [
('bit', AbortRegisterBits),
('value', ctypes.c_uint32)
]
class ControlStatusRegisterBits(ctypes.LittleEndianStructure):
"""This class holds the different bit masks for the DP Control / Status
Register bit assignments.
Attributes:
ORUNDETECT: if set, enables overrun detection.
STICKYORUN: if overrun is enabled, is set when overrun occurs.
TRNMODE: transfer mode for access port operations.
STICKYCMP: is set when a match occurs on a pushed compare or verify
operation.
STICKYERR: is set when an error is returned by an access port
transaction.
READOK: is set when the response to a previous access port or ``RDBUFF``
was ``OK``.
WDATAERR: set to ``1`` if a Write Data Error occurs.
MASKLANE: bytes to be masked in pushed compare and verify operations.
TRNCNT: transaction counter.
RESERVED: reserved.
CDBGRSTREQ: debug reset request.
CDBGRSTACK: debug reset acknowledge.
CDBGPWRUPREQ: debug power-up request.
CDBGPWRUPACK: debug power-up acknowledge.
CSYSPWRUPREQ: system power-up request
CSYSPWRUPACK: system power-up acknowledge.
See also:
See the ARM documentation on the significance of these masks
`here <http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ihi0031c/index.html>`_.
"""
_fields_ = [
('ORUNDETECT', ctypes.c_uint32, 1), # read/write
('STICKYORUN', ctypes.c_uint32, 1), # read-only
('TRNMODE', ctypes.c_uint32, 2), # read/write
('STICKYCMP', ctypes.c_uint32, 1), # read-only
('STICKYERR', ctypes.c_uint32, 1), # read-only
('READOK', ctypes.c_uint32, 1), # read-only
('WDATAERR', ctypes.c_uint32, 1), # read-only
('MASKLANE', ctypes.c_uint32, 4), # read/write
('TRNCNT', ctypes.c_uint32, 12), # read/write
('RESERVED', ctypes.c_uint32, 2), # -
('CDBGRSTREQ', ctypes.c_uint32, 1), # read/write
('CDBGRSTACK', ctypes.c_uint32, 1), # read-only
('CDBGPWRUPREQ', ctypes.c_uint32, 1), # read/write
('CDBGPWRUPACK', ctypes.c_uint32, 1), # read-only
('CSYSPWRUPREQ', ctypes.c_uint32, 1), # read/write
('CSYSPWRUPACK', ctypes.c_uint32, 1) # read-only
]
class ControlStatusRegisterFlags(ctypes.Union):
"""Mask for the control/status register bits.
Attributes:
value: the value stored in the mask.
"""
_anonymous_ = ('bit',)
_fields_ = [
('bit', ControlStatusRegisterBits),
('value', ctypes.c_uint32)
]
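# Illustrative sketch (not part of the original module): composing a CTRL/STAT
# write value that requests debug and system power-up by setting bit fields.
def _example_powerup_request():
    flags = ControlStatusRegisterFlags()
    flags.value = 0
    flags.CDBGPWRUPREQ = 1  # debug power-up request
    flags.CSYSPWRUPREQ = 1  # system power-up request
    return flags.value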
class SelectRegisterBits(ctypes.LittleEndianStructure):
"""This class holds the different bit masks for the AP Select Register.
Attributes:
CTRLSEL: SW-DP debug port address bank select.
RESERVED_A: reserved.
APBANKSEL: selects the active four-word register window on the current
access port.
RESERVED_B: reserved.
APSEL: selects the current access port.
"""
_fields_ = [
('CTRLSEL', ctypes.c_uint32, 1),
('RESERVED_A', ctypes.c_uint32, 3),
('APBANKSEL', ctypes.c_uint32, 4),
('RESERVED_B', ctypes.c_uint32, 16),
('APSEL', ctypes.c_uint32, 8)
]
class SelectRegisterFlags(ctypes.Union):
"""Mask for the select register bits.
Attributes:
value: the value stored in the mask.
"""
_anonymous_ = ('bit',)
_fields_ = [
('bit', SelectRegisterBits),
('value', ctypes.c_uint32)
]
class MDMAPControlRegisterBits(ctypes.LittleEndianStructure):
"""This class holds the different bit masks for the MDM-AP Control
Register.
Attributes:
flash_mass_erase: set to cause a mass erase, this is cleared
automatically when a mass erase finishes.
debug_disable: set to disable debug, clear to allow debug.
debug_request: set to force the core to halt.
sys_reset_request: set to force a system reset.
core_hold_reset: set to suspend the core in reset at the end of reset
sequencing.
VLLDBGREQ: set to hold the system in reset after the next recovery from
VLLSx (Very Low Leakage Stop).
VLLDBGACK: set to release a system held in reset following a VLLSx
(Very Low Leakage Stop) recovery.
VLLSTATACK: set to acknowledge that the DAP LLS (Low Leakage Stop) and
VLLS (Very Low Leakage Stop) status bits have been read.
"""
_fields_ = [
('flash_mass_erase', ctypes.c_uint8, 1),
('debug_disable', ctypes.c_uint8, 1),
('debug_request', ctypes.c_uint8, 1),
('sys_reset_request', ctypes.c_uint8, 1),
('core_hold_reset', ctypes.c_uint8, 1),
('VLLDBGREQ', ctypes.c_uint8, 1),
('VLLDBGACK', ctypes.c_uint8, 1),
('VLLSTATACK', ctypes.c_uint8, 1)
]
class MDMAPControlRegisterFlags(ctypes.Union):
"""Mask for the MDM-AP control register bits.
Attributes:
value: the value stored in the mask.
"""
_anonymous_ = ('bit',)
_fields_ = [
('bit', MDMAPControlRegisterBits),
('value', ctypes.c_uint8)
]
class MDMAPStatusRegisterBits(ctypes.LittleEndianStructure):
"""Holds the bit masks for the MDM-AP Status Register.
Attributes:
flash_mass_erase_ack: cleared after a system reset, indicates that a
flash mass erase was acknowledged.
flash_ready: indicates that flash has been initialized and can be
configured.
system_security: if set, system is secure and debugger cannot access the
memory or system bus.
system_reset: ``1`` if system is in reset, otherwise ``0``.
mass_erase_enabled: ``1`` if MCU can be mass erased, otherwise ``0``.
low_power_enabled: ``1`` if low power stop mode is enabled, otherwise ``0``.
very_low_power_mode: ``1`` if device is in very low power mode.
LLSMODEEXIT: indicates an exit from LLS mode has occurred.
VLLSxMODEEXIT: indicates an exit from VLLSx mode has occurred.
core_halted: indicates the core has entered debug halt mode.
core_deep_sleep: indicates core has entered a low power mode.
core_sleeping: indicates the core has entered a low power mode.
Note:
if ``core_sleeping & !core_deep_sleep``, then the core is in VLPW (very
low power wait) mode, otherwise if ``core_sleeping & core_deep_sleep``,
then it is in VLPS (very low power stop) mode.
"""
_fields_ = [
('flash_mass_erase_ack', ctypes.c_uint32, 1),
('flash_ready', ctypes.c_uint32, 1),
('system_security', ctypes.c_uint32, 1),
('system_reset', ctypes.c_uint32, 1),
('RESERVED_A', ctypes.c_uint32, 1),
('mass_erase_enabled', ctypes.c_uint32, 1),
('backdoor_access_enabled', ctypes.c_uint32, 1),
('low_power_enabled', ctypes.c_uint32, 1),
('very_low_power_mode', ctypes.c_uint32, 1),
('LLSMODEEXIT', ctypes.c_uint32, 1),
('VLLSxMODEEXIT', ctypes.c_uint32, 1),
('RESERVED_B', ctypes.c_uint32, 5),
('core_halted', ctypes.c_uint32, 1),
('core_deep_sleep', ctypes.c_uint32, 1),
('core_sleeping', ctypes.c_uint32, 1),
('RESERVED_C', ctypes.c_uint32, 13)
]
class MDMAPStatusRegisterFlags(ctypes.Union):
"""Mask for the MDM-AP status register bits.
Attributes:
value: the value stored in the mask.
"""
_anonymous_ = ('bit',)
_fields_ = [
('bit', MDMAPStatusRegisterBits),
('value', ctypes.c_uint32)
]
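# Illustrative sketch (not part of the original module): applying the note in
# MDMAPStatusRegisterBits to classify the low-power state of a raw status word.
def _example_low_power_state(raw_status):
    flags = MDMAPStatusRegisterFlags()
    flags.value = raw_status
    if flags.core_sleeping and not flags.core_deep_sleep:
        return 'VLPW'  # very low power wait
    if flags.core_sleeping and flags.core_deep_sleep:
        return 'VLPS'  # very low power stop
    return 'not sleeping'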
|
|
## @file
# This file is used to parse and evaluate expression in directive or PCD value.
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
## Import Modules
#
from __future__ import print_function
from __future__ import absolute_import
from Common.GlobalData import *
from CommonDataClass.Exceptions import BadExpression
from CommonDataClass.Exceptions import WrnExpression
from .Misc import GuidStringToGuidStructureString, ParseFieldValue,CopyDict
import Common.EdkLogger as EdkLogger
import copy
from Common.DataType import *
import sys
from random import sample
import string
ERR_STRING_EXPR = 'This operator cannot be used in string expression: [%s].'
ERR_SNYTAX = 'Syntax error, the rest of expression cannot be evaluated: [%s].'
ERR_MATCH = 'No matching right parenthesis.'
ERR_STRING_TOKEN = 'Bad string token: [%s].'
ERR_MACRO_TOKEN = 'Bad macro token: [%s].'
ERR_EMPTY_TOKEN = 'Empty token is not allowed.'
ERR_PCD_RESOLVE = 'The PCD should be FeatureFlag type or FixedAtBuild type: [%s].'
ERR_VALID_TOKEN = 'No more valid token found from rest of string: [%s].'
ERR_EXPR_TYPE = 'Different types found in expression.'
ERR_OPERATOR_UNSUPPORT = 'Unsupported operator: [%s]'
ERR_REL_NOT_IN = 'Expect "IN" after "not" operator.'
WRN_BOOL_EXPR = 'Operand of boolean type cannot be used in arithmetic expression.'
WRN_EQCMP_STR_OTHERS = '== Comparison between Operand of string type and Boolean/Number Type always return False.'
WRN_NECMP_STR_OTHERS = '!= Comparison between Operand of string type and Boolean/Number Type always return True.'
ERR_RELCMP_STR_OTHERS = 'Operator taking Operand of string type and Boolean/Number Type is not allowed: [%s].'
ERR_STRING_CMP = 'Unicode string and general string cannot be compared: [%s %s %s]'
ERR_ARRAY_TOKEN = 'Bad C array or C format GUID token: [%s].'
ERR_ARRAY_ELE = 'This must be HEX value for NList or Array: [%s].'
ERR_EMPTY_EXPR = 'Empty expression is not allowed.'
ERR_IN_OPERAND = 'Macro after IN operator can only be: $(FAMILY), $(ARCH), $(TOOL_CHAIN_TAG) and $(TARGET).'
__ValidString = re.compile(r'[_a-zA-Z][_0-9a-zA-Z]*$')
_ReLabel = re.compile('LABEL\((\w+)\)')
_ReOffset = re.compile('OFFSET_OF\((\w+)\)')
PcdPattern = re.compile(r'[_a-zA-Z][0-9A-Za-z_]*\.[_a-zA-Z][0-9A-Za-z_]*$')
## SplitString
# Split a string into a list according to double quotes
# For example: abc"de\"f"ghi"jkl"mn will be: ['abc', '"de\"f"', 'ghi', '"jkl"', 'mn']
#
def SplitString(String):
# There might be escaped quote: "abc\"def\\\"ghi", 'abc\'def\\\'ghi'
RanStr = ''.join(sample(string.ascii_letters + string.digits, 8))
String = String.replace('\\\\', RanStr).strip()
RetList = []
InSingleQuote = False
InDoubleQuote = False
Item = ''
for i, ch in enumerate(String):
if ch == '"' and not InSingleQuote:
if String[i - 1] != '\\':
InDoubleQuote = not InDoubleQuote
if not InDoubleQuote:
Item += String[i]
RetList.append(Item)
Item = ''
continue
if Item:
RetList.append(Item)
Item = ''
elif ch == "'" and not InDoubleQuote:
if String[i - 1] != '\\':
InSingleQuote = not InSingleQuote
if not InSingleQuote:
Item += String[i]
RetList.append(Item)
Item = ''
continue
if Item:
RetList.append(Item)
Item = ''
Item += String[i]
if InSingleQuote or InDoubleQuote:
raise BadExpression(ERR_STRING_TOKEN % Item)
if Item:
RetList.append(Item)
for i, ch in enumerate(RetList):
if RanStr in ch:
RetList[i] = ch.replace(RanStr,'\\\\')
return RetList
def SplitPcdValueString(String):
# There might be escaped comma in GUID() or DEVICE_PATH() or " "
# or ' ' or L' ' or L" "
RanStr = ''.join(sample(string.ascii_letters + string.digits, 8))
String = String.replace('\\\\', RanStr).strip()
RetList = []
InParenthesis = 0
InSingleQuote = False
InDoubleQuote = False
Item = ''
for i, ch in enumerate(String):
if ch == '(':
InParenthesis += 1
elif ch == ')':
if InParenthesis:
InParenthesis -= 1
else:
raise BadExpression(ERR_STRING_TOKEN % Item)
elif ch == '"' and not InSingleQuote:
if String[i-1] != '\\':
InDoubleQuote = not InDoubleQuote
elif ch == "'" and not InDoubleQuote:
if String[i-1] != '\\':
InSingleQuote = not InSingleQuote
elif ch == ',':
if InParenthesis or InSingleQuote or InDoubleQuote:
Item += String[i]
continue
elif Item:
RetList.append(Item)
Item = ''
continue
Item += String[i]
if InSingleQuote or InDoubleQuote or InParenthesis:
raise BadExpression(ERR_STRING_TOKEN % Item)
if Item:
RetList.append(Item)
for i, ch in enumerate(RetList):
if RanStr in ch:
RetList[i] = ch.replace(RanStr,'\\\\')
return RetList
def IsValidCName(Str):
return True if __ValidString.match(Str) else False
def BuildOptionValue(PcdValue, GuidDict):
if PcdValue.startswith('H'):
InputValue = PcdValue[1:]
elif PcdValue.startswith("L'") or PcdValue.startswith("'"):
InputValue = PcdValue
elif PcdValue.startswith('L'):
InputValue = 'L"' + PcdValue[1:] + '"'
else:
InputValue = PcdValue
try:
PcdValue = ValueExpressionEx(InputValue, TAB_VOID, GuidDict)(True)
except:
pass
return PcdValue
## ReplaceExprMacro
#
def ReplaceExprMacro(String, Macros, ExceptionList = None):
StrList = SplitString(String)
for i, String in enumerate(StrList):
InQuote = False
if String.startswith('"'):
InQuote = True
MacroStartPos = String.find('$(')
if MacroStartPos < 0:
for Pcd in gPlatformPcds:
if Pcd in String:
if Pcd not in gConditionalPcds:
gConditionalPcds.append(Pcd)
continue
RetStr = ''
while MacroStartPos >= 0:
RetStr = String[0:MacroStartPos]
MacroEndPos = String.find(')', MacroStartPos)
if MacroEndPos < 0:
raise BadExpression(ERR_MACRO_TOKEN % String[MacroStartPos:])
Macro = String[MacroStartPos+2:MacroEndPos]
if Macro not in Macros:
# From C reference manual:
# If an undefined macro name appears in the constant-expression of
# !if or !elif, it is replaced by the integer constant 0.
RetStr += '0'
elif not InQuote:
Tklst = RetStr.split()
if Tklst and Tklst[-1] in {'IN', 'in'} and ExceptionList and Macro not in ExceptionList:
raise BadExpression(ERR_IN_OPERAND)
# Make sure the macro in exception list is encapsulated by double quote
# For example: DEFINE ARCH = IA32 X64
# $(ARCH) is replaced with "IA32 X64"
if ExceptionList and Macro in ExceptionList:
RetStr += '"' + Macros[Macro] + '"'
elif Macros[Macro].strip():
RetStr += Macros[Macro]
else:
RetStr += '""'
else:
RetStr += Macros[Macro]
RetStr += String[MacroEndPos+1:]
String = RetStr
MacroStartPos = String.find('$(')
StrList[i] = RetStr
return ''.join(StrList)
# Convert an int to a string for IN/NOT IN expressions
def IntToStr(Value):
StrList = []
while Value > 0:
StrList.append(chr(Value & 0xff))
Value = Value >> 8
Value = '"' + ''.join(StrList) + '"'
return Value
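# For example (illustrative): IntToStr(0x434241) packs the bytes little-endian
# and returns '"ABC"', which is how numeric operands of IN/NOT IN get compared.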
SupportedInMacroList = ['TARGET', 'TOOL_CHAIN_TAG', 'ARCH', 'FAMILY']
class BaseExpression(object):
def __init__(self, *args, **kwargs):
super(BaseExpression, self).__init__()
# Check if current token matches the operators given from parameter
def _IsOperator(self, OpSet):
Idx = self._Idx
self._GetOperator()
if self._Token in OpSet:
if self._Token in self.LogicalOperators:
self._Token = self.LogicalOperators[self._Token]
return True
self._Idx = Idx
return False
class ValueExpression(BaseExpression):
# Logical operator mapping
LogicalOperators = {
'&&' : 'and', '||' : 'or',
'!' : 'not', 'AND': 'and',
'OR' : 'or' , 'NOT': 'not',
'XOR': '^' , 'xor': '^',
'EQ' : '==' , 'NE' : '!=',
'GT' : '>' , 'LT' : '<',
'GE' : '>=' , 'LE' : '<=',
'IN' : 'in'
}
NonLetterOpLst = ['+', '-', TAB_STAR, '/', '%', '&', '|', '^', '~', '<<', '>>', '!', '=', '>', '<', '?', ':']
SymbolPattern = re.compile("("
"\$\([A-Z][A-Z0-9_]*\)|\$\(\w+\.\w+\)|\w+\.\w+|"
"&&|\|\||!(?!=)|"
"(?<=\W)AND(?=\W)|(?<=\W)OR(?=\W)|(?<=\W)NOT(?=\W)|(?<=\W)XOR(?=\W)|"
"(?<=\W)EQ(?=\W)|(?<=\W)NE(?=\W)|(?<=\W)GT(?=\W)|(?<=\W)LT(?=\W)|(?<=\W)GE(?=\W)|(?<=\W)LE(?=\W)"
")")
@staticmethod
def Eval(Operator, Oprand1, Oprand2 = None):
WrnExp = None
if Operator not in {"==", "!=", ">=", "<=", ">", "<", "in", "not in"} and \
(isinstance(Oprand1, type('')) or isinstance(Oprand2, type(''))):
raise BadExpression(ERR_STRING_EXPR % Operator)
if Operator in {'in', 'not in'}:
if not isinstance(Oprand1, type('')):
Oprand1 = IntToStr(Oprand1)
if not isinstance(Oprand2, type('')):
Oprand2 = IntToStr(Oprand2)
TypeDict = {
type(0) : 0,
# For python2 long type
type(sys.maxsize + 1) : 0,
type('') : 1,
type(True) : 2
}
EvalStr = ''
if Operator in {"!", "NOT", "not"}:
if isinstance(Oprand1, type('')):
raise BadExpression(ERR_STRING_EXPR % Operator)
EvalStr = 'not Oprand1'
elif Operator in {"~"}:
if isinstance(Oprand1, type('')):
raise BadExpression(ERR_STRING_EXPR % Operator)
EvalStr = '~ Oprand1'
else:
if Operator in {"+", "-"} and (type(True) in {type(Oprand1), type(Oprand2)}):
# Boolean in '+'/'-' will be evaluated but raise warning
WrnExp = WrnExpression(WRN_BOOL_EXPR)
elif type('') in {type(Oprand1), type(Oprand2)} and not isinstance(Oprand1, type(Oprand2)):
# == between string and number/boolean will always return False, != return True
if Operator == "==":
WrnExp = WrnExpression(WRN_EQCMP_STR_OTHERS)
WrnExp.result = False
raise WrnExp
elif Operator == "!=":
WrnExp = WrnExpression(WRN_NECMP_STR_OTHERS)
WrnExp.result = True
raise WrnExp
else:
raise BadExpression(ERR_RELCMP_STR_OTHERS % Operator)
elif TypeDict[type(Oprand1)] != TypeDict[type(Oprand2)]:
if Operator in {"==", "!=", ">=", "<=", ">", "<"} and set((TypeDict[type(Oprand1)], TypeDict[type(Oprand2)])) == set((TypeDict[type(True)], TypeDict[type(0)])):
# comparison between number and boolean is allowed
pass
elif Operator in {'&', '|', '^', "and", "or"} and set((TypeDict[type(Oprand1)], TypeDict[type(Oprand2)])) == set((TypeDict[type(True)], TypeDict[type(0)])):
# bitwise and logical operation between number and boolean is allowed
pass
else:
raise BadExpression(ERR_EXPR_TYPE)
if isinstance(Oprand1, type('')) and isinstance(Oprand2, type('')):
if ((Oprand1.startswith('L"') or Oprand1.startswith("L'")) and (not Oprand2.startswith('L"')) and (not Oprand2.startswith("L'"))) or \
(((not Oprand1.startswith('L"')) and (not Oprand1.startswith("L'"))) and (Oprand2.startswith('L"') or Oprand2.startswith("L'"))):
raise BadExpression(ERR_STRING_CMP % (Oprand1, Operator, Oprand2))
if 'in' in Operator and isinstance(Oprand2, type('')):
Oprand2 = Oprand2.split()
EvalStr = 'Oprand1 ' + Operator + ' Oprand2'
# Local symbols used by built in eval function
Dict = {
'Oprand1' : Oprand1,
'Oprand2' : Oprand2
}
try:
Val = eval(EvalStr, {}, Dict)
except Exception as Excpt:
raise BadExpression(str(Excpt))
if Operator in {'and', 'or'}:
if Val:
Val = True
else:
Val = False
if WrnExp:
WrnExp.result = Val
raise WrnExp
return Val
def __init__(self, Expression, SymbolTable={}):
super(ValueExpression, self).__init__(self, Expression, SymbolTable)
self._NoProcess = False
if not isinstance(Expression, type('')):
self._Expr = Expression
self._NoProcess = True
return
self._Expr = ReplaceExprMacro(Expression.strip(),
SymbolTable,
SupportedInMacroList)
if not self._Expr.strip():
raise BadExpression(ERR_EMPTY_EXPR)
#
# The symbol table including PCD and macro mapping
#
self._Symb = CopyDict(SymbolTable)
self._Symb.update(self.LogicalOperators)
self._Idx = 0
self._Len = len(self._Expr)
self._Token = ''
self._WarnExcept = None
# Literal token without any conversion
self._LiteralToken = ''
# Public entry for this class
# @param RealValue: False: only evaluate if the expression is true or false, used for conditional expression
# True : return the evaluated str(value), used for PCD value
#
# @return: True or False if RealValue is False
# Evaluated value of string format if RealValue is True
#
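# For example (illustrative, mirroring the interactive demo at the bottom of
# this file): ValueExpression('1 + 2')(True) returns the string '3', while
# ValueExpression('1 + 2')(False) returns True.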
def __call__(self, RealValue=False, Depth=0):
if self._NoProcess:
return self._Expr
self._Depth = Depth
self._Expr = self._Expr.strip()
if RealValue and Depth == 0:
self._Token = self._Expr
if self.__IsNumberToken():
return self._Expr
Token = ''
try:
Token = self._GetToken()
except BadExpression:
pass
if isinstance(Token, type('')) and Token.startswith('{') and Token.endswith('}') and self._Idx >= self._Len:
return self._Expr
self._Idx = 0
self._Token = ''
Val = self._ConExpr()
RealVal = Val
if isinstance(Val, type('')):
if Val == 'L""':
Val = False
elif not Val:
Val = False
RealVal = '""'
elif not Val.startswith('L"') and not Val.startswith('{') and not Val.startswith("L'") and not Val.startswith("'"):
Val = True
RealVal = '"' + RealVal + '"'
# The expression has been parsed, but the end of expression is not reached
# It means the rest does not comply EBNF of <Expression>
if self._Idx != self._Len:
raise BadExpression(ERR_SNYTAX % self._Expr[self._Idx:])
if RealValue:
RetVal = str(RealVal)
elif Val:
RetVal = True
else:
RetVal = False
if self._WarnExcept:
self._WarnExcept.result = RetVal
raise self._WarnExcept
else:
return RetVal
# Template function to parse binary operators which have same precedence
# Expr [Operator Expr]*
def _ExprFuncTemplate(self, EvalFunc, OpSet):
Val = EvalFunc()
while self._IsOperator(OpSet):
Op = self._Token
if Op == '?':
Val2 = EvalFunc()
if self._IsOperator({':'}):
Val3 = EvalFunc()
if Val:
Val = Val2
else:
Val = Val3
continue
try:
Val = self.Eval(Op, Val, EvalFunc())
except WrnExpression as Warn:
self._WarnExcept = Warn
Val = Warn.result
return Val
# A [? B]*
def _ConExpr(self):
return self._ExprFuncTemplate(self._OrExpr, {'?', ':'})
# A [|| B]*
def _OrExpr(self):
return self._ExprFuncTemplate(self._AndExpr, {"OR", "or", "||"})
# A [&& B]*
def _AndExpr(self):
return self._ExprFuncTemplate(self._BitOr, {"AND", "and", "&&"})
# A [ | B]*
def _BitOr(self):
return self._ExprFuncTemplate(self._BitXor, {"|"})
# A [ ^ B]*
def _BitXor(self):
return self._ExprFuncTemplate(self._BitAnd, {"XOR", "xor", "^"})
# A [ & B]*
def _BitAnd(self):
return self._ExprFuncTemplate(self._EqExpr, {"&"})
# A [ == B]*
def _EqExpr(self):
Val = self._RelExpr()
while self._IsOperator({"==", "!=", "EQ", "NE", "IN", "in", "!", "NOT", "not"}):
Op = self._Token
if Op in {"!", "NOT", "not"}:
if not self._IsOperator({"IN", "in"}):
raise BadExpression(ERR_REL_NOT_IN)
Op += ' ' + self._Token
try:
Val = self.Eval(Op, Val, self._RelExpr())
except WrnExpression as Warn:
self._WarnExcept = Warn
Val = Warn.result
return Val
# A [ > B]*
def _RelExpr(self):
return self._ExprFuncTemplate(self._ShiftExpr, {"<=", ">=", "<", ">", "LE", "GE", "LT", "GT"})
def _ShiftExpr(self):
return self._ExprFuncTemplate(self._AddExpr, {"<<", ">>"})
# A [ + B]*
def _AddExpr(self):
return self._ExprFuncTemplate(self._MulExpr, {"+", "-"})
# A [ * B]*
def _MulExpr(self):
return self._ExprFuncTemplate(self._UnaryExpr, {TAB_STAR, "/", "%"})
# [!]*A
def _UnaryExpr(self):
if self._IsOperator({"!", "NOT", "not"}):
Val = self._UnaryExpr()
try:
return self.Eval('not', Val)
except WrnExpression as Warn:
self._WarnExcept = Warn
return Warn.result
if self._IsOperator({"~"}):
Val = self._UnaryExpr()
try:
return self.Eval('~', Val)
except WrnExpression as Warn:
self._WarnExcept = Warn
return Warn.result
return self._IdenExpr()
# Parse identifier or encapsulated expression
def _IdenExpr(self):
Tk = self._GetToken()
if Tk == '(':
Val = self._ConExpr()
try:
# _GetToken may also raise BadExpression
if self._GetToken() != ')':
raise BadExpression(ERR_MATCH)
except BadExpression:
raise BadExpression(ERR_MATCH)
return Val
return Tk
# Skip whitespace or tab
def __SkipWS(self):
for Char in self._Expr[self._Idx:]:
if Char not in ' \t':
break
self._Idx += 1
# Try to convert string to number
def __IsNumberToken(self):
Radix = 10
if self._Token.lower()[0:2] == '0x' and len(self._Token) > 2:
Radix = 16
if self._Token.startswith('"') or self._Token.startswith('L"'):
Flag = 0
for Index in range(len(self._Token)):
if self._Token[Index] in {'"'}:
if self._Token[Index - 1] == '\\':
continue
Flag += 1
if Flag == 2 and self._Token.endswith('"'):
return True
if self._Token.startswith("'") or self._Token.startswith("L'"):
Flag = 0
for Index in range(len(self._Token)):
if self._Token[Index] in {"'"}:
if self._Token[Index - 1] == '\\':
continue
Flag += 1
if Flag == 2 and self._Token.endswith("'"):
return True
try:
self._Token = int(self._Token, Radix)
return True
except ValueError:
return False
except TypeError:
return False
# Parse array: {...}
def __GetArray(self):
Token = '{'
self._Idx += 1
self.__GetNList(True)
Token += self._LiteralToken
if self._Idx >= self._Len or self._Expr[self._Idx] != '}':
raise BadExpression(ERR_ARRAY_TOKEN % Token)
Token += '}'
# All whitespace and tabs in array are already stripped.
IsArray = IsGuid = False
if len(Token.split(',')) == 11 and len(Token.split(',{')) == 2 \
and len(Token.split('},')) == 1:
HexLen = [11, 6, 6, 5, 4, 4, 4, 4, 4, 4, 6]
HexList= Token.split(',')
if HexList[3].startswith('{') and \
not [Index for Index, Hex in enumerate(HexList) if len(Hex) > HexLen[Index]]:
IsGuid = True
if Token.lstrip('{').rstrip('}').find('{') == -1:
if not [Hex for Hex in Token.lstrip('{').rstrip('}').split(',') if len(Hex) > 4]:
IsArray = True
if not IsArray and not IsGuid:
raise BadExpression(ERR_ARRAY_TOKEN % Token)
self._Idx += 1
self._Token = self._LiteralToken = Token
return self._Token
# Parse string, the format must be: "..."
def __GetString(self):
Idx = self._Idx
# Skip left quote
self._Idx += 1
# Replace escape \\\", \"
if self._Expr[Idx] == '"':
Expr = self._Expr[self._Idx:].replace('\\\\', '//').replace('\\\"', '\\\'')
for Ch in Expr:
self._Idx += 1
if Ch == '"':
break
self._Token = self._LiteralToken = self._Expr[Idx:self._Idx]
if not self._Token.endswith('"'):
raise BadExpression(ERR_STRING_TOKEN % self._Token)
#Replace escape \\\', \'
elif self._Expr[Idx] == "'":
Expr = self._Expr[self._Idx:].replace('\\\\', '//').replace("\\\'", "\\\"")
for Ch in Expr:
self._Idx += 1
if Ch == "'":
break
self._Token = self._LiteralToken = self._Expr[Idx:self._Idx]
if not self._Token.endswith("'"):
raise BadExpression(ERR_STRING_TOKEN % self._Token)
self._Token = self._Token[1:-1]
return self._Token
# Get token that is comprised by alphanumeric, underscore or dot(used by PCD)
# @param IsAlphaOp: Indicate if parsing general token or script operator(EQ, NE...)
def __GetIdToken(self, IsAlphaOp = False):
IdToken = ''
for Ch in self._Expr[self._Idx:]:
if not self.__IsIdChar(Ch) or ('?' in self._Expr and Ch == ':'):
break
self._Idx += 1
IdToken += Ch
self._Token = self._LiteralToken = IdToken
if not IsAlphaOp:
self.__ResolveToken()
return self._Token
# Try to resolve token
def __ResolveToken(self):
if not self._Token:
raise BadExpression(ERR_EMPTY_TOKEN)
# PCD token
if PcdPattern.match(self._Token):
if self._Token not in self._Symb:
Ex = BadExpression(ERR_PCD_RESOLVE % self._Token)
Ex.Pcd = self._Token
raise Ex
self._Token = ValueExpression(self._Symb[self._Token], self._Symb)(True, self._Depth+1)
if not isinstance(self._Token, type('')):
self._LiteralToken = hex(self._Token)
return
if self._Token.startswith('"'):
self._Token = self._Token[1:-1]
elif self._Token in {"FALSE", "false", "False"}:
self._Token = False
elif self._Token in {"TRUE", "true", "True"}:
self._Token = True
else:
self.__IsNumberToken()
def __GetNList(self, InArray=False):
self._GetSingleToken()
if not self.__IsHexLiteral():
if InArray:
raise BadExpression(ERR_ARRAY_ELE % self._Token)
return self._Token
self.__SkipWS()
Expr = self._Expr[self._Idx:]
if not Expr.startswith(','):
return self._Token
NList = self._LiteralToken
while Expr.startswith(','):
NList += ','
self._Idx += 1
self.__SkipWS()
self._GetSingleToken()
if not self.__IsHexLiteral():
raise BadExpression(ERR_ARRAY_ELE % self._Token)
NList += self._LiteralToken
self.__SkipWS()
Expr = self._Expr[self._Idx:]
self._Token = self._LiteralToken = NList
return self._Token
def __IsHexLiteral(self):
if self._LiteralToken.startswith('{') and \
self._LiteralToken.endswith('}'):
return True
if gHexPattern.match(self._LiteralToken):
Token = self._LiteralToken[2:]
if not Token:
self._LiteralToken = '0x0'
else:
self._LiteralToken = '0x' + Token
return True
return False
def _GetToken(self):
return self.__GetNList()
@staticmethod
def __IsIdChar(Ch):
return Ch in '._:' or Ch.isalnum()
# Parse operand
def _GetSingleToken(self):
self.__SkipWS()
Expr = self._Expr[self._Idx:]
if Expr.startswith('L"'):
# Skip L
self._Idx += 1
UStr = self.__GetString()
self._Token = 'L"' + UStr + '"'
return self._Token
elif Expr.startswith("L'"):
# Skip L
self._Idx += 1
UStr = self.__GetString()
self._Token = "L'" + UStr + "'"
return self._Token
elif Expr.startswith("'"):
UStr = self.__GetString()
self._Token = "'" + UStr + "'"
return self._Token
elif Expr.startswith('UINT'):
Re = re.compile('(?:UINT8|UINT16|UINT32|UINT64)\((.+)\)')
try:
RetValue = Re.search(Expr).group(1)
except:
raise BadExpression('Invalid Expression %s' % Expr)
Idx = self._Idx
for Ch in Expr:
self._Idx += 1
if Ch == '(':
Prefix = self._Expr[Idx:self._Idx - 1]
Idx = self._Idx
if Ch == ')':
TmpValue = self._Expr[Idx :self._Idx - 1]
TmpValue = ValueExpression(TmpValue)(True)
TmpValue = '0x%x' % int(TmpValue) if not isinstance(TmpValue, type('')) else TmpValue
break
self._Token, Size = ParseFieldValue(Prefix + '(' + TmpValue + ')')
return self._Token
self._Token = ''
if Expr:
Ch = Expr[0]
Match = gGuidPattern.match(Expr)
if Match and not Expr[Match.end():Match.end()+1].isalnum() \
and Expr[Match.end():Match.end()+1] != '_':
self._Idx += Match.end()
self._Token = ValueExpression(GuidStringToGuidStructureString(Expr[0:Match.end()]))(True, self._Depth+1)
return self._Token
elif self.__IsIdChar(Ch):
return self.__GetIdToken()
elif Ch == '"':
return self.__GetString()
elif Ch == '{':
return self.__GetArray()
elif Ch == '(' or Ch == ')':
self._Idx += 1
self._Token = Ch
return self._Token
raise BadExpression(ERR_VALID_TOKEN % Expr)
# Parse operator
def _GetOperator(self):
self.__SkipWS()
LegalOpLst = ['&&', '||', '!=', '==', '>=', '<='] + self.NonLetterOpLst + ['?', ':']
self._Token = ''
Expr = self._Expr[self._Idx:]
# Reach end of expression
if not Expr:
return ''
# Script operator: LT, GT, LE, GE, EQ, NE, and, or, xor, not
if Expr[0].isalpha():
return self.__GetIdToken(True)
# Start to get regular operator: +, -, <, > ...
if Expr[0] not in self.NonLetterOpLst:
return ''
OpToken = ''
for Ch in Expr:
if Ch in self.NonLetterOpLst:
if Ch in ['!', '~'] and OpToken:
break
self._Idx += 1
OpToken += Ch
else:
break
if OpToken not in LegalOpLst:
raise BadExpression(ERR_OPERATOR_UNSUPPORT % OpToken)
self._Token = OpToken
return OpToken
class ValueExpressionEx(ValueExpression):
def __init__(self, PcdValue, PcdType, SymbolTable={}):
ValueExpression.__init__(self, PcdValue, SymbolTable)
self.PcdValue = PcdValue
self.PcdType = PcdType
def __call__(self, RealValue=False, Depth=0):
PcdValue = self.PcdValue
if "{CODE(" not in PcdValue:
try:
PcdValue = ValueExpression.__call__(self, RealValue, Depth)
if self.PcdType == TAB_VOID and (PcdValue.startswith("'") or PcdValue.startswith("L'")):
PcdValue, Size = ParseFieldValue(PcdValue)
PcdValueList = []
for I in range(Size):
PcdValueList.append('0x%02X'%(PcdValue & 0xff))
PcdValue = PcdValue >> 8
PcdValue = '{' + ','.join(PcdValueList) + '}'
elif self.PcdType in TAB_PCD_NUMERIC_TYPES and (PcdValue.startswith("'") or \
PcdValue.startswith('"') or PcdValue.startswith("L'") or PcdValue.startswith('L"') or PcdValue.startswith('{')):
raise BadExpression
except WrnExpression as Value:
PcdValue = Value.result
except BadExpression as Value:
if self.PcdType in TAB_PCD_NUMERIC_TYPES:
PcdValue = PcdValue.strip()
if PcdValue.startswith('{') and PcdValue.endswith('}'):
PcdValue = SplitPcdValueString(PcdValue[1:-1])
if isinstance(PcdValue, type([])):
TmpValue = 0
Size = 0
ValueType = ''
for Item in PcdValue:
Item = Item.strip()
if Item.startswith(TAB_UINT8):
ItemSize = 1
ValueType = TAB_UINT8
elif Item.startswith(TAB_UINT16):
ItemSize = 2
ValueType = TAB_UINT16
elif Item.startswith(TAB_UINT32):
ItemSize = 4
ValueType = TAB_UINT32
elif Item.startswith(TAB_UINT64):
ItemSize = 8
ValueType = TAB_UINT64
elif Item[0] in {'"', "'", 'L'}:
ItemSize = 0
ValueType = TAB_VOID
else:
ItemSize = 0
ValueType = TAB_UINT8
Item = ValueExpressionEx(Item, ValueType, self._Symb)(True)
if ItemSize == 0:
try:
tmpValue = int(Item, 0)
if tmpValue > 255:
raise BadExpression("Byte array number %s should less than 0xFF." % Item)
except BadExpression as Value:
raise BadExpression(Value)
except ValueError:
pass
ItemValue, ItemSize = ParseFieldValue(Item)
else:
ItemValue = ParseFieldValue(Item)[0]
if isinstance(ItemValue, type('')):
ItemValue = int(ItemValue, 0)
TmpValue = (ItemValue << (Size * 8)) | TmpValue
Size = Size + ItemSize
else:
try:
TmpValue, Size = ParseFieldValue(PcdValue)
except BadExpression as Value:
raise BadExpression("Type: %s, Value: %s, %s" % (self.PcdType, PcdValue, Value))
if isinstance(TmpValue, type('')):
try:
TmpValue = int(TmpValue)
except:
raise BadExpression(Value)
else:
PcdValue = '0x%0{}X'.format(Size) % (TmpValue)
if TmpValue < 0:
raise BadExpression('Type %s PCD Value is negative' % self.PcdType)
if self.PcdType == TAB_UINT8 and Size > 1:
raise BadExpression('Type %s PCD Value Size is Larger than 1 byte' % self.PcdType)
if self.PcdType == TAB_UINT16 and Size > 2:
raise BadExpression('Type %s PCD Value Size is Larger than 2 byte' % self.PcdType)
if self.PcdType == TAB_UINT32 and Size > 4:
raise BadExpression('Type %s PCD Value Size is Larger than 4 byte' % self.PcdType)
if self.PcdType == TAB_UINT64 and Size > 8:
raise BadExpression('Type %s PCD Value Size is Larger than 8 byte' % self.PcdType)
else:
try:
TmpValue = int(PcdValue)
TmpList = []
if TmpValue.bit_length() == 0:
PcdValue = '{0x00}'
else:
for I in range((TmpValue.bit_length() + 7) // 8):  # integer division keeps range() valid on Python 3
TmpList.append('0x%02x' % ((TmpValue >> I * 8) & 0xff))
PcdValue = '{' + ', '.join(TmpList) + '}'
except:
if PcdValue.strip().startswith('{'):
PcdValueList = SplitPcdValueString(PcdValue.strip()[1:-1])
LabelDict = {}
NewPcdValueList = []
LabelOffset = 0
for Item in PcdValueList:
# compute byte offset of every LABEL
LabelList = _ReLabel.findall(Item)
Item = _ReLabel.sub('', Item)
Item = Item.strip()
if LabelList:
for Label in LabelList:
if not IsValidCName(Label):
raise BadExpression('%s is not a valid c variable name' % Label)
if Label not in LabelDict:
LabelDict[Label] = str(LabelOffset)
if Item.startswith(TAB_UINT8):
LabelOffset = LabelOffset + 1
elif Item.startswith(TAB_UINT16):
LabelOffset = LabelOffset + 2
elif Item.startswith(TAB_UINT32):
LabelOffset = LabelOffset + 4
elif Item.startswith(TAB_UINT64):
LabelOffset = LabelOffset + 8
else:
try:
ItemValue, ItemSize = ParseFieldValue(Item)
LabelOffset = LabelOffset + ItemSize
except:
LabelOffset = LabelOffset + 1
for Item in PcdValueList:
# for LABEL parse
Item = Item.strip()
try:
Item = _ReLabel.sub('', Item)
except:
pass
try:
OffsetList = _ReOffset.findall(Item)
except:
pass
# replace each offset, except errors
for Offset in OffsetList:
try:
Item = Item.replace('OFFSET_OF({})'.format(Offset), LabelDict[Offset])
except:
raise BadExpression('%s not defined' % Offset)
NewPcdValueList.append(Item)
AllPcdValueList = []
for Item in NewPcdValueList:
Size = 0
ValueStr = ''
TokenSpaceGuidName = ''
if Item.startswith(TAB_GUID) and Item.endswith(')'):
try:
TokenSpaceGuidName = re.search('GUID\((\w+)\)', Item).group(1)
except:
pass
if TokenSpaceGuidName and TokenSpaceGuidName in self._Symb:
Item = 'GUID(' + self._Symb[TokenSpaceGuidName] + ')'
elif TokenSpaceGuidName:
raise BadExpression('%s not found in DEC file' % TokenSpaceGuidName)
Item, Size = ParseFieldValue(Item)
for Index in range(0, Size):
ValueStr = '0x%02X' % (int(Item) & 255)
Item >>= 8
AllPcdValueList.append(ValueStr)
continue
elif Item.startswith('DEVICE_PATH') and Item.endswith(')'):
Item, Size = ParseFieldValue(Item)
AllPcdValueList.append(Item[1:-1])
continue
else:
ValueType = ""
if Item.startswith(TAB_UINT8):
ItemSize = 1
ValueType = TAB_UINT8
elif Item.startswith(TAB_UINT16):
ItemSize = 2
ValueType = TAB_UINT16
elif Item.startswith(TAB_UINT32):
ItemSize = 4
ValueType = TAB_UINT32
elif Item.startswith(TAB_UINT64):
ItemSize = 8
ValueType = TAB_UINT64
else:
ItemSize = 0
if ValueType:
TmpValue = ValueExpressionEx(Item, ValueType, self._Symb)(True)
else:
TmpValue = ValueExpressionEx(Item, self.PcdType, self._Symb)(True)
Item = '0x%x' % TmpValue if not isinstance(TmpValue, type('')) else TmpValue
if ItemSize == 0:
ItemValue, ItemSize = ParseFieldValue(Item)
if Item[0] not in {'"', 'L', '{'} and ItemSize > 1:
raise BadExpression("Byte array number %s should less than 0xFF." % Item)
else:
ItemValue = ParseFieldValue(Item)[0]
for I in range(0, ItemSize):
ValueStr = '0x%02X' % (int(ItemValue) & 255)
ItemValue >>= 8
AllPcdValueList.append(ValueStr)
Size += ItemSize
if Size > 0:
PcdValue = '{' + ','.join(AllPcdValueList) + '}'
else:
raise BadExpression("Type: %s, Value: %s, %s"%(self.PcdType, PcdValue, Value))
if PcdValue == 'True':
PcdValue = '1'
if PcdValue == 'False':
PcdValue = '0'
if RealValue:
return PcdValue
if __name__ == '__main__':
pass
# raw_input only exists on Python 2; alias it to the built-in input() on Python 3
if sys.version_info[0] >= 3:
    raw_input = input
while True:
input = raw_input('Input expr: ')
if input in 'qQ':
break
try:
print(ValueExpression(input)(True))
print(ValueExpression(input)(False))
except WrnExpression as Ex:
print(Ex.result)
print(str(Ex))
except Exception as Ex:
print(str(Ex))
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v10.services.types import recommendation_service
from .base import RecommendationServiceTransport, DEFAULT_CLIENT_INFO
class RecommendationServiceGrpcTransport(RecommendationServiceTransport):
"""gRPC backend transport for RecommendationService.
Service to manage recommendations.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn(
"client_cert_source is deprecated", DeprecationWarning
)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = (
SslCredentials().ssl_credentials
)
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def apply_recommendation(
self,
) -> Callable[
[recommendation_service.ApplyRecommendationRequest],
recommendation_service.ApplyRecommendationResponse,
]:
r"""Return a callable for the apply recommendation method over gRPC.
Applies given recommendations with corresponding apply
parameters.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `DatabaseError <>`__ `FieldError <>`__
`HeaderError <>`__ `InternalError <>`__ `MutateError <>`__
`QuotaError <>`__ `RecommendationError <>`__ `RequestError <>`__
`UrlFieldError <>`__
Returns:
Callable[[~.ApplyRecommendationRequest],
~.ApplyRecommendationResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "apply_recommendation" not in self._stubs:
self._stubs["apply_recommendation"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v10.services.RecommendationService/ApplyRecommendation",
request_serializer=recommendation_service.ApplyRecommendationRequest.serialize,
response_deserializer=recommendation_service.ApplyRecommendationResponse.deserialize,
)
return self._stubs["apply_recommendation"]
@property
def dismiss_recommendation(
self,
) -> Callable[
[recommendation_service.DismissRecommendationRequest],
recommendation_service.DismissRecommendationResponse,
]:
r"""Return a callable for the dismiss recommendation method over gRPC.
Dismisses given recommendations.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__
`RecommendationError <>`__ `RequestError <>`__
Returns:
Callable[[~.DismissRecommendationRequest],
~.DismissRecommendationResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "dismiss_recommendation" not in self._stubs:
self._stubs[
"dismiss_recommendation"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v10.services.RecommendationService/DismissRecommendation",
request_serializer=recommendation_service.DismissRecommendationRequest.serialize,
response_deserializer=recommendation_service.DismissRecommendationResponse.deserialize,
)
return self._stubs["dismiss_recommendation"]
def close(self):
self.grpc_channel.close()
__all__ = ("RecommendationServiceGrpcTransport",)
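# Illustrative sketch (not part of the generated module): invoking a stub on the
# transport directly. In normal use the higher-level RecommendationServiceClient
# constructs and owns the transport; the argument names below are placeholders.
def _example_apply(transport, customer_id, operations):
    request = recommendation_service.ApplyRecommendationRequest(
        customer_id=customer_id,
        operations=operations,
    )
    # ``apply_recommendation`` returns the cached unary-unary stub callable.
    return transport.apply_recommendation(request)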
|
|
import os.path
import time
import types
from robot.errors import DataError
from robot.utils import secs_to_timestr, timestr_to_secs
from selenium import webdriver
from selenium.common.exceptions import NoSuchWindowException
from Selenium2Library.base import LibraryComponent, keyword
from Selenium2Library.locators.windowmanager import WindowManager
from Selenium2Library.utils import is_truthy, is_falsy
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
FIREFOX_PROFILE_DIR = os.path.join(ROOT_DIR, 'resources', 'firefoxprofile')
BROWSER_NAMES = {
'ff': "_make_ff",
'firefox': "_make_ff",
'ie': "_make_ie",
'internetexplorer': "_make_ie",
'googlechrome': "_make_chrome",
'gc': "_make_chrome",
'chrome': "_make_chrome",
'opera': "_make_opera",
'phantomjs': "_make_phantomjs",
'htmlunit': "_make_htmlunit",
'htmlunitwithjs': "_make_htmlunitwithjs",
'android': "_make_android",
'iphone': "_make_iphone",
'safari': "_make_safari",
'edge': "_make_edge"
}
class BrowserManagementKeywords(LibraryComponent):
def __init__(self, ctx):
LibraryComponent.__init__(self, ctx)
self._window_manager = WindowManager()
@keyword
def close_all_browsers(self):
"""Closes all open browsers and resets the browser cache.
After this keyword new indexes returned from `Open Browser` keyword
are reset to 1.
This keyword should be used in test or suite teardown to make sure
all browsers are closed.
"""
self.debug('Closing all browsers')
self.browsers.close_all()
@keyword
def close_browser(self):
"""Closes the current browser."""
if self.browsers.current:
self.debug('Closing browser with session '
'id {}'.format(self.browsers.current.session_id))
self.browsers.close()
@keyword
def open_browser(
self, url, browser='firefox', alias=None, remote_url=False,
desired_capabilities=None, ff_profile_dir=None):
"""Opens a new browser instance to given URL.
Returns the index of this browser instance which can be used later to
switch back to it. Index starts from 1 and is reset back to it when
`Close All Browsers` keyword is used. See `Switch Browser` for
example.
Optional alias is an alias for the browser instance and it can be used
for switching between browsers (just as index can be used). See `Switch
Browser` for more details.
Possible values for `browser` are as follows:
| firefox | FireFox |
| ff | FireFox |
| internetexplorer | Internet Explorer |
| ie | Internet Explorer |
| googlechrome | Google Chrome |
| gc | Google Chrome |
| chrome | Google Chrome |
| opera | Opera |
| phantomjs | PhantomJS |
| htmlunit | HTMLUnit |
| htmlunitwithjs | HTMLUnit with JavaScript support |
| android | Android |
| iphone | iPhone |
| safari | Safari |
| edge | Edge |
Note that you will encounter strange behavior if you open
multiple Internet Explorer browser instances. That is also why
`Switch Browser` only works with at most one IE browser.
For more information see:
http://selenium-grid.seleniumhq.org/faq.html#i_get_some_strange_errors_when_i_run_multiple_internet_explorer_instances_on_the_same_machine
Optional 'remote_url' is the URL of a remote Selenium server, for example
http://127.0.0.1:4444/wd/hub. If you specify a value for 'remote_url' you
can also specify 'desired_capabilities', a string in the form
key1:val1,key2:val2 that is passed as desired capabilities to the remote
server. This is useful for things like specifying a proxy server for
Internet Explorer, or specifying the browser and OS when using
saucelabs.com. 'desired_capabilities' can also be a dictionary
(created with 'Create Dictionary') to allow for more complex configurations.
Optional 'ff_profile_dir' is the path to the firefox profile dir if you
wish to overwrite the default.
"""
if is_truthy(remote_url):
self.info("Opening browser '%s' to base url '%s' through "
"remote server at '%s'" % (browser, url, remote_url))
else:
self.info("Opening browser '%s' to base url '%s'" % (browser, url))
browser_name = browser
browser = self._make_browser(browser_name, desired_capabilities,
ff_profile_dir, remote_url)
try:
browser.get(url)
except:
self.ctx.register_browser(browser, alias)
self.debug("Opened browser with session id %s but failed "
"to open url '%s'" % (browser.session_id, url))
raise
self.debug('Opened browser with session id %s' % browser.session_id)
return self.ctx.register_browser(browser, alias)
@keyword
def create_webdriver(self, driver_name, alias=None, kwargs={},
**init_kwargs):
"""Creates an instance of a WebDriver.
Like `Open Browser`, but allows passing arguments to a WebDriver's
__init__. _Open Browser_ is preferred over _Create Webdriver_ when
feasible.
Returns the index of this browser instance which can be used later to
switch back to it. Index starts from 1 and is reset back to it when
`Close All Browsers` keyword is used. See `Switch Browser` for
example.
`driver_name` must be the exact name of a WebDriver in
_selenium.webdriver_ to use. WebDriver names include: Firefox, Chrome,
Ie, Opera, Safari, PhantomJS, and Remote.
Use keyword arguments to specify the arguments you want to pass to
the WebDriver's __init__. The values of the arguments are not
processed in any way before being passed on. For Robot Framework
< 2.8, which does not support keyword arguments, create a keyword
dictionary and pass it in as argument `kwargs`. See the
[http://selenium.googlecode.com/git/docs/api/py/api.html|Selenium API Documentation]
for information about argument names and appropriate argument values.
Examples:
| # use proxy for Firefox | | | |
| ${proxy}= | Evaluate | sys.modules['selenium.webdriver'].Proxy() | sys, selenium.webdriver |
| ${proxy.http_proxy}= | Set Variable | localhost:8888 | |
| Create Webdriver | Firefox | proxy=${proxy} | |
| # use a proxy for PhantomJS | | | |
| ${service args}= | Create List | --proxy=192.168.132.104:8888 | |
| Create Webdriver | PhantomJS | service_args=${service args} | |
Example for Robot Framework < 2.8:
| # debug IE driver | | | |
| ${kwargs}= | Create Dictionary | log_level=DEBUG | log_file=%{HOMEPATH}${/}ie.log |
| Create Webdriver | Ie | kwargs=${kwargs} | |
"""
if not isinstance(kwargs, dict):
raise RuntimeError("kwargs must be a dictionary.")
for arg_name in kwargs:
if arg_name in init_kwargs:
raise RuntimeError("Got multiple values for argument '%s'." % arg_name)
init_kwargs[arg_name] = kwargs[arg_name]
driver_name = driver_name.strip()
try:
creation_func = getattr(webdriver, driver_name)
except AttributeError:
raise RuntimeError("'%s' is not a valid WebDriver name" % driver_name)
self.info("Creating an instance of the %s WebDriver" % driver_name)
driver = creation_func(**init_kwargs)
self.debug("Created %s WebDriver instance with session id %s" % (driver_name, driver.session_id))
return self.ctx.register_browser(driver, alias)
@keyword
def switch_browser(self, index_or_alias):
"""Switches between active browsers using index or alias.
Index is returned from `Open Browser` and alias can be given to it.
Example:
| Open Browser | http://google.com | ff |
| Location Should Be | http://google.com | |
| Open Browser | http://yahoo.com | ie | 2nd conn |
| Location Should Be | http://yahoo.com | |
| Switch Browser | 1 | # index |
| Page Should Contain | I'm feeling lucky | |
| Switch Browser | 2nd conn | # alias |
| Page Should Contain | More Yahoo! | |
| Close All Browsers | | |
The above example expects that there were no other open browsers when
the first one was opened, because it uses index '1' when switching to it
later. If you are not sure about that, you can store the index into
a variable as shown below.
| ${id} = | Open Browser | http://google.com | *firefox |
| # Do something ... |
| Switch Browser | ${id} | | |
"""
try:
self.browsers.switch(index_or_alias)
self.debug('Switched to browser with Selenium session id %s'
% self.browser.session_id)
except (RuntimeError, DataError):  # RF 2.6 raises RuntimeError, earlier versions DataError
raise RuntimeError("No browser with index or alias '%s' found."
% index_or_alias)
@keyword
def close_window(self):
"""Closes currently opened pop-up window."""
self.browser.close()
@keyword
def get_window_identifiers(self):
"""Returns and logs id attributes of all windows known to the browser."""
return self._log_list(self._window_manager.get_window_ids(self.browser))
@keyword
def get_window_names(self):
"""Returns and logs names of all windows known to the browser."""
values = self._window_manager.get_window_names(self.browser)
# for backward compatibility, since Selenium 1 would always
# return this constant value for the main window
if len(values) and values[0] == 'undefined':
values[0] = 'selenium_main_app_window'
return self._log_list(values)
@keyword
def get_window_titles(self):
"""Returns and logs titles of all windows known to the browser."""
return self._log_list(self._window_manager.get_window_titles(self.browser))
@keyword
def maximize_browser_window(self):
"""Maximizes current browser window."""
self.browser.maximize_window()
@keyword
def get_window_size(self):
"""Returns current window size as `width` then `height`.
Example:
| ${width} | ${height}= | Get Window Size |
"""
size = self.browser.get_window_size()
return size['width'], size['height']
@keyword
def set_window_size(self, width, height):
"""Sets the `width` and `height` of the current window to the specified values.
Example:
| Set Window Size | ${800} | ${600} |
| ${width} | ${height}= | Get Window Size |
| Should Be Equal | ${width} | ${800} |
| Should Be Equal | ${height} | ${600} |
"""
return self.browser.set_window_size(width, height)
@keyword
def get_window_position(self):
"""Returns current window position as `x` then `y` (relative to the left and top of the screen).
Example:
| ${x} | ${y}= | Get Window Position |
"""
position = self.browser.get_window_position()
return position['x'], position['y']
@keyword
def set_window_position(self, x, y):
"""Sets the position x and y of the current window (relative to the left and top of the screen) to the specified values.
Example:
| Set Window Position | ${8} | ${10} |
| ${x} | ${y}= | Get Window Position |
| Should Be Equal | ${x} | ${8} |
| Should Be Equal | ${y} | ${10} |
"""
return self.browser.set_window_position(x, y)
@keyword
def select_frame(self, locator):
"""Sets frame identified by `locator` as current frame.
Key attributes for frames are `id` and `name`. See `introduction` for
details about locating elements.
"""
self.info("Selecting frame '%s'." % locator)
element = self.find_element(locator)
self.browser.switch_to.frame(element)
@keyword
def select_window(self, locator=None):
"""Selects the window matching locator and return previous window handle.
locator: any of name, title, url, window handle, excluded handle's list, or special words.
return: either current window handle before selecting, or None if no current window.
If the window is found, all subsequent commands use that window, until
this keyword is used again. If the window is not found, this keyword fails.
By default, when a locator value is provided,
it is matched against the title of the window and the
javascript name of the window. If multiple windows with
same identifier are found, the first one is selected.
There are some special locators for searching target window:
string 'main' (default): select the main window;
string 'self': only return current window handle;
string 'new': select the last-indexed window assuming it is the newest opened window
window list: select the first window not in given list (See 'List Windows' to get the list)
It is also possible to specify the approach Selenium2Library should take
to find a window by specifying a locator strategy:
| *Strategy* | *Example* | *Description* |
| title | Select Window `|` title=My Document | Matches by window title |
| name | Select Window `|` name=${name} | Matches by window javascript name |
| url | Select Window `|` url=http://google.com | Matches by window's current URL |
Example:
| Click Link | popup_link | # opens new window |
| Select Window | popupName |
| Title Should Be | Popup Title |
| Select Window | | | # Chooses the main window again |
"""
try:
return self.browser.current_window_handle
except NoSuchWindowException:
pass
finally:
self._window_manager.select(self.browser, locator)
@keyword
def get_log(self, log_type):
"""Get the log for a given selenium log type
The `log_type` argument defines which logs to get. Possible values are:
`browser`, `driver`, `client` or `server`
New in SeleniumLibrary 3.0
"""
return self.browser.get_log(log_type)
@keyword
def list_windows(self):
"""Return all current window handles as a list"""
return self.browser.window_handles
@keyword
def unselect_frame(self):
"""Sets the top frame as the current frame."""
self.browser.switch_to.default_content()
@keyword
def get_location(self):
"""Returns the current location."""
return self.browser.current_url
@keyword
def get_locations(self):
"""Returns and logs current locations of all windows known to the browser."""
return self._log_list(
[window_info[4] for window_info in
self._window_manager._get_window_infos(self.browser)])
@keyword
def get_source(self):
"""Returns the entire html source of the current page or frame."""
return self.browser.page_source
@keyword
def get_title(self):
"""Returns title of current page."""
return self.browser.title
@keyword
def location_should_be(self, url):
"""Verifies that current URL is exactly `url`."""
actual = self.get_location()
if actual != url:
raise AssertionError("Location should have been '%s' but was '%s'"
% (url, actual))
self.info("Current location is '%s'." % url)
@keyword
def location_should_contain(self, expected):
"""Verifies that current URL contains `expected`."""
actual = self.get_location()
if expected not in actual:
raise AssertionError("Location should have contained '%s' "
"but it was '%s'." % (expected, actual))
self.info("Current location contains '%s'." % expected)
@keyword
def log_location(self):
"""Logs and returns the current location."""
url = self.get_location()
self.info(url)
return url
@keyword
def log_source(self, loglevel='INFO'):
"""Logs and returns the entire html source of the current page or frame.
The `loglevel` argument defines the used log level. Valid log levels
are WARN, INFO (default), DEBUG, and NONE (no logging).
"""
source = self.get_source()
self.log(source, loglevel.upper())
return source
@keyword
def log_title(self):
"""Logs and returns the title of current page."""
title = self.get_title()
self.info(title)
return title
@keyword
def title_should_be(self, title):
"""Verifies that current page title equals `title`."""
actual = self.get_title()
if actual != title:
raise AssertionError("Title should have been '%s' but was '%s'"
% (title, actual))
self.info("Page title is '%s'." % title)
@keyword
def go_back(self):
"""Simulates the user clicking the "back" button on their browser."""
self.browser.back()
@keyword
def go_to(self, url):
"""Navigates the active browser instance to the provided URL."""
self.info("Opening url '%s'" % url)
self.browser.get(url)
@keyword
def reload_page(self):
"""Simulates user reloading page."""
self.browser.refresh()
@keyword
def get_selenium_speed(self):
"""Gets the delay in seconds that is waited after each Selenium command.
See `Set Selenium Speed` for an explanation."""
return secs_to_timestr(self.ctx._speed_in_secs)
@keyword
def get_selenium_timeout(self):
"""Gets the timeout in seconds that is used by various keywords.
See `Set Selenium Timeout` for an explanation."""
return secs_to_timestr(self.ctx._timeout_in_secs)
@keyword
def get_selenium_implicit_wait(self):
"""Gets the wait in seconds that is waited by Selenium.
See `Set Selenium Implicit Wait` for an explanation."""
return secs_to_timestr(self.ctx._implicit_wait_in_secs)
@keyword
def set_selenium_speed(self, seconds):
"""Sets the delay in seconds that is waited after each Selenium command.
This is useful mainly in slowing down the test execution to be able to
view the execution. `seconds` may be given in Robot Framework time
format. Returns the previous speed value in seconds.
One keyword may execute one or many Selenium commands, and therefore
one keyword may be slowed down by more than the ``seconds`` argument defines.
For example, if the delay is set to 1 second, then `Click Element`, which
executes two Selenium commands, is delayed by 2 seconds in total, while
`Page Should Contain Element`, which executes only one Selenium command,
is delayed by 1 second in total.
Example:
| Set Selenium Speed | .5 seconds |
"""
old_speed = self.ctx._speed_in_secs
self.ctx._speed_in_secs = timestr_to_secs(seconds)
for browser in self.browsers.browsers:
browser._speed = self.ctx._speed_in_secs
self._monkey_patch_speed(browser)
return old_speed
@keyword
def set_selenium_timeout(self, seconds):
"""Sets the timeout in seconds used by various keywords.
There are several `Wait ...` keywords that take timeout as an
argument. All of these timeout arguments are optional. The timeout
used by all of them can be set globally using this keyword.
See `Timeouts` for more information about timeouts.
The previous timeout value is returned by this keyword and can
be used to set the old value back later. The default timeout
is 5 seconds, but it can be altered in `importing`.
Example:
| ${orig timeout} = | Set Selenium Timeout | 15 seconds |
| Open page that loads slowly |
| Set Selenium Timeout | ${orig timeout} |
"""
old_timeout = self.get_selenium_timeout()
self.ctx._timeout_in_secs = timestr_to_secs(seconds)
for browser in self.browsers.get_open_browsers():
browser.set_script_timeout(self.ctx._timeout_in_secs)
return old_timeout
@keyword
def set_selenium_implicit_wait(self, seconds):
"""Sets Selenium 2's default implicit wait in seconds and
sets the implicit wait for all open browsers.
From selenium 2 function 'Sets a sticky timeout to implicitly
wait for an element to be found, or a command to complete.
This method only needs to be called one time per session.'
Example:
| ${orig wait} = | Set Selenium Implicit Wait | 10 seconds |
| Perform AJAX call that is slow |
| Set Selenium Implicit Wait | ${orig wait} |
"""
old_wait = self.get_selenium_implicit_wait()
self.ctx._implicit_wait_in_secs = timestr_to_secs(seconds)
for browser in self.browsers.get_open_browsers():
browser.implicitly_wait(self.ctx._implicit_wait_in_secs)
return old_wait
@keyword
def set_browser_implicit_wait(self, seconds):
"""Sets current browser's implicit wait in seconds.
From selenium 2 function 'Sets a sticky timeout to implicitly
wait for an element to be found, or a command to complete.
This method only needs to be called one time per session.'
Example:
| Set Browser Implicit Wait | 10 seconds |
See also `Set Selenium Implicit Wait`.
"""
implicit_wait_in_secs = timestr_to_secs(seconds)
self.browser.implicitly_wait(implicit_wait_in_secs)
def _get_browser_creation_function(self, browser_name):
func_name = BROWSER_NAMES.get(browser_name.lower().replace(' ', ''))
return getattr(self, func_name) if func_name else None
def _make_browser(self, browser_name, desired_capabilities=None,
profile_dir=None, remote=None):
creation_func = self._get_browser_creation_function(browser_name)
if not creation_func:
raise ValueError(browser_name + " is not a supported browser.")
browser = creation_func(remote, desired_capabilities, profile_dir)
browser.set_script_timeout(self.ctx._timeout_in_secs)
browser.implicitly_wait(self.ctx._implicit_wait_in_secs)
return browser
def _make_ff(self, remote, desired_capabilities, profile_dir):
if is_falsy(profile_dir):
profile_dir = FIREFOX_PROFILE_DIR
profile = webdriver.FirefoxProfile(profile_dir)
if is_truthy(remote):
browser = self._create_remote_web_driver(
webdriver.DesiredCapabilities.FIREFOX, remote,
desired_capabilities, profile)
else:
browser = webdriver.Firefox(firefox_profile=profile)
return browser
def _make_ie(self, remote, desired_capabilities, profile_dir):
return self._generic_make_browser(webdriver.Ie,
webdriver.DesiredCapabilities.INTERNETEXPLORER, remote, desired_capabilities)
def _make_chrome(self, remote, desired_capabilities, profile_dir):
return self._generic_make_browser(webdriver.Chrome,
webdriver.DesiredCapabilities.CHROME, remote, desired_capabilities)
def _make_opera(self, remote, desired_capabilities, profile_dir):
return self._generic_make_browser(webdriver.Opera,
webdriver.DesiredCapabilities.OPERA, remote, desired_capabilities)
def _make_phantomjs(self, remote, desired_capabilities, profile_dir):
return self._generic_make_browser(webdriver.PhantomJS,
webdriver.DesiredCapabilities.PHANTOMJS, remote, desired_capabilities)
def _make_htmlunit(self, remote, desired_capabilities, profile_dir):
return self._generic_make_browser(webdriver.Remote,
webdriver.DesiredCapabilities.HTMLUNIT, remote, desired_capabilities)
def _make_htmlunitwithjs(self, remote, desired_capabilities, profile_dir):
return self._generic_make_browser(webdriver.Remote,
webdriver.DesiredCapabilities.HTMLUNITWITHJS, remote, desired_capabilities)
def _make_android(self, remote, desired_capabilities, profile_dir):
return self._generic_make_browser(webdriver.Remote,
webdriver.DesiredCapabilities.ANDROID, remote, desired_capabilities)
def _make_iphone(self, remote, desired_capabilities, profile_dir):
return self._generic_make_browser(webdriver.Remote,
webdriver.DesiredCapabilities.IPHONE, remote, desired_capabilities)
def _make_safari(self, remote, desired_capabilities, profile_dir):
return self._generic_make_browser(webdriver.Safari,
webdriver.DesiredCapabilities.SAFARI, remote, desired_capabilities)
def _make_edge(self, remote, desired_capabilities, profile_dir):
if hasattr(webdriver, 'Edge'):
return self._generic_make_browser(webdriver.Edge,
webdriver.DesiredCapabilities.EDGE, remote, desired_capabilities)
else:
raise ValueError("Edge is not a supported browser with your version of Selenium python library. Please, upgrade to minimum required version 2.47.0.")
def _generic_make_browser(self, webdriver_type, desired_cap_type, remote_url, desired_caps):
'''Most of the _make_* browser functions just call this function, which
creates the appropriate webdriver.'''
if is_falsy(remote_url):
browser = webdriver_type()
else:
browser = self._create_remote_web_driver(desired_cap_type,
remote_url, desired_caps)
return browser
def _create_remote_web_driver(self, capabilities_type, remote_url, desired_capabilities=None, profile=None):
'''Parses the string-based desired_capabilities if necessary and
creates the associated remote webdriver.'''
desired_capabilities_object = capabilities_type.copy()
if not isinstance(desired_capabilities, dict):
desired_capabilities = self._parse_capabilities_string(desired_capabilities)
desired_capabilities_object.update(desired_capabilities or {})
return webdriver.Remote(desired_capabilities=desired_capabilities_object,
command_executor=str(remote_url), browser_profile=profile)
def _parse_capabilities_string(self, capabilities_string):
'''Parses the string-based desired_capabilities, which should be in the form
key1:val1,key2:val2
'''
desired_capabilities = {}
if is_falsy(capabilities_string):
return desired_capabilities
for cap in capabilities_string.split(","):
(key, value) = cap.split(":", 1)
desired_capabilities[key.strip()] = value.strip()
return desired_capabilities
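# Illustrative note (assumed values, not keys required by any server): given the
# string form documented in `Open Browser`, a call like
#   self._parse_capabilities_string('platform:Windows 10,browserName:chrome')
# returns {'platform': 'Windows 10', 'browserName': 'chrome'}.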
def _get_speed(self, browser):
return browser._speed if hasattr(browser, '_speed') else 0.0
def _monkey_patch_speed(self, browser):
def execute(self, driver_command, params=None):
result = self._base_execute(driver_command, params)
speed = self._speed if hasattr(self, '_speed') else 0.0
if speed > 0:
time.sleep(speed)
return result
if not hasattr(browser, '_base_execute'):
browser._base_execute = browser.execute
browser.execute = types.MethodType(execute, browser)
def _log_list(self, items, what='item'):
msg = [
'Altogether {} {}.'.format(
len(items), what if len(items) == 1 else '{}s'.format(what))
]
for index, item in enumerate(items):
msg.append('{}: {}'.format(index + 1, item))
self.info('\n'.join(msg))
return items
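The speed-control keywords above rely on `_monkey_patch_speed`, which swaps a driver's `execute` method for a wrapper that sleeps after every command. The following standalone sketch demonstrates the same idea against a stand-in object; `FakeDriver` and `add_speed_delay` are illustrative names and are not part of Selenium2Library.

import time
import types

class FakeDriver(object):
    """Stand-in for a WebDriver: execute() just echoes the command."""
    def execute(self, driver_command, params=None):
        return {'command': driver_command, 'params': params}

def add_speed_delay(driver, seconds):
    """Wrap driver.execute so every command is followed by a sleep of `seconds`."""
    def execute(self, driver_command, params=None):
        result = self._base_execute(driver_command, params)
        if self._speed > 0:
            time.sleep(self._speed)
        return result
    if not hasattr(driver, '_base_execute'):
        # Keep a reference to the original bound method, then rebind execute.
        driver._base_execute = driver.execute
        driver.execute = types.MethodType(execute, driver)
    driver._speed = seconds

if __name__ == '__main__':
    driver = FakeDriver()
    add_speed_delay(driver, 0.1)
    # Every call now pays a 0.1 s delay after the underlying command runs.
    print(driver.execute('get', {'url': 'http://example.com'}))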
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2009 Edgewall Software
# Copyright (C) 2003-2005 Daniel Lundin <[email protected]>
# Copyright (C) 2005-2006 Emmanuel Blot <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import re
import smtplib
from subprocess import Popen, PIPE
import time
from genshi.builder import tag
from trac import __version__
from trac.config import BoolOption, ExtensionOption, IntOption, Option
from trac.core import *
from trac.util.text import CRLF, fix_eol
from trac.util.translation import _, deactivate, reactivate
MAXHEADERLEN = 76
EMAIL_LOOKALIKE_PATTERN = (
# the local part
r"[a-zA-Z0-9.'+_-]+" '@'
# the domain name part (RFC:1035)
'(?:[a-zA-Z0-9_-]+\.)+' # labels (but also allow '_')
'[a-zA-Z](?:[-a-zA-Z\d]*[a-zA-Z\d])?' # TLD
)
class IEmailSender(Interface):
"""Extension point interface for components that allow sending e-mail."""
def send(self, from_addr, recipients, message):
"""Send message to recipients."""
class NotificationSystem(Component):
email_sender = ExtensionOption('notification', 'email_sender',
IEmailSender, 'SmtpEmailSender',
"""Name of the component implementing `IEmailSender`.
This component is used by the notification system to send emails.
Trac currently provides `SmtpEmailSender` for connecting to an SMTP
server, and `SendmailEmailSender` for running a `sendmail`-compatible
executable. (''since 0.12'')""")
smtp_enabled = BoolOption('notification', 'smtp_enabled', 'false',
"""Enable email notification.""")
smtp_from = Option('notification', 'smtp_from', 'trac@localhost',
"""Sender address to use in notification emails.""")
smtp_from_name = Option('notification', 'smtp_from_name', '',
"""Sender name to use in notification emails.""")
smtp_from_author = BoolOption('notification', 'smtp_from_author', 'false',
"""Use the action author as the sender of notification emails.
(''since 1.0'')""")
smtp_replyto = Option('notification', 'smtp_replyto', 'trac@localhost',
"""Reply-To address to use in notification emails.""")
smtp_always_cc = Option('notification', 'smtp_always_cc', '',
"""Email address(es) to always send notifications to,
addresses can be seen by all recipients (Cc:).""")
smtp_always_bcc = Option('notification', 'smtp_always_bcc', '',
"""Email address(es) to always send notifications to,
addresses do not appear publicly (Bcc:). (''since 0.10'')""")
smtp_default_domain = Option('notification', 'smtp_default_domain', '',
"""Default host/domain to append to address that do not specify
one.""")
ignore_domains = Option('notification', 'ignore_domains', '',
"""Comma-separated list of domains that should not be considered
part of email addresses (for usernames with Kerberos domains).""")
admit_domains = Option('notification', 'admit_domains', '',
"""Comma-separated list of domains that should be considered as
valid for email addresses (such as localdomain).""")
mime_encoding = Option('notification', 'mime_encoding', 'none',
"""Specifies the MIME encoding scheme for emails.
Valid options are 'base64' for Base64 encoding, 'qp' for
Quoted-Printable, and 'none' for no encoding, in which case mails will
be sent as 7bit if the content is all ASCII, or 8bit otherwise.
(''since 0.10'')""")
use_public_cc = BoolOption('notification', 'use_public_cc', 'false',
"""Recipients can see email addresses of other CC'ed recipients.
If this option is disabled (the default), recipients are put on BCC.
(''since 0.10'')""")
use_short_addr = BoolOption('notification', 'use_short_addr', 'false',
"""Permit email address without a host/domain (i.e. username only).
The SMTP server should accept those addresses, and either append
a FQDN or use local delivery. (''since 0.10'')""")
smtp_subject_prefix = Option('notification', 'smtp_subject_prefix',
'__default__',
"""Text to prepend to subject line of notification emails.
If the setting is not defined, then the [$project_name] prefix is used.
If no prefix is desired, specifying an empty option
will disable it. (''since 0.10.1'')""")
def send_email(self, from_addr, recipients, message):
"""Send message to recipients via e-mail."""
self.email_sender.send(from_addr, recipients, message)
class SmtpEmailSender(Component):
"""E-mail sender connecting to an SMTP server."""
implements(IEmailSender)
smtp_server = Option('notification', 'smtp_server', 'localhost',
"""SMTP server hostname to use for email notifications.""")
smtp_port = IntOption('notification', 'smtp_port', 25,
"""SMTP server port to use for email notification.""")
smtp_user = Option('notification', 'smtp_user', '',
"""Username for SMTP server. (''since 0.9'')""")
smtp_password = Option('notification', 'smtp_password', '',
"""Password for SMTP server. (''since 0.9'')""")
use_tls = BoolOption('notification', 'use_tls', 'false',
"""Use SSL/TLS to send notifications over SMTP. (''since 0.10'')""")
def send(self, from_addr, recipients, message):
# Ensure the message complies with RFC2822: use CRLF line endings
message = fix_eol(message, CRLF)
self.log.info("Sending notification through SMTP at %s:%d to %s"
% (self.smtp_server, self.smtp_port, recipients))
server = smtplib.SMTP(self.smtp_server, self.smtp_port)
# server.set_debuglevel(True)
if self.use_tls:
server.ehlo()
if not server.esmtp_features.has_key('starttls'):
raise TracError(_("TLS enabled but server does not support " \
"TLS"))
server.starttls()
server.ehlo()
if self.smtp_user:
server.login(self.smtp_user.encode('utf-8'),
self.smtp_password.encode('utf-8'))
start = time.time()
server.sendmail(from_addr, recipients, message)
t = time.time() - start
if t > 5:
self.log.warning('Slow mail submission (%.2f s), '
'check your mail setup' % t)
if self.use_tls:
# avoid false failure detection when the server closes
# the SMTP connection with TLS enabled
import socket
try:
server.quit()
except socket.sslerror:
pass
else:
server.quit()
class SendmailEmailSender(Component):
"""E-mail sender using a locally-installed sendmail program."""
implements(IEmailSender)
sendmail_path = Option('notification', 'sendmail_path', 'sendmail',
"""Path to the sendmail executable.
The sendmail program must accept the `-i` and `-f` options.
(''since 0.12'')""")
def send(self, from_addr, recipients, message):
# Use native line endings in message
message = fix_eol(message, os.linesep)
self.log.info("Sending notification through sendmail at %s to %s"
% (self.sendmail_path, recipients))
cmdline = [self.sendmail_path, "-i", "-f", from_addr]
cmdline.extend(recipients)
self.log.debug("Sendmail command line: %s" % cmdline)
child = Popen(cmdline, bufsize=-1, stdin=PIPE, stdout=PIPE,
stderr=PIPE)
out, err = child.communicate(message)
if child.returncode or err:
raise Exception("Sendmail failed with (%s, %s), command: '%s'"
% (child.returncode, err.strip(), cmdline))
class Notify(object):
"""Generic notification class for Trac.
Subclass this to implement different methods.
"""
def __init__(self, env):
self.env = env
self.config = env.config
from trac.web.chrome import Chrome
self.template = Chrome(self.env).load_template(self.template_name,
method='text')
# FIXME: actually, we would need a different
# PermissionCache for each recipient
self.data = Chrome(self.env).populate_data(None, {'CRLF': CRLF})
def notify(self, resid):
(torcpts, ccrcpts) = self.get_recipients(resid)
self.begin_send()
self.send(torcpts, ccrcpts)
self.finish_send()
def get_recipients(self, resid):
"""Return a pair of list of subscribers to the resource 'resid'.
First list represents the direct recipients (To:), second list
represents the recipients in carbon copy (Cc:).
"""
raise NotImplementedError
def begin_send(self):
"""Prepare to send messages.
Called before sending begins.
"""
def send(self, torcpts, ccrcpts):
"""Send message to recipients."""
raise NotImplementedError
def finish_send(self):
"""Clean up after sending all messages.
Called after sending all messages.
"""
class NotifyEmail(Notify):
"""Baseclass for notification by email."""
from_email = 'trac+tickets@localhost'
subject = ''
template_name = None
nodomaddr_re = re.compile(r'[\w\d_\.\-]+')
addrsep_re = re.compile(r'[;\s,]+')
def __init__(self, env):
Notify.__init__(self, env)
addrfmt = EMAIL_LOOKALIKE_PATTERN
admit_domains = self.env.config.get('notification', 'admit_domains')
if admit_domains:
pos = addrfmt.find('@')
domains = '|'.join([x.strip() for x in \
admit_domains.replace('.','\.').split(',')])
addrfmt = r'%s@(?:(?:%s)|%s)' % (addrfmt[:pos], addrfmt[pos+1:],
domains)
self.shortaddr_re = re.compile(r'\s*(%s)\s*$' % addrfmt)
self.longaddr_re = re.compile(r'^\s*(.*)\s+<\s*(%s)\s*>\s*$' % addrfmt)
self._init_pref_encoding()
domains = self.env.config.get('notification', 'ignore_domains', '')
self._ignore_domains = [x.strip() for x in domains.lower().split(',')]
# Get the name and email addresses of all known users
self.name_map = {}
self.email_map = {}
for username, name, email in self.env.get_known_users():
if name:
self.name_map[username] = name
if email:
self.email_map[username] = email
def _init_pref_encoding(self):
from email.Charset import Charset, QP, BASE64, SHORTEST
self._charset = Charset()
self._charset.input_charset = 'utf-8'
self._charset.output_charset = 'utf-8'
self._charset.input_codec = 'utf-8'
self._charset.output_codec = 'utf-8'
pref = self.env.config.get('notification', 'mime_encoding').lower()
if pref == 'base64':
self._charset.header_encoding = BASE64
self._charset.body_encoding = BASE64
elif pref in ['qp', 'quoted-printable']:
self._charset.header_encoding = QP
self._charset.body_encoding = QP
elif pref == 'none':
self._charset.header_encoding = SHORTEST
self._charset.body_encoding = None
else:
raise TracError(_('Invalid email encoding setting: %(pref)s',
pref=pref))
def notify(self, resid, subject, author=None):
self.subject = subject
config = self.config['notification']
if not config.getbool('smtp_enabled'):
return
from_email, from_name = '', ''
if author and config.getbool('smtp_from_author'):
from_email = self.get_smtp_address(author)
if from_email:
from_name = self.name_map.get(author, '')
if not from_name:
mo = self.longaddr_re.search(author)
if mo:
from_name = mo.group(1)
if not from_email:
from_email = config.get('smtp_from')
from_name = config.get('smtp_from_name') or self.env.project_name
self.replyto_email = config.get('smtp_replyto')
self.from_email = from_email or self.replyto_email
self.from_name = from_name
if not self.from_email and not self.replyto_email:
raise TracError(tag(
tag.p(_('Unable to send email due to identity crisis.')),
tag.p(_('Neither %(from_)s nor %(reply_to)s are specified '
'in the configuration.',
from_=tag.b('notification.from'),
reply_to=tag.b('notification.reply_to')))),
_('SMTP Notification Error'))
Notify.notify(self, resid)
def format_header(self, key, name, email=None):
from email.Header import Header
maxlength = MAXHEADERLEN-(len(key)+2)
# Do not send ridiculously short headers
if maxlength < 10:
raise TracError(_("Header length is too short"))
try:
tmp = name.encode('ascii')
header = Header(tmp, 'ascii', maxlinelen=maxlength)
except UnicodeEncodeError:
header = Header(name, self._charset, maxlinelen=maxlength)
if not email:
return header
else:
return '"%s" <%s>' % (header, email)
def add_headers(self, msg, headers):
for h in headers:
msg[h] = self.encode_header(h, headers[h])
def get_smtp_address(self, address):
if not address:
return None
def is_email(address):
pos = address.find('@')
if pos == -1:
return False
if address[pos+1:].lower() in self._ignore_domains:
return False
return True
if address == 'anonymous':
return None
if address in self.email_map:
address = self.email_map[address]
elif not is_email(address) and NotifyEmail.nodomaddr_re.match(address):
if self.config.getbool('notification', 'use_short_addr'):
return address
domain = self.config.get('notification', 'smtp_default_domain')
if domain:
address = "%s@%s" % (address, domain)
else:
self.env.log.info("Email address w/o domain: %s" % address)
return None
mo = self.shortaddr_re.search(address)
if mo:
return mo.group(1)
mo = self.longaddr_re.search(address)
if mo:
return mo.group(2)
self.env.log.info("Invalid email address: %s" % address)
return None
def encode_header(self, key, value):
if isinstance(value, tuple):
return self.format_header(key, value[0], value[1])
mo = self.longaddr_re.match(value)
if mo:
return self.format_header(key, mo.group(1), mo.group(2))
return self.format_header(key, value)
def send(self, torcpts, ccrcpts, mime_headers={}):
from email.MIMEText import MIMEText
from email.Utils import formatdate
stream = self.template.generate(**self.data)
# don't translate the e-mail stream
t = deactivate()
try:
body = stream.render('text', encoding='utf-8')
finally:
reactivate(t)
public_cc = self.config.getbool('notification', 'use_public_cc')
headers = {}
headers['X-Mailer'] = 'Trac %s, by Edgewall Software' % __version__
headers['X-Trac-Version'] = __version__
headers['X-Trac-Project'] = self.env.project_name
headers['X-URL'] = self.env.project_url
headers['Precedence'] = 'bulk'
headers['Auto-Submitted'] = 'auto-generated'
headers['Subject'] = self.subject
headers['From'] = (self.from_name, self.from_email) if self.from_name \
else self.from_email
headers['Reply-To'] = self.replyto_email
def build_addresses(rcpts):
"""Format and remove invalid addresses"""
return filter(lambda x: x, \
[self.get_smtp_address(addr) for addr in rcpts])
def remove_dup(rcpts, all):
"""Remove duplicates"""
tmp = []
for rcpt in rcpts:
if not rcpt in all:
tmp.append(rcpt)
all.append(rcpt)
return (tmp, all)
toaddrs = build_addresses(torcpts)
ccaddrs = build_addresses(ccrcpts)
accparam = self.config.get('notification', 'smtp_always_cc')
accaddrs = accparam and \
build_addresses(accparam.replace(',', ' ').split()) or []
bccparam = self.config.get('notification', 'smtp_always_bcc')
bccaddrs = bccparam and \
build_addresses(bccparam.replace(',', ' ').split()) or []
recipients = []
(toaddrs, recipients) = remove_dup(toaddrs, recipients)
(ccaddrs, recipients) = remove_dup(ccaddrs, recipients)
(accaddrs, recipients) = remove_dup(accaddrs, recipients)
(bccaddrs, recipients) = remove_dup(bccaddrs, recipients)
# if there is no valid recipient, leave immediately
if len(recipients) < 1:
self.env.log.info('no recipient for a ticket notification')
return
pcc = accaddrs
if public_cc:
pcc += ccaddrs
if toaddrs:
headers['To'] = ', '.join(toaddrs)
if pcc:
headers['Cc'] = ', '.join(pcc)
headers['Date'] = formatdate()
msg = MIMEText(body, 'plain')
# The Message class computes the wrong type from the MIMEText constructor,
# which does not take a Charset object as initializer. Reset the
# encoding type to force a new, valid evaluation
del msg['Content-Transfer-Encoding']
msg.set_charset(self._charset)
self.add_headers(msg, headers)
self.add_headers(msg, mime_headers)
NotificationSystem(self.env).send_email(self.from_email, recipients,
msg.as_string())
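The `send` method above normalizes recipients in two passes: `build_addresses` maps raw names through `get_smtp_address` and drops the ones that resolve to nothing, and `remove_dup` keeps only addresses not already collected, preserving order across To:, Cc:, always-Cc and Bcc. A minimal standalone sketch of that pattern follows; the `lookup` table and addresses are hypothetical stand-ins for `get_smtp_address`.

# Minimal sketch of the recipient handling in NotifyEmail.send (illustrative only).
def build_addresses(rcpts, lookup):
    """Map raw recipient names to addresses and drop the ones that resolve to None."""
    return [addr for addr in (lookup(r) for r in rcpts) if addr]

def remove_dup(rcpts, seen):
    """Keep only addresses not already in `seen`, appending the new ones to it."""
    unique = []
    for rcpt in rcpts:
        if rcpt not in seen:
            unique.append(rcpt)
            seen.append(rcpt)
    return unique, seen

if __name__ == '__main__':
    # Hypothetical lookup: usernames map to addresses, unknown names are dropped.
    table = {'alice': '[email protected]', 'bob': '[email protected]'}
    lookup = table.get
    to = build_addresses(['alice', 'anonymous', 'bob'], lookup)
    cc = build_addresses(['bob', 'alice'], lookup)
    recipients = []
    to, recipients = remove_dup(to, recipients)
    cc, recipients = remove_dup(cc, recipients)
    print(to)          # ['[email protected]', '[email protected]']
    print(cc)          # [] -- both were already addressed in To:
    print(recipients)  # ['[email protected]', '[email protected]']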
|
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import cPickle as pickle
from uuid import UUID
class HConditionBranch1ProcPart2_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HConditionBranch1ProcPart2_CompleteLHS.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HConditionBranch1ProcPart2_CompleteLHS, self).__init__(name='HConditionBranch1ProcPart2_CompleteLHS', num_nodes=3, edges=[])
# Add the edges
self.add_edges([(0, 2), (2, 1)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'MT_pre__UMLRT2Kiltera_MM'
p2
aS'MoTifRule'
p3
a.""")
self["MT_constraint__"] = """#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
"""
self["name"] = """"""
self["GUID__"] = UUID('45943b43-8284-4d2f-b429-2a0a69f4f6e7')
# Set the node attributes
self.vs[0]["MT_pivotOut__"] = """element1"""
self.vs[0]["MT_subtypeMatching__"] = False
self.vs[0]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_pivotIn__"] = """element1"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["MT_subtypes__"] = pickle.loads("""(lp1
S'MT_pre__Condition'
p2
a.""")
self.vs[0]["MT_dirty__"] = False
self.vs[0]["mm__"] = """MT_pre__ConditionBranch"""
self.vs[0]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[0]["GUID__"] = UUID('c45e782c-5468-4e80-b0cc-bca674ab9ba8')
self.vs[1]["MT_subtypeMatching__"] = True
self.vs[1]["MT_pre__classtype"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["MT_subtypes__"] = pickle.loads("""(lp1
S'MT_pre__New'
p2
aS'MT_pre__Inst'
p3
aS'MT_pre__LocalDef'
p4
aS'MT_pre__Listen'
p5
aS'MT_pre__ConditionSet'
p6
aS'MT_pre__Condition'
p7
aS'MT_pre__Print'
p8
aS'MT_pre__Match'
p9
aS'MT_pre__Delay'
p10
aS'MT_pre__Trigger_T'
p11
aS'MT_pre__Null'
p12
aS'MT_pre__Par'
p13
aS'MT_pre__ParIndexed'
p14
a.""")
self.vs[1]["MT_dirty__"] = False
self.vs[1]["mm__"] = """MT_pre__Proc"""
self.vs[1]["MT_pre__cardinality"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["MT_pre__name"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[1]["GUID__"] = UUID('bcb29718-e551-42c9-970e-090a332478b4')
self.vs[2]["MT_subtypeMatching__"] = False
self.vs[2]["MT_pre__associationType"] = """
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["MT_subtypes__"] = pickle.loads("""(lp1
.""")
self.vs[2]["MT_dirty__"] = False
self.vs[2]["mm__"] = """MT_pre__directLink_T"""
self.vs[2]["GUID__"] = UUID('efe4e420-9f09-4b20-aea2-06f4b1a91a5f')
def eval_classtype1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name1(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_classtype2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_cardinality2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_name2(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def eval_associationType3(self, attr_value, this):
#===============================================================================
# This code is executed when evaluating if a node shall be matched by this rule.
# You can access the value of the current node's attribute value by: attr_value.
# You can access any attribute x of this node by: this['x'].
# If the constraint relies on attribute values from other nodes,
# use the LHS/NAC constraint instead.
# The given constraint must evaluate to a boolean expression.
#===============================================================================
return True
def constraint(self, PreNode, graph):
"""
Executable constraint code.
@param PreNode: Function taking an integer as parameter
and returns the node corresponding to that label.
"""
#===============================================================================
# This code is executed after the nodes in the LHS have been matched.
# You can access a matched node labelled n by: PreNode('n').
# To access attribute x of node n, use: PreNode('n')['x'].
# The given constraint must evaluate to a boolean expression:
# returning True enables the rule to be applied,
# returning False forbids the rule from being applied.
#===============================================================================
return True
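For orientation, the generated `constraint` method above is handed a `PreNode` callable that maps an LHS label to its matched node. The tiny sketch below only illustrates that calling convention with a dictionary-backed `PreNode`; it is an assumption for demonstration, not the actual Himesis matching engine.

# Illustrative sketch only: how a constraint like the one above might be invoked.
def run_constraint(constraint, match):
    """`match` maps LHS labels (e.g. '1', '2', '3') to matched node attribute dicts."""
    def PreNode(label):
        return match[label]
    return constraint(PreNode, None)

if __name__ == '__main__':
    # Hypothetical matched nodes keyed by the MT_label__ values from the pattern above.
    match = {'1': {'mm__': 'ConditionBranch'},
             '2': {'mm__': 'Proc'},
             '3': {'mm__': 'directLink_T'}}
    # A constraint that, like the generated one, simply accepts the match.
    accept_all = lambda PreNode, graph: True
    print(run_constraint(accept_all, match))  # True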
|
|
#!/usr/bin/env python
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import getpass
import imp
import netrc
import optparse
import os
import sys
import time
from pyversion import is_python3
if is_python3():
import urllib.request
else:
import urllib2
urllib = imp.new_module('urllib')
urllib.request = urllib2
try:
import kerberos
except ImportError:
kerberos = None
from color import SetDefaultColoring
from trace import SetTrace
from git_command import git, GitCommand
from git_config import init_ssh, close_ssh
from command import InteractiveCommand
from command import MirrorSafeCommand
from subcmds.version import Version
from editor import Editor
from error import DownloadError
from error import ManifestInvalidRevisionError
from error import ManifestParseError
from error import NoManifestException
from error import NoSuchProjectError
from error import RepoChangedException
from manifest_xml import XmlManifest
from pager import RunPager
from wrapper import WrapperPath, Wrapper
from subcmds import all_commands
if not is_python3():
# pylint:disable=W0622
input = raw_input
# pylint:enable=W0622
global_options = optparse.OptionParser(
usage="repo [-p|--paginate|--no-pager] COMMAND [ARGS]"
)
global_options.add_option('-p', '--paginate',
dest='pager', action='store_true',
help='display command output in the pager')
global_options.add_option('--no-pager',
dest='no_pager', action='store_true',
help='disable the pager')
global_options.add_option('--color',
choices=('auto', 'always', 'never'), default=None,
help='control color usage: auto, always, never')
global_options.add_option('--trace',
dest='trace', action='store_true',
help='trace git command execution')
global_options.add_option('--time',
dest='time', action='store_true',
help='time repo command execution')
global_options.add_option('--version',
dest='show_version', action='store_true',
help='display this version of repo')
class _Repo(object):
def __init__(self, repodir):
self.repodir = repodir
self.commands = all_commands
# add 'branch' as an alias for 'branches'
all_commands['branch'] = all_commands['branches']
def _Run(self, argv):
result = 0
name = None
glob = []
for i in range(len(argv)):
if not argv[i].startswith('-'):
name = argv[i]
if i > 0:
glob = argv[:i]
argv = argv[i + 1:]
break
if not name:
glob = argv
name = 'help'
argv = []
gopts, _gargs = global_options.parse_args(glob)
if gopts.trace:
SetTrace()
if gopts.show_version:
if name == 'help':
name = 'version'
else:
print('fatal: invalid usage of --version', file=sys.stderr)
return 1
SetDefaultColoring(gopts.color)
try:
cmd = self.commands[name]
except KeyError:
print("repo: '%s' is not a repo command. See 'repo help'." % name,
file=sys.stderr)
return 1
cmd.repodir = self.repodir
cmd.manifest = XmlManifest(cmd.repodir)
Editor.globalConfig = cmd.manifest.globalConfig
if not isinstance(cmd, MirrorSafeCommand) and cmd.manifest.IsMirror:
print("fatal: '%s' requires a working directory" % name,
file=sys.stderr)
return 1
try:
copts, cargs = cmd.OptionParser.parse_args(argv)
copts = cmd.ReadEnvironmentOptions(copts)
except NoManifestException as e:
print('error: in `%s`: %s' % (' '.join([name] + argv), str(e)),
file=sys.stderr)
print('error: manifest missing or unreadable -- please run init',
file=sys.stderr)
return 1
if not gopts.no_pager and not isinstance(cmd, InteractiveCommand):
config = cmd.manifest.globalConfig
if gopts.pager:
use_pager = True
else:
use_pager = config.GetBoolean('pager.%s' % name)
if use_pager is None:
use_pager = cmd.WantPager(copts)
if use_pager:
RunPager(config)
start = time.time()
try:
result = cmd.Execute(copts, cargs)
except (DownloadError, ManifestInvalidRevisionError,
NoManifestException) as e:
print('error: in `%s`: %s' % (' '.join([name] + argv), str(e)),
file=sys.stderr)
if isinstance(e, NoManifestException):
print('error: manifest missing or unreadable -- please run init',
file=sys.stderr)
result = 1
except NoSuchProjectError as e:
if e.name:
print('error: project %s not found' % e.name, file=sys.stderr)
else:
print('error: no project in current directory', file=sys.stderr)
result = 1
finally:
elapsed = time.time() - start
hours, remainder = divmod(elapsed, 3600)
minutes, seconds = divmod(remainder, 60)
if gopts.time:
if hours == 0:
print('real\t%dm%.3fs' % (minutes, seconds), file=sys.stderr)
else:
print('real\t%dh%dm%.3fs' % (hours, minutes, seconds),
file=sys.stderr)
return result
def _MyRepoPath():
return os.path.dirname(__file__)
def _CheckWrapperVersion(ver, repo_path):
if not repo_path:
repo_path = '~/bin/repo'
if not ver:
print('no --wrapper-version argument', file=sys.stderr)
sys.exit(1)
exp = Wrapper().VERSION
ver = tuple(map(int, ver.split('.')))
if len(ver) == 1:
ver = (0, ver[0])
exp_str = '.'.join(map(str, exp))
if exp[0] > ver[0] or ver < (0, 4):
print("""
!!! A new repo command (%5s) is available. !!!
!!! You must upgrade before you can continue: !!!
cp %s %s
""" % (exp_str, WrapperPath(), repo_path), file=sys.stderr)
sys.exit(1)
if exp > ver:
print("""
... A new repo command (%5s) is available.
... You should upgrade soon:
cp %s %s
""" % (exp_str, WrapperPath(), repo_path), file=sys.stderr)
def _CheckRepoDir(repo_dir):
if not repo_dir:
print('no --repo-dir argument', file=sys.stderr)
sys.exit(1)
def _PruneOptions(argv, opt):
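# Drop any long options from argv that `opt` does not recognize, stopping at a
# literal '--'; '--name=value' arguments are matched on the '--name' part only.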
i = 0
while i < len(argv):
a = argv[i]
if a == '--':
break
if a.startswith('--'):
eq = a.find('=')
if eq > 0:
a = a[0:eq]
if not opt.has_option(a):
del argv[i]
continue
i += 1
_user_agent = None
def _UserAgent():
global _user_agent
if _user_agent is None:
py_version = sys.version_info
os_name = sys.platform
if os_name == 'linux2':
os_name = 'Linux'
elif os_name == 'win32':
os_name = 'Win32'
elif os_name == 'cygwin':
os_name = 'Cygwin'
elif os_name == 'darwin':
os_name = 'Darwin'
p = GitCommand(
None, ['describe', 'HEAD'],
cwd = _MyRepoPath(),
capture_stdout = True)
if p.Wait() == 0:
repo_version = p.stdout
if len(repo_version) > 0 and repo_version[-1] == '\n':
repo_version = repo_version[0:-1]
if len(repo_version) > 0 and repo_version[0] == 'v':
repo_version = repo_version[1:]
else:
repo_version = 'unknown'
_user_agent = 'git-repo/%s (%s) git/%s Python/%d.%d.%d' % (
repo_version,
os_name,
'.'.join(map(str, git.version_tuple())),
py_version[0], py_version[1], py_version[2])
return _user_agent
class _UserAgentHandler(urllib.request.BaseHandler):
def http_request(self, req):
req.add_header('User-Agent', _UserAgent())
return req
def https_request(self, req):
req.add_header('User-Agent', _UserAgent())
return req
def _AddPasswordFromUserInput(handler, msg, req):
# If repo could not find auth info from netrc, try to get it from user input
url = req.get_full_url()
user, password = handler.passwd.find_user_password(None, url)
if user is None:
print(msg)
try:
user = input('User: ')
password = getpass.getpass()
except KeyboardInterrupt:
return
handler.passwd.add_password(None, url, user, password)
class _BasicAuthHandler(urllib.request.HTTPBasicAuthHandler):
def http_error_401(self, req, fp, code, msg, headers):
_AddPasswordFromUserInput(self, msg, req)
return urllib.request.HTTPBasicAuthHandler.http_error_401(
self, req, fp, code, msg, headers)
def http_error_auth_reqed(self, authreq, host, req, headers):
try:
old_add_header = req.add_header
def _add_header(name, val):
val = val.replace('\n', '')
old_add_header(name, val)
req.add_header = _add_header
return urllib.request.AbstractBasicAuthHandler.http_error_auth_reqed(
self, authreq, host, req, headers)
except:
reset = getattr(self, 'reset_retry_count', None)
if reset is not None:
reset()
elif getattr(self, 'retried', None):
self.retried = 0
raise
class _DigestAuthHandler(urllib.request.HTTPDigestAuthHandler):
def http_error_401(self, req, fp, code, msg, headers):
_AddPasswordFromUserInput(self, msg, req)
return urllib.request.HTTPDigestAuthHandler.http_error_401(
self, req, fp, code, msg, headers)
def http_error_auth_reqed(self, auth_header, host, req, headers):
try:
old_add_header = req.add_header
def _add_header(name, val):
val = val.replace('\n', '')
old_add_header(name, val)
req.add_header = _add_header
return urllib.request.AbstractDigestAuthHandler.http_error_auth_reqed(
self, auth_header, host, req, headers)
except:
reset = getattr(self, 'reset_retry_count', None)
if reset is not None:
reset()
elif getattr(self, 'retried', None):
self.retried = 0
raise
class _KerberosAuthHandler(urllib.request.BaseHandler):
def __init__(self):
self.retried = 0
self.context = None
self.handler_order = urllib.request.BaseHandler.handler_order - 50
def http_error_401(self, req, fp, code, msg, headers):
host = req.get_host()
retry = self.http_error_auth_reqed('www-authenticate', host, req, headers)
return retry
def http_error_auth_reqed(self, auth_header, host, req, headers):
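# One SPNEGO round trip: pull the server's Negotiate token out of the 401
# headers, build a client token for the HTTP@<host> service, retry the request
# with an Authorization header, then validate the server's mutual-auth reply.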
try:
spn = "HTTP@%s" % host
authdata = self._negotiate_get_authdata(auth_header, headers)
if self.retried > 3:
raise urllib.request.HTTPError(req.get_full_url(), 401,
"Negotiate auth failed", headers, None)
else:
self.retried += 1
neghdr = self._negotiate_get_svctk(spn, authdata)
if neghdr is None:
return None
req.add_unredirected_header('Authorization', neghdr)
response = self.parent.open(req)
srvauth = self._negotiate_get_authdata(auth_header, response.info())
if self._validate_response(srvauth):
return response
except kerberos.GSSError:
return None
except:
self.reset_retry_count()
raise
finally:
self._clean_context()
def reset_retry_count(self):
self.retried = 0
def _negotiate_get_authdata(self, auth_header, headers):
authhdr = headers.get(auth_header, None)
if authhdr is not None:
for mech_tuple in authhdr.split(","):
mech, __, authdata = mech_tuple.strip().partition(" ")
if mech.lower() == "negotiate":
return authdata.strip()
return None
def _negotiate_get_svctk(self, spn, authdata):
if authdata is None:
return None
result, self.context = kerberos.authGSSClientInit(spn)
if result < kerberos.AUTH_GSS_COMPLETE:
return None
result = kerberos.authGSSClientStep(self.context, authdata)
if result < kerberos.AUTH_GSS_CONTINUE:
return None
response = kerberos.authGSSClientResponse(self.context)
return "Negotiate %s" % response
def _validate_response(self, authdata):
if authdata is None:
return None
result = kerberos.authGSSClientStep(self.context, authdata)
if result == kerberos.AUTH_GSS_COMPLETE:
return True
return None
def _clean_context(self):
if self.context is not None:
kerberos.authGSSClientClean(self.context)
self.context = None
def init_http():
handlers = [_UserAgentHandler()]
mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm()
try:
n = netrc.netrc()
for host in n.hosts:
p = n.hosts[host]
mgr.add_password(p[1], 'http://%s/' % host, p[0], p[2])
mgr.add_password(p[1], 'https://%s/' % host, p[0], p[2])
except netrc.NetrcParseError:
pass
except IOError:
pass
handlers.append(_BasicAuthHandler(mgr))
handlers.append(_DigestAuthHandler(mgr))
if kerberos:
handlers.append(_KerberosAuthHandler())
if 'http_proxy' in os.environ:
url = os.environ['http_proxy']
handlers.append(urllib.request.ProxyHandler({'http': url, 'https': url}))
if 'REPO_CURL_VERBOSE' in os.environ:
handlers.append(urllib.request.HTTPHandler(debuglevel=1))
handlers.append(urllib.request.HTTPSHandler(debuglevel=1))
urllib.request.install_opener(urllib.request.build_opener(*handlers))
def _Main(argv):
result = 0
opt = optparse.OptionParser(usage="repo wrapperinfo -- ...")
opt.add_option("--repo-dir", dest="repodir",
help="path to .repo/")
opt.add_option("--wrapper-version", dest="wrapper_version",
help="version of the wrapper script")
opt.add_option("--wrapper-path", dest="wrapper_path",
help="location of the wrapper script")
_PruneOptions(argv, opt)
opt, argv = opt.parse_args(argv)
_CheckWrapperVersion(opt.wrapper_version, opt.wrapper_path)
_CheckRepoDir(opt.repodir)
Version.wrapper_version = opt.wrapper_version
Version.wrapper_path = opt.wrapper_path
repo = _Repo(opt.repodir)
try:
try:
init_ssh()
init_http()
result = repo._Run(argv) or 0
finally:
close_ssh()
except KeyboardInterrupt:
print('aborted by user', file=sys.stderr)
result = 1
except ManifestParseError as mpe:
print('fatal: %s' % mpe, file=sys.stderr)
result = 1
except RepoChangedException as rce:
# If repo changed, re-exec ourselves.
#
argv = list(sys.argv)
argv.extend(rce.extra_args)
try:
os.execv(__file__, argv)
except OSError as e:
print('fatal: cannot restart repo after upgrade', file=sys.stderr)
print('fatal: %s' % e, file=sys.stderr)
result = 128
sys.exit(result)
if __name__ == '__main__':
_Main(sys.argv[1:])
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import netaddr
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron.common import constants
from neutron.common import ipv6_utils
from neutron import context
from neutron.ipam import driver
from neutron.ipam import exceptions as ipam_exc
from neutron.ipam import requests as ipam_req
from neutron import manager
from neutron.tests import base
from neutron.tests.unit.ipam import fake_driver
FAKE_IPAM_CLASS = 'neutron.tests.unit.ipam.fake_driver.FakeDriver'
class IpamSubnetRequestTestCase(base.BaseTestCase):
def setUp(self):
super(IpamSubnetRequestTestCase, self).setUp()
self.tenant_id = uuidutils.generate_uuid()
self.subnet_id = uuidutils.generate_uuid()
class TestIpamSubnetRequests(IpamSubnetRequestTestCase):
def test_subnet_request(self):
pool = ipam_req.SubnetRequest(self.tenant_id,
self.subnet_id)
self.assertEqual(self.tenant_id, pool.tenant_id)
self.assertEqual(self.subnet_id, pool.subnet_id)
self.assertIsNone(pool.gateway_ip)
self.assertIsNone(pool.allocation_pools)
def test_subnet_request_gateway(self):
request = ipam_req.SubnetRequest(self.tenant_id,
self.subnet_id,
gateway_ip='1.2.3.1')
self.assertEqual('1.2.3.1', str(request.gateway_ip))
def test_subnet_request_bad_gateway(self):
self.assertRaises(netaddr.core.AddrFormatError,
ipam_req.SubnetRequest,
self.tenant_id,
self.subnet_id,
gateway_ip='1.2.3.')
def test_subnet_request_with_range(self):
allocation_pools = [netaddr.IPRange('1.2.3.4', '1.2.3.5'),
netaddr.IPRange('1.2.3.7', '1.2.3.9')]
request = ipam_req.SubnetRequest(self.tenant_id,
self.subnet_id,
allocation_pools=allocation_pools)
self.assertEqual(allocation_pools, request.allocation_pools)
def test_subnet_request_range_not_list(self):
self.assertRaises(TypeError,
ipam_req.SubnetRequest,
self.tenant_id,
self.subnet_id,
allocation_pools=1)
def test_subnet_request_bad_range(self):
self.assertRaises(TypeError,
ipam_req.SubnetRequest,
self.tenant_id,
self.subnet_id,
allocation_pools=['1.2.3.4'])
def test_subnet_request_different_versions(self):
pools = [netaddr.IPRange('0.0.0.1', '0.0.0.2'),
netaddr.IPRange('::1', '::2')]
self.assertRaises(ValueError,
ipam_req.SubnetRequest,
self.tenant_id,
self.subnet_id,
allocation_pools=pools)
def test_subnet_request_overlap(self):
pools = [netaddr.IPRange('0.0.0.10', '0.0.0.20'),
netaddr.IPRange('0.0.0.8', '0.0.0.10')]
self.assertRaises(ValueError,
ipam_req.SubnetRequest,
self.tenant_id,
self.subnet_id,
allocation_pools=pools)
class TestIpamAnySubnetRequest(IpamSubnetRequestTestCase):
def test_subnet_request(self):
request = ipam_req.AnySubnetRequest(self.tenant_id,
self.subnet_id,
constants.IPv4,
24,
gateway_ip='0.0.0.1')
self.assertEqual(24, request.prefixlen)
def test_subnet_request_bad_prefix_type(self):
self.assertRaises(netaddr.core.AddrFormatError,
ipam_req.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv4,
'A')
def test_subnet_request_bad_prefix(self):
self.assertRaises(netaddr.core.AddrFormatError,
ipam_req.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv4,
33)
self.assertRaises(netaddr.core.AddrFormatError,
ipam_req.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv6,
129)
def test_subnet_request_bad_gateway(self):
self.assertRaises(ValueError,
ipam_req.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv6,
64,
gateway_ip='2000::1')
def test_subnet_request_allocation_pool_wrong_version(self):
pools = [netaddr.IPRange('0.0.0.4', '0.0.0.5')]
self.assertRaises(ValueError,
ipam_req.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv6,
64,
allocation_pools=pools)
def test_subnet_request_allocation_pool_not_in_net(self):
pools = [netaddr.IPRange('0.0.0.64', '0.0.0.128')]
self.assertRaises(ValueError,
ipam_req.AnySubnetRequest,
self.tenant_id,
self.subnet_id,
constants.IPv4,
25,
allocation_pools=pools)
class TestIpamSpecificSubnetRequest(IpamSubnetRequestTestCase):
def test_subnet_request(self):
request = ipam_req.SpecificSubnetRequest(self.tenant_id,
self.subnet_id,
'1.2.3.0/24',
gateway_ip='1.2.3.1')
self.assertEqual(24, request.prefixlen)
self.assertEqual(netaddr.IPAddress('1.2.3.1'), request.gateway_ip)
self.assertEqual(netaddr.IPNetwork('1.2.3.0/24'), request.subnet_cidr)
def test_subnet_request_bad_gateway(self):
self.assertRaises(ValueError,
ipam_req.SpecificSubnetRequest,
self.tenant_id,
self.subnet_id,
'2001::1',
gateway_ip='2000::1')
class TestAddressRequest(base.BaseTestCase):
# This class doesn't test much. At least running through all of the
# constructors may shake out some trivial bugs.
EUI64 = ipam_req.AutomaticAddressRequest.EUI64
def setUp(self):
super(TestAddressRequest, self).setUp()
def test_specific_address_ipv6(self):
request = ipam_req.SpecificAddressRequest('2000::45')
self.assertEqual(netaddr.IPAddress('2000::45'), request.address)
def test_specific_address_ipv4(self):
request = ipam_req.SpecificAddressRequest('1.2.3.32')
self.assertEqual(netaddr.IPAddress('1.2.3.32'), request.address)
def test_any_address(self):
ipam_req.AnyAddressRequest()
def test_automatic_address_request_eui64(self):
subnet_cidr = '2607:f0d0:1002:51::/64'
port_mac = 'aa:bb:cc:dd:ee:ff'
eui_addr = str(ipv6_utils.get_ipv6_addr_by_EUI64(subnet_cidr,
port_mac))
request = ipam_req.AutomaticAddressRequest(
address_type=self.EUI64,
prefix=subnet_cidr,
mac=port_mac)
self.assertEqual(request.address, netaddr.IPAddress(eui_addr))
def test_automatic_address_request_invalid_address_type_raises(self):
self.assertRaises(ipam_exc.InvalidAddressType,
ipam_req.AutomaticAddressRequest,
address_type='kaboom')
def test_automatic_address_request_eui64_no_mac_raises(self):
self.assertRaises(ipam_exc.AddressCalculationFailure,
ipam_req.AutomaticAddressRequest,
address_type=self.EUI64,
prefix='meh')
def test_automatic_address_request_eui64_alien_param_raises(self):
self.assertRaises(ipam_exc.AddressCalculationFailure,
ipam_req.AutomaticAddressRequest,
address_type=self.EUI64,
mac='meh',
alien='et',
prefix='meh')
class TestIpamDriverLoader(base.BaseTestCase):
def setUp(self):
super(TestIpamDriverLoader, self).setUp()
self.ctx = context.get_admin_context()
def _verify_fake_ipam_driver_is_loaded(self, driver_name):
mgr = manager.NeutronManager
ipam_driver = mgr.load_class_for_provider('neutron.ipam_drivers',
driver_name)
self.assertEqual(
fake_driver.FakeDriver, ipam_driver,
"loaded ipam driver should be FakeDriver")
def _verify_import_error_is_generated(self, driver_name):
mgr = manager.NeutronManager
self.assertRaises(ImportError, mgr.load_class_for_provider,
'neutron.ipam_drivers',
driver_name)
def test_ipam_driver_is_loaded_by_class(self):
self._verify_fake_ipam_driver_is_loaded(FAKE_IPAM_CLASS)
def test_ipam_driver_is_loaded_by_name(self):
self._verify_fake_ipam_driver_is_loaded('fake')
def test_ipam_driver_raises_import_error(self):
self._verify_import_error_is_generated(
'neutron.tests.unit.ipam_req.SomeNonExistentClass')
def test_ipam_driver_raises_import_error_for_none(self):
self._verify_import_error_is_generated(None)
def _load_ipam_driver(self, driver_name, subnet_pool_id):
cfg.CONF.set_override("ipam_driver", driver_name)
return driver.Pool.get_instance(subnet_pool_id, self.ctx)
def test_ipam_driver_is_loaded_from_ipam_driver_config_value(self):
ipam_driver = self._load_ipam_driver('fake', None)
self.assertIsInstance(
ipam_driver, fake_driver.FakeDriver,
"loaded ipam driver should be of type FakeDriver")
@mock.patch(FAKE_IPAM_CLASS)
def test_ipam_driver_is_loaded_with_subnet_pool_id(self, ipam_mock):
subnet_pool_id = 'SomePoolID'
self._load_ipam_driver('fake', subnet_pool_id)
ipam_mock.assert_called_once_with(subnet_pool_id, self.ctx)
class TestAddressRequestFactory(base.BaseTestCase):
def test_specific_address_request_is_loaded(self):
for address in ('10.12.0.15', 'fffe::1'):
ip = {'ip_address': address}
self.assertIsInstance(
ipam_req.AddressRequestFactory.get_request(None, None, ip),
ipam_req.SpecificAddressRequest)
def test_any_address_request_is_loaded(self):
for addr in [None, '']:
ip = {'ip_address': addr}
self.assertIsInstance(
ipam_req.AddressRequestFactory.get_request(None, None, ip),
ipam_req.AnyAddressRequest)
def test_automatic_address_request_is_loaded(self):
ip = {'mac': '6c:62:6d:de:cf:49',
'subnet_cidr': '2001:470:abcd::/64',
'eui64_address': True}
self.assertIsInstance(
ipam_req.AddressRequestFactory.get_request(None, None, ip),
ipam_req.AutomaticAddressRequest)
class TestSubnetRequestFactory(IpamSubnetRequestTestCase):
def _build_subnet_dict(self, id=None, cidr='192.168.1.0/24',
prefixlen=8, ip_version=4):
subnet = {'cidr': cidr,
'prefixlen': prefixlen,
'ip_version': ip_version,
'tenant_id': self.tenant_id,
'gateway_ip': None,
'allocation_pools': None,
'id': id or self.subnet_id}
subnetpool = {'ip_version': ip_version,
'default_prefixlen': prefixlen}
return subnet, subnetpool
def test_specific_subnet_request_is_loaded(self):
addresses = [
'10.12.0.15/24',
'10.12.0.0/24',
'fffe::1/64',
'fffe::/64']
for address in addresses:
subnet, subnetpool = self._build_subnet_dict(cidr=address)
self.assertIsInstance(
ipam_req.SubnetRequestFactory.get_request(None,
subnet,
subnetpool),
ipam_req.SpecificSubnetRequest)
def test_any_address_request_is_loaded_for_ipv4(self):
subnet, subnetpool = self._build_subnet_dict(cidr=None, ip_version=4)
self.assertIsInstance(
ipam_req.SubnetRequestFactory.get_request(None,
subnet,
subnetpool),
ipam_req.AnySubnetRequest)
def test_any_address_request_is_loaded_for_ipv6(self):
subnet, subnetpool = self._build_subnet_dict(cidr=None, ip_version=6)
self.assertIsInstance(
ipam_req.SubnetRequestFactory.get_request(None,
subnet,
subnetpool),
ipam_req.AnySubnetRequest)
def test_args_are_passed_to_specific_request(self):
subnet, subnetpool = self._build_subnet_dict()
request = ipam_req.SubnetRequestFactory.get_request(None,
subnet,
subnetpool)
self.assertIsInstance(request,
ipam_req.SpecificSubnetRequest)
self.assertEqual(self.tenant_id, request.tenant_id)
self.assertEqual(self.subnet_id, request.subnet_id)
self.assertIsNone(request.gateway_ip)
self.assertIsNone(request.allocation_pools)
class TestGetRequestFactory(base.BaseTestCase):
def setUp(self):
super(TestGetRequestFactory, self).setUp()
cfg.CONF.set_override('ipam_driver', 'fake')
self.driver = driver.Pool.get_instance(None, None)
def test_get_subnet_request_factory(self):
self.assertEqual(
self.driver.get_subnet_request_factory(),
ipam_req.SubnetRequestFactory)
def test_get_address_request_factory(self):
self.assertEqual(
self.driver.get_address_request_factory(),
ipam_req.AddressRequestFactory)
|
|
#########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import unittest
from os import path
from nose.plugins.attrib import attr
from cloudify_rest_client.exceptions import UserUnauthorizedError
from manager_rest.storage import models
from manager_rest.test.base_test import LATEST_API_VERSION
from manager_rest.storage.models_states import ExecutionState
from .test_base import SecurityTestBase
RUNNING_EXECUTIONS_MESSAGE = 'There are running executions for this deployment'
UNAUTHORIZED_ERROR_MESSAGE = '401: User unauthorized'
NOT_FOUND_ERROR_MESSAGE = '404: Requested Blueprint with ID ' \
'`blueprint_id` was not found'
@attr(client_min_version=1, client_max_version=LATEST_API_VERSION)
class AuthorizationTests(SecurityTestBase):
def setUp(self):
super(AuthorizationTests, self).setUp()
self.blueprint_path = path.join(
self.get_blueprint_path('mock_blueprint'), 'empty_blueprint.yaml')
self.admin_client = self.get_secured_client(
username='alice', password='alice_password'
)
self.default_client = self.get_secured_client(
username='bob', password='bob_password'
)
self.suspended_client = self.get_secured_client(
username='clair', password='clair_password'
)
def test_blueprint_operations(self):
# test
self._test_upload_blueprints()
self._test_list_blueprints()
self._test_get_blueprints()
self._test_delete_blueprints()
def test_deployment_operations(self):
# setup
self.admin_client.blueprints.upload(
self.blueprint_path, 'bp_example_1')
# test
self._test_create_deployments()
self._test_list_deployments()
self._test_get_deployments()
self._test_delete_deployments()
def test_execution_operations(self):
# setup
self.admin_client.blueprints.upload(
self.blueprint_path, 'blueprint_1')
self.admin_client.deployments.create('blueprint_1', 'deployment_1')
self.admin_client.blueprints.upload(
self.blueprint_path, 'blueprint_2')
self.admin_client.deployments.create('blueprint_2', 'deployment_2')
# test
self._test_list_executions()
execution1_id, execution2_id = self._test_start_executions()
self._test_get_executions(execution1_id, execution2_id)
self._test_update_executions(execution1_id)
self._test_cancel_executions(execution1_id, execution2_id)
def test_node_operations(self):
# setup
self.admin_client.blueprints.upload(
self.blueprint_path, 'blueprint_1')
self.admin_client.deployments.create('blueprint_1', 'deployment_1')
# test
self._test_list_nodes()
self._test_get_nodes()
def test_node_instance_operations(self):
# setup
self.admin_client.blueprints.upload(
self.blueprint_path, 'blueprint_1')
self.admin_client.deployments.create('blueprint_1', 'deployment_1')
# test
node_instances = self._test_list_node_instances()
instance_id = self._test_get_node_instance(node_instances[0]['id'])
self._test_update_node_instances(instance_id)
def test_token_client_is_not_breaching(self):
admin_token_client, default_token_client = self._test_get_token()
self._test_blueprint_upload_with_token(admin_token_client,
default_token_client)
self._test_get_blueprint_with_token(admin_token_client,
default_token_client)
self._test_blueprint_list_with_token(admin_token_client,
default_token_client)
self._test_blueprint_delete_with_token(admin_token_client,
default_token_client)
@attr(client_min_version=2.1,
client_max_version=LATEST_API_VERSION)
# todo: mt: handle authorization
@unittest.skip("temporarily disabled")
def test_maintenance_mode(self):
self._test_get_status_maintenance_mode()
self._test_activate_maintenance_mode()
self._test_deactivate_maintenance_mode()
##################
# token methods
##################
def _test_blueprint_upload_with_token(self,
admin_token_client,
default_token_client):
# admins and default users should be able to upload blueprints...
token_bp_example_1 = admin_token_client.blueprints.upload(
self.blueprint_path, 'token_bp_example_1')
self._assert_resource_id(token_bp_example_1, 'token_bp_example_1')
token_bp_example_2 = default_token_client.blueprints.upload(
self.blueprint_path, 'token_bp_example_2')
self._assert_resource_id(token_bp_example_2, 'token_bp_example_2')
def _test_get_token(self):
# admins and default users should be able to get a token...
admin_token = self.admin_client.tokens.get().value
admin_token_client = self.get_secured_client(token=admin_token)
default_token = self.default_client.tokens.get().value
default_token_client = self.get_secured_client(token=default_token)
# ... but suspended users should not be able to get a token
self._assert_unauthorized(self.suspended_client.tokens.get)
return admin_token_client, default_token_client
def _test_blueprint_list_with_token(self,
admin_token_client,
default_token_client):
        # admins and default users should be able to list blueprints
expected_ids = {'token_bp_example_1', 'token_bp_example_2'}
blueprints_list = admin_token_client.blueprints.list()
self._assert_resources_list_ids(blueprints_list, expected_ids)
blueprints_list = default_token_client.blueprints.list()
self._assert_resources_list_ids(blueprints_list, expected_ids)
def _test_get_blueprint_with_token(self,
admin_token_client,
default_token_client):
        # admins and default users should be able to get blueprints
blueprint = admin_token_client.blueprints.get('token_bp_example_1')
self._assert_resource_id(blueprint, 'token_bp_example_1')
blueprint = default_token_client.blueprints.get('token_bp_example_1')
self._assert_resource_id(blueprint, 'token_bp_example_1')
@staticmethod
def _test_blueprint_delete_with_token(admin_token_client,
default_token_client):
# admins and default users should be able to delete a blueprint...
admin_token_client.blueprints.delete('token_bp_example_1')
default_token_client.blueprints.delete('token_bp_example_2')
####################
# blueprint methods
####################
def _test_upload_blueprints(self):
# admins and default users should be able to upload blueprints...
blueprint_1 = self.admin_client.blueprints.upload(
self.blueprint_path, 'blueprint_1')
self._assert_resource_id(blueprint_1, 'blueprint_1')
blueprint_2 = self.default_client.blueprints.upload(
self.blueprint_path, 'blueprint_2')
self._assert_resource_id(blueprint_2, 'blueprint_2')
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.blueprints.upload,
self.blueprint_path, 'dummy_bp')
def _test_list_blueprints(self):
        # admins and default users should be able to list blueprints...
blueprints_list = self.admin_client.blueprints.list()
expected_ids = {'blueprint_1', 'blueprint_2'}
self._assert_resources_list_ids(blueprints_list, expected_ids)
blueprints_list = self.default_client.blueprints.list()
self._assert_resources_list_ids(blueprints_list, expected_ids)
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.blueprints.list)
def _test_get_blueprints(self):
# admins and default users should be able to get blueprints
self._assert_resource_id(
self.admin_client.blueprints.get('blueprint_1'),
expected_id='blueprint_1')
self._assert_resource_id(
self.default_client.blueprints.get('blueprint_1'),
expected_id='blueprint_1')
# suspended users should not be able to get any blueprint
self._assert_unauthorized(self.suspended_client.blueprints.get,
'blueprint_1')
def _test_delete_blueprints(self):
# admins and default users should be able to delete blueprints...
self.admin_client.blueprints.delete('blueprint_1')
self.default_client.blueprints.delete('blueprint_2')
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.blueprints.delete,
                                  'dummy_bp')
#####################
# deployment methods
#####################
def _test_delete_deployments(self):
# admins and default users should be able to delete deployments...
self.wait_for_deployment_creation(self.admin_client, 'dp_example_1')
self.admin_client.deployments.delete('dp_example_1')
self.wait_for_deployment_creation(self.default_client, 'dp_example_2')
self.default_client.deployments.delete('dp_example_2')
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.deployments.delete,
'dp_example_1')
def _test_get_deployments(self):
# admins and default users should be able to get
# deployments...
dp_example_1 = self.admin_client.deployments.get('dp_example_1')
self._assert_resource_id(dp_example_1, expected_id='dp_example_1')
dp_example_1 = self.default_client.deployments.get('dp_example_1')
self._assert_resource_id(dp_example_1, expected_id='dp_example_1')
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.deployments.get,
'dp_example_1')
def _test_list_deployments(self):
        # admins and default users should be able to list deployments
deployments_list = self.admin_client.deployments.list()
expected_ids = {'dp_example_1', 'dp_example_2'}
self._assert_resources_list_ids(deployments_list, expected_ids)
deployments_list = self.default_client.deployments.list()
self._assert_resources_list_ids(deployments_list, expected_ids)
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.deployments.list)
def _test_create_deployments(self):
# admins and default users should be able to create deployments...
self.admin_client.deployments.create('bp_example_1', 'dp_example_1')
self.default_client.deployments.create('bp_example_1', 'dp_example_2')
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.deployments.create,
'dummy_bp', 'dummy_dp')
####################
# execution methods
####################
def _test_cancel_executions(self, execution1_id, execution2_id):
        # preparing executions for cancellation
self._reset_execution_status_in_db(execution1_id)
self._reset_execution_status_in_db(execution2_id)
self.default_client.executions.update(execution1_id, 'pending')
self.default_client.executions.update(execution2_id, 'pending')
# admins and default users should be able to cancel executions...
self.admin_client.executions.cancel(execution1_id)
self.default_client.executions.cancel(execution2_id)
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.executions.cancel,
execution2_id)
def _test_update_executions(self, execution_id):
# admins and default users should be able to update executions...
self._reset_execution_status_in_db(execution_id)
execution = self.admin_client.executions.update(
execution_id, 'pending')
self._assert_execution(execution,
expected_blueprint_id='blueprint_1',
expected_deployment_id='deployment_1',
expected_workflow_name='install',
expected_status='pending')
execution = self.admin_client.executions.update(
execution_id, 'cancelling')
self._assert_execution(execution,
expected_blueprint_id='blueprint_1',
expected_deployment_id='deployment_1',
expected_workflow_name='install',
expected_status='cancelling')
execution = self.default_client.executions.update(
execution_id, 'cancelled')
self._assert_execution(execution,
expected_blueprint_id='blueprint_1',
expected_deployment_id='deployment_1',
expected_workflow_name='install',
expected_status='cancelled')
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.executions.update,
execution_id, 'dummy-status')
def _test_get_executions(self, execution1_id, execution2_id):
# admins and default users should be able to get executions...
execution_1 = self.admin_client.executions.get(execution1_id)
self._assert_execution(execution_1,
expected_blueprint_id='blueprint_1',
expected_deployment_id='deployment_1',
expected_workflow_name='install')
execution_2 = self.default_client.executions.get(execution2_id)
self._assert_execution(execution_2,
expected_blueprint_id='blueprint_2',
expected_deployment_id='deployment_2',
expected_workflow_name='install')
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.executions.get,
'dp_example_1')
def _test_start_executions(self):
# admins and default users should be able to start executions...
execution1 = self.admin_client.executions.start(
deployment_id='deployment_1', workflow_id='install')
execution2 = self.default_client.executions.start(
deployment_id='deployment_2', workflow_id='install')
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.executions.start,
'dummy_dp', 'install')
self.wait_for_deployment_creation(self.admin_client, 'deployment_1')
self.wait_for_deployment_creation(self.admin_client, 'deployment_2')
return execution1['id'], execution2['id']
def _test_list_executions(self):
        # admins and default users should be able to list executions
executions_list = self.admin_client.executions.list()
self.assertEqual(len(executions_list), 2)
executions_list = self.default_client.executions.list()
self.assertEqual(len(executions_list), 2)
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.executions.list)
#################
# node methods
#################
def _test_get_nodes(self):
# admins and default users should be able to get nodes
node1 = self.admin_client.nodes.get(deployment_id='deployment_1',
node_id='mock_node')
self._assert_node(node1, 'mock_node', 'blueprint_1', 'deployment_1',
'cloudify.nodes.Root', 1)
node1 = self.default_client.nodes.get(deployment_id='deployment_1',
node_id='mock_node')
self._assert_node(node1, 'mock_node', 'blueprint_1', 'deployment_1',
'cloudify.nodes.Root', 1)
# but suspended users should not
self._assert_unauthorized(self.suspended_client.nodes.get,
'deployment_1', 'mock_node')
def _test_list_nodes(self):
# admins and default users should be able to list nodes...
nodes_list = self.admin_client.nodes.list()
self.assertEqual(len(nodes_list), 1)
nodes_list = self.default_client.nodes.list()
self.assertEqual(len(nodes_list), 1)
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.nodes.list)
#########################
# node instance methods
#########################
def _test_update_node_instances(self, instance_id):
        # admins and default users should be able to update node instances
node_instance = self.admin_client.node_instances.update(
instance_id, state='testing_state',
runtime_properties={'prop1': 'value1'},
version=1)
self._assert_node_instance(node_instance, 'mock_node',
'deployment_1', 'testing_state',
{'prop1': 'value1'})
node_instance = self.default_client.node_instances.update(
instance_id, state='testing_state',
runtime_properties={'prop1': 'value1'},
version=2)
self._assert_node_instance(node_instance, 'mock_node',
'deployment_1', 'testing_state',
{'prop1': 'value1'})
# ...but suspended users should not
self._assert_unauthorized(
self.suspended_client.node_instances.update, instance_id,
'testing_state')
def _test_get_node_instance(self, instance_id):
# admins and default users should be able to get
        # node instances...
node_instance = self.admin_client.node_instances.get(instance_id)
self._assert_node_instance(node_instance, 'mock_node',
'deployment_1', 'uninitialized')
node_instance = self.default_client.node_instances.get(instance_id)
self._assert_node_instance(node_instance, 'mock_node',
'deployment_1', 'uninitialized')
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.node_instances.get,
instance_id)
return instance_id
def _test_list_node_instances(self):
# admins and default users should be able to list
# node instances..
node_instances = self.admin_client.node_instances.list()
self.assertEqual(len(node_instances), 1)
node_instances = self.default_client.node_instances.list()
self.assertEqual(len(node_instances), 1)
# ...but suspended users should not
self._assert_unauthorized(self.suspended_client.node_instances.list)
return node_instances
###########################
# maintenance mode methods
###########################
def _test_get_status_maintenance_mode(self):
deactivating_status = 'deactivated'
# admins and default users should be able to get the
# maintenance mode status...
state = self.admin_client.maintenance_mode.status()
self.assertEqual(state.status, deactivating_status)
state = self.default_client.maintenance_mode.status()
self.assertEqual(state.status, deactivating_status)
# ...but suspended users should not
self._assert_unauthorized(
self.suspended_client.maintenance_mode.status)
def _test_activate_maintenance_mode(self):
activated_status = 'activated'
# admins should be able to activate maintenance mode...
state = self.admin_client.maintenance_mode.activate()
self.assertEqual(state.status, activated_status)
self.admin_client.maintenance_mode.deactivate()
# ...but default and suspended users should not
self._assert_unauthorized(
self.default_client.maintenance_mode.activate)
self._assert_unauthorized(
self.suspended_client.maintenance_mode.activate)
def _test_deactivate_maintenance_mode(self):
deactivating_status = 'deactivated'
# admins should be able to deactivate maintenance mode...
self.admin_client.maintenance_mode.activate()
state = self.admin_client.maintenance_mode.deactivate()
self.assertEqual(state.status, deactivating_status)
# ...but default users and suspended users should not
self._assert_unauthorized(
self.default_client.maintenance_mode.deactivate)
self._assert_unauthorized(
self.suspended_client.maintenance_mode.deactivate)
#############################
# utility methods
#############################
def _assert_resource_id(self, resource, expected_id):
self.assertEqual(expected_id, resource['id'])
def _assert_resources_list_ids(self, resources_list, expected_ids):
        self.assertEqual(len(expected_ids), len(resources_list))
        resources_ids = set([resource.id for resource in resources_list])
        self.assertEqual(expected_ids, resources_ids)
def _assert_execution(self, execution, expected_blueprint_id,
expected_deployment_id, expected_workflow_name,
expected_status=None):
self.assertEqual(expected_blueprint_id, execution['blueprint_id'])
self.assertEqual(expected_deployment_id, execution['deployment_id'])
self.assertEqual(expected_workflow_name, execution['workflow_id'])
if expected_status:
self.assertEqual(expected_status, execution['status'])
def _assert_node(self, node, expected_node_id, expected_blueprint_id,
expected_deployment_id, expected_node_type,
expected_num_of_instances):
self.assertEqual(expected_node_id, node['id'])
self.assertEqual(expected_blueprint_id, node['blueprint_id'])
self.assertEqual(expected_deployment_id, node['deployment_id'])
self.assertEqual(expected_node_type, node['type'])
self.assertEqual(expected_num_of_instances,
node['number_of_instances'])
def _assert_node_instance(self, node_instance, expected_node_id,
expected_deployment_id, expected_state,
expected_runtime_properties=None,
expected_version=None):
self.assertEqual(expected_node_id, node_instance['node_id'])
self.assertEqual(expected_deployment_id,
node_instance['deployment_id'])
self.assertEqual(expected_state, node_instance['state'])
if expected_runtime_properties:
self.assertEqual(expected_runtime_properties,
node_instance.runtime_properties)
if expected_version:
self.assertEqual(expected_version, node_instance.version)
def _assert_unauthorized(self, method, *args):
self.assertRaisesRegexp(UserUnauthorizedError,
UNAUTHORIZED_ERROR_MESSAGE,
method,
*args)
def _reset_execution_status_in_db(self, execution_id):
execution = self.sm.get(models.Execution, execution_id)
execution.status = ExecutionState.STARTED
execution.error = ''
self.sm.update(execution)
updated_execution = self.admin_client.executions.get(
execution_id=execution_id)
self.assertEqual(ExecutionState.STARTED, updated_execution['status'])
|
|
from os import path
from itertools import chain
from webassets import six
from webassets.six.moves import map
from webassets.six.moves import zip
try:
import glob2 as glob
from glob import has_magic
except ImportError:
import glob
from glob import has_magic
from .bundle import Bundle, is_url
from .cache import get_cache
from .version import get_versioner, get_manifest
from .updater import get_updater
from .utils import urlparse
__all__ = ('Environment', 'RegisterError')
class RegisterError(Exception):
pass
class ConfigStorage(object):
"""This is the backend which :class:`Environment` uses to store
its configuration values.
Environment-subclasses like the one used by ``django-assets`` will
often want to use a custom ``ConfigStorage`` as well, building upon
whatever configuration the framework is using.
The goal in designing this class therefore is to make it easy for
subclasses to change the place the data is stored: Only
    :meth:`__getitem__`, :meth:`__setitem__`, :meth:`__delitem__` and
    :meth:`__contains__` need to be implemented.
One rule: The default storage is case-insensitive, and custom
environments should maintain those semantics.
    This is also why we don't inherit from ``dict``: it would
    require us to re-implement a whole bunch of methods, like pop() etc.
"""
def __init__(self, env):
self.env = env
def get(self, key, default=None):
try:
return self.__getitem__(key)
except KeyError:
return default
def update(self, d):
for key in d:
self.__setitem__(key, d[key])
def setdefault(self, key, value):
        if key not in self:
self.__setitem__(key, value)
return value
return self.__getitem__(key)
def __contains__(self, key):
raise NotImplementedError()
def __getitem__(self, key):
raise NotImplementedError()
def __setitem__(self, key, value):
raise NotImplementedError()
def __delitem__(self, key):
raise NotImplementedError()
def _get_deprecated(self, key):
"""For deprecated keys, fake the values as good as we can.
Subclasses need to call this in __getitem__."""
pass
def _set_deprecated(self, key, value):
"""Same for __setitem__."""
pass
def url_prefix_join(prefix, fragment):
"""Join url prefix with fragment."""
    # Ensure urljoin will not cut the last part.
    if not prefix.endswith('/'):
        prefix += '/'
return urlparse.urljoin(prefix, fragment)
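# Illustrative behaviour of url_prefix_join (the values are hypothetical):
#   url_prefix_join('/static', 'css/app.css')  -> '/static/css/app.css'
#   url_prefix_join('/static/', 'css/app.css') -> '/static/css/app.css'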
class Resolver(object):
"""Responsible for resolving user-specified :class:`Bundle`
contents to actual files, as well as to urls.
In this base version, this is essentially responsible for searching
the load path for the queried file.
A custom implementation of this class is tremendously useful when
integrating with frameworks, which usually have some system to
spread static files across applications or modules.
The class is designed for maximum extensibility.
"""
def __init__(self, env):
self.env = env
def glob(self, basedir, expr):
"""Generator that runs when a glob expression needs to be
        resolved. Yields absolute filenames, skipping directories.
"""
expr = path.join(basedir, expr)
for filename in glob.iglob(expr):
if path.isdir(filename):
continue
yield filename
def consider_single_directory(self, directory, item):
"""Searches for ``item`` within ``directory``. Is able to
resolve glob instructions.
        Subclasses can call this when they have narrowed down the
location of a bundle item to a single directory.
"""
expr = path.join(directory, item)
if has_magic(expr):
# Note: No error if glob returns an empty list
return list(self.glob(directory, item))
else:
if path.exists(expr):
return expr
raise IOError("'%s' does not exist" % expr)
def search_env_directory(self, item):
"""This is called by :meth:`search_for_source` when no
:attr:`Environment.load_path` is set.
"""
return self.consider_single_directory(self.env.directory, item)
def search_load_path(self, item):
"""This is called by :meth:`search_for_source` when a
:attr:`Environment.load_path` is set.
If you want to change how the load path is processed,
overwrite this method.
"""
if has_magic(item):
# We glob all paths.
result = []
for path in self.env.load_path:
result.extend(list(self.glob(path, item)))
return result
else:
# Single file, stop when we find the first match, or error
# out otherwise. We still use glob() because then the load_path
# itself can contain globs. Neat!
for path in self.env.load_path:
result = list(self.glob(path, item))
                if result:
                    return result
raise IOError("'%s' not found in load path: %s" % (
item, self.env.load_path))
def search_for_source(self, item):
"""Called by :meth:`resolve_source` after determining that
``item`` is a relative filesystem path.
You should always overwrite this method, and let
:meth:`resolve_source` deal with absolute paths, urls and
other types of items that a bundle may contain.
"""
if self.env.load_path:
return self.search_load_path(item)
else:
return self.search_env_directory(item)
def query_url_mapping(self, filepath):
"""Searches the environment-wide url mapping (based on the
urls assigned to each directory in the load path). Returns
the correct url for ``filepath``.
Subclasses should be sure that they really want to call this
method, instead of simply falling back to ``super()``.
"""
# Build a list of dir -> url mappings
mapping = list(self.env.url_mapping.items())
try:
mapping.append((self.env.directory, self.env.url))
except EnvironmentError:
# Rarely, directory/url may not be set. That's ok.
pass
# Make sure paths are absolute, normalized, and sorted by length
mapping = list(map(
lambda p_u: (path.normpath(path.abspath(p_u[0])), p_u[1]),
mapping))
mapping.sort(key=lambda i: len(i[0]), reverse=True)
needle = path.normpath(filepath)
for candidate, url in mapping:
if needle.startswith(candidate):
# Found it!
rel_path = needle[len(candidate)+1:]
return url_prefix_join(url, rel_path)
raise ValueError('Cannot determine url for %s' % filepath)
def resolve_source(self, item):
"""Given ``item`` from a Bundle's contents, this has to
return the final value to use, usually an absolute
filesystem path.
.. note::
It is also allowed to return urls and bundle instances
(or generally anything else the calling :class:`Bundle`
instance may be able to handle). Indeed this is the
reason why the name of this method does not imply a
return type.
The incoming item is usually a relative path, but may also be
an absolute path, or a url. These you will commonly want to
return unmodified.
This method is also allowed to resolve ``item`` to multiple
values, in which case a list should be returned. This is
commonly used if ``item`` includes glob instructions
(wildcards).
.. note::
Instead of this, subclasses should consider implementing
:meth:`search_for_source` instead.
"""
# Pass through some things unscathed
if not isinstance(item, six.string_types):
# Don't stand in the way of custom values.
return item
if is_url(item) or path.isabs(item):
return item
return self.search_for_source(item)
def resolve_output_to_path(self, target, bundle):
"""Given ``target``, this has to return the absolute
filesystem path to which the output file of ``bundle``
should be written.
``target`` may be a relative or absolute path, and is
        usually taken from the :attr:`Bundle.output` property.
        If a version-placeholder is used (``%(version)s``), it is
still unresolved at this point.
"""
return path.join(self.env.directory, target)
def resolve_source_to_url(self, filepath, item):
"""Given the absolute filesystem path in ``filepath``, as
well as the original value from :attr:`Bundle.contents` which
resolved to this path, this must return the absolute url
through which the file is to be referenced.
Depending on the use case, either the ``filepath`` or the
``item`` argument will be more helpful in generating the url.
This method should raise a ``ValueError`` if the url cannot
be determined.
"""
return self.query_url_mapping(filepath)
def resolve_output_to_url(self, target):
"""Given ``target``, this has to return the url through
which the output file can be referenced.
``target`` may be a relative or absolute path, and is
        usually taken from the :attr:`Bundle.output` property.
        This is different from :meth:`resolve_source_to_url` in
        that you are not passed the result of
        :meth:`resolve_output_to_path`. This is because in many
        use cases, the filesystem is not available at the point
        where the output url is needed (the media server may be on
        a different machine).
"""
if not path.isabs(target):
# If relative, output files are written to env.directory,
# thus we can simply base all values off of env.url.
return url_prefix_join(self.env.url, target)
else:
# If an absolute output path was specified, then search
# the url mappings.
return self.query_url_mapping(target)
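# A minimal sketch of a framework-style Resolver subclass (illustrative
# only; the per-application directory list is a hypothetical setup, not
# part of webassets). As suggested in resolve_source() above, such
# subclasses usually only need to override search_for_source().
class _ExampleAppDirectoriesResolver(Resolver):
    """Search a fixed list of per-application static directories."""
    def __init__(self, env, app_dirs):
        Resolver.__init__(self, env)
        self.app_dirs = app_dirs
    def search_for_source(self, item):
        # Try each configured directory in turn; consider_single_directory()
        # raises IOError when the item cannot be found there.
        for directory in self.app_dirs:
            try:
                return self.consider_single_directory(directory, item)
            except IOError:
                continue
        raise IOError("'%s' not found in app directories" % item)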
class BundleRegistry(object):
def __init__(self):
self._named_bundles = {}
self._anon_bundles = []
def __iter__(self):
return chain(six.itervalues(self._named_bundles), self._anon_bundles)
def __getitem__(self, name):
return self._named_bundles[name]
def __contains__(self, name):
return name in self._named_bundles
def __len__(self):
return len(self._named_bundles) + len(self._anon_bundles)
def __bool__(self):
return True
__nonzero__ = __bool__ # For Python 2
def register(self, name, *args, **kwargs):
"""Register a :class:`Bundle` with the given ``name``.
This can be called in multiple ways:
- With a single :class:`Bundle` instance::
env.register('jquery', jquery_bundle)
- With a dictionary, registering multiple bundles at once:
bundles = {'js': js_bundle, 'css': css_bundle}
env.register(bundles)
.. note::
This is a convenient way to use a :doc:`loader <loaders>`:
env.register(YAMLLoader('assets.yaml').load_bundles())
- With many arguments, creating a new bundle on the fly::
env.register('all_js', jquery_bundle, 'common.js',
filters='rjsmin', output='packed.js')
"""
# Register a dict
if isinstance(name, dict) and not args and not kwargs:
for name, bundle in name.items():
self.register(name, bundle)
return
if len(args) == 0:
raise TypeError('at least two arguments are required')
else:
if len(args) == 1 and not kwargs and isinstance(args[0], Bundle):
bundle = args[0]
else:
bundle = Bundle(*args, **kwargs)
if name in self._named_bundles:
if self._named_bundles[name] == bundle:
pass # ignore
else:
raise RegisterError('Another bundle is already registered '+
'as "%s": %s' % (name, self._named_bundles[name]))
else:
self._named_bundles[name] = bundle
bundle.env = self # take ownership
return bundle
def add(self, *bundles):
"""Register a list of bundles with the environment, without
naming them.
This isn't terribly useful in most cases. It exists primarily
because in some cases, like when loading bundles by searching
in templates for the use of an "assets" tag, no name is available.
"""
for bundle in bundles:
self._anon_bundles.append(bundle)
bundle.env = self # take ownership
# Those are config keys used by the environment. Framework-wrappers may
# find this list useful if they desire to prefix those settings. For example,
# in Django, it would be ASSETS_DEBUG. Other config keys are encouraged to use
# their own namespacing, so they don't need to be prefixed. For example, a
# filter setting might be CSSMIN_BIN.
env_options = [
'directory', 'url', 'debug', 'cache', 'updater', 'auto_build',
'url_expire', 'versions', 'manifest', 'load_path', 'url_mapping']
class BaseEnvironment(BundleRegistry):
"""Abstract base class for :class:`Environment` with slightly more generic
assumptions, to ease subclassing.
"""
config_storage_class = None
resolver_class = Resolver
def __init__(self, **config):
BundleRegistry.__init__(self)
self._config = self.config_storage_class(self)
self.resolver = self.resolver_class(self)
# directory, url currently do not have default values
#
# some thought went into these defaults:
# - enable url_expire, because we want to encourage the right thing
# - default to hash versions, for the same reason: they're better
# - manifest=cache because hash versions are slow
self.config.setdefault('debug', False)
self.config.setdefault('cache', True)
self.config.setdefault('url_expire', None)
self.config.setdefault('auto_build', True)
self.config.setdefault('manifest', 'cache')
self.config.setdefault('versions', 'hash')
self.config.setdefault('updater', 'timestamp')
self.config.setdefault('load_path', [])
self.config.setdefault('url_mapping', {})
self.config.update(config)
def append_path(self, path, url=None):
"""Appends ``path`` to :attr:`load_path`, and adds a
corresponding entry to :attr:`url_mapping`.
"""
self.load_path.append(path)
if url:
self.url_mapping[path] = url
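    # Hedged sketch of the effect (path and url below are hypothetical):
    #   env.append_path('/srv/app/vendor/js', '/vendor-js')
    #   # env.load_path now ends with '/srv/app/vendor/js' and
    #   # env.url_mapping['/srv/app/vendor/js'] == '/vendor-js'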
@property
def config(self):
"""Key-value configuration. Keys are case-insensitive.
"""
        # This is a property so that users are not tempted to assign
# a custom dictionary which won't uphold our caseless semantics.
return self._config
def _set_debug(self, debug):
self.config['debug'] = debug
def _get_debug(self):
return self.config['debug']
debug = property(_get_debug, _set_debug, doc=
"""Enable/disable debug mode. Possible values are:
``False``
Production mode. Bundles will be merged and filters applied.
``True``
Enable debug mode. Bundles will output their individual source
files.
*"merge"*
Merge the source files, but do not apply filters.
""")
def _set_cache(self, enable):
self.config['cache'] = enable
def _get_cache(self):
cache = get_cache(self.config['cache'], self)
if cache != self.config['cache']:
self.config['cache'] = cache
return cache
cache = property(_get_cache, _set_cache, doc=
"""Controls the behavior of the cache. The cache will speed up rebuilding
of your bundles, by caching individual filter results. This can be
particularly useful while developing, if your bundles would otherwise take
a long time to rebuild.
Possible values are:
``False``
Do not use the cache.
``True`` (default)
Cache using default location, a ``.webassets-cache`` folder inside
:attr:`directory`.
*custom path*
Use the given directory as the cache directory.
""")
def _set_auto_build(self, value):
self.config['auto_build'] = value
def _get_auto_build(self):
return self.config['auto_build']
auto_build = property(_get_auto_build, _set_auto_build, doc=
"""Controls whether bundles should be automatically built, and
rebuilt, when required (if set to ``True``), or whether they
    must be built manually by the user, for example via a management
command.
This is a good setting to have enabled during debugging, and can
be very convenient for low-traffic sites in production as well.
However, there is a cost in checking whether the source files
have changed, so if you care about performance, or if your build
process takes very long, then you may want to disable this.
By default automatic building is enabled.
""")
def _set_manifest(self, manifest):
self.config['manifest'] = manifest
def _get_manifest(self):
manifest = get_manifest(self.config['manifest'], env=self)
if manifest != self.config['manifest']:
self.config['manifest'] = manifest
return manifest
manifest = property(_get_manifest, _set_manifest, doc=
"""A manifest persists information about the versions bundles
are at.
The Manifest plays a role only if you insert the bundle version
in your output filenames, or append the version as a querystring
to the url (via the ``url_expire`` option). It serves two
purposes:
- Without a manifest, it may be impossible to determine the
version at runtime. In a deployed app, the media files may
be stored on a different server entirely, and be
inaccessible from the application code. The manifest,
if shipped with your application, is what still allows to
construct the proper URLs.
- Even if it were possible to determine the version at
runtime without a manifest, it may be a costly process,
and using a manifest may give you better performance. If
you use a hash-based version for example, this hash would
need to be recalculated every time a new process is
started.
Valid values are:
``"cache"`` (default)
The cache is used to remember version information. This
is useful to avoid recalculating the version hash.
``"file:{path}"``
        Stores version information in a file at {path}. If no
path is given, the manifest will be stored as
``.webassets-manifest`` in ``Environment.directory``.
``"json:{path}"``
Same as "file:{path}", but uses JSON to store the information.
``False``, ``None``
No manifest is used.
Any custom manifest implementation.
""")
def _set_versions(self, versions):
self.config['versions'] = versions
def _get_versions(self):
versions = get_versioner(self.config['versions'])
if versions != self.config['versions']:
self.config['versions'] = versions
return versions
versions = property(_get_versions, _set_versions, doc=
"""Defines what should be used as a Bundle ``version``.
A bundle's version is what is appended to URLs when the
``url_expire`` option is enabled, and the version can be part
of a Bundle's output filename by use of the ``%(version)s``
placeholder.
Valid values are:
``timestamp``
The version is determined by looking at the mtime of a
bundle's output file.
``hash`` (default)
The version is a hash over the output file's content.
``False``, ``None``
Functionality that requires a version is disabled. This
includes the ``url_expire`` option, the ``auto_build``
option, and support for the %(version)s placeholder.
Any custom version implementation.
""")
def set_updater(self, updater):
self.config['updater'] = updater
def get_updater(self):
updater = get_updater(self.config['updater'])
if updater != self.config['updater']:
self.config['updater'] = updater
return updater
updater = property(get_updater, set_updater, doc=
"""Controls how the ``auto_build`` option should determine
whether a bundle needs to be rebuilt.
``"timestamp"`` (default)
Rebuild bundles if the source file timestamp exceeds the existing
output file's timestamp.
``"always"``
Always rebuild bundles (avoid in production environments).
    Any custom updater implementation.
""")
def _set_url_expire(self, url_expire):
self.config['url_expire'] = url_expire
def _get_url_expire(self):
return self.config['url_expire']
url_expire = property(_get_url_expire, _set_url_expire, doc=
"""If you send your assets to the client using a
*far future expires* header (to minimize the 304 responses
your server has to send), you need to make sure that assets
will be reloaded by the browser when they change.
If this is set to ``True``, then the Bundle URLs generated by
webassets will have their version (see ``Environment.versions``)
appended as a querystring.
An alternative approach would be to use the ``%(version)s``
placeholder in the bundle output file.
The default behavior (indicated by a ``None`` value) is to add
an expiry querystring if the bundle does not use a version
placeholder.
""")
def _set_directory(self, directory):
self.config['directory'] = directory
def _get_directory(self):
try:
return path.abspath(self.config['directory'])
except KeyError:
raise EnvironmentError(
'The environment has no "directory" configured')
directory = property(_get_directory, _set_directory, doc=
"""The base directory to which all paths will be relative to,
unless :attr:`load_paths` are given, in which case this will
only serve as the output directory.
In the url space, it is mapped to :attr:`urls`.
""")
def _set_url(self, url):
self.config['url'] = url
def _get_url(self):
try:
return self.config['url']
except KeyError:
raise EnvironmentError(
'The environment has no "url" configured')
url = property(_get_url, _set_url, doc=
"""The url prefix used to construct urls for files in
:attr:`directory`.
To define url spaces for other directories, see
:attr:`url_mapping`.
""")
def _set_load_path(self, load_path):
self.config['load_path'] = load_path
def _get_load_path(self):
return self.config['load_path']
load_path = property(_get_load_path, _set_load_path, doc=
"""An list of directories that will be searched for source files.
If this is set, source files will only be looked for in these
directories, and :attr:`directory` is used as a location for
output files only.
    .. note::
        You are free to add :attr:`directory` to your load path as
        well.
    .. note::
Items on the load path are allowed to contain globs.
To modify this list, you should use :meth:`append_path`, since
it makes it easy to add the corresponding url prefix to
:attr:`url_mapping`.
""")
def _set_url_mapping(self, url_mapping):
self.config['url_mapping'] = url_mapping
def _get_url_mapping(self):
return self.config['url_mapping']
url_mapping = property(_get_url_mapping, _set_url_mapping, doc=
"""A dictionary of directory -> url prefix mappings that will
be considered when generating urls, in addition to the pair of
:attr:`directory` and :attr:`url`, which is always active.
You should use :meth:`append_path` to add directories to the
load path along with their respective url spaces, instead of
modifying this setting directly.
""")
class DictConfigStorage(ConfigStorage):
"""Using a lower-case dict for configuration values.
"""
def __init__(self, *a, **kw):
self._dict = {}
ConfigStorage.__init__(self, *a, **kw)
def __contains__(self, key):
return self._dict.__contains__(key.lower())
def __getitem__(self, key):
key = key.lower()
value = self._get_deprecated(key)
        if value is not None:
return value
return self._dict.__getitem__(key)
def __setitem__(self, key, value):
key = key.lower()
if not self._set_deprecated(key, value):
self._dict.__setitem__(key.lower(), value)
def __delitem__(self, key):
self._dict.__delitem__(key.lower())
class Environment(BaseEnvironment):
"""Owns a collection of bundles, and a set of configuration values which
will be used when processing these bundles.
"""
config_storage_class = DictConfigStorage
def __init__(self, directory=None, url=None, **more_config):
super(Environment, self).__init__(**more_config)
if directory is not None:
self.directory = directory
if url is not None:
self.url = url
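# A minimal usage sketch, kept in a helper that is never called at import
# time. The directory, url, source files and filter name are hypothetical;
# they only illustrate how Environment and register() fit together.
def _example_environment():
    env = Environment(directory='./static', url='/static')
    env.register('site_css', 'css/reset.css', 'css/site.css',
                 filters='cssmin', output='gen/site.css')
    return env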
def parse_debug_value(value):
"""Resolve the given string value to a debug option.
Can be used to deal with os environment variables, for example.
"""
if value is None:
return value
value = value.lower()
if value in ('true', '1'):
return True
elif value in ('false', '0'):
return False
elif value in ('merge',):
return 'merge'
else:
raise ValueError()
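# Hedged usage sketch: ASSETS_DEBUG is an arbitrary environment variable
# name chosen for illustration, not something webassets reads by itself.
def _example_debug_from_environ():
    import os
    return parse_debug_value(os.environ.get('ASSETS_DEBUG'))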
|
|
# -*- test-case-name: xquotient.test.test_grabber -*-
from epsilon import hotfix
hotfix.require('twisted', 'deferredgenerator_tfailure')
import time, datetime
from twisted.mail import pop3, pop3client
from twisted.internet import protocol, defer, ssl, error
from twisted.python import log, components, failure
from twisted.protocols import policies
from nevow import loaders, tags, athena
from nevow.flat import flatten
from nevow.athena import expose
from epsilon import descriptor, extime
from axiom import item, attributes, iaxiom
from axiom.dependency import dependsOn
from axiom.upgrade import registerUpgrader
from xmantissa import ixmantissa, webtheme, liveform
from xmantissa.webapp import PrivateApplication
from xmantissa.scrolltable import ScrollingFragment, AttributeColumn, TYPE_FRAGMENT
from xmantissa.stats import BandwidthMeasuringFactory
from xquotient.mail import DeliveryAgent
PROTOCOL_LOGGING = True
class Status(item.Item):
"""
Represents the latest status of a particular grabber.
"""
when = attributes.timestamp(doc="""
Time at which this status was set.
""")
message = attributes.text(doc="""
A short string describing the current state of the grabber.
""")
success = attributes.boolean(doc="""
Flag indicating whether this status indicates a successful action
or not.
""")
changeObservers = attributes.inmemory(doc="""
List of single-argument callables which will be invoked each time
this status changes.
""")
def __repr__(self):
return '<Status %r>' % (self.message,)
def activate(self):
self.changeObservers = []
self.message = u"idle"
def addChangeObserver(self, observer):
self.changeObservers.append(observer)
return lambda: self.changeObservers.remove(observer)
def setStatus(self, message, success=True):
self.when = extime.Time()
self.message = message
self.success = success
for L in self.changeObservers:
try:
L(message)
except:
log.err(None, "Failure in status update")
class GrabberBenefactor(item.Item):
"""
Installs a GrabberConfiguration (and any requisite website
powerups) on avatars.
"""
endowed = attributes.integer(doc="""
The number of avatars who have been endowed by this benefactor.
""", default=0)
powerupNames = ["xquotient.grabber.GrabberConfiguration"]
class GrabberConfiguration(item.Item):
"""
Manages the creation, operation, and destruction of grabbers
(items which retrieve information from remote sources).
"""
schemaVersion = 3
paused = attributes.boolean(doc="""
Flag indicating whether grabbers created by this Item will be
allowed to run.
""", default=False)
privateApplication = dependsOn(PrivateApplication)
deliveryAgent = dependsOn(DeliveryAgent)
def addGrabber(self, username, password, domain, ssl):
# DO IT
if ssl:
port = 995
else:
port = 110
pg = POP3Grabber(
store=self.store,
username=username,
password=password,
domain=domain,
port=port,
config=self,
ssl=ssl)
        # Schedule the new grabber to run immediately (the scheduler may
        # actually invoke it slightly later).
        iaxiom.IScheduler(self.store).schedule(pg, extime.Time())
item.declareLegacyItem(GrabberConfiguration.typeName, 1, dict(
paused=attributes.boolean(default=False),
installedOn=attributes.reference()))
def _grabberConfiguration1to2(old):
new = old.upgradeVersion(GrabberConfiguration.typeName, 1, 2,
paused=old.paused,
privateApplication = old.store.findOrCreate(PrivateApplication),
deliveryAgent = old.store.findOrCreate(DeliveryAgent))
return new
registerUpgrader(_grabberConfiguration1to2, GrabberConfiguration.typeName, 1, 2)
item.declareLegacyItem(GrabberConfiguration.typeName, 2, dict(
paused=attributes.boolean(default=False),
scheduler=attributes.reference(),
privateApplication=attributes.reference(),
deliveryAgent=attributes.reference(),
))
def _grabberConfiguration2to3(old):
"""
Copy all the remaining attributes.
"""
new = old.upgradeVersion(GrabberConfiguration.typeName, 2, 3,
paused=old.paused,
privateApplication = old.store.findOrCreate(PrivateApplication),
deliveryAgent = old.store.findOrCreate(DeliveryAgent))
return new
registerUpgrader(_grabberConfiguration2to3, GrabberConfiguration.typeName, 2, 3)
class POP3UID(item.Item):
grabberID = attributes.text(doc="""
A string identifying the email-address/port parts of a
configured grabber
""", indexed=True)
value = attributes.bytes(doc="""
A POP3 UID which has already been retrieved.
""", indexed=True)
failed = attributes.boolean(doc="""
When set, indicates that an attempt was made to retrieve this UID,
but for some reason was unsuccessful.
""", indexed=True, default=False)
class POP3Grabber(item.Item):
"""
Item for retrieving email messages from a remote POP server.
"""
config = attributes.reference(doc="""
The L{GrabberConfiguration} which created this grabber.
""")
status = attributes.reference(doc="""
The current state of this grabber. This indicates whether a grab
is currently being run, if a password is incorrect, etc.
""")
paused = attributes.boolean(doc="""
Flag indicating whether this particular grabber will try to get
scheduled to retrieve messages.
""", default=False)
username = attributes.text(doc="""
Username in the remote system with which to authenticate.
""", allowNone=False)
password = attributes.text(doc="""
Password in the remote system with which to authenticate.
""", allowNone=False)
domain = attributes.text(doc="""
The address of the remote system to which to connect.
""", allowNone=False)
port = attributes.integer(doc="""
TCP port number on the remote system to which to connect.
""", default=110)
ssl = attributes.boolean(doc="""
Flag indicating whether to connect using SSL (note: this does not
affect whether TLS will be negotiated post-connection.)
""", default=False)
messageCount = attributes.integer(doc="""
The number of messages which have been retrieved by this grabber.
""", default=0)
running = attributes.inmemory(doc="""
Flag indicating whether an attempt to retrieve messages is
currently in progress. Only one attempt is allowed outstanding at
any given time.
""")
protocol = attributes.inmemory(doc="""
    While self.running is True, this attribute points to the
    ControlledPOP3GrabberProtocol instance which is retrieving messages
    for this grabber.""")
connector = attributes.inmemory(doc="""
implementor of L{twisted.internet.interfaces.IConnector}, representing
our connection to the POP server
""")
scheduled = attributes.timestamp(doc="""
When this grabber is next scheduled to run.
""")
debug = attributes.boolean(doc="""
Flag indicating whether to log traffic from this grabber or not.
""", default=False)
created = attributes.timestamp(doc="""
Creation time of this grabber. Used when deciding whether a grabbed
message is old enough to automatically archive.
""")
_pop3uids = attributes.inmemory(doc="""
A set of strings representing all the POP3 UIDs which have already been
downloaded by this grabber.
""")
class installedOn(descriptor.attribute):
def get(self):
return self.config.installedOn
def __init__(self, **kw):
if 'created' not in kw:
kw['created'] = extime.Time()
return super(POP3Grabber, self).__init__(**kw)
def activate(self):
self._pop3uids = None
self.running = False
self.protocol = None
if self.status is None:
self.status = Status(store=self.store, message=u'idle')
def delete(self):
iaxiom.IScheduler(self.store).unscheduleAll(self)
if self.running:
if self.protocol is not None:
self.protocol.stop()
self.protocol.grabber = None
else:
self.connector.disconnect()
self.deleteFromStore()
def grab(self):
# Don't run concurrently, ever.
if self.running:
return
self.running = True
from twisted.internet import reactor
port = self.port
if self.ssl:
if port is None:
port = 995
connect = lambda h, p, f: reactor.connectSSL(h, p, f, ssl.ClientContextFactory())
else:
if port is None:
port = 110
connect = reactor.connectTCP
factory = POP3GrabberFactory(self, self.ssl)
if self.debug:
factory = policies.TrafficLoggingFactory(
factory,
'pop3client-%d-%f' % (self.storeID, time.time()))
self.status.setStatus(u"Connecting to %s:%d..." % (self.domain, port))
self.connector = connect(self.domain, port, BandwidthMeasuringFactory(factory, 'pop3-grabber'))
def run(self):
"""
Retrieve some messages from the account associated with this
grabber.
"""
try:
if not self.paused:
try:
self.grab()
except:
log.err(None, "Failure in scheduled event")
finally:
# XXX This is not a good way for things to work. Different, later.
delay = datetime.timedelta(seconds=300)
self.scheduled = extime.Time() + delay
return self.scheduled
def _grabberID(self):
        if (self.ssl and self.port == 995) or (not self.ssl and self.port == 110):
port = 'default'
else:
port = self.port
return '%s@%s:%s' % (self.username, self.domain, port)
grabberID = property(_grabberID)
def shouldRetrieve(self, uidList):
"""
Return a list of (index, uid) pairs from C{uidList} which have not
already been grabbed.
"""
if self._pop3uids is None:
before = time.time()
# Load all the POP3 UIDs at once and put them in a set for
# super-fast lookup later.
self._pop3uids = set(self.store.query(POP3UID, POP3UID.grabberID == self.grabberID).getColumn("value"))
after = time.time()
log.msg(interface=iaxiom.IStatEvent, stat_pop3uid_load_time=after - before)
log.msg(interface=iaxiom.IStatEvent, stat_pop3uid_check=len(uidList))
return [pair for pair in uidList if pair[1] not in self._pop3uids]
def markSuccess(self, uid, msg):
"""
Mark the retrieval of a message as successful with a particular UID.
This grabber will no longer retrieve the message with that UID from the
server.
Archive that message if its sent date indicates it was sent more than
one day before this grabber was created.
@param uid: a POP3 UID specified by the server
@type uid: L{str}
@param msg: a L{xquotient.exmess.Message} which corresponds to that
UID.
@return: None
"""
if msg.sentWhen + datetime.timedelta(days=1) < self.created:
msg.archive()
log.msg(interface=iaxiom.IStatEvent, stat_messages_grabbed=1,
userstore=self.store)
POP3UID(store=self.store, grabberID=self.grabberID, value=uid)
if self._pop3uids is not None:
self._pop3uids.add(uid)
def markFailure(self, uid, err):
POP3UID(store=self.store, grabberID=self.grabberID, value=uid, failed=True)
if self._pop3uids is not None:
self._pop3uids.add(uid)
class POP3GrabberProtocol(pop3.AdvancedPOP3Client):
_rate = 50
_delay = 2.0
    # An hour without bytes from the server and we'll just give up. The exact
    # duration is arbitrary. It is intended to be long enough to deal with
    # really slow servers or really big mailboxes or some combination of the
    # two, but still short enough that if something actually hangs we won't
    # be stuck on it long enough to upset the user. This is probably an
    # insufficient solution to the problem of hung SSL connections, which is
    # the problem it is primarily targeted at solving.
timeout = (60 * 60)
def timeoutConnection(self):
"""
Idle timeout expired while waiting for some bytes from the server.
Disassociate the protocol object from the POP3Grabber and drop the
connection.
"""
msg = u"Timed out waiting for server response."
addr, peer = self.transport.getHost(), self.transport.getPeer()
log.msg("POP3GrabberProtocol/%s->%s timed out" % (addr, peer))
self.setStatus(msg)
self.transientFailure(failure.Failure(error.TimeoutError(msg)))
self.stoppedRunning()
self.transport.loseConnection()
def setCredentials(self, username, password):
self._username = username
self._password = password
def _consumerFactory(self, msg):
def consume(line):
msg.lineReceived(line)
return consume
def serverGreeting(self, status):
def ebGrab(err):
log.err(err, "Failure while grabbing")
self.setStatus(u'Internal error: ' + unicode(err.getErrorMessage()))
self.transport.loseConnection()
return self._grab().addErrback(ebGrab)
def _grab(self):
source = self.getSource()
d = defer.waitForDeferred(self.login(self._username, self._password))
self.setStatus(u"Logging in...")
yield d
try:
d.getResult()
except pop3client.ServerErrorResponse, e:
self.setStatus(
u'Login failed: ' + str(e).decode('ascii', 'replace'),
False)
self.transport.loseConnection()
return
except pop3.InsecureAuthenticationDisallowed:
self.setStatus(
u'Login aborted: server not secure.',
False)
self.transport.loseConnection()
return
except (error.ConnectionDone, error.ConnectionLost):
self.setStatus(u"Connection lost", False)
return
except:
f = failure.Failure()
log.err(f, "Failure logging in")
self.setStatus(
u'Login failed: internal error.',
False)
self.transport.loseConnection()
return
N = 100
# Up to N (index, uid) pairs which have been received but not
# checked against shouldRetrieve
uidWorkingSet = []
# All the (index, uid) pairs which should be retrieved
uidList = []
# Consumer for listUID - adds to the working set and processes
# a batch if appropriate.
def consumeUIDLine(ent):
uidWorkingSet.append(ent)
if len(uidWorkingSet) >= N:
processBatch()
def processBatch():
L = self.shouldRetrieve(uidWorkingSet)
L.sort()
uidList.extend(L)
del uidWorkingSet[:]
d = defer.waitForDeferred(self.listUID(consumeUIDLine))
self.setStatus(u"Retrieving message list...")
yield d
try:
d.getResult()
except (error.ConnectionDone, error.ConnectionLost):
self.setStatus(u"Connection lost", False)
return
except:
f = failure.Failure()
log.err(f, "Failure retrieving UIDL")
self.setStatus(unicode(f.getErrorMessage()), False)
self.transport.loseConnection()
return
# Clean up any stragglers.
if uidWorkingSet:
processBatch()
log.msg(
'%s: Retrieving %d messages.' % (self.getSource(),
len(uidList)))
# XXX This is a bad loop.
for idx, uid in uidList:
if self.stopped:
return
if self.paused():
break
rece = self.createMIMEReceiver(source)
if rece is None:
            return # cannot create a MIME receiver; abandon this grab run
d = defer.waitForDeferred(self.retrieve(idx, self._consumerFactory(rece)))
self.setStatus(u"Downloading %d of %d" % (idx, uidList[-1][0]))
yield d
try:
d.getResult()
except (error.ConnectionDone, error.ConnectionLost):
self.setStatus(unicode(u"Connection lost"), False)
return
except:
f = failure.Failure()
rece.connectionLost()
self.markFailure(uid, f)
if f.check(pop3client.LineTooLong):
# reschedule, the connection has dropped
self.transientFailure(f)
break
else:
log.err(f, "Failure retrieving message")
else:
try:
rece.eomReceived()
except:
# message could not be delivered.
f = failure.Failure()
log.err(f, "Failure delivering message")
self.markFailure(uid, f)
else:
self.markSuccess(uid, rece.message)
self.setStatus(u"Logging out...")
d = defer.waitForDeferred(self.quit())
yield d
try:
d.getResult()
except (error.ConnectionDone, error.ConnectionLost):
self.setStatus(u"idle")
except:
f = failure.Failure()
log.err(f, "Failure quitting")
self.setStatus(unicode(f.getErrorMessage()), False)
else:
self.setStatus(u"idle")
self.transport.loseConnection()
_grab = defer.deferredGenerator(_grab)
def connectionLost(self, reason):
# XXX change status here - maybe?
pop3.AdvancedPOP3Client.connectionLost(self, reason)
self.stoppedRunning()
stopped = False
def stop(self):
self.stopped = True
class ControlledPOP3GrabberProtocol(POP3GrabberProtocol):
def _transact(self, *a, **kw):
return self.grabber.store.transact(*a, **kw)
def getSource(self):
return u'pop3://' + self.grabber.grabberID
def setStatus(self, msg, success=True):
if self.grabber is not None:
self._transact(self.grabber.status.setStatus, msg, success)
def shouldRetrieve(self, uidList):
if self.grabber is not None:
return self._transact(self.grabber.shouldRetrieve, uidList)
def createMIMEReceiver(self, source):
if self.grabber is not None:
def createIt():
agent = self.grabber.config.deliveryAgent
return agent.createMIMEReceiver(source)
return self._transact(createIt)
def markSuccess(self, uid, msg):
if self.grabber is not None:
return self._transact(self.grabber.markSuccess, uid, msg)
def markFailure(self, uid, reason):
if self.grabber is not None:
return self._transact(self.grabber.markFailure, uid, reason)
def paused(self):
if self.grabber is not None:
return self.grabber.paused
_transient = False
def transientFailure(self, f):
self._transient = True
def stoppedRunning(self):
if self.grabber is None:
return
self.grabber.running = False
if self._transient:
iaxiom.IScheduler(self.grabber.store).reschedule(
self.grabber,
self.grabber.scheduled,
extime.Time())
self.grabber = None
class POP3GrabberFactory(protocol.ClientFactory):
protocol = ControlledPOP3GrabberProtocol
def __init__(self, grabber, ssl):
"""
@param grabber: The L{POP3Grabber} item driving this factory.
@param ssl: A flag indicating whether an SSL connection will be attempted.
"""
self.grabber = grabber
self.ssl = ssl
def clientConnectionFailed(self, connector, reason):
self.grabber.status.setStatus(u"Connection failed: " + reason.getErrorMessage())
self.grabber.running = False
self.grabber.protocol = None
def buildProtocol(self, addr):
self.grabber.status.setStatus(u"Connection established...")
p = protocol.ClientFactory.buildProtocol(self, addr)
if self.ssl:
p.allowInsecureLogin = True
p.setCredentials(
self.grabber.username.encode('ascii'),
self.grabber.password.encode('ascii'))
p.grabber = self.grabber
self.grabber.protocol = p
return p
# This might be useful when we get an IMAP grabber online.
# grabberTypes = {
# 'POP3': POP3Grabber,
# }
class GrabberConfigFragment(athena.LiveFragment):
fragmentName = 'grabber-configuration'
live = 'athena'
jsClass = u'Quotient.Grabber.Controller'
title = 'Incoming'
def head(self):
return ()
def render_addGrabberForm(self, ctx, data):
f = liveform.LiveForm(
self.addGrabber,
[liveform.Parameter('domain',
liveform.TEXT_INPUT,
unicode,
u'Domain',
u'The domain which hosts the account.'),
liveform.Parameter('username',
liveform.TEXT_INPUT,
unicode,
u'Username',
u'The username portion of the address from which to retrieve messages.'),
liveform.Parameter('password1',
liveform.PASSWORD_INPUT,
unicode,
u'Password',
u'The password for the remote account.'),
liveform.Parameter('password2',
liveform.PASSWORD_INPUT,
unicode,
u'Repeat Password'),
# Along with the above, this might be useful if we had an IMAP grabber.
# liveform.Parameter('protocol',
# liveform.Choice(grabberTypes.keys()),
# lambda value: grabberTypes[value],
# u'Super secret computer science stuff',
# 'POP3'),
liveform.Parameter('ssl',
liveform.CHECKBOX_INPUT,
bool,
u'Use SSL to fetch messages')],
description='Add Grabber')
f.jsClass = u'Quotient.Grabber.AddGrabberFormWidget'
f.setFragmentParent(self)
f.docFactory = webtheme.getLoader('liveform-compact')
return ctx.tag[f]
wt = None
def getEditGrabberForm(self, targetID):
if self.wt is None:
self.wt = self.original.privateApplication
grabber = self.wt.fromWebID(targetID)
f = liveform.LiveForm(
lambda **kwargs: self.editGrabber(grabber, **kwargs),
(liveform.Parameter('password1',
liveform.PASSWORD_INPUT,
unicode,
u'New Password'),
liveform.Parameter('password2',
liveform.PASSWORD_INPUT,
unicode,
u'Repeat Password'),
liveform.Parameter('ssl',
liveform.CHECKBOX_INPUT,
bool,
'Use SSL',
default=grabber.ssl)),
description='Edit Grabber')
grabber.grab()
f.setFragmentParent(self)
return unicode(flatten(f), 'utf-8')
expose(getEditGrabberForm)
def editGrabber(self, grabber, password1, password2, ssl):
if password1 != password2:
raise ValueError("Passwords don't match")
if ssl != grabber.ssl:
if ssl:
port = 995
else:
port = 110
grabber.port = port
grabber.ssl = ssl
if password1 and password2:
grabber.password = password1
self.callRemote('hideEditForm')
return u'Well Done'
def addGrabber(self, domain, username, password1, password2, ssl):
if password1 != password2:
raise ValueError("Passwords don't match")
self.original.addGrabber(username, password1, domain, ssl)
def render_POP3Grabbers(self, ctx, data):
self.configuredGrabbersView = ConfiguredGrabbersView(self.original.store)
self.configuredGrabbersView.setFragmentParent(self)
return self.configuredGrabbersView
components.registerAdapter(GrabberConfigFragment, GrabberConfiguration, ixmantissa.INavigableFragment)
class LiveStatusFragment(athena.LiveFragment):
docFactory = loaders.stan(tags.span(render=tags.directive('liveFragment')))
jsClass = u'Quotient.Grabber.StatusWidget'
_pending = False
_pendingStatus = None
def __init__(self, status):
self.status = status
def statusChanged(self, newStatus):
if self._pending:
self._pendingStatus = newStatus
else:
self._pending = True
self.callRemote('setStatus', newStatus).addCallback(self._unpend)
def _unpend(self, ign):
pendingStatus = self._pendingStatus
self._pendingStatus = None
self._pending = False
if pendingStatus is not None:
self.statusChanged(pendingStatus)
def startObserving(self):
self.removeObserver = self.status.addChangeObserver(self.statusChanged)
return self.status.message
expose(startObserving)
class StatusColumn(AttributeColumn):
def __init__(self, attribute, fragment):
super(StatusColumn, self).__init__(attribute)
self.fragment = fragment
def extractValue(self, model, item):
f = LiveStatusFragment(item.status)
f.setFragmentParent(self.fragment)
return unicode(flatten(f), 'utf-8')
def getType(self):
return TYPE_FRAGMENT
class ConfiguredGrabbersView(ScrollingFragment):
jsClass = u'Quotient.Grabber.ScrollingWidget'
def __init__(self, store):
ScrollingFragment.__init__(self, store, POP3Grabber, None,
[POP3Grabber.username,
POP3Grabber.domain,
POP3Grabber.paused,
StatusColumn(POP3Grabber.status, self)])
self.docFactory = webtheme.getLoader(self.fragmentName)
def action_delete(self, grabber):
grabber.delete()
def action_pause(self, grabber):
grabber.paused = True
def action_resume(self, grabber):
grabber.paused = False
|
|
# Copyright 2015 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from mock import MagicMock
from mock import patch
from oslo_utils import netutils
from testtools.matchers import Is, Equals, Not
from trove.common.instance import ServiceStatuses
from trove.guestagent import backup
from trove.guestagent.datastore.experimental.couchdb import (
manager as couchdb_manager)
from trove.guestagent.datastore.experimental.couchdb import (
service as couchdb_service)
from trove.guestagent import pkg as pkg
from trove.guestagent import volume
from trove.tests.unittests import trove_testtools
class GuestAgentCouchDBManagerTest(trove_testtools.TestCase):
def setUp(self):
super(GuestAgentCouchDBManagerTest, self).setUp()
self.real_status = couchdb_service.CouchDBAppStatus.set_status
class FakeInstanceServiceStatus(object):
status = ServiceStatuses.NEW
def save(self):
pass
couchdb_service.CouchDBAppStatus.set_status = MagicMock(
return_value=FakeInstanceServiceStatus())
self.context = trove_testtools.TroveTestContext(self)
self.manager = couchdb_manager.Manager()
self.pkg = couchdb_service.packager
self.real_db_app_status = couchdb_service.CouchDBAppStatus
self.origin_os_path_exists = os.path.exists
self.origin_format = volume.VolumeDevice.format
self.origin_migrate_data = volume.VolumeDevice.migrate_data
self.origin_mount = volume.VolumeDevice.mount
self.origin_mount_points = volume.VolumeDevice.mount_points
self.origin_stop_db = couchdb_service.CouchDBApp.stop_db
self.origin_start_db = couchdb_service.CouchDBApp.start_db
self.original_get_ip = netutils.get_my_ipv4
self.orig_make_host_reachable = (
couchdb_service.CouchDBApp.make_host_reachable)
self.orig_backup_restore = backup.restore
self.orig_create_users = couchdb_service.CouchDBAdmin.create_user
self.orig_delete_user = couchdb_service.CouchDBAdmin.delete_user
self.orig_list_users = couchdb_service.CouchDBAdmin.list_users
self.orig_get_user = couchdb_service.CouchDBAdmin.get_user
self.orig_grant_access = couchdb_service.CouchDBAdmin.grant_access
self.orig_revoke_access = couchdb_service.CouchDBAdmin.revoke_access
self.orig_list_access = couchdb_service.CouchDBAdmin.list_access
self.orig_enable_root = couchdb_service.CouchDBAdmin.enable_root
self.orig_is_root_enabled = (
couchdb_service.CouchDBAdmin.is_root_enabled)
self.orig_create_databases = (
couchdb_service.CouchDBAdmin.create_database)
self.orig_list_databases = couchdb_service.CouchDBAdmin.list_databases
self.orig_delete_database = (
couchdb_service.CouchDBAdmin.delete_database)
def tearDown(self):
super(GuestAgentCouchDBManagerTest, self).tearDown()
couchdb_service.packager = self.pkg
couchdb_service.CouchDBAppStatus.set_status = self.real_db_app_status
os.path.exists = self.origin_os_path_exists
volume.VolumeDevice.format = self.origin_format
volume.VolumeDevice.migrate_data = self.origin_migrate_data
volume.VolumeDevice.mount = self.origin_mount
volume.VolumeDevice.mount_points = self.origin_mount_points
couchdb_service.CouchDBApp.stop_db = self.origin_stop_db
couchdb_service.CouchDBApp.start_db = self.origin_start_db
netutils.get_my_ipv4 = self.original_get_ip
couchdb_service.CouchDBApp.make_host_reachable = (
self.orig_make_host_reachable)
backup.restore = self.orig_backup_restore
couchdb_service.CouchDBAdmin.create_user = self.orig_create_users
couchdb_service.CouchDBAdmin.delete_user = self.orig_delete_user
couchdb_service.CouchDBAdmin.list_users = self.orig_list_users
couchdb_service.CouchDBAdmin.get_user = self.orig_get_user
couchdb_service.CouchDBAdmin.grant_access = self.orig_grant_access
couchdb_service.CouchDBAdmin.revoke_access = self.orig_revoke_access
couchdb_service.CouchDBAdmin.list_access = self.orig_list_access
couchdb_service.CouchDBAdmin.enable_root = self.orig_enable_root
couchdb_service.CouchDBAdmin.is_root_enabled = (
self.orig_is_root_enabled)
couchdb_service.CouchDBAdmin.create_database = (
self.orig_create_databases)
couchdb_service.CouchDBAdmin.list_databases = self.orig_list_databases
couchdb_service.CouchDBAdmin.delete_database = (
self.orig_delete_database)
def test_update_status(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
self.manager.update_status(self.context)
mock_status.update.assert_any_call()
def _prepare_dynamic(self, packages=None, databases=None,
config_content=None, device_path='/dev/vdb',
is_db_installed=True, backup_id=None,
overrides=None):
mock_status = MagicMock()
mock_app = MagicMock()
self.manager.appStatus = mock_status
self.manager.app = mock_app
mount_point = '/var/lib/couchdb'
mock_status.begin_install = MagicMock(return_value=None)
mock_app.install_if_needed = MagicMock(return_value=None)
mock_app.make_host_reachable = MagicMock(return_value=None)
mock_app.restart = MagicMock(return_value=None)
mock_app.start_db = MagicMock(return_value=None)
mock_app.stop_db = MagicMock(return_value=None)
os.path.exists = MagicMock(return_value=True)
volume.VolumeDevice.format = MagicMock(return_value=None)
volume.VolumeDevice.migrate_data = MagicMock(return_value=None)
volume.VolumeDevice.mount = MagicMock(return_value=None)
volume.VolumeDevice.mount_points = MagicMock(return_value=[])
backup.restore = MagicMock(return_value=None)
backup_info = {'id': backup_id,
'location': 'fake-location',
'type': 'CouchDBBackup',
'checksum': 'fake-checksum'} if backup_id else None
couchdb_service.CouchDBAdmin.create_database = MagicMock(
return_value=None)
couchdb_service.CouchDBAdmin.create_user = MagicMock(return_value=None)
with patch.object(pkg.Package, 'pkg_is_installed',
return_value=MagicMock(
return_value=is_db_installed)):
self.manager.prepare(context=self.context, packages=packages,
config_contents=config_content,
databases=databases,
memory_mb='2048', users=None,
device_path=device_path,
mount_point=mount_point,
backup_info=backup_info,
overrides=None,
cluster_config=None)
# verification/assertion
mock_status.begin_install.assert_any_call()
mock_app.install_if_needed.assert_any_call(packages)
mock_app.make_host_reachable.assert_any_call()
mock_app.change_permissions.assert_any_call()
if backup_id:
backup.restore.assert_any_call(self.context,
backup_info,
mount_point)
def test_prepare_pkg(self):
self._prepare_dynamic(['couchdb'])
def test_prepare_no_pkg(self):
self._prepare_dynamic([])
def test_prepare_from_backup(self):
self._prepare_dynamic(['couchdb'], backup_id='123abc456')
def test_prepare_database(self):
self._prepare_dynamic(databases=['db1'])
def test_restart(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
with patch.object(couchdb_service.CouchDBApp, 'restart',
return_value=None):
# invocation
self.manager.restart(self.context)
# verification/assertion
couchdb_service.CouchDBApp.restart.assert_any_call()
def test_stop_db(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBApp.stop_db = MagicMock(return_value=None)
# invocation
self.manager.stop_db(self.context)
# verification/assertion
couchdb_service.CouchDBApp.stop_db.assert_any_call(
do_not_start_on_reboot=False)
def test_reset_configuration(self):
try:
configuration = {'config_contents': 'some junk'}
self.manager.reset_configuration(self.context, configuration)
except Exception:
self.fail("reset_configuration raised exception unexpectedly.")
def test_rpc_ping(self):
output = self.manager.rpc_ping(self.context)
self.assertTrue(output)
def test_create_user(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.create_user = MagicMock(return_value=None)
self.manager.create_user(self.context, ['user1'])
couchdb_service.CouchDBAdmin.create_user.assert_any_call(['user1'])
def test_delete_user(self):
user = ['user1']
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.delete_user = MagicMock(return_value=None)
self.manager.delete_user(self.context, user)
couchdb_service.CouchDBAdmin.delete_user.assert_any_call(user)
def test_list_users(self):
couchdb_service.CouchDBAdmin.list_users = MagicMock(
return_value=['user1'])
users = self.manager.list_users(self.context)
self.assertThat(users, Equals(['user1']))
couchdb_service.CouchDBAdmin.list_users.assert_any_call(
None, None, False)
def test_get_user(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.get_user = MagicMock(
return_value=['user1'])
self.manager.get_user(self.context, 'user1', None)
couchdb_service.CouchDBAdmin.get_user.assert_any_call(
'user1', None)
def test_grant_access(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.grant_access = MagicMock(
return_value=None)
self.manager.grant_access(self.context, 'user1', None, ['db1'])
couchdb_service.CouchDBAdmin.grant_access.assert_any_call(
'user1', ['db1'])
def test_revoke_access(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.revoke_access = MagicMock(
return_value=None)
self.manager.revoke_access(self.context, 'user1', None, ['db1'])
couchdb_service.CouchDBAdmin.revoke_access.assert_any_call(
'user1', ['db1'])
def test_list_access(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.list_access = MagicMock(
return_value=['user1'])
self.manager.list_access(self.context, 'user1', None)
couchdb_service.CouchDBAdmin.list_access.assert_any_call(
'user1', None)
def test_enable_root(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.enable_root = MagicMock(
return_value=True)
result = self.manager.enable_root(self.context)
self.assertThat(result, Equals(True))
def test_is_root_enabled(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.is_root_enabled = MagicMock(
return_value=True)
result = self.manager.is_root_enabled(self.context)
self.assertThat(result, Equals(True))
def test_create_databases(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.create_database = MagicMock(
return_value=None)
self.manager.create_database(self.context, ['db1'])
couchdb_service.CouchDBAdmin.create_database.assert_any_call(['db1'])
def test_delete_database(self):
databases = ['db1']
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.delete_database = MagicMock(
return_value=None)
self.manager.delete_database(self.context, databases)
couchdb_service.CouchDBAdmin.delete_database.assert_any_call(
databases)
def test_list_databases(self):
mock_status = MagicMock()
self.manager.appStatus = mock_status
couchdb_service.CouchDBAdmin.list_databases = MagicMock(
return_value=['database1'])
databases = self.manager.list_databases(self.context)
self.assertThat(databases, Not(Is(None)))
self.assertThat(databases, Equals(['database1']))
couchdb_service.CouchDBAdmin.list_databases.assert_any_call(
None, None, False)
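# Illustrative sketch (not part of the test module above): the setUp/tearDown
# pair manually saves and restores every patched attribute.  An equivalent,
# less error-prone pattern lets mock undo the patch automatically via
# addCleanup.  Shown here for a single attribute only, as an assumption about
# how one *could* structure it rather than how the project actually does.
class _ExamplePatchStyle(trove_testtools.TestCase):
    def setUp(self):
        super(_ExamplePatchStyle, self).setUp()
        patcher = patch.object(couchdb_service.CouchDBApp, 'stop_db',
                               return_value=None)
        self.mock_stop_db = patcher.start()
        self.addCleanup(patcher.stop)   # restored even if the test fails
    def test_stop_db_is_patched(self):
        self.assertIs(couchdb_service.CouchDBApp.stop_db, self.mock_stop_db)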
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for iBoot PDU driver module."""
import mock
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.drivers.modules import iboot
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_iboot_info()
class IBootPrivateMethodTestCase(db_base.DbTestCase):
def test__parse_driver_info_good(self):
node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=INFO_DICT)
info = iboot._parse_driver_info(node)
self.assertIsNotNone(info.get('address'))
self.assertIsNotNone(info.get('username'))
self.assertIsNotNone(info.get('password'))
self.assertIsNotNone(info.get('port'))
self.assertIsNotNone(info.get('relay_id'))
def test__parse_driver_info_good_with_explicit_port(self):
info = dict(INFO_DICT)
info['iboot_port'] = '1234'
node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=info)
info = iboot._parse_driver_info(node)
self.assertEqual(1234, info.get('port'))
def test__parse_driver_info_good_with_explicit_relay_id(self):
info = dict(INFO_DICT)
info['iboot_relay_id'] = '2'
node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=info)
info = iboot._parse_driver_info(node)
self.assertEqual(2, info.get('relay_id'))
def test__parse_driver_info_missing_address(self):
info = dict(INFO_DICT)
del info['iboot_address']
node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=info)
self.assertRaises(exception.MissingParameterValue,
iboot._parse_driver_info,
node)
def test__parse_driver_info_missing_username(self):
info = dict(INFO_DICT)
del info['iboot_username']
node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=info)
self.assertRaises(exception.MissingParameterValue,
iboot._parse_driver_info,
node)
def test__parse_driver_info_missing_password(self):
info = dict(INFO_DICT)
del info['iboot_password']
node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=info)
self.assertRaises(exception.MissingParameterValue,
iboot._parse_driver_info,
node)
def test__parse_driver_info_bad_port(self):
info = dict(INFO_DICT)
info['iboot_port'] = 'not-integer'
node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=info)
self.assertRaises(exception.InvalidParameterValue,
iboot._parse_driver_info,
node)
def test__parse_driver_info_bad_relay_id(self):
info = dict(INFO_DICT)
info['iboot_relay_id'] = 'not-integer'
node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=info)
self.assertRaises(exception.InvalidParameterValue,
iboot._parse_driver_info,
node)
@mock.patch.object(iboot, '_get_connection')
def test__power_status_on(self, mock_get_conn):
mock_connection = mock.Mock()
mock_connection.get_relays.return_value = [True]
mock_get_conn.return_value = mock_connection
node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=INFO_DICT)
info = iboot._parse_driver_info(node)
status = iboot._power_status(info)
self.assertEqual(states.POWER_ON, status)
mock_get_conn.assert_called_once_with(info)
mock_connection.get_relays.assert_called_once_with()
@mock.patch.object(iboot, '_get_connection')
def test__power_status_off(self, mock_get_conn):
mock_connection = mock.Mock()
mock_connection.get_relays.return_value = [False]
mock_get_conn.return_value = mock_connection
node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=INFO_DICT)
info = iboot._parse_driver_info(node)
status = iboot._power_status(info)
self.assertEqual(states.POWER_OFF, status)
mock_get_conn.assert_called_once_with(info)
mock_connection.get_relays.assert_called_once_with()
@mock.patch.object(iboot, '_get_connection')
def test__power_status_exception(self, mock_get_conn):
mock_connection = mock.Mock()
mock_connection.get_relays.return_value = None
mock_get_conn.return_value = mock_connection
node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=INFO_DICT)
info = iboot._parse_driver_info(node)
self.assertRaises(exception.IBootOperationError,
iboot._power_status,
info)
mock_get_conn.assert_called_once_with(info)
mock_connection.get_relays.assert_called_once_with()
@mock.patch.object(iboot, '_get_connection')
def test__power_status_exception_type_error(self, mock_get_conn):
mock_connection = mock.Mock()
side_effect = TypeError("Surprise!")
mock_connection.get_relays.side_effect = side_effect
mock_get_conn.return_value = mock_connection
node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=INFO_DICT)
info = iboot._parse_driver_info(node)
self.assertRaises(exception.IBootOperationError,
iboot._power_status,
info)
mock_get_conn.assert_called_once_with(info)
mock_connection.get_relays.assert_called_once_with()
@mock.patch.object(iboot, '_get_connection')
def test__power_status_exception_index_error(self, mock_get_conn):
mock_connection = mock.Mock()
side_effect = IndexError("Gotcha!")
mock_connection.get_relays.side_effect = side_effect
mock_get_conn.return_value = mock_connection
node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=INFO_DICT)
info = iboot._parse_driver_info(node)
status = iboot._power_status(info)
self.assertEqual(states.ERROR, status)
mock_get_conn.assert_called_once_with(info)
mock_connection.get_relays.assert_called_once_with()
@mock.patch.object(iboot, '_get_connection')
def test__power_status_error(self, mock_get_conn):
mock_connection = mock.Mock()
mock_connection.get_relays.return_value = list()
mock_get_conn.return_value = mock_connection
node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=INFO_DICT)
info = iboot._parse_driver_info(node)
status = iboot._power_status(info)
self.assertEqual(states.ERROR, status)
mock_get_conn.assert_called_once_with(info)
mock_connection.get_relays.assert_called_once_with()
class IBootDriverTestCase(db_base.DbTestCase):
def setUp(self):
super(IBootDriverTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver='fake_iboot')
self.driver = driver_factory.get_driver('fake_iboot')
self.node = obj_utils.create_test_node(
self.context,
driver='fake_iboot',
driver_info=INFO_DICT)
self.info = iboot._parse_driver_info(self.node)
def test_get_properties(self):
expected = iboot.COMMON_PROPERTIES
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual(expected, task.driver.get_properties())
@mock.patch.object(iboot, '_power_status')
@mock.patch.object(iboot, '_switch')
def test_set_power_state_good(self, mock_switch, mock_power_status):
mock_power_status.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.power.set_power_state(task, states.POWER_ON)
# ensure functions were called with the valid parameters
mock_switch.assert_called_once_with(self.info, True)
mock_power_status.assert_called_once_with(self.info)
@mock.patch.object(iboot, '_power_status')
@mock.patch.object(iboot, '_switch')
def test_set_power_state_bad(self, mock_switch, mock_power_status):
mock_power_status.return_value = states.POWER_OFF
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.PowerStateFailure,
task.driver.power.set_power_state,
task, states.POWER_ON)
# ensure functions were called with the valid parameters
mock_switch.assert_called_once_with(self.info, True)
mock_power_status.assert_called_once_with(self.info)
@mock.patch.object(iboot, '_power_status')
@mock.patch.object(iboot, '_switch')
def test_set_power_state_invalid_parameter(self, mock_switch,
mock_power_status):
mock_power_status.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.power.set_power_state,
task, states.NOSTATE)
@mock.patch.object(iboot, '_power_status')
@mock.patch.object(iboot, '_switch')
def test_reboot_good(self, mock_switch, mock_power_status):
manager = mock.MagicMock()
mock_power_status.return_value = states.POWER_ON
manager.attach_mock(mock_switch, 'switch')
expected = [mock.call.switch(self.info, False),
mock.call.switch(self.info, True)]
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.power.reboot(task)
self.assertEqual(manager.mock_calls, expected)
@mock.patch.object(iboot, '_power_status')
@mock.patch.object(iboot, '_switch')
def test_reboot_bad(self, mock_switch, mock_power_status):
manager = mock.MagicMock()
mock_power_status.return_value = states.POWER_OFF
manager.attach_mock(mock_switch, 'switch')
expected = [mock.call.switch(self.info, False),
mock.call.switch(self.info, True)]
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.PowerStateFailure,
task.driver.power.reboot, task)
self.assertEqual(manager.mock_calls, expected)
@mock.patch.object(iboot, '_power_status')
def test_get_power_state(self, mock_power_status):
mock_power_status.return_value = states.POWER_ON
with task_manager.acquire(self.context, self.node.uuid) as task:
state = task.driver.power.get_power_state(task)
self.assertEqual(state, states.POWER_ON)
# ensure functions were called with the valid parameters
mock_power_status.assert_called_once_with(self.info)
@mock.patch.object(iboot, '_parse_driver_info')
def test_validate_good(self, parse_drv_info_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.power.validate(task)
self.assertEqual(1, parse_drv_info_mock.call_count)
@mock.patch.object(iboot, '_parse_driver_info')
def test_validate_fails(self, parse_drv_info_mock):
side_effect = exception.InvalidParameterValue("Bad input")
parse_drv_info_mock.side_effect = side_effect
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.power.validate, task)
self.assertEqual(1, parse_drv_info_mock.call_count)
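# Illustrative note (not part of the test module above): when mock.patch
# decorators are stacked as in the tests above, the mock arguments are passed
# bottom-up -- the decorator closest to the function supplies the first
# argument.  A minimal sketch of that ordering:
@mock.patch.object(iboot, '_power_status')   # -> second argument
@mock.patch.object(iboot, '_switch')         # -> first argument
def _example_decorator_order(mock_switch, mock_power_status):
    return mock_switch, mock_power_status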
|
|
from __future__ import unicode_literals
from __future__ import absolute_import
from collections import namedtuple
import logging
import re
import sys
from operator import attrgetter
import six
from docker.errors import APIError
from docker.utils import create_host_config, LogConfig
from . import __version__
from .config import DOCKER_CONFIG_KEYS, merge_environment
from .const import (
DEFAULT_TIMEOUT,
LABEL_CONTAINER_NUMBER,
LABEL_ONE_OFF,
LABEL_PROJECT,
LABEL_SERVICE,
LABEL_VERSION,
LABEL_CONFIG_HASH,
)
from .container import Container
from .legacy import check_for_legacy_containers
from .progress_stream import stream_output, StreamOutputError
from .utils import json_hash
log = logging.getLogger(__name__)
DOCKER_START_KEYS = [
'cap_add',
'cap_drop',
'devices',
'dns',
'dns_search',
'env_file',
'extra_hosts',
'read_only',
'net',
'log_driver',
'pid',
'privileged',
'restart',
'volumes_from',
'security_opt',
]
VALID_NAME_CHARS = '[a-zA-Z0-9]'
class BuildError(Exception):
def __init__(self, service, reason):
self.service = service
self.reason = reason
class ConfigError(ValueError):
pass
class NeedsBuildError(Exception):
def __init__(self, service):
self.service = service
VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')
ServiceName = namedtuple('ServiceName', 'project service number')
ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
class Service(object):
def __init__(self, name, client=None, project='default', links=None, external_links=None, volumes_from=None, net=None, **options):
if not re.match('^%s+$' % VALID_NAME_CHARS, name):
raise ConfigError('Invalid service name "%s" - only %s are allowed' % (name, VALID_NAME_CHARS))
if not re.match('^%s+$' % VALID_NAME_CHARS, project):
raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
if 'image' in options and 'build' in options:
            raise ConfigError('Service %s has both an image and build path specified. A service can either be built from a build path or use an existing image, not both.' % name)
if 'image' not in options and 'build' not in options:
raise ConfigError('Service %s has neither an image nor a build path specified. Exactly one must be provided.' % name)
self.name = name
self.client = client
self.project = project
self.links = links or []
self.external_links = external_links or []
self.volumes_from = volumes_from or []
self.net = net or None
self.options = options
def containers(self, stopped=False, one_off=False):
containers = [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters={'label': self.labels(one_off=one_off)})]
if not containers:
check_for_legacy_containers(
self.client,
self.project,
[self.name],
stopped=stopped,
one_off=one_off)
return containers
def get_container(self, number=1):
"""Return a :class:`compose.container.Container` for this service. The
container must be active, and match `number`.
"""
labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
for container in self.client.containers(filters={'label': labels}):
return Container.from_ps(self.client, container)
raise ValueError("No container found for %s_%s" % (self.name, number))
def start(self, **options):
for c in self.containers(stopped=True):
self.start_container_if_stopped(c, **options)
def stop(self, **options):
for c in self.containers():
log.info("Stopping %s..." % c.name)
c.stop(**options)
def kill(self, **options):
for c in self.containers():
log.info("Killing %s..." % c.name)
c.kill(**options)
def restart(self, **options):
for c in self.containers():
log.info("Restarting %s..." % c.name)
c.restart(**options)
def scale(self, desired_num):
"""
Adjusts the number of containers to the specified number and ensures
they are running.
- creates containers until there are at least `desired_num`
- stops containers until there are at most `desired_num` running
- starts containers until there are at least `desired_num` running
- removes all stopped containers
"""
if not self.can_be_scaled():
log.warn('Service %s specifies a port on the host. If multiple containers '
'for this service are created on a single host, the port will clash.'
% self.name)
# Create enough containers
containers = self.containers(stopped=True)
while len(containers) < desired_num:
containers.append(self.create_container())
running_containers = []
stopped_containers = []
for c in containers:
if c.is_running:
running_containers.append(c)
else:
stopped_containers.append(c)
running_containers.sort(key=lambda c: c.number)
stopped_containers.sort(key=lambda c: c.number)
# Stop containers
while len(running_containers) > desired_num:
c = running_containers.pop()
log.info("Stopping %s..." % c.name)
c.stop(timeout=1)
stopped_containers.append(c)
# Start containers
while len(running_containers) < desired_num:
c = stopped_containers.pop(0)
log.info("Starting %s..." % c.name)
self.start_container(c)
running_containers.append(c)
self.remove_stopped()
def remove_stopped(self, **options):
for c in self.containers(stopped=True):
if not c.is_running:
log.info("Removing %s..." % c.name)
c.remove(**options)
def create_container(self,
one_off=False,
insecure_registry=False,
do_build=True,
previous_container=None,
number=None,
quiet=False,
**override_options):
"""
Create a container for this service. If the image doesn't exist, attempt to pull
it.
"""
self.ensure_image_exists(
do_build=do_build,
insecure_registry=insecure_registry,
)
container_options = self._get_container_create_options(
override_options,
number or self._next_container_number(one_off=one_off),
one_off=one_off,
previous_container=previous_container,
)
if 'name' in container_options and not quiet:
log.info("Creating %s..." % container_options['name'])
return Container.create(self.client, **container_options)
def ensure_image_exists(self,
do_build=True,
insecure_registry=False):
if self.image():
return
if self.can_be_built():
if do_build:
self.build()
else:
raise NeedsBuildError(self)
else:
self.pull(insecure_registry=insecure_registry)
def image(self):
try:
return self.client.inspect_image(self.image_name)
except APIError as e:
if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
return None
else:
raise
@property
def image_name(self):
if self.can_be_built():
return self.full_name
else:
return self.options['image']
def convergence_plan(self,
allow_recreate=True,
smart_recreate=False):
containers = self.containers(stopped=True)
if not containers:
return ConvergencePlan('create', [])
if smart_recreate and not self._containers_have_diverged(containers):
stopped = [c for c in containers if not c.is_running]
if stopped:
return ConvergencePlan('start', stopped)
return ConvergencePlan('noop', containers)
if not allow_recreate:
return ConvergencePlan('start', containers)
return ConvergencePlan('recreate', containers)
def _containers_have_diverged(self, containers):
config_hash = self.config_hash()
has_diverged = False
for c in containers:
container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
if container_config_hash != config_hash:
log.debug(
'%s has diverged: %s != %s',
c.name, container_config_hash, config_hash,
)
has_diverged = True
return has_diverged
def execute_convergence_plan(self,
plan,
insecure_registry=False,
do_build=True,
timeout=DEFAULT_TIMEOUT):
(action, containers) = plan
if action == 'create':
container = self.create_container(
insecure_registry=insecure_registry,
do_build=do_build,
)
self.start_container(container)
return [container]
elif action == 'recreate':
return [
self.recreate_container(
c,
insecure_registry=insecure_registry,
timeout=timeout
)
for c in containers
]
elif action == 'start':
for c in containers:
self.start_container_if_stopped(c)
return containers
elif action == 'noop':
for c in containers:
log.info("%s is up-to-date" % c.name)
return containers
else:
raise Exception("Invalid action: {}".format(action))
def recreate_container(self,
container,
insecure_registry=False,
timeout=DEFAULT_TIMEOUT):
"""Recreate a container.
The original container is renamed to a temporary name so that data
volumes can be copied to the new container, before the original
container is removed.
"""
log.info("Recreating %s..." % container.name)
try:
container.stop(timeout=timeout)
except APIError as e:
if (e.response.status_code == 500
and e.explanation
and 'no such process' in str(e.explanation)):
pass
else:
raise
# Use a hopefully unique container name by prepending the short id
self.client.rename(
container.id,
'%s_%s' % (container.short_id, container.name))
new_container = self.create_container(
insecure_registry=insecure_registry,
do_build=False,
previous_container=container,
number=container.labels.get(LABEL_CONTAINER_NUMBER),
quiet=True,
)
self.start_container(new_container)
container.remove()
return new_container
def start_container_if_stopped(self, container):
if container.is_running:
return container
else:
log.info("Starting %s..." % container.name)
return self.start_container(container)
def start_container(self, container):
container.start()
return container
def config_hash(self):
return json_hash(self.config_dict())
def config_dict(self):
return {
'options': self.options,
'image_id': self.image()['Id'],
}
def get_dependency_names(self):
net_name = self.get_net_name()
return (self.get_linked_names() +
self.get_volumes_from_names() +
([net_name] if net_name else []))
def get_linked_names(self):
return [s.name for (s, _) in self.links]
def get_volumes_from_names(self):
return [s.name for s in self.volumes_from if isinstance(s, Service)]
def get_net_name(self):
if isinstance(self.net, Service):
return self.net.name
else:
return
def get_container_name(self, number, one_off=False):
# TODO: Implement issue #652 here
return build_container_name(self.project, self.name, number, one_off)
# TODO: this would benefit from github.com/docker/docker/pull/11943
# to remove the need to inspect every container
def _next_container_number(self, one_off=False):
numbers = [
Container.from_ps(self.client, container).number
for container in self.client.containers(
all=True,
filters={'label': self.labels(one_off=one_off)})
]
return 1 if not numbers else max(numbers) + 1
def _get_links(self, link_to_self):
links = []
for service, link_name in self.links:
for container in service.containers():
links.append((container.name, link_name or service.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
if link_to_self:
for container in self.containers():
links.append((container.name, self.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
for external_link in self.external_links:
if ':' not in external_link:
link_name = external_link
else:
external_link, link_name = external_link.split(':')
links.append((external_link, link_name))
return links
def _get_volumes_from(self):
volumes_from = []
for volume_source in self.volumes_from:
if isinstance(volume_source, Service):
containers = volume_source.containers(stopped=True)
if not containers:
volumes_from.append(volume_source.create_container().id)
else:
volumes_from.extend(map(attrgetter('id'), containers))
elif isinstance(volume_source, Container):
volumes_from.append(volume_source.id)
return volumes_from
def _get_net(self):
if not self.net:
return "bridge"
if isinstance(self.net, Service):
containers = self.net.containers()
if len(containers) > 0:
net = 'container:' + containers[0].id
else:
log.warning("Warning: Service %s is trying to use reuse the network stack "
"of another service that is not running." % (self.net.name))
net = None
elif isinstance(self.net, Container):
net = 'container:' + self.net.id
else:
net = self.net
return net
def _get_container_create_options(
self,
override_options,
number,
one_off=False,
previous_container=None):
add_config_hash = (not one_off and not override_options)
container_options = dict(
(k, self.options[k])
for k in DOCKER_CONFIG_KEYS if k in self.options)
container_options.update(override_options)
container_options['name'] = self.get_container_name(number, one_off)
if add_config_hash:
config_hash = self.config_hash()
if 'labels' not in container_options:
container_options['labels'] = {}
container_options['labels'][LABEL_CONFIG_HASH] = config_hash
log.debug("Added config hash: %s" % config_hash)
if 'detach' not in container_options:
container_options['detach'] = True
# If a qualified hostname was given, split it into an
# unqualified hostname and a domainname unless domainname
# was also given explicitly. This matches the behavior of
# the official Docker CLI in that scenario.
if ('hostname' in container_options
and 'domainname' not in container_options
and '.' in container_options['hostname']):
parts = container_options['hostname'].partition('.')
container_options['hostname'] = parts[0]
container_options['domainname'] = parts[2]
if 'ports' in container_options or 'expose' in self.options:
ports = []
all_ports = container_options.get('ports', []) + self.options.get('expose', [])
for port in all_ports:
port = str(port)
if ':' in port:
port = port.split(':')[-1]
if '/' in port:
port = tuple(port.split('/'))
ports.append(port)
container_options['ports'] = ports
override_options['binds'] = merge_volume_bindings(
container_options.get('volumes') or [],
previous_container)
if 'volumes' in container_options:
container_options['volumes'] = dict(
(parse_volume_spec(v).internal, {})
for v in container_options['volumes'])
container_options['environment'] = merge_environment(
self.options.get('environment'),
override_options.get('environment'))
if previous_container:
container_options['environment']['affinity:container'] = ('=' + previous_container.id)
container_options['image'] = self.image_name
container_options['labels'] = build_container_labels(
container_options.get('labels', {}),
self.labels(one_off=one_off),
number)
# Delete options which are only used when starting
for key in DOCKER_START_KEYS:
container_options.pop(key, None)
container_options['host_config'] = self._get_container_host_config(
override_options,
one_off=one_off)
return container_options
def _get_container_host_config(self, override_options, one_off=False):
options = dict(self.options, **override_options)
port_bindings = build_port_bindings(options.get('ports') or [])
privileged = options.get('privileged', False)
cap_add = options.get('cap_add', None)
cap_drop = options.get('cap_drop', None)
log_config = LogConfig(type=options.get('log_driver', 'json-file'))
pid = options.get('pid', None)
security_opt = options.get('security_opt', None)
dns = options.get('dns', None)
if isinstance(dns, six.string_types):
dns = [dns]
dns_search = options.get('dns_search', None)
if isinstance(dns_search, six.string_types):
dns_search = [dns_search]
restart = parse_restart_spec(options.get('restart', None))
extra_hosts = build_extra_hosts(options.get('extra_hosts', None))
read_only = options.get('read_only', None)
devices = options.get('devices', None)
return create_host_config(
links=self._get_links(link_to_self=one_off),
port_bindings=port_bindings,
binds=options.get('binds'),
volumes_from=self._get_volumes_from(),
privileged=privileged,
network_mode=self._get_net(),
devices=devices,
dns=dns,
dns_search=dns_search,
restart_policy=restart,
cap_add=cap_add,
cap_drop=cap_drop,
log_config=log_config,
extra_hosts=extra_hosts,
read_only=read_only,
pid_mode=pid,
security_opt=security_opt
)
def build(self, no_cache=False):
log.info('Building %s...' % self.name)
path = six.binary_type(self.options['build'])
build_output = self.client.build(
path=path,
tag=self.image_name,
stream=True,
rm=True,
nocache=no_cache,
dockerfile=self.options.get('dockerfile', None),
)
try:
all_events = stream_output(build_output, sys.stdout)
except StreamOutputError as e:
raise BuildError(self, unicode(e))
# Ensure the HTTP connection is not reused for another
# streaming command, as the Docker daemon can sometimes
# complain about it
self.client.close()
image_id = None
for event in all_events:
if 'stream' in event:
match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
if match:
image_id = match.group(1)
if image_id is None:
raise BuildError(self, event if all_events else 'Unknown')
return image_id
def can_be_built(self):
return 'build' in self.options
@property
def full_name(self):
"""
The tag to give to images built for this service.
"""
return '%s_%s' % (self.project, self.name)
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.project),
'{0}={1}'.format(LABEL_SERVICE, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
]
def can_be_scaled(self):
for port in self.options.get('ports', []):
if ':' in str(port):
return False
return True
def pull(self, insecure_registry=False):
if 'image' not in self.options:
return
repo, tag = parse_repository_tag(self.options['image'])
tag = tag or 'latest'
log.info('Pulling %s (%s:%s)...' % (self.name, repo, tag))
output = self.client.pull(
repo,
tag=tag,
stream=True,
insecure_registry=insecure_registry)
stream_output(output, sys.stdout)
# Names
def build_container_name(project, service, number, one_off=False):
bits = [project, service]
if one_off:
bits.append('run')
return '_'.join(bits + [str(number)])
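# Illustrative examples of the naming scheme above (project/service names are
# hypothetical, not taken from any real configuration):
#   build_container_name("myproj", "web", 1)                -> "myproj_web_1"
#   build_container_name("myproj", "web", 1, one_off=True)  -> "myproj_web_run_1"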
# Images
def parse_repository_tag(s):
if ":" not in s:
return s, ""
repo, tag = s.rsplit(":", 1)
if "/" in tag:
return s, ""
return repo, tag
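# Illustrative examples of the tag-splitting rules above (image names are
# hypothetical):
#   parse_repository_tag("ubuntu")                  -> ("ubuntu", "")
#   parse_repository_tag("ubuntu:14.04")            -> ("ubuntu", "14.04")
#   parse_repository_tag("localhost:5000/app")      -> ("localhost:5000/app", "")
#   parse_repository_tag("localhost:5000/app:1.0")  -> ("localhost:5000/app", "1.0")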
# Volumes
def merge_volume_bindings(volumes_option, previous_container):
"""Return a list of volume bindings for a container. Container data volumes
are replaced by those from the previous container.
"""
volume_bindings = dict(
build_volume_binding(parse_volume_spec(volume))
for volume in volumes_option or []
if ':' in volume)
if previous_container:
volume_bindings.update(
get_container_data_volumes(previous_container, volumes_option))
    return list(volume_bindings.values())
def get_container_data_volumes(container, volumes_option):
"""Find the container data volumes that are in `volumes_option`, and return
a mapping of volume bindings for those volumes.
"""
volumes = []
volumes_option = volumes_option or []
container_volumes = container.get('Volumes') or {}
image_volumes = container.image_config['ContainerConfig'].get('Volumes') or {}
    for volume in set(volumes_option + list(image_volumes.keys())):
volume = parse_volume_spec(volume)
# No need to preserve host volumes
if volume.external:
continue
volume_path = container_volumes.get(volume.internal)
# New volume, doesn't exist in the old container
if not volume_path:
continue
# Copy existing volume from old container
volume = volume._replace(external=volume_path)
volumes.append(build_volume_binding(volume))
return dict(volumes)
def build_volume_binding(volume_spec):
return volume_spec.internal, "{}:{}:{}".format(*volume_spec)
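# Illustrative example (paths are hypothetical):
#   build_volume_binding(VolumeSpec("/srv/data", "/data", "rw"))
#     -> ("/data", "/srv/data:/data:rw")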
def parse_volume_spec(volume_config):
parts = volume_config.split(':')
if len(parts) > 3:
raise ConfigError("Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_config)
if len(parts) == 1:
return VolumeSpec(None, parts[0], 'rw')
if len(parts) == 2:
parts.append('rw')
external, internal, mode = parts
if mode not in ('rw', 'ro'):
raise ConfigError("Volume %s has invalid mode (%s), should be "
"one of: rw, ro." % (volume_config, mode))
return VolumeSpec(external, internal, mode)
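# Illustrative examples of the spec formats accepted above (paths are
# hypothetical):
#   parse_volume_spec("/var/lib/mysql")          -> VolumeSpec(None, "/var/lib/mysql", "rw")
#   parse_volume_spec("./cache:/tmp/cache")      -> VolumeSpec("./cache", "/tmp/cache", "rw")
#   parse_volume_spec("~/configs:/etc/conf:ro")  -> VolumeSpec("~/configs", "/etc/conf", "ro")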
# Ports
def build_port_bindings(ports):
port_bindings = {}
for port in ports:
internal_port, external = split_port(port)
if internal_port in port_bindings:
port_bindings[internal_port].append(external)
else:
port_bindings[internal_port] = [external]
return port_bindings
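# Illustrative example (port numbers are hypothetical):
#   build_port_bindings(["8000:80", "443"]) -> {"80": ["8000"], "443": [None]}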
def split_port(port):
parts = str(port).split(':')
if not 1 <= len(parts) <= 3:
raise ConfigError('Invalid port "%s", should be '
'[[remote_ip:]remote_port:]port[/protocol]' % port)
if len(parts) == 1:
internal_port, = parts
return internal_port, None
if len(parts) == 2:
external_port, internal_port = parts
return internal_port, external_port
external_ip, external_port, internal_port = parts
return internal_port, (external_ip, external_port or None)
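# Illustrative examples (ports and addresses are hypothetical):
#   split_port("3000")                 -> ("3000", None)
#   split_port("8000:80")              -> ("80", "8000")
#   split_port("127.0.0.1:8001:8001")  -> ("8001", ("127.0.0.1", "8001"))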
# Labels
def build_container_labels(label_options, service_labels, number, one_off=False):
labels = label_options or {}
labels.update(label.split('=', 1) for label in service_labels)
labels[LABEL_CONTAINER_NUMBER] = str(number)
labels[LABEL_VERSION] = __version__
return labels
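# Illustrative example (label keys/values are hypothetical; LABEL_* constants
# and __version__ come from the surrounding package):
#   build_container_labels({"tier": "web"}, ["com.example.service=web"], 3)
#     -> {"tier": "web", "com.example.service": "web",
#         LABEL_CONTAINER_NUMBER: "3", LABEL_VERSION: __version__}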
# Restart policy
def parse_restart_spec(restart_config):
if not restart_config:
return None
parts = restart_config.split(':')
if len(parts) > 2:
raise ConfigError("Restart %s has incorrect format, should be "
"mode[:max_retry]" % restart_config)
if len(parts) == 2:
name, max_retry_count = parts
else:
name, = parts
max_retry_count = 0
return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
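# Illustrative examples of the restart spec format above:
#   parse_restart_spec("always")        -> {'Name': 'always', 'MaximumRetryCount': 0}
#   parse_restart_spec("on-failure:5")  -> {'Name': 'on-failure', 'MaximumRetryCount': 5}
#   parse_restart_spec(None)            -> None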
# Extra hosts
def build_extra_hosts(extra_hosts_config):
if not extra_hosts_config:
return {}
if isinstance(extra_hosts_config, list):
extra_hosts_dict = {}
for extra_hosts_line in extra_hosts_config:
if not isinstance(extra_hosts_line, six.string_types):
                raise ConfigError(
                    "extra_hosts_config \"%s\" must be either a list of "
                    "strings or a string->string mapping." %
                    extra_hosts_config
                )
host, ip = extra_hosts_line.split(':')
extra_hosts_dict.update({host.strip(): ip.strip()})
extra_hosts_config = extra_hosts_dict
if isinstance(extra_hosts_config, dict):
return extra_hosts_config
    raise ConfigError(
        "extra_hosts_config \"%s\" must be either a list of "
        "strings or a string->string mapping." %
        extra_hosts_config
    )
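# Illustrative examples of the accepted extra_hosts forms (hostnames and
# addresses are hypothetical):
#   build_extra_hosts(["somehost:162.242.195.82"])     -> {"somehost": "162.242.195.82"}
#   build_extra_hosts({"otherhost": "50.31.209.229"})  -> {"otherhost": "50.31.209.229"}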