import json
import os
from sqlalchemy import (
Table, Column, Integer, String, UniqueConstraint, MetaData
)
from sqlalchemy.types import UserDefinedType
from twisted.trial.unittest import TestCase
from aludel.database import (
get_engine, make_table, CollectionMissingError, _PrefixedTables,
CollectionMetadata, TableCollection,
)
from .doubles import FakeReactorThreads
class DatabaseTestCase(TestCase):
def setUp(self):
connection_string = os.environ.get(
"ALUDEL_TEST_CONNECTION_STRING", "sqlite://")
self.engine = get_engine(
connection_string, reactor=FakeReactorThreads())
self._drop_tables()
self.conn = self.successResultOf(self.engine.connect())
def tearDown(self):
self.successResultOf(self.conn.close())
self._drop_tables()
assert self.successResultOf(self.engine.table_names()) == []
def _drop_tables(self):
# NOTE: This is a blocking operation!
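# Reflect every table on the wrapped SQLAlchemy engine and drop them all so
# each test starts with an empty database.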
md = MetaData(bind=self.engine._engine)
md.reflect()
md.drop_all()
class Test_PrefixedTables(DatabaseTestCase):
def test_get_table_name_not_implemented(self):
"""
.get_table_name() should raise a NotImplementedError.
"""
my_tables = _PrefixedTables("prefix", self.conn)
err = self.assertRaises(
NotImplementedError, my_tables.get_table_name, 'foo')
assert err.args[0] == "_PrefixedTables should not be used directly."
def test_exists_not_implemented(self):
"""
.exists() should raise a NotImplementedError.
"""
my_tables = _PrefixedTables("prefix", self.conn)
err = self.assertRaises(NotImplementedError, my_tables.exists)
assert err.args[0] == "_PrefixedTables should not be used directly."
def test__execute_query_happy(self):
"""
._execute_query() should query the database and return a result.
"""
my_tables = _PrefixedTables("prefix", self.conn)
result = self.successResultOf(my_tables._execute_query("SELECT 42;"))
rows = self.successResultOf(result.fetchall())
assert rows == [(42,)]
def test__execute_error(self):
"""
._execute_query() should fail if given an invalid query.
"""
my_tables = _PrefixedTables("prefix", self.conn)
self.failureResultOf(my_tables._execute_query("SELECT ;;"))
def test_execute_query_not_implemented(self):
"""
.execute_query() should raise a NotImplementedError.
"""
my_tables = _PrefixedTables("prefix", self.conn)
err = self.assertRaises(
NotImplementedError, my_tables.execute_query, "SELECT 42;")
assert err.args[0] == "_PrefixedTables should not be used directly."
def test_execute_fetchall_not_implemented(self):
"""
.execute_fetchall() should raise a NotImplementedError.
"""
my_tables = _PrefixedTables("prefix", self.conn)
err = self.assertRaises(
NotImplementedError, my_tables.execute_fetchall, "SELECT 42;")
assert err.args[0] == "_PrefixedTables should not be used directly."
class TestCollectionMetadata(DatabaseTestCase):
def test_create_new(self):
"""
.create() should create the appropriately named table.
"""
cmd = CollectionMetadata('MyTables', self.conn)
has_table_d = self.engine.has_table(cmd.collection_metadata.name)
assert self.successResultOf(has_table_d) is False
assert self.successResultOf(cmd.exists()) is False
self.successResultOf(cmd.create())
has_table_d = self.engine.has_table(cmd.collection_metadata.name)
assert self.successResultOf(has_table_d) is True
assert self.successResultOf(cmd.exists()) is True
def test_create_exists(self):
"""
.create() should do nothing if the table already exists.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
has_table_d = self.engine.has_table(cmd.collection_metadata.name)
assert self.successResultOf(has_table_d) is True
assert self.successResultOf(cmd.exists()) is True
# Create again, assert that everything still exists.
self.successResultOf(cmd.create())
has_table_d = self.engine.has_table(cmd.collection_metadata.name)
assert self.successResultOf(has_table_d) is True
assert self.successResultOf(cmd.exists()) is True
def test_collection_exists_no_table(self):
"""
.collection_exists() should return None if the metadata table does not
exist.
"""
cmd = CollectionMetadata('MyTables', self.conn)
assert self.successResultOf(cmd.collection_exists('foo')) is None
def test_collection_exists_no_metadata(self):
"""
.collection_exists() should return False if there is no metadata for
the provided name.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
assert self.successResultOf(cmd.collection_exists('foo')) is False
def test_collection_exists_with_metadata(self):
"""
.collection_exists() should return True if there is metadata for the
provided name.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.successResultOf(cmd.create_collection('foo', {'bar': 'baz'}))
assert self.successResultOf(cmd.collection_exists('foo')) is True
def test_collection_exists_cached(self):
"""
.collection_exists() should return a cached result for the provided
name.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
cmd._existence_cache['foo'] = True
assert self.successResultOf(cmd.collection_exists('foo')) is True
def test_get_metadata_no_table(self):
"""
.get_metadata() should fail with CollectionMissingError if the metadata
table does not exist.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.failureResultOf(cmd.get_metadata('foo'), CollectionMissingError)
def test_get_metadata_missing_collection(self):
"""
.get_metadata() should fail with CollectionMissingError if there is no
metadata for the provided name.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.failureResultOf(cmd.get_metadata('foo'), CollectionMissingError)
def test_get_metadata(self):
"""
.get_metadata() should fetch metadata from the database.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.successResultOf(cmd.create_collection('foo', {'bar': 'baz'}))
assert self.successResultOf(cmd.get_metadata('foo')) == {'bar': 'baz'}
def test_get_metadata_updates_existence_cache(self):
"""
.get_metadata() should update the existence cache.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.successResultOf(cmd.create_collection('foo', {'bar': 'baz'}))
# Set this back to False because create_collection updated it.
cmd._existence_cache['foo'] = False
assert self.successResultOf(cmd.get_metadata('foo')) == {'bar': 'baz'}
assert cmd._existence_cache['foo'] is True
def test_get_metadata_updates_existence_cache_missing_collection(self):
"""
.get_metadata() should update the existence cache.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
assert 'foo' not in cmd._existence_cache
self.failureResultOf(cmd.get_metadata('foo'), CollectionMissingError)
assert cmd._existence_cache['foo'] is False
def test_get_all_metadata(self):
"""
.get_all_metadata() should fetch all metadata from the database.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.successResultOf(cmd.create_collection('foo', {'a': 1}))
self.successResultOf(cmd.create_collection('bar', {'b': 2}))
metadata = self.successResultOf(cmd.get_all_metadata())
assert metadata == {'foo': {'a': 1}, 'bar': {'b': 2}}
def test__decode_all_metadata_with_none(self):
"""
._decode_all_metadata() should ignore empty metadata entries.
"""
cmd = CollectionMetadata('MyTables', None)
metadata = {'foo': json.dumps({'a': 1}), 'bar': None}
assert cmd._decode_all_metadata(metadata) == {'foo': {'a': 1}}
def test_set_metadata(self):
"""
.set_metadata() should update the database.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.successResultOf(cmd.create_collection('foo'))
assert self.successResultOf(cmd.get_metadata('foo')) == {}
self.successResultOf(cmd.set_metadata('foo', {'bar': 'baz'}))
assert self.successResultOf(cmd.get_metadata('foo')) == {'bar': 'baz'}
def test_create_collection_no_table(self):
"""
.create_collection() should call .create() before creating the
collection if the metadata table does not exist.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create_collection('foo'))
assert cmd._existence_cache['foo'] is True
assert self.successResultOf(cmd.get_metadata('foo')) == {}
def test_create_collection_no_metadata(self):
"""
.create_collection() should create a collection metadata entry with an
empty dict if no metadata is provided.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.successResultOf(cmd.create_collection('foo'))
assert cmd._existence_cache['foo'] is True
assert self.successResultOf(cmd.get_metadata('foo')) == {}
def test_create_collection_with_metadata(self):
"""
.create_collection() should create a collection metadata entry with the
provided metadata.
"""
cmd = CollectionMetadata('MyTables', self.conn)
self.successResultOf(cmd.create())
self.successResultOf(cmd.create_collection('foo', {'bar': 'baz'}))
assert cmd._existence_cache['foo'] is True
assert self.successResultOf(cmd.get_metadata('foo')) == {'bar': 'baz'}
class TestTableCollection(DatabaseTestCase):
def _get_cmd(self, collection_cls):
"""
Create and return a CollectionMetadata instance for collection_cls.
"""
cmd = CollectionMetadata(collection_cls.collection_type(), self.conn)
self.successResultOf(cmd.create())
return cmd
def test_collection_type_class_name(self):
"""
.collection_type() should return the class name if the COLLECTION_TYPE
attr is unset.
"""
class MyTables(TableCollection):
pass
assert MyTables.collection_type() == 'MyTables'
my_tables = MyTables("prefix", connection=None)
assert my_tables.collection_type() == 'MyTables'
def test_collection_type_explicit_name(self):
"""
.collection_type() should return the COLLECTION_TYPE attr if set.
"""
class MyTables(TableCollection):
COLLECTION_TYPE = 'YourTables'
assert MyTables.collection_type() == 'YourTables'
my_tables = MyTables("prefix", connection=None)
assert my_tables.collection_type() == 'YourTables'
def test_init_uses_provided_collection_metadata(self):
"""
TableCollection should use the collection_metadata it's given, if any.
"""
cmd = self._get_cmd(TableCollection)
my_tables = TableCollection("foo", None, collection_metadata=cmd)
assert my_tables._collection_metadata is cmd
def test_init_builds_collection_metadata(self):
"""
TableCollection should build a collection_metadata if none is given.
"""
my_tables = TableCollection("foo", None)
assert isinstance(my_tables._collection_metadata, CollectionMetadata)
def test_get_table_name(self):
"""
.get_table_name() should build an appropriate table name from the
collection type, collection name, and table name.
"""
class MyTables(TableCollection):
pass
my_tables = MyTables("prefix", connection=None)
assert my_tables.get_table_name("thing") == "MyTables_prefix_thing"
def test_make_table(self):
"""
Class attributes built by make_table() should be replaced by instance
attributes that are SQLAlchemy Table instances with the correct table
names.
"""
class MyTables(TableCollection):
tbl = make_table(
Column("id", Integer(), primary_key=True),
Column("value", String(255)),
Column("other_value", String(255)),
UniqueConstraint("value", "other_value"),
)
my_tables_1 = MyTables("prefix1", self.conn)
assert isinstance(my_tables_1.tbl, Table)
assert my_tables_1.tbl.name == 'MyTables_prefix1_tbl'
assert len(my_tables_1.tbl.c) == 3
# Make another instance to check that things aren't bound improperly.
my_tables_2 = MyTables("prefix2", self.conn)
assert isinstance(my_tables_2.tbl, Table)
assert my_tables_2.tbl.name == 'MyTables_prefix2_tbl'
assert len(my_tables_2.tbl.c) == 3
def test_create_tables_with_metadata(self):
"""
.create_tables() should create the tables belonging to the collection
and set metadata.
"""
class MyTables(TableCollection):
tbl1 = make_table(
Column("id", Integer(), primary_key=True),
Column("value", String(255)),
)
tbl2 = make_table(
Column("id", Integer(), primary_key=True),
Column("other_value", String(255)),
)
cmd = self._get_cmd(MyTables)
my_tables = MyTables("foo", self.conn, cmd)
# Check that the tables don't already exist.
assert self.successResultOf(my_tables.exists()) is False
self.failureResultOf(self.conn.execute(my_tables.tbl1.select()))
self.failureResultOf(self.conn.execute(my_tables.tbl2.select()))
# Create the tables and check that they exist.
self.successResultOf(my_tables.create_tables(metadata={'bar': 'baz'}))
assert self.successResultOf(my_tables.exists()) is True
self.successResultOf(self.conn.execute(my_tables.tbl1.select()))
self.successResultOf(self.conn.execute(my_tables.tbl2.select()))
assert self.successResultOf(cmd.get_metadata("foo")) == {'bar': 'baz'}
def test_create_tables_no_metadata(self):
"""
.create_tables() should create the tables belonging to the collection
and set metadata. If no metadata is provided, an empty dict should be
used.
"""
class MyTables(TableCollection):
tbl1 = make_table(
Column("id", Integer(), primary_key=True),
Column("value", String(255)),
)
tbl2 = make_table(
Column("id", Integer(), primary_key=True),
Column("other_value", String(255)),
)
cmd = self._get_cmd(MyTables)
my_tables = MyTables("foo", self.conn, cmd)
# Check that the tables don't already exist.
assert self.successResultOf(my_tables.exists()) is False
self.failureResultOf(self.conn.execute(my_tables.tbl1.select()))
self.failureResultOf(self.conn.execute(my_tables.tbl2.select()))
# Create the tables and check that they exist.
self.successResultOf(my_tables.create_tables())
assert self.successResultOf(my_tables.exists()) is True
self.successResultOf(self.conn.execute(my_tables.tbl1.select()))
self.successResultOf(self.conn.execute(my_tables.tbl2.select()))
assert self.successResultOf(cmd.get_metadata("foo")) == {}
def test_create_tables_already_exists(self):
"""
.create_tables() should do nothing if the tables already exist.
"""
class MyTables(TableCollection):
tbl = make_table(
Column("id", Integer(), primary_key=True),
Column("value", String(255)),
)
cmd = self._get_cmd(MyTables)
my_tables = MyTables("foo", self.conn, cmd)
# Create the tables and check that they exist.
self.successResultOf(my_tables.create_tables(metadata={'bar': 'baz'}))
assert self.successResultOf(my_tables.exists()) is True
assert self.successResultOf(cmd.get_metadata("foo")) == {'bar': 'baz'}
# Create the tables again and check that nothing changes.
self.successResultOf(my_tables.create_tables(metadata={'a': 'b'}))
assert self.successResultOf(my_tables.exists()) is True
assert self.successResultOf(cmd.get_metadata("foo")) == {'bar': 'baz'}
def test_create_tables_error(self):
"""
.create_tables() should fail if the tables can't be created.
"""
class BrokenType(UserDefinedType):
def get_col_spec(self):
return "BROKEN;;"
class MyTables(TableCollection):
tbl = make_table(
Column("id", Integer(), primary_key=True),
Column("value", BrokenType()),
)
my_tables = MyTables("prefix", self.conn)
self.failureResultOf(my_tables.create_tables())
def test_get_metadata(self):
"""
.get_metadata() should fetch the metadata for this collection.
"""
class MyTables(TableCollection):
tbl = make_table(
Column("id", Integer(), primary_key=True),
Column("value", String(255)),
)
my_tables = MyTables("prefix", self.conn)
self.successResultOf(my_tables._collection_metadata.create())
self.successResultOf(my_tables.create_tables(metadata={'bar': 'baz'}))
assert self.successResultOf(my_tables.get_metadata()) == {'bar': 'baz'}
def test_set_metadata(self):
"""
.set_metadata() should update the metadata for this collection.
"""
class MyTables(TableCollection):
tbl = make_table(
Column("id", Integer(), primary_key=True),
Column("value", String(255)),
)
my_tables = MyTables("prefix", self.conn)
self.successResultOf(my_tables._collection_metadata.create())
self.successResultOf(my_tables.create_tables())
assert self.successResultOf(my_tables.get_metadata()) == {}
self.successResultOf(my_tables.set_metadata({'bar': 'baz'}))
assert self.successResultOf(my_tables.get_metadata()) == {'bar': 'baz'}
def test_execute_query_happy(self):
"""
.execute_query() should query the database and return a result.
"""
my_tables = TableCollection("prefix", self.conn)
self.successResultOf(my_tables.create_tables())
result = self.successResultOf(my_tables.execute_query("SELECT 42;"))
rows = self.successResultOf(result.fetchall())
assert rows == [(42,)]
def test_execute_query_no_collection(self):
"""
.execute_query() should fail with CollectionMissingError if the
collection does not exist.
"""
my_tables = TableCollection("prefix", self.conn)
self.failureResultOf(
my_tables.execute_query("SELECT 42;"), CollectionMissingError)
def test_execute_query_error(self):
"""
.execute_query() should fail if given an invalid query.
"""
my_tables = TableCollection("prefix", self.conn)
self.successResultOf(my_tables.create_tables())
self.failureResultOf(my_tables.execute_query("SELECT ;;"))
def test_execute_fetchall_no_collection(self):
"""
.execute_fetchall() should fail with CollectionMissingError if the
collection does not exist.
"""
my_tables = TableCollection("prefix", self.conn)
self.failureResultOf(
my_tables.execute_fetchall("SELECT 42;"), CollectionMissingError)
def test_execute_fetchall(self):
"""
.execute_fetchall() should query the database and return all rows from
the result.
"""
my_tables = TableCollection("prefix", self.conn)
self.successResultOf(my_tables.create_tables())
rows = self.successResultOf(my_tables.execute_fetchall("SELECT 42;"))
assert rows == [(42,)]
#!/usr/bin/python2.4
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getopt
import logging
import md5
import os
import os.path
import re
import sys
import time
import traceback
import urllib2
import xmlrpclib
import xml.dom.minidom
import gdata
from gdata import atom
try:
import gaexmlrpclib
from google.appengine.api import urlfetch
ON_GAE = True
except ImportError:
ON_GAE = False
__author__ = 'JJ Lueck ([email protected])'
########################
# Constants
########################
CATEGORY_NS = 'http://www.blogger.com/atom/ns#'
CATEGORY_KIND = 'http://schemas.google.com/g/2005#kind'
POST_KIND = 'http://schemas.google.com/blogger/2008/kind#post'
COMMENT_KIND = 'http://schemas.google.com/blogger/2008/kind#comment'
ATOM_TYPE = 'application/atom+xml'
HTML_TYPE = 'text/html'
ATOM_THREADING_NS = 'http://purl.org/syndication/thread/1.0'
DUMMY_URI = 'http://www.blogger.com/'
###########################
# Helper Atom class
###########################
class BloggerGDataFeed(gdata.GDataFeed):
def _ToElementTree(self):
tree = gdata.GDataFeed._ToElementTree(self)
# Modify the tree such that entries are always the last elements
# of the top-level feed. This conforms to the Atom specification
# and fixes a bug where the Blog title may exist after the entries
# which causes Blogger to ignore the title.
for i in reversed(range(len(tree))):
if tree[i].tag.endswith('entry'):
break
subelem = tree[i]
tree.remove(subelem)
tree.insert(0, subelem)
return tree
class InReplyTo(atom.ExtensionElement):
"""Supplies the in-reply-to element from the Atom threading protocol."""
def __init__(self, ref, href=None):
"""Constructs an InReplyTo element."""
attrs = {}
attrs['ref'] = ref
attrs['type'] = ATOM_TYPE
if href:
attrs['href'] = href
atom.ExtensionElement.__init__(self, 'in-reply-to',
namespace=ATOM_THREADING_NS,
attributes=attrs)
###########################
# Helper UserMap class
###########################
class UserMap(object):
def __init__(self):
self.comment2user = {}
self.max_id = -1
def Add(self, xml_map):
self._ReadMap(xml_map)
def GetUser(self, comment_id):
return self.comment2user.get(comment_id, None)
def GetLargestId(self):
return self.max_id
def _ReadMap(self, xml_map):
# One half of the XML document contains a map between user ID and
# the user's name. Build a user_map with this mapping
users = xml_map.getElementsByTagName('usermap')
user_map = dict([(user.getAttribute('id'),
user.getAttribute('user')) for user in users])
# The other half of the XML document contains a map between the
# comment ID and comment authors
comments = xml_map.getElementsByTagName('comment')
for comment in comments:
comment_id = comment.getAttribute('id')
user_id = comment.getAttribute('posterid')
if user_id:
self.comment2user[comment_id] = user_map[user_id]
else:
self.comment2user[comment_id] = 'Anonymous'
self.max_id = max(int(comment_id), self.max_id)
###########################
# Helper URL fetching
###########################
class UrlFetcherFactory(object):
def newUrlFetcher(self):
if ON_GAE:
return GaeUrlFetcher()
else:
return NativeUrlFetcher()
# No-op interface method; the concrete fetcher classes below implement it.
def fetch(self, url, payload, headers={}):
pass
class GaeUrlFetcher(object):
def fetch(self, url, payload, headers={}):
response = urlfetch.fetch(url, payload, 'POST', headers)
return response.content
class NativeUrlFetcher(object):
def fetch(self, url, payload, headers={}):
response = urllib2.urlopen(urllib2.Request(url, payload, headers=headers))
data = response.read()
response.close()
return data
###########################
# Translation class
###########################
class LiveJournal2Blogger(object):
"""Performs the translation of LiveJournal blog to the Blogger
export format.
"""
def __init__(self, username, password, server='www.livejournal.com'):
self.username = username
self.password = password
self.server_name = server
if ON_GAE:
self.server = xmlrpclib.ServerProxy('http://%s/interface/xmlrpc' % server,
gaexmlrpclib.GAEXMLRPCTransport())
else:
self.server = xmlrpclib.ServerProxy('http://%s/interface/xmlrpc' % server)
self.url_fetcher = UrlFetcherFactory().newUrlFetcher()
def Translate(self, outfile):
"""Performs the actual translation to a Blogger export format.
Args:
outfile: The output file that should receive the translated document
"""
# Create the top-level feed object
feed = BloggerGDataFeed()
# Fill in the feed object with the boilerplate metadata
feed.generator = atom.Generator(text='Blogger')
feed.title = atom.Title(text='LiveJournal blog')
feed.link.append(
atom.Link(href=DUMMY_URI, rel='self', link_type=ATOM_TYPE))
feed.link.append(
atom.Link(href=DUMMY_URI, rel='alternate', link_type=HTML_TYPE))
feed.updated = atom.Updated(text=self._ToBlogTime(time.gmtime()))
# Grab the list of posts
posts = self._GetPosts()
feed.entry.extend(posts)
# Grab the list of comments
comments = self._GetComments()
feed.entry.extend(comments)
# Serialize the feed object
outfile.write(str(feed))
def _GetPosts(self):
sync_time = ''
posts = []
num_failures = 0
max_failures = 5
while num_failures < max_failures:
start_time = time.time()
try:
# Get fresh auth tokens for the next request
challenge, challenge_response = self._GetAuthTokens()
logging.info('Retrieving auth tokens: %d ms' % ((time.time() - start_time) * 1000))
except:
logging.error(traceback.format_exc())
num_failures += 1
time.sleep(0.5)
continue
start_time = time.time()
try:
response = self.server.LJ.XMLRPC.syncitems({
'username': self.username,
'ver': 1,
'lastsync': sync_time,
'auth_method': 'challenge',
'auth_challenge': challenge,
'auth_response': challenge_response})
logging.info('Sync-ing %d items: %d ms' %
(len(response['syncitems']), (time.time() - start_time) * 1000))
except:
logging.error('Failure after %d ms' % ((time.time() - start_time) * 1000))
logging.error(traceback.format_exc())
num_failures += 1
time.sleep(0.5)
continue
# Break out if we have no more items
if len(response['syncitems']) == 0:
break
# Loop through the items and get the contents
for item in response['syncitems']:
item_type, item_id = item['item'].split('-')
if item_type == 'L':
while num_failures < max_failures:
start_time = time.time()
try:
# Get fresh auth tokens for the next request
challenge, challenge_response = self._GetAuthTokens()
logging.info('Retrieving auth tokens: %d ms' % ((time.time() - start_time) * 1000))
except:
logging.error('Failure after %d ms' % ((time.time() - start_time) * 1000))
logging.error(traceback.format_exc())
num_failures += 1
time.sleep(0.5)
continue
start_time = time.time()
try:
event = self.server.LJ.XMLRPC.getevents({
'username': self.username,
'ver': 1,
'selecttype': 'one',
'itemid': item_id,
'auth_method': 'challenge',
'auth_challenge': challenge,
'auth_response': challenge_response})
logging.info('Retrieved item %s: %d ms' %
(item_id, (time.time() - start_time) * 1000))
if len(event['events']) > 0:
posts.append(self._TranslatePost(event['events'][0]))
break
except:
logging.error('Failure after %d ms' % ((time.time() - start_time) * 1000))
logging.error(traceback.format_exc())
num_failures += 1
time.sleep(0.5)
continue
if num_failures >= max_failures:
raise Exception('TooManyFailures')
sync_time = item['time']
if num_failures >= max_failures:
raise Exception('TooManyFailures')
return posts
def _TranslatePost(self, lj_event):
post_entry = gdata.GDataEntry()
post_entry.id = atom.Id(text='post-%d' % lj_event['itemid'])
post_entry.link.append(
atom.Link(href=DUMMY_URI, rel='self', link_type=ATOM_TYPE))
post_entry.link.append(
atom.Link(href=lj_event['url'], rel='alternate', link_type=ATOM_TYPE))
post_entry.author = atom.Author(atom.Name(text=self.username))
post_entry.category.append(
atom.Category(scheme=CATEGORY_KIND, term=POST_KIND))
post_entry.published = atom.Published(
text=self._ToBlogTime(self._FromLjTime(lj_event['eventtime'])))
post_entry.updated = atom.Updated(
text=self._ToBlogTime(self._FromLjTime(lj_event['eventtime'])))
content = lj_event['event']
if isinstance(lj_event['event'], xmlrpclib.Binary):
content = lj_event['event'].data
post_entry.content = atom.Content(
content_type='html', text=self._TranslateContent(content))
subject = lj_event.get('subject', None)
if not subject:
subject = self._CreateSnippet(content)
if not isinstance(subject, basestring):
subject = str(subject)
post_entry.title = atom.Title(text=subject)
# Turn the taglist into individual labels
taglist = lj_event['props'].get('taglist', None)
if isinstance(taglist, xmlrpclib.Binary):
taglist = taglist.data
elif not isinstance(taglist, basestring):
taglist = str(taglist)
if taglist:
tags = taglist.split(',')
for tag in tags:
post_entry.category.append(
atom.Category(scheme=CATEGORY_NS, term=tag.strip()))
return post_entry
def _GetComments(self):
current_id = 0
max_id = -1
user_map = UserMap()
comments = []
# First, make requests to generate the user map. This is done by requesting
# comment metadata and paging through the responses. Each page of comment
# metadata is added to a running UserMap, which provides the mapping from
# comment identifier to the author's name.
while True:
session_key = self._GetSessionToken()
request_url = ('http://%s/export_comments.bml?get=comment_meta&startid=%d'
% (self.server_name, current_id))
response = self.url_fetcher.fetch(
request_url, None, headers={'Cookie': 'ljsession=%s' % session_key})
response_doc = xml.dom.minidom.parseString(response)
user_map.Add(response_doc)
current_id = user_map.GetLargestId()
max_id = int(self._GetText(response_doc.getElementsByTagName('maxid')[0]))
if current_id >= max_id:
break
# Second, loop through the contents of the comments and use our UserMap to
# fill in the author of each comment. The rest of the data is found in the
# comment response document.
current_id = 0
while True:
session_key = self._GetSessionToken()
request_url = ('http://%s/export_comments.bml?get=comment_body&startid=%d'
% (self.server_name, current_id))
response = self.url_fetcher.fetch(
request_url, None, headers={'Cookie': 'ljsession=%s' % session_key})
response_doc = xml.dom.minidom.parseString(response)
for comment in response_doc.getElementsByTagName('comment'):
# If this has been marked as a deleted comment, do not add it
if comment.getAttribute('state') != 'D':
comments.append(self._TranslateComment(comment, user_map))
current_id = int(comment.getAttribute('id'))
if current_id >= max_id:
break
return comments
def _TranslateComment(self, xml_comment, user_map):
comment_id = xml_comment.getAttribute('id')
comment_entry = gdata.GDataEntry()
comment_entry.id = atom.Id(text='comment-%s' % comment_id)
comment_entry.link.append(
atom.Link(href=DUMMY_URI, rel='self', link_type=ATOM_TYPE))
comment_entry.link.append(
atom.Link(href=DUMMY_URI, rel='alternate', link_type=ATOM_TYPE))
comment_entry.author = atom.Author(
atom.Name(text=user_map.GetUser(comment_id)))
comment_entry.category.append(
atom.Category(scheme=CATEGORY_KIND, term=COMMENT_KIND))
comment_body = self._TranslateContent(
self._GetText(xml_comment.getElementsByTagName('body')[0]))
comment_entry.content = atom.Content(
content_type='html', text=comment_body)
comment_entry.published = atom.Published(
text=self._GetText(xml_comment.getElementsByTagName('date')[0]))
comment_entry.updated = atom.Updated(
text=self._GetText(xml_comment.getElementsByTagName('date')[0]))
subject = xml_comment.getElementsByTagName('subject')
if subject:
subject = self._GetText(subject[0])
else:
subject = self._CreateSnippet(comment_body)
comment_entry.title = atom.Title(text=subject)
comment_entry.extension_elements.append(
InReplyTo('post-%s' % xml_comment.getAttribute('jitemid')))
return comment_entry
def _TranslateContent(self, content):
if not isinstance(content, basestring):
content = str(content)
return content.replace('\r\n', '<br/>')
def _GetAuthTokens(self):
"""Returns the information necessary to create new requests to the
LiveJournal server using XML-RPC. Returns a tuple containing the challenge
and the successful response to the challenge.
"""
response = self.server.LJ.XMLRPC.getchallenge()
challenge = response['challenge']
return challenge, self._HashChallenge(challenge)
def _GetSessionToken(self):
"""Returns the information necessary to create new requests to the
LiveJournal server via HTTP.
"""
# Use the flat RPC protocol to generate the session information
request_url = 'http://%s/interface/flat' % self.server_name
# The first request is used to obtain the challenge token
response = self.url_fetcher.fetch(request_url, 'mode=getchallenge')
challenge = self._ResponseToDict(response)['challenge']
# The second request is to actually generate the session cookie by
# responding to the challenge
challenge_response = self._HashChallenge(challenge)
response = self.url_fetcher.fetch(
request_url, ('mode=sessiongenerate&auth_method=challenge&'
'user=%s&auth_challenge=%s&auth_response=%s' %
(self.username, challenge, challenge_response)))
result = self._ResponseToDict(response)
if result.get('errmsg', None):
raise Exception('Login Unsuccessful')
return result['ljsession']
def _ResponseToDict(self, contents):
"""Takes the result of a request to the LiveJournal flat XML-RPC
protocol and transforms the key/value pairs into a dictionary.
"""
elems = contents.split('\n')
# This little bit of Python wizardry turns a list of elements into
# key value pairs.
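# For example, a body of 'k1\nv1\nk2\nv2' becomes {'k1': 'v1', 'k2': 'v2'}:
# even-indexed elements are keys, odd-indexed elements are their values.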
return dict(zip(elems[::2], elems[1::2]))
def _HashChallenge(self, challenge):
"""Hashes the challege with the password to produce the challenge
response.
"""
return md5.new(challenge + md5.new(self.password).hexdigest()).hexdigest()
def _CreateSnippet(self, content):
"""Creates a snippet of content. The maximum size being 53 characters,
50 characters of data followed by elipses.
"""
content = re.sub('<[^>]+>', '', content)
if isinstance(content, str):
content = content.decode('UTF-8', 'ignore')
if len(content) < 50:
return content
return content[:49] + '...'
def _GetText(self, xml_elem):
"""Assumes the text for the element is the only child of the element."""
return xml_elem.firstChild.nodeValue
def _FromLjTime(self, lj_time):
"""Converts the LiveJournal event time to a time/date struct."""
return time.strptime(lj_time, '%Y-%m-%d %H:%M:%S')
def _ToBlogTime(self, time_tuple):
"""Converts a time struct to a Blogger time/date string."""
return time.strftime('%Y-%m-%dT%H:%M:%SZ', time_tuple)
def usage():
return ('Usage: %s -u <username> -p <password> [-s <server>]\n\n'
' Outputs the converted Blogger export file to standard out.' %
os.path.basename(sys.argv[0]))
if __name__ == '__main__':
# parse command line options
try:
opts, args = getopt.getopt(
sys.argv[1:], 'u:p:s:', ['username=', 'password=', 'server='])
except getopt.error, msg:
print usage()
sys.exit(2)
# Store the parsed results
username = None
password = None
server = 'www.livejournal.com'
# Process options
for opt, arg in opts:
if opt in ['-u', '--username']:
username = arg
elif opt in ['-p', '--password']:
password = arg
elif opt in ['-s', '--server']:
server = arg
if not username or not password:
print usage()
sys.exit(-1)
# Perform the translation
translator = LiveJournal2Blogger(username, password, server)
translator.Translate(sys.stdout)
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.clip_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class ClipTest(tf.test.TestCase):
# ClipByValue test
def testClipByValue(self):
with self.test_session():
x = tf.constant([-5.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
np_ans = [[-4.4, 2.0, 3.0],
[4.0, 4.4, 4.4]]
clip_value = 4.4
ans = tf.clip_by_value(x, -clip_value, clip_value)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByValueNonFinite(self):
with self.test_session():
x = tf.constant([float('NaN'), float('Inf'), -float('Inf')])
np_ans = [float('NaN'), 4.0, -4.0]
clip_value = 4.0
ans = tf.clip_by_value(x, -clip_value, clip_value)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
# ClipByNorm tests
def testClipByNormClipped(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-2.4, 0.0, 0.0],
[3.2, 0.0, 0.0]]
clip_norm = 4.0
ans = tf.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Norm of x = sqrt(3^2 + 4^2) = 5
np_ans = [[-3.0, 0.0, 0.0],
[4.0, 0.0, 0.0]]
clip_norm = 6.0
ans = tf.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByNormZero(self):
# No norm clipping when norm = 0
with self.test_session():
x = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]
clip_norm = 6.0
ans = tf.clip_by_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByGlobalNormClipped(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0],
[3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = tf.clip_by_global_norm((x0, x1), clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormSupportsNone(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0],
[3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = tf.clip_by_global_norm((x0, None, x1, None), clip_norm)
self.assertTrue(ans[1] is None)
self.assertTrue(ans[3] is None)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[2].eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
# ClipByGlobalNorm tests
def testClipByGlobalNormWithIndexedSlicesClipped(self):
# Norm clipping when clip_norm < 5
with self.test_session():
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.IndexedSlices(tf.constant([1.0, -2.0]),
tf.constant([3, 4]))
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
clip_norm = 4.0
# Answers are the original tensors scaled by 4.0/5.0
np_ans_0 = [[-1.6, 0.0, 0.0],
[3.2, 0.0, 0.0]]
np_ans_1 = [0.8, -1.6]
ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].values.eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormPreservesDenseShape(self):
dense_shape = (1,)
slices = tf.IndexedSlices(
tf.constant([1.0]),
tf.constant([0]),
dense_shape=dense_shape)
ans, _ = tf.clip_by_global_norm([slices], 1.0)
modified_slices = ans[0]
self.assertEqual(dense_shape, slices.dense_shape)
self.assertEqual(dense_shape, modified_slices.dense_shape)
def testClipByGlobalNormNotClipped(self):
# No norm clipping when clip_norm >= 5
with self.test_session():
x0 = tf.constant([-2.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.constant([1.0, -2.0])
# Global norm of x0 and x1 = sqrt(1 + 4^2 + 2^2 + 2^2) = 5
np_ans_0 = [[-2.0, 0.0, 0.0],
[4.0, 0.0, 0.0]]
np_ans_1 = [1.0, -2.0]
clip_norm = 6.0
ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 5.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByGlobalNormZero(self):
# No norm clipping when norm = 0
with self.test_session():
x0 = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
x1 = tf.constant([0.0, 0.0])
# Norm = 0, no changes
np_ans_0 = [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]
np_ans_1 = [0.0, 0.0]
clip_norm = 6.0
ans, norm = tf.clip_by_global_norm([x0, x1], clip_norm)
tf_ans_1 = ans[0].eval()
tf_ans_2 = ans[1].eval()
tf_norm = norm.eval()
self.assertAllClose(tf_norm, 0.0)
self.assertAllClose(np_ans_0, tf_ans_1)
self.assertAllClose(np_ans_1, tf_ans_2)
def testClipByAverageNormClipped(self):
# Norm clipping when average clip_norm < 0.83333333
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-2.88, 0.0, 0.0],
[3.84, 0.0, 0.0]]
clip_norm = 0.8
ans = tf.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormNotClipped(self):
# No norm clipping when average clip_norm >= 0.83333333
with self.test_session():
x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
# Average norm of x = sqrt(3^2 + 4^2) / 6 = 0.83333333
np_ans = [[-3.0, 0.0, 0.0],
[4.0, 0.0, 0.0]]
clip_norm = 0.9
ans = tf.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
def testClipByAverageNormZero(self):
# No norm clipping when average clip_norm = 0
with self.test_session():
x = tf.constant([0.0, 0.0, 0.0, 0.0, 0.0, 0.0], shape=[2, 3])
# Average norm = 0, no changes
np_ans = [[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0]]
clip_norm = 0.9
ans = tf.clip_by_average_norm(x, clip_norm)
tf_ans = ans.eval()
self.assertAllClose(np_ans, tf_ans)
if __name__ == "__main__":
tf.test.main()
"""
sentry.web.forms.accounts
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from datetime import datetime
import pytz
from django import forms
from django.conf import settings
from django.contrib.auth import authenticate, get_user_model
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from sentry import options
from sentry.auth import password_validation
from sentry.app import ratelimiter
from sentry.constants import LANGUAGES
from sentry.models import (
Organization, OrganizationStatus, User, UserOption, UserOptionValue
)
from sentry.utils.auth import find_users, logger
from sentry.web.forms.fields import ReadOnlyTextField
from six.moves import range
def _get_timezone_choices():
results = []
for tz in pytz.common_timezones:
now = datetime.now(pytz.timezone(tz))
offset = now.strftime('%z')
results.append((int(offset), tz, '(GMT%s) %s' % (offset, tz)))
results.sort()
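# The numeric offset was only needed as a sort key; keep just (tz, label).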
for i in range(len(results)):
results[i] = results[i][1:]
return results
TIMEZONE_CHOICES = _get_timezone_choices()
class AuthenticationForm(forms.Form):
username = forms.CharField(
label=_('Account'), max_length=128, widget=forms.TextInput(
attrs={'placeholder': _('username or email'),
}),
)
password = forms.CharField(
label=_('Password'), widget=forms.PasswordInput(
attrs={'placeholder': _('password'),
}),
)
error_messages = {
'invalid_login': _("Please enter a correct %(username)s and password. "
"Note that both fields may be case-sensitive."),
'rate_limited': _("You have made too many failed authentication "
"attempts. Please try again later."),
'no_cookies': _("Your Web browser doesn't appear to have cookies "
"enabled. Cookies are required for logging in."),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
If request is passed in, the form will validate that cookies are
enabled. Note that the request (a HttpRequest object) must have set a
cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
running this validation.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
UserModel = get_user_model()
self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
if not self.fields['username'].label:
self.fields['username'].label = capfirst(self.username_field.verbose_name)
def clean_username(self):
value = (self.cleaned_data.get('username') or '').strip()
if not value:
return
return value.lower()
def is_rate_limited(self):
if self._is_ip_rate_limited():
return True
if self._is_user_rate_limited():
return True
return False
def _is_ip_rate_limited(self):
limit = options.get('auth.ip-rate-limit')
if not limit:
return False
ip_address = self.request.META['REMOTE_ADDR']
return ratelimiter.is_limited(
'auth:ip:{}'.format(ip_address),
limit,
)
def _is_user_rate_limited(self):
limit = options.get('auth.user-rate-limit')
if not limit:
return False
username = self.cleaned_data.get('username')
if not username:
return False
return ratelimiter.is_limited(
u'auth:username:{}'.format(username),
limit,
)
def clean(self):
username = self.cleaned_data.get('username')
if self.is_rate_limited():
logger.info('user.auth.rate-limited', extra={
'ip_address': self.request.META['REMOTE_ADDR'],
'username': username,
})
raise forms.ValidationError(self.error_messages['rate_limited'])
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'] % {
'username': self.username_field.verbose_name
})
self.check_for_test_cookie()
return self.cleaned_data
def check_for_test_cookie(self):
if self.request and not self.request.session.test_cookie_worked():
raise forms.ValidationError(self.error_messages['no_cookies'])
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
class RegistrationForm(forms.ModelForm):
username = forms.EmailField(
label=_('Email'), max_length=128,
widget=forms.TextInput(attrs={'placeholder': '[email protected]'}))
password = forms.CharField(
widget=forms.PasswordInput(attrs={'placeholder': 'something super secret'}))
class Meta:
fields = ('username',)
model = User
def clean_username(self):
value = (self.cleaned_data.get('username') or '').strip()
if not value:
return
if User.objects.filter(username__iexact=value).exists():
raise forms.ValidationError(_('An account is already registered with that email address.'))
return value.lower()
def clean_password(self):
password = self.cleaned_data['password']
password_validation.validate_password(password)
return password
def save(self, commit=True):
user = super(RegistrationForm, self).save(commit=False)
user.email = user.username
user.set_password(self.cleaned_data['password'])
if commit:
user.save()
return user
class RecoverPasswordForm(forms.Form):
user = forms.CharField(label=_('Username or email'))
def clean_user(self):
value = (self.cleaned_data.get('user') or '').strip()
if not value:
return
users = find_users(value, with_valid_password=False)
if not users:
raise forms.ValidationError(_("We were unable to find a matching user."))
users = [u for u in users if not u.is_managed]
if not users:
raise forms.ValidationError(_("The account you are trying to recover is managed and does not support password recovery."))
if len(users) > 1:
raise forms.ValidationError(_("Multiple accounts were found matching this email address."))
return users[0]
class ChangePasswordRecoverForm(forms.Form):
password = forms.CharField(widget=forms.PasswordInput())
def clean_password(self):
password = self.cleaned_data['password']
password_validation.validate_password(password)
return password
class EmailForm(forms.Form):
primary_email = forms.EmailField(label=_('Primary Email'))
alt_email = forms.EmailField(
label=_('New Email'),
required=False,
help_text='Designate an alternative email for this account',
)
password = forms.CharField(
label=_('Current password'),
widget=forms.PasswordInput(),
help_text=_('You will need to enter your current account password to make changes.'),
required=True,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(EmailForm, self).__init__(*args, **kwargs)
needs_password = user.has_usable_password()
if not needs_password:
del self.fields['password']
def save(self, commit=True):
if self.cleaned_data['primary_email'] != self.user.email:
new_username = self.user.email == self.user.username
else:
new_username = False
self.user.email = self.cleaned_data['primary_email']
if new_username and not User.objects.filter(username__iexact=self.user.email).exists():
self.user.username = self.user.email
if commit:
self.user.save()
return self.user
def clean_password(self):
value = self.cleaned_data.get('password')
if value and not self.user.check_password(value):
raise forms.ValidationError(_('The password you entered is not correct.'))
elif not value:
raise forms.ValidationError(_('You must confirm your current password to make changes.'))
return value
class AccountSettingsForm(forms.Form):
name = forms.CharField(required=True, label=_('Name'), max_length=30)
username = forms.CharField(label=_('Username'), max_length=128)
email = forms.EmailField(label=_('Email'))
new_password = forms.CharField(
label=_('New password'),
widget=forms.PasswordInput(),
required=False,
# help_text=password_validation.password_validators_help_text_html(),
)
password = forms.CharField(
label=_('Current password'),
widget=forms.PasswordInput(),
help_text='You will need to enter your current account password to make changes.',
required=False,
)
def __init__(self, user, request, *args, **kwargs):
self.user = user
self.request = request
super(AccountSettingsForm, self).__init__(*args, **kwargs)
needs_password = user.has_usable_password()
if self.user.is_managed:
# username and password always managed, email and
# name optionally managed
for field in ('email', 'name', 'username'):
if field == 'username' or field in settings.SENTRY_MANAGED_USER_FIELDS:
self.fields[field] = ReadOnlyTextField(label=self.fields[field].label)
if field == 'email':
needs_password = False
del self.fields['new_password']
# don't show username field if it's the same as their email address
if self.user.email == self.user.username:
del self.fields['username']
if not needs_password:
del self.fields['password']
def is_readonly(self):
if self.user.is_managed:
return set(('email', 'name')) == set(settings.SENTRY_MANAGED_USER_FIELDS)
return False
def _clean_managed_field(self, field):
if self.user.is_managed and (field == 'username' or
field in settings.SENTRY_MANAGED_USER_FIELDS):
return getattr(self.user, field)
return self.cleaned_data[field]
def clean_email(self):
return self._clean_managed_field('email')
def clean_name(self):
return self._clean_managed_field('name')
def clean_username(self):
value = self._clean_managed_field('username')
if User.objects.filter(username__iexact=value).exclude(id=self.user.id).exists():
raise forms.ValidationError(_("That username is already in use."))
return value
def clean_password(self):
value = self.cleaned_data.get('password')
if value and not self.user.check_password(value):
raise forms.ValidationError('The password you entered is not correct.')
elif not value and (
self.cleaned_data.get('email', self.user.email) != self.user.email
or self.cleaned_data.get('new_password')
):
raise forms.ValidationError('You must confirm your current password to make changes.')
return value
def clean_new_password(self):
new_password = self.cleaned_data.get('new_password')
if new_password:
password_validation.validate_password(new_password)
return new_password
def save(self, commit=True):
if self.cleaned_data.get('new_password'):
self.user.set_password(self.cleaned_data['new_password'])
self.user.refresh_session_nonce(self.request)
self.user.name = self.cleaned_data['name']
if self.cleaned_data['email'] != self.user.email:
new_username = self.user.email == self.user.username
else:
new_username = False
self.user.email = self.cleaned_data['email']
if self.cleaned_data.get('username'):
self.user.username = self.cleaned_data['username']
elif new_username and not User.objects.filter(username__iexact=self.user.email).exists():
self.user.username = self.user.email
if commit:
self.user.save()
return self.user
class AppearanceSettingsForm(forms.Form):
language = forms.ChoiceField(
label=_('Language'), choices=LANGUAGES, required=False,
widget=forms.Select(attrs={'class': 'input-xlarge'}))
stacktrace_order = forms.ChoiceField(
label=_('Stacktrace order'), choices=(
('-1', _('Default (let Sentry decide)')),
('1', _('Most recent call last')),
('2', _('Most recent call first')),
), help_text=_('Choose the default ordering of frames in stacktraces.'),
required=False,
widget=forms.Select(attrs={'class': 'input-xlarge'}))
timezone = forms.ChoiceField(
label=_('Time zone'), choices=TIMEZONE_CHOICES, required=False,
widget=forms.Select(attrs={'class': 'input-xxlarge'}))
clock_24_hours = forms.BooleanField(
label=_('Use a 24-hour clock'),
required=False,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(AppearanceSettingsForm, self).__init__(*args, **kwargs)
def save(self):
# Save user language
UserOption.objects.set_value(
user=self.user,
project=None,
key='language',
value=self.cleaned_data['language'],
)
# Save stacktrace options
UserOption.objects.set_value(
user=self.user,
project=None,
key='stacktrace_order',
value=self.cleaned_data['stacktrace_order'],
)
# Save time zone options
UserOption.objects.set_value(
user=self.user,
project=None,
key='timezone',
value=self.cleaned_data['timezone'],
)
# Save clock 24 hours option
UserOption.objects.set_value(
user=self.user,
project=None,
key='clock_24_hours',
value=self.cleaned_data['clock_24_hours'],
)
return self.user
class NotificationReportSettingsForm(forms.Form):
organizations = forms.ModelMultipleChoiceField(
queryset=Organization.objects.none(),
required=False,
widget=forms.CheckboxSelectMultiple(),
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(NotificationReportSettingsForm, self).__init__(*args, **kwargs)
org_queryset = Organization.objects.filter(
status=OrganizationStatus.VISIBLE,
member_set__user=user,
)
disabled_orgs = set(UserOption.objects.get_value(
user=user,
project=None,
key='reports:disabled-organizations',
default=[],
))
self.fields['organizations'].queryset = org_queryset
self.fields['organizations'].initial = [
o.id for o in org_queryset
if o.id not in disabled_orgs
]
def save(self):
enabled_orgs = set((
o.id for o in self.cleaned_data.get('organizations')
))
all_orgs = set(self.fields['organizations'].queryset.values_list('id', flat=True))
UserOption.objects.set_value(
user=self.user,
project=None,
key='reports:disabled-organizations',
value=list(all_orgs.difference(enabled_orgs)),
)
class NotificationSettingsForm(forms.Form):
alert_email = forms.EmailField(
label=_('Email'),
help_text=_('Designate an alternative email address to send email notifications to.'),
required=False
)
subscribe_by_default = forms.BooleanField(
label=_('Automatically subscribe to alerts for new projects'),
help_text=_("When enabled, you'll automatically subscribe to alerts when you create or join a project."),
required=False,
)
workflow_notifications = forms.BooleanField(
label=_('Automatically subscribe to workflow notifications for new projects'),
help_text=_("When enabled, you'll automatically subscribe to workflow notifications when you create or join a project."),
required=False,
)
self_notifications = forms.BooleanField(
label=_('Receive notifications about my own activity'),
help_text=_('Enable this if you wish to receive emails for your own actions, as well as others.'),
required=False,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(NotificationSettingsForm, self).__init__(*args, **kwargs)
self.fields['alert_email'].initial = UserOption.objects.get_value(
user=self.user,
project=None,
key='alert_email',
default=user.email,
)
self.fields['subscribe_by_default'].initial = (
UserOption.objects.get_value(
user=self.user,
project=None,
key='subscribe_by_default',
default='1',
) == '1'
)
self.fields['workflow_notifications'].initial = (
UserOption.objects.get_value(
user=self.user,
project=None,
key='workflow:notifications',
default=UserOptionValue.all_conversations,
) == UserOptionValue.all_conversations
)
self.fields['self_notifications'].initial = UserOption.objects.get_value(
user=self.user,
project=None,
key='self_notifications',
default='0'
) == '1'
def get_title(self):
return "General"
def save(self):
UserOption.objects.set_value(
user=self.user,
project=None,
key='alert_email',
value=self.cleaned_data['alert_email'],
)
UserOption.objects.set_value(
user=self.user,
project=None,
key='subscribe_by_default',
value='1' if self.cleaned_data['subscribe_by_default'] else '0',
)
UserOption.objects.set_value(
user=self.user,
project=None,
key='self_notifications',
value='1' if self.cleaned_data['self_notifications'] else '0',
)
if self.cleaned_data.get('workflow_notifications') is True:
UserOption.objects.set_value(
user=self.user,
project=None,
key='workflow:notifications',
value=UserOptionValue.all_conversations,
)
else:
UserOption.objects.set_value(
user=self.user,
project=None,
key='workflow:notifications',
value=UserOptionValue.participating_only,
)
class ProjectEmailOptionsForm(forms.Form):
alert = forms.BooleanField(required=False)
workflow = forms.BooleanField(required=False)
email = forms.ChoiceField(label="", choices=(), required=False,
widget=forms.Select())
def __init__(self, project, user, *args, **kwargs):
self.project = project
self.user = user
super(ProjectEmailOptionsForm, self).__init__(*args, **kwargs)
has_alerts = project.is_user_subscribed_to_mail_alerts(user)
has_workflow = project.is_user_subscribed_to_workflow(user)
# This allows users who have entered an alert_email value or have specified an email
# for notifications to keep their settings
emails = [e.email for e in user.get_verified_emails()]
alert_email = UserOption.objects.get_value(user=self.user, project=None, key='alert_email', default=None)
specified_email = UserOption.objects.get_value(user, project, 'mail:email', None)
emails.extend([user.email, alert_email, specified_email])
choices = [(email, email) for email in set(emails) if email is not None]
self.fields['email'].choices = choices
self.fields['alert'].initial = has_alerts
self.fields['workflow'].initial = has_workflow
self.fields['email'].initial = specified_email or alert_email or user.email
def save(self):
UserOption.objects.set_value(
self.user, self.project, 'mail:alert',
int(self.cleaned_data['alert']),
)
UserOption.objects.set_value(
self.user, self.project, 'workflow:notifications',
UserOptionValue.all_conversations if self.cleaned_data['workflow'] else UserOptionValue.participating_only,
)
if self.cleaned_data['email']:
UserOption.objects.set_value(
self.user, self.project, 'mail:email',
self.cleaned_data['email'],
)
else:
UserOption.objects.unset_value(
self.user, self.project, 'mail:email')
class TwoFactorForm(forms.Form):
otp = forms.CharField(
label=_('One-time password'), max_length=20, widget=forms.TextInput(
attrs={'placeholder': _('Code from authenticator'),
'autofocus': True,
}),
)
class ConfirmPasswordForm(forms.Form):
password = forms.CharField(
label=_('Sentry account password'),
widget=forms.PasswordInput(),
help_text='You will need to enter your current Sentry account password to make changes.',
required=True,
)
def __init__(self, user, *args, **kwargs):
self.user = user
super(ConfirmPasswordForm, self).__init__(*args, **kwargs)
needs_password = user.has_usable_password()
if not needs_password:
del self.fields['password']
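# Minimal usage sketch (illustrative only; the real account views wire these
# forms up with additional context and redirects). It shows how a bound
# NotificationSettingsForm is validated and persisted through UserOption as
# implemented above.
def _example_save_notification_settings(user, data):
    form = NotificationSettingsForm(user, data)
    if form.is_valid():
        form.save()
    return form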
|
|
# Copyright 2010-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test suite for pymongo, bson, and gridfs.
"""
import os
import socket
import sys
from pymongo.common import partition_node
if sys.version_info[:2] == (2, 6):
import unittest2 as unittest
from unittest2 import SkipTest
else:
import unittest
from unittest import SkipTest
import warnings
from functools import wraps
import pymongo
import pymongo.errors
from bson.py3compat import _unicode
from pymongo import common
from test.version import Version
# hostnames retrieved from isMaster will be of unicode type in Python 2,
# so ensure these hostnames are unicodes, too. It makes tests like
# `test_repr` predictable.
host = _unicode(os.environ.get("DB_IP", 'localhost'))
port = int(os.environ.get("DB_PORT", 27017))
pair = '%s:%d' % (host, port)
host2 = _unicode(os.environ.get("DB_IP2", 'localhost'))
port2 = int(os.environ.get("DB_PORT2", 27018))
host3 = _unicode(os.environ.get("DB_IP3", 'localhost'))
port3 = int(os.environ.get("DB_PORT3", 27019))
db_user = _unicode(os.environ.get("DB_USER", "user"))
db_pwd = _unicode(os.environ.get("DB_PASSWORD", "password"))
class client_knobs(object):
def __init__(
self,
heartbeat_frequency=None,
kill_cursor_frequency=None):
self.heartbeat_frequency = heartbeat_frequency
self.kill_cursor_frequency = kill_cursor_frequency
self.old_heartbeat_frequency = None
self.old_kill_cursor_frequency = None
def enable(self):
self.old_heartbeat_frequency = common.HEARTBEAT_FREQUENCY
self.old_kill_cursor_frequency = common.KILL_CURSOR_FREQUENCY
if self.heartbeat_frequency is not None:
common.HEARTBEAT_FREQUENCY = self.heartbeat_frequency
if self.kill_cursor_frequency is not None:
common.KILL_CURSOR_FREQUENCY = self.kill_cursor_frequency
    def __enter__(self):
        self.enable()
        return self
def disable(self):
common.HEARTBEAT_FREQUENCY = self.old_heartbeat_frequency
common.KILL_CURSOR_FREQUENCY = self.old_kill_cursor_frequency
def __exit__(self, exc_type, exc_val, exc_tb):
self.disable()
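# Illustrative usage sketch (not part of the original module): the knobs
# temporarily override module-level frequencies in pymongo.common and
# restore the originals when the block exits.
def _example_client_knobs_usage():
    with client_knobs(heartbeat_frequency=0.01, kill_cursor_frequency=0.01):
        # Inside the block, monitors created by new clients poll far more
        # often than the library default; __exit__ restores the old values.
        return common.HEARTBEAT_FREQUENCY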
class ClientContext(object):
def __init__(self):
"""Create a client and grab essential information from the server."""
self.connected = False
self.ismaster = {}
self.w = None
self.nodes = set()
self.replica_set_name = None
self.rs_client = None
self.cmd_line = None
self.version = Version(-1) # Needs to be comparable with Version
self.auth_enabled = False
self.test_commands_enabled = False
self.is_mongos = False
self.is_rs = False
self.has_ipv6 = False
try:
client = pymongo.MongoClient(host, port,
serverSelectionTimeoutMS=100)
client.admin.command('ismaster') # Can we connect?
# If so, then reset client to defaults.
self.client = pymongo.MongoClient(host, port)
except pymongo.errors.ConnectionFailure:
self.client = None
else:
self.connected = True
self.ismaster = self.client.admin.command('ismaster')
self.w = len(self.ismaster.get("hosts", [])) or 1
self.nodes = set([(host, port)])
self.replica_set_name = self.ismaster.get('setName', '')
self.rs_client = None
self.version = Version.from_client(self.client)
if self.replica_set_name:
self.is_rs = True
self.rs_client = pymongo.MongoClient(
pair, replicaSet=self.replica_set_name)
nodes = [partition_node(node)
for node in self.ismaster.get('hosts', [])]
nodes.extend([partition_node(node)
for node in self.ismaster.get('passives', [])])
nodes.extend([partition_node(node)
for node in self.ismaster.get('arbiters', [])])
self.nodes = set(nodes)
self.rs_or_standalone_client = self.rs_client or self.client
try:
self.cmd_line = self.client.admin.command('getCmdLineOpts')
except pymongo.errors.OperationFailure as e:
msg = e.details.get('errmsg', '')
if e.code == 13 or 'unauthorized' in msg or 'login' in msg:
# Unauthorized.
self.auth_enabled = True
else:
raise
else:
self.auth_enabled = self._server_started_with_auth()
if self.auth_enabled:
# See if db_user already exists.
self.user_provided = self._check_user_provided()
if not self.user_provided:
roles = {}
if self.version.at_least(2, 5, 3, -1):
roles = {'roles': ['root']}
self.client.admin.add_user(db_user, db_pwd, **roles)
self.client.admin.authenticate(db_user, db_pwd)
if self.rs_client:
self.rs_client.admin.authenticate(db_user, db_pwd)
# May not have this if OperationFailure was raised earlier.
self.cmd_line = self.client.admin.command('getCmdLineOpts')
if 'enableTestCommands=1' in self.cmd_line['argv']:
self.test_commands_enabled = True
elif 'parsed' in self.cmd_line:
params = self.cmd_line['parsed'].get('setParameter', [])
if 'enableTestCommands=1' in params:
self.test_commands_enabled = True
self.is_mongos = (self.ismaster.get('msg') == 'isdbgrid')
self.has_ipv6 = self._server_started_with_ipv6()
def _check_user_provided(self):
try:
self.client.admin.authenticate(db_user, db_pwd)
return True
except pymongo.errors.OperationFailure as e:
msg = e.details.get('errmsg', '')
if e.code == 18 or 'auth fails' in msg:
# Auth failed.
return False
else:
raise
def _server_started_with_auth(self):
# MongoDB >= 2.0
if 'parsed' in self.cmd_line:
parsed = self.cmd_line['parsed']
# MongoDB >= 2.6
if 'security' in parsed:
security = parsed['security']
# >= rc3
if 'authorization' in security:
return security['authorization'] == 'enabled'
# < rc3
return (security.get('auth', False) or
bool(security.get('keyFile')))
return parsed.get('auth', False) or bool(parsed.get('keyFile'))
# Legacy
argv = self.cmd_line['argv']
return '--auth' in argv or '--keyFile' in argv
def _server_started_with_ipv6(self):
if not socket.has_ipv6:
return False
if 'parsed' in self.cmd_line:
if not self.cmd_line['parsed'].get('net', {}).get('ipv6'):
return False
else:
if '--ipv6' not in self.cmd_line['argv']:
return False
# The server was started with --ipv6. Is there an IPv6 route to it?
try:
for info in socket.getaddrinfo(host, port):
if info[0] == socket.AF_INET6:
return True
except socket.error:
pass
return False
def _require(self, condition, msg, func=None):
def make_wrapper(f):
@wraps(f)
def wrap(*args, **kwargs):
# Always raise SkipTest if we can't connect to MongoDB
if not self.connected:
raise SkipTest("Cannot connect to MongoDB on %s" % pair)
if condition:
return f(*args, **kwargs)
raise SkipTest(msg)
return wrap
if func is None:
def decorate(f):
return make_wrapper(f)
return decorate
return make_wrapper(func)
def require_connection(self, func):
"""Run a test only if we can connect to MongoDB."""
return self._require(self.connected,
"Cannot connect to MongoDB on %s" % pair,
func=func)
def require_version_min(self, *ver):
"""Run a test only if the server version is at least ``version``."""
other_version = Version(*ver)
return self._require(self.version >= other_version,
"Server version must be at least %s"
% str(other_version))
def require_version_max(self, *ver):
"""Run a test only if the server version is at most ``version``."""
other_version = Version(*ver)
return self._require(self.version <= other_version,
"Server version must be at most %s"
% str(other_version))
def require_auth(self, func):
"""Run a test only if the server is running with auth enabled."""
return self.check_auth_with_sharding(
self._require(self.auth_enabled,
"Authentication is not enabled on the server",
func=func))
def require_no_auth(self, func):
"""Run a test only if the server is running without auth enabled."""
return self._require(not self.auth_enabled,
"Authentication must not be enabled on the server",
func=func)
def require_replica_set(self, func):
"""Run a test only if the client is connected to a replica set."""
return self._require(self.is_rs,
"Not connected to a replica set",
func=func)
def require_no_replica_set(self, func):
"""Run a test if the client is *not* connected to a replica set."""
return self._require(
not self.is_rs,
"Connected to a replica set, not a standalone mongod",
func=func)
def require_ipv6(self, func):
"""Run a test only if the client can connect to a server via IPv6."""
return self._require(self.has_ipv6,
"No IPv6",
func=func)
def require_no_mongos(self, func):
"""Run a test only if the client is not connected to a mongos."""
return self._require(not self.is_mongos,
"Must be connected to a mongod, not a mongos",
func=func)
def require_mongos(self, func):
"""Run a test only if the client is connected to a mongos."""
return self._require(self.is_mongos,
"Must be connected to a mongos",
func=func)
def check_auth_with_sharding(self, func):
"""Skip a test when connected to mongos < 2.0 and running with auth."""
condition = not (self.auth_enabled and
self.is_mongos and self.version < (2,))
return self._require(condition,
"Auth with sharding requires MongoDB >= 2.0.0",
func=func)
def require_test_commands(self, func):
"""Run a test only if the server has test commands enabled."""
return self._require(self.test_commands_enabled,
"Test commands must be enabled",
func=func)
# Reusable client context
client_context = ClientContext()
class IntegrationTest(unittest.TestCase):
"""Base class for TestCases that need a connection to MongoDB to pass."""
@classmethod
@client_context.require_connection
def setUpClass(cls):
cls.client = client_context.rs_or_standalone_client
cls.db = cls.client.pymongo_test
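# Illustrative sketch (not part of the original suite): per-test decorators
# from client_context raise SkipTest when the server requirements are not
# met, so the test body only runs against a suitable deployment.
class _ExampleDecoratorUsage(IntegrationTest):
    @client_context.require_replica_set
    def test_only_on_replica_sets(self):
        # Skipped automatically unless the client is connected to a
        # replica set.
        self.assertTrue(client_context.is_rs)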
class MockClientTest(unittest.TestCase):
"""Base class for TestCases that use MockClient.
This class is *not* an IntegrationTest: if properly written, MockClient
tests do not require a running server.
The class temporarily overrides HEARTBEAT_FREQUENCY to speed up tests.
"""
def setUp(self):
super(MockClientTest, self).setUp()
self.client_knobs = client_knobs(
heartbeat_frequency=0.001)
self.client_knobs.enable()
def tearDown(self):
self.client_knobs.disable()
super(MockClientTest, self).tearDown()
def setup():
warnings.resetwarnings()
warnings.simplefilter("always")
def teardown():
c = client_context.client
c.drop_database("pymongo-pooling-tests")
c.drop_database("pymongo_test")
c.drop_database("pymongo_test1")
c.drop_database("pymongo_test2")
c.drop_database("pymongo_test_mike")
c.drop_database("pymongo_test_bernie")
if client_context.auth_enabled and not client_context.user_provided:
c.admin.remove_user(db_user)
class PymongoTestRunner(unittest.TextTestRunner):
def run(self, test):
setup()
result = super(PymongoTestRunner, self).run(test)
try:
teardown()
finally:
return result
def test_cases(suite):
"""Iterator over all TestCases within a TestSuite."""
for suite_or_case in suite._tests:
if isinstance(suite_or_case, unittest.TestCase):
# unittest.TestCase
yield suite_or_case
else:
# unittest.TestSuite
for case in test_cases(suite_or_case):
yield case
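# Illustrative sketch (not part of the original module; the test directory
# path is assumed): run a discovered suite through PymongoTestRunner so that
# setup() and teardown() wrap the entire run.
def _example_run_discovered_suite(start_dir='test'):
    suite = unittest.defaultTestLoader.discover(start_dir)
    return PymongoTestRunner(verbosity=2).run(suite)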
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Driver base-classes:
(Beginning of) the contract that compute drivers must follow, and shared
types that support that contract
"""
from nova.compute import power_state
from nova import context as nova_context
from nova import db
from nova import flags
from nova import log as logging
LOG = logging.getLogger(__name__)
FLAGS = flags.FLAGS
class InstanceInfo(object):
def __init__(self, name, state):
self.name = name
assert state in power_state.valid_states(), "Bad state: %s" % state
self.state = state
def block_device_info_get_root(block_device_info):
block_device_info = block_device_info or {}
return block_device_info.get('root_device_name')
def block_device_info_get_swap(block_device_info):
block_device_info = block_device_info or {}
return block_device_info.get('swap') or {'device_name': None,
'swap_size': 0}
def swap_is_usable(swap):
return swap and swap['device_name'] and swap['swap_size'] > 0
def block_device_info_get_ephemerals(block_device_info):
block_device_info = block_device_info or {}
ephemerals = block_device_info.get('ephemerals') or []
return ephemerals
def block_device_info_get_mapping(block_device_info):
block_device_info = block_device_info or {}
block_device_mapping = block_device_info.get('block_device_mapping') or []
return block_device_mapping
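# Illustrative sketch (assumed shape, derived from the accessors above): a
# block_device_info dict as consumed by the helpers in this module.
_EXAMPLE_BLOCK_DEVICE_INFO = {
    'root_device_name': '/dev/vda',
    'swap': {'device_name': '/dev/vdb', 'swap_size': 1024},
    'ephemerals': [],
    'block_device_mapping': [],
}
# For example, swap_is_usable(block_device_info_get_swap(
# _EXAMPLE_BLOCK_DEVICE_INFO)) is truthy because the swap entry has both a
# device name and a non-zero size.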
class ComputeDriver(object):
"""Base class for compute drivers.
The interface to this class talks in terms of 'instances' (Amazon EC2 and
internal Nova terminology), by which we mean 'running virtual machine'
(XenAPI terminology) or domain (Xen or libvirt terminology).
An instance has an ID, which is the identifier chosen by Nova to represent
the instance further up the stack. This is unfortunately also called a
'name' elsewhere. As far as this layer is concerned, 'instance ID' and
'instance name' are synonyms.
Note that the instance ID or name is not human-readable or
customer-controlled -- it's an internal ID chosen by Nova. At the
nova.virt layer, instances do not have human-readable names at all -- such
things are only known higher up the stack.
Most virtualization platforms will also have their own identity schemes,
to uniquely identify a VM or domain. These IDs must stay internal to the
platform-specific layer, and never escape the connection interface. The
platform-specific layer is responsible for keeping track of which instance
ID maps to which platform-specific ID, and vice versa.
In contrast, the list_disks and list_interfaces calls may return
platform-specific IDs. These identify a specific virtual disk or specific
virtual network interface, and these IDs are opaque to the rest of Nova.
Some methods here take an instance of nova.compute.service.Instance. This
is the data structure used by nova.compute to store details regarding an
instance, and pass them into this layer. This layer is responsible for
translating that generic data structure into terms that are specific to the
virtualization platform.
"""
def init_host(self, host):
"""Initialize anything that is necessary for the driver to function,
        including catching up with currently running VMs on the given host."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_info(self, instance):
"""Get the current status of an instance, by name (not ID!)
Returns a dict containing:
:state: the running state, one of the power_state codes
:max_mem: (int) the maximum memory in KBytes allowed
:mem: (int) the memory in KBytes used by the domain
:num_cpu: (int) the number of virtual CPUs for the domain
:cpu_time: (int) the CPU time used in nanoseconds
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_num_instances(self):
"""Return the total number of virtual machines.
Return the number of virtual machines that the hypervisor knows
about.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
return len(self.list_instances())
def instance_exists(self, instance_id):
"""Checks existence of an instance on the host.
:param instance_id: The ID / name of the instance to lookup
Returns True if an instance with the supplied ID exists on
the host, False otherwise.
.. note::
This implementation works for all drivers, but it is
not particularly efficient. Maintainers of the virt drivers are
encouraged to override this method with something more
efficient.
"""
return instance_id in self.list_instances()
def list_instances(self):
"""
Return the names of all the instances known to the virtualization
layer, as a list.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def list_instances_detail(self):
"""Return a list of InstanceInfo for all registered VMs"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def spawn(self, context, instance, image_meta,
network_info=None, block_device_info=None):
"""
Create a new instance/VM/domain on the virtualization platform.
Once this successfully completes, the instance should be
running (power_state.RUNNING).
If this fails, any partial instance should be completely
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
:param context: security context
:param instance: Instance object as returned by DB layer.
This function should use the data there to guide
the creation of the new instance.
:param image_meta: image object returned by nova.image.glance that
defines the image from which to boot this instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices to be
attached to the instance.
"""
raise NotImplementedError()
def destroy(self, instance, network_info, block_device_info=None):
"""Destroy (shutdown and delete) the specified instance.
If the instance is not found (for example if networking failed), this
function should still succeed. It's probably a good idea to log a
warning in that case.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param block_device_info: Information about block devices that should
be detached from the instance.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reboot(self, instance, network_info, reboot_type):
"""Reboot the specified instance.
:param instance: Instance object as returned by DB layer.
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param reboot_type: Either a HARD or SOFT reboot
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_console_pool_info(self, console_type):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_console_output(self, instance):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_vnc_console(self, instance):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_diagnostics(self, instance):
"""Return data about VM diagnostics"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_all_bw_usage(self, instances, start_time, stop_time=None):
"""Return bandwidth usage info for each interface on each
running VM"""
raise NotImplementedError()
def get_host_ip_addr(self):
"""
Retrieves the IP address of the dom0
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def attach_volume(self, connection_info, instance_name, mountpoint):
"""Attach the disk to the instance at mountpoint using info"""
raise NotImplementedError()
def detach_volume(self, connection_info, instance_name, mountpoint):
"""Detach the disk attached to the instance"""
raise NotImplementedError()
def compare_cpu(self, cpu_info):
"""Compares given cpu info against host
Before attempting to migrate a VM to this host,
compare_cpu is called to ensure that the VM will
actually run here.
:param cpu_info: (str) JSON structure describing the source CPU.
:returns: None if migration is acceptable
:raises: :py:class:`~nova.exception.InvalidCPUInfo` if migration
is not acceptable.
"""
raise NotImplementedError()
def migrate_disk_and_power_off(self, context, instance, dest,
instance_type, network_info):
"""
Transfers the disk of a running instance in multiple phases, turning
off the instance before the end.
"""
raise NotImplementedError()
def snapshot(self, context, instance, image_id):
"""
Snapshots the specified instance.
:param context: security context
:param instance: Instance object as returned by DB layer.
:param image_id: Reference to a pre-created image that will
hold the snapshot.
"""
raise NotImplementedError()
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance):
"""Completes a resize, turning on the migrated instance
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
:param image_meta: image object returned by nova.image.glance that
defines the image from which this instance
was created
"""
raise NotImplementedError()
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def finish_revert_migration(self, instance, network_info):
"""Finish reverting a resize, powering back on the instance"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def pause(self, instance):
"""Pause the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unpause(self, instance):
"""Unpause paused VM instance"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def suspend(self, instance):
"""suspend the specified instance"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume(self, instance):
"""resume the specified instance"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta):
"""Rescue the specified instance"""
raise NotImplementedError()
def unrescue(self, instance, network_info):
"""Unrescue the specified instance"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def power_off(self, instance):
"""Power off the specified instance."""
raise NotImplementedError()
def power_on(self, instance):
"""Power on the specified instance"""
raise NotImplementedError()
def update_available_resource(self, ctxt, host):
"""Updates compute manager resource info on ComputeNode table.
This method is called when nova-compute launches, and
whenever admin executes "nova-manage service update_resource".
:param ctxt: security context
:param host: hostname that compute manager is currently running
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def live_migration(self, ctxt, instance_ref, dest,
post_method, recover_method):
"""Spawning live_migration operation for distributing high-load.
:param ctxt: security context
:param instance_ref:
nova.db.sqlalchemy.models.Instance object
instance object that is migrated.
:param dest: destination host
:param post_method:
post operation method.
expected nova.compute.manager.post_live_migration.
:param recover_method:
recovery method when any exception occurs.
expected nova.compute.manager.recover_live_migration.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
"""This method is called after a change to security groups.
All security groups and their associated rules live in the datastore,
and calling this method should apply the updated rules to instances
running the specified security group.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
"""This method is called when a security group is added to an instance.
This message is sent to the virtualization drivers on hosts that are
running an instance that belongs to a security group that has a rule
that references the security group identified by `security_group_id`.
It is the responsibility of this method to make sure any rules
that authorize traffic flow with members of the security group are
updated and any new members can communicate, and any removed members
cannot.
Scenario:
* we are running on host 'H0' and we have an instance 'i-0'.
* instance 'i-0' is a member of security group 'speaks-b'
* group 'speaks-b' has an ingress rule that authorizes group 'b'
* another host 'H1' runs an instance 'i-1'
* instance 'i-1' is a member of security group 'b'
When 'i-1' launches or terminates we will receive the message
to update members of group 'b', at which time we will make
any changes needed to the rules for instance 'i-0' to allow
or deny traffic coming from 'i-1', depending on if it is being
added or removed from the group.
In this scenario, 'i-1' could just as easily have been running on our
host 'H0' and this method would still have been called. The point was
that this method isn't called on the host where instances of that
group are running (as is the case with
:py:meth:`refresh_security_group_rules`) but is called where references
are made to authorizing those instances.
An error should be raised if the operation cannot complete.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_provider_fw_rules(self):
"""This triggers a firewall update based on database changes.
When this is called, rules have either been added or removed from the
datastore. You can retrieve rules with
:py:meth:`nova.db.provider_fw_rule_get_all`.
Provider rules take precedence over security group rules. If an IP
would be allowed by a security group ingress rule, but blocked by
a provider rule, then packets from the IP are dropped. This includes
intra-project traffic in the case of the allow_project_net_traffic
flag for the libvirt-derived classes.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reset_network(self, instance):
"""reset networking for specified instance"""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
"""Setting up filtering rules and waiting for its completion.
To migrate an instance, filtering rules to hypervisors
and firewalls are inevitable on destination host.
( Waiting only for filtering rules to hypervisor,
since filtering rules to firewall rules can be set faster).
Concretely, the below method must be called.
- setup_basic_filtering (for nova-basic, etc.)
- prepare_instance_filter(for nova-instance-instance-xxx, etc.)
to_xml may have to be called since it defines PROJNET, PROJMASK.
but libvirt migrates those value through migrateToURI(),
so , no need to be called.
Don't use thread for this method since migration should
not be started when setting-up filtering rules operations
are not completed.
:params instance_ref: nova.db.sqlalchemy.models.Instance object
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def set_admin_password(self, context, instance_id, new_pass=None):
"""
Set the root password on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the value of the new password.
"""
raise NotImplementedError()
def inject_file(self, instance, b64_path, b64_contents):
"""
Writes a file on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the base64-encoded path to which the file is to be
written on the instance; the third is the contents of the file, also
base64-encoded.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def agent_update(self, instance, url, md5hash):
"""
Update agent on the specified instance.
The first parameter is an instance of nova.compute.service.Instance,
and so the instance is being specified as instance.name. The second
parameter is the URL of the agent to be fetched and updated on the
instance; the third is the md5 hash of the file for verification
purposes.
"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance"""
# TODO(Vek): Need to pass context in for access to auth_token
pass
def poll_rebooting_instances(self, timeout):
"""Poll for rebooting instances"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def poll_unconfirmed_resizes(self, resize_confirm_window):
"""Poll for unconfirmed resizes."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def host_power_action(self, host, action):
"""Reboots, shuts down or powers up the host."""
raise NotImplementedError()
def host_maintenance_mode(self, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
raise NotImplementedError()
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def plug_vifs(self, instance, network_info):
"""Plug VIFs into networks."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unplug_vifs(self, instance, network_info):
"""Unplug VIFs from networks."""
raise NotImplementedError()
def update_host_status(self):
"""Refresh host stats"""
raise NotImplementedError()
def get_host_stats(self, refresh=False):
"""Return currently known host stats"""
raise NotImplementedError()
def list_disks(self, instance_name):
"""
Return the IDs of all the virtual disks attached to the specified
instance, as a list. These IDs are opaque to the caller (they are
only useful for giving back to this layer as a parameter to
disk_stats). These IDs only need to be unique for a given instance.
Note that this function takes an instance ID.
"""
raise NotImplementedError()
def list_interfaces(self, instance_name):
"""
Return the IDs of all the virtual network interfaces attached to the
specified instance, as a list. These IDs are opaque to the caller
(they are only useful for giving back to this layer as a parameter to
interface_stats). These IDs only need to be unique for a given
instance.
Note that this function takes an instance ID.
"""
raise NotImplementedError()
def resize(self, instance, flavor):
"""
Resizes/Migrates the specified instance.
The flavor parameter determines whether or not the instance RAM and
disk space are modified, and if so, to what size.
"""
raise NotImplementedError()
def block_stats(self, instance_name, disk_id):
"""
Return performance counters associated with the given disk_id on the
given instance_name. These are returned as [rd_req, rd_bytes, wr_req,
wr_bytes, errs], where rd indicates read, wr indicates write, req is
the total number of I/O requests made, bytes is the total number of
bytes transferred, and errs is the number of requests held up due to a
full pipeline.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
Note that this function takes an instance ID.
"""
raise NotImplementedError()
def interface_stats(self, instance_name, iface_id):
"""
Return performance counters associated with the given iface_id on the
given instance_id. These are returned as [rx_bytes, rx_packets,
rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx
indicates receive, tx indicates transmit, bytes and packets indicate
the total number of bytes or packets transferred, and errs and dropped
is the total number of packets failed / dropped.
All counters are long integers.
This method is optional. On some platforms (e.g. XenAPI) performance
statistics can be retrieved directly in aggregate form, without Nova
having to do the aggregation. On those platforms, this method is
unused.
Note that this function takes an instance ID.
"""
raise NotImplementedError()
def legacy_nwinfo(self):
"""
Indicate if the driver requires the legacy network_info format.
"""
# TODO(tr3buchet): update all subclasses and remove this
return True
def manage_image_cache(self, context):
"""
Manage the driver's local image cache.
        Some drivers choose to cache images for instances on disk. This method
is an opportunity to do management of that cache which isn't directly
related to other calls into the driver. The prime example is to clean
the cache and remove images which are no longer of interest.
"""
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
raise NotImplementedError()
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
raise NotImplementedError()
def get_volume_connector(self, instance):
"""Get connector information for the instance for attaching to volumes.
Connector information is a dictionary representing the ip of the
machine that will be making the connection, the name of the iscsi
initiator and the hostname of the machine as follows::
{
'ip': ip,
'initiator': initiator,
'host': hostname
}
"""
raise NotImplementedError()
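# Minimal illustrative sketch (not a real driver): a subclass only needs to
# override list_instances() for the generic helpers above, such as
# get_num_instances() and instance_exists(), to work.
class _ExampleDriver(ComputeDriver):
    def __init__(self):
        self._instances = ['instance-00000001']
    def list_instances(self):
        # The default implementations in ComputeDriver build on this.
        return list(self._instances)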
|
|
"""===========================
Ancestral repeats pipeline
===========================
:Author: Andreas Heger
:Release: $Id: pipeline_ancestral_repeats.py 2876 2010-03-27 17:42:11Z andreas $
:Date: |today|
:Tags: Python
The ancestral repeats pipeline defines ancestral repeats for a pair of genomes
and computes rates for these.
This pipeline performs the following actions:
* collect repeatmasker annotation from external
databases. Currently implemented are:
* UCSC
* Ensembl
* build pairwise genomic alignment from axt or maf files
* define ancestral repeats
* compute rates of ancestral repeats
Usage
=====
See :ref:`PipelineSettingUp` and :ref:`PipelineRunning` for general
information on how to use CGAT pipelines.
Configuration
-------------
The pipeline expects a :term:`query` and :term:`target` genome. These
should be set in the general section. For each genome there should
then be a section describing how to obtain the repeatmasker tracks. The
default configuration file gives an example.
Input
-----
The pipeline starts from an empty working directory. It will collect
the input data from directories specified in the configuration files.
The genomic alignment can be built either from :term:`axt` formatted
pairwise alignments or from :term:`maf` formatted multiple
alignments.
:term:`axt` formatted files (such as :file:`chr1.hg19.mm10.net.axt.gz`)
are built hierarchically from chains by selecting the
highest-scoring non-overlapping chains on top and then filling in the
gaps with lower scoring chains. A net is single-coverage for target
but not for query, unless it has been filtered to be single-coverage
on both target and query. Because it's single-coverage in the target,
it's no longer symmetrical. For ancestral repeat determination, use
:term:`axt` files that have been filtered to be single-coverage on
both query and target. By convention, the UCSC adds "rbest" to the net
filename in that case, such as: `hg19.panTro3.rbest.net.gz`.
:term:`maf` files currently only work if the :term:`query` genome is
the reference species in the maf files. This is a consequence of
:file:`maf2Axt` requiring that the strand of the reference species is
always positive and I have not figured out how to invert maf
alignments.
.. note::
ENSEMBL import is not thoroughly tested.
:term:`maf` formatted import is not thoroughly tested.
Type::
python pipeline_ancestral_repeats.py --help
for command line help.
Requirements
------------
Output
======
The pipeline builds the following files:
aligned_repeats.psl.gz
:term:`psl` formatted files of alignments between ancestral repeats
aligned_repeats.rates.gz
rates between ancestral repeats
alignment.psl.gz
:term:`psl` formatted genomic alignment between query and target.
<query>_rates.gff.gz
:term:`gff` formatted file of ancestral repeats on the query. The score field is set
to the estimated substitution rate of the repeat.
Example
=======
Example data is available at
http://www.cgat.org/~andreas/sample_data/pipeline_ancestral_repeats.tgz.
To run the example, simply unpack and untar::
wget http://www.cgat.org/~andreas/sample_data/pipeline_ancestral_repeats.tgz
tar -xvzf pipeline_ancestral_repeats.tgz
cd pipeline_ancestral_repeats
python <srcdir>/pipeline_ancestral_repeats.py make full
The example data builds ancestral repeats between human hg19:chr19 and
mouse mm9:chr7.
Code
====
"""
import sys
import os
import CGAT.Experiment as E
import logging as L
import CGATPipelines.PipelineUCSC as PipelineUCSC
from ruffus import *
###################################################
###################################################
###################################################
# Pipeline configuration
###################################################
import CGATPipelines.Pipeline as P
P.getParameters(
["%s/pipeline.ini" % os.path.splitext(__file__)[0],
"../pipeline.ini",
"pipeline.ini"],
defaults={
'query': "",
'target': ""})
PARAMS = P.PARAMS
if os.path.exists("pipeline_conf.py"):
L.info("reading additional configuration from pipeline_conf.py")
exec(compile(open("pipeline_conf.py").read(), "pipeline_conf.py", 'exec'))
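# Illustrative configuration sketch (key names assumed from the parameters
# referenced in this file; the shipped default pipeline.ini is
# authoritative). CGAT flattens "[section] option" into "section_option"
# keys such as hg19_source:
#
#   [general]
#   genome_dir=/path/to/genomes
#   query=hg19
#   target=mm9
#
#   [hg19]
#   source=ucsc
#   database=hg19
#   repeattypes=LINE,SINE
#
#   [mm9]
#   source=ucsc
#   database=mm9
#   repeattypes=LINE,SINE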
def getGenomes():
'''return genome names of query and target.'''
genome_query = os.path.join(PARAMS["genome_dir"], PARAMS["query"])
genome_target = os.path.join(PARAMS["genome_dir"], PARAMS["target"])
return genome_query, genome_target
@files([("%s/%s.idx" % (PARAMS["genome_dir"], x), "%s.sizes" % x)
for x in (PARAMS["query"], PARAMS["target"])])
def buildSizes(infile, outfile):
'''extract size information from genomes.'''
outf = open(outfile, "w")
for line in open(infile):
data = line[:-1].split("\t")
if len(data) >= 4:
contig = data[0]
outf.write("%s\t%s\n" % (contig, data[3]))
outf.close()
if "axt_dir" in PARAMS:
    # build pairwise alignment from axt formatted data.
@follows(buildSizes)
@merge("%s/*.axt.gz" % PARAMS["axt_dir"],
PARAMS["interface_alignment_psl"])
def buildGenomeAlignment(infiles, outfile):
        '''build pairwise genomic alignment from axt files.'''
try:
os.remove(outfile)
except OSError:
pass
for infile in infiles:
E.info("adding %s" % infile)
statement = '''gunzip < %(infile)s
| axtToPsl
/dev/stdin
%(query)s.sizes
%(target)s.sizes
/dev/stdout
| pslSwap /dev/stdin /dev/stdout
| gzip >> %(outfile)s
'''
P.run()
elif "maf_dir" in PARAMS:
@follows(buildSizes)
@merge("%s/*.maf.gz" % PARAMS["maf_dir"], "alignment.raw.psl.gz")
def buildRawGenomeAlignment(infiles, outfile):
        '''build pairwise genomic alignment from maf files.
'''
try:
os.remove(outfile)
except OSError:
pass
for infile in infiles:
# skip maf files without Hsap on top.
if "other" in infile or "supercontig" in infile:
continue
E.info("adding %s" % infile)
genome_query, genome_target = getGenomes()
statement = '''gunzip < %(infile)s
| cgat maf2psl
--query=%(maf_name_query)s
--target=%(maf_name_target)s
--log=%(outfile)s.log
| cgat psl2psl
--method=filter-fasta
--method=sanitize
--queries-tsv-file=%(genome_query)s
--target-psl-file=%(genome_target)s
--log=%(outfile)s.log
| gzip
>> %(outfile)s
'''
P.run()
@transform(buildRawGenomeAlignment,
suffix(".raw.psl.gz"),
".psl.gz")
def buildGenomeAlignment(infile, outfile):
'''remove non-unique alignments in genomic infile.'''
statement = '''gunzip < %(infile)s
| sort -k10,10 -k12,12n
| cgat psl2psl
--method=remove-overlapping-query
--log=%(outfile)s.log
| sort -k14,14 -k16,16n
| cgat psl2psl
--method=remove-overlapping-target
--log=%(outfile)s.log
| gzip
>> %(outfile)s
'''
P.run()
@follows(buildSizes)
@merge("%s/*.maf.gz" % PARAMS["maf_dir"], PARAMS["interface_alignment_psl"])
def buildGenomeAlignmentUCSCTools(infiles, outfile):
        '''build pairwise genomic alignment from maf files.'''
try:
os.remove(outfile)
except OSError:
pass
for infile in infiles:
# skip maf files without Hsap on top.
if "other" in infile or "supercontig" in infile:
continue
E.info("adding %s" % infile)
genome_query, genome_target = getGenomes()
statement = '''gunzip < %(infile)s
| mafToAxt
/dev/stdin
%(maf_name_target)s
%(maf_name_query)s
/dev/stdout
-stripDb
| axtToPsl
/dev/stdin
%(target)s.sizes
%(query)s.sizes
/dev/stdout
| cgat psl2psl
--queries-tsv-file=%(genome_query)s
--target-psl-file=%(genome_target)s
--method=sanitize
| gzip
>> %(outfile)s
'''
P.run()
else:
raise ValueError(
"configuration error: please specify either maf_dir or axt_dir")
def importRepeatsFromUCSC(infile, outfile, ucsc_database, repeattypes, genome):
'''import repeats from a UCSC formatted file.
The repeats are stored as a :term:`gff` formatted file.
'''
repclasses = "','".join(repeattypes.split(","))
# Repeats are either stored in a single ``rmsk`` table (hg19) or in
# individual ``rmsk`` tables (mm9) like chr1_rmsk, chr2_rmsk, ....
# In order to do a single statement, the ucsc mysql database is
# queried for tables that end in rmsk.
dbhandle = PipelineUCSC.connectToUCSC(
host=PARAMS["ucsc_host"],
user=PARAMS["ucsc_user"],
database=ucsc_database)
cc = dbhandle.execute("SHOW TABLES LIKE '%%rmsk'")
tables = [x[0] for x in cc.fetchall()]
if len(tables) == 0:
raise ValueError("could not find any `rmsk` tables")
tmpfile = P.getTempFile(shared=True)
total_repeats = 0
for table in tables:
E.info("%s: loading repeats from %s" % (ucsc_database, table))
cc = dbhandle.execute(
"""SELECT genoName, 'repeat', 'exon', genoStart+1, genoEnd, '.',
strand, '.',
CONCAT('class \\"', repClass, '\\"; family \\"', repFamily, '\\";')
FROM %(table)s
WHERE repClass in ('%(repclasses)s') """ % locals())
n = 0
for data in cc.fetchall():
n += 1
tmpfile.write("\t".join(map(str, data)) + "\n")
E.info("%s: %s=%i repeats downloaded" % (ucsc_database, table, n))
total_repeats += n
if total_repeats == 0:
        raise ValueError("did not find any repeats for %s" % ucsc_database)
tmpfile.close()
tmpfilename = tmpfile.name
statement = '''cat %(tmpfilename)s
| %(pipeline_scriptsdir)s/gff_sort pos
| cgat gff2gff
--method=sanitize
--sanitize-method=genome
--skip-missing
--genome-file=%(genome)s
--log=%(outfile)s.log
| gzip
> %(outfile)s
'''
P.run()
os.unlink(tmpfilename)
def importRepeatsFromEnsembl(infile, outfile,
ensembl_database,
repeattypes, genome):
'''import repeats from an ENSEMBL database.
'''
statement = '''
perl %(scriptsdir)s/ensembl_repeats2gff.pl
-h %(ensembl_host)s
-u %(ensembl_user)s
-p %(ensembl_password)s
-d %(ensembl_database)s
--repeattypes %(repeattypes)s
| %(pipeline_scriptsdir)s/gff_sort pos
| cgat gff2gff
--method=sanitize
--sanitize-method=genome
--skip-missing
--genome-file=%(genome)s
--log=%(outfile)s.log
| gzip
> %(outfile)s
'''
P.run()
@jobs_limit(1, "UCSC")
@files([(None, "%s_repeats.gff.gz" % x, x)
for x in (PARAMS["query"], PARAMS["target"])])
def importRepeats(infile, outfile, track):
'''import repeats from external sources.'''
source = PARAMS["%s_source" % track]
genome = os.path.join(PARAMS["genome_dir"], track)
if source == "ensembl":
importRepeatsFromEnsembl(infile, outfile,
PARAMS["%s_database" % track],
repeattypes=PARAMS["%s_repeattypes" % track],
genome=genome)
elif source == "ucsc":
importRepeatsFromUCSC(infile, outfile,
PARAMS["%s_database" % track],
repeattypes=PARAMS["%s_repeattypes" % track],
genome=genome)
@transform(importRepeats,
suffix("_repeats.gff.gz"),
"_merged.gff.gz")
def mergeRepeats(infile, outfile):
'''merge adjacent repeats.'''
statement = '''gunzip
< %(infile)s
| cgat gff2gff
--method=merge-features
--min-distance=0
--max-distance=10
--min-features=0
--max-features=0
--log=%(outfile)s.log
| gzip
> %(outfile)s
'''
P.run()
@follows(buildGenomeAlignment)
@merge(mergeRepeats, "aligned_repeats.psl.gz")
def buildAlignedRepeats(infiles, outfile):
'''build alignment between repeats.
'''
infile_target = PARAMS["target"] + "_merged.gff.gz"
infile_query = PARAMS["query"] + "_merged.gff.gz"
# using farm.py to send to cluster
# granularity should be set automatically.
granularity = 5000
# need to escape pipe symbols within farm.py command
# statement = r'''
# gunzip < %(interface_alignment_psl)s
# | %(cmd-farm)s --split-at-lines=%(granularity)i --log=%(outfile)s.log --is-binary
# "cgat psl2psl
# --method=test
# --log=%(outfile)s.log
# | cgat psl2psl
# --method=map
# --filter-query=%(infile_query)s
# --filter-target=%(infile_target)s
# --log=%(outfile)s.log "
# | gzip
# > %(outfile)s'''
# P.run()
statement = '''
gunzip < %(interface_alignment_psl)s
| cgat psl2psl
--method=test
--log=%(outfile)s.log
| cgat psl2psl
--method=map
--filter-query=%(infile_query)s
--filter-target=%(infile_target)s
--log=%(outfile)s.log
| gzip
> %(outfile)s'''
P.run()
########################################################
########################################################
########################################################
@files(buildAlignedRepeats, "aligned_repeats.rates.gz")
def buildRepeatsRates(infile, outfile):
'''compute rates for individual aligned repeats.'''
genome_query, genome_target = getGenomes()
statement = '''gunzip < %(infile)s
| sort -k10,10 -k14,14 -k9,9 -k12,12n
| %(cmd-farm)s --split-at-lines=10000 --output-header --log=%(outfile)s.log
"cgat psl2psl
--log=%(outfile)s.log
--method=add-sequence
--queries-tsv-file=%(genome_query)s
--target-psl-file=%(genome_target)s
| cgat psl2table
--method=query-counts
--method=baseml
--baseml-model=REV"
| gzip > %(outfile)s
'''
P.run()
@transform((buildAlignedRepeats, buildGenomeAlignment),
suffix(".psl.gz"),
".stats")
def computeAlignmentStats(infile, outfile):
'''compute alignment coverage statistics'''
statement = '''
gunzip < %(infile)s
| cgat psl2stats
--log=%(outfile)s.log
> %(outfile)s'''
P.run()
########################################################
########################################################
########################################################
@transform(mergeRepeats, suffix(".gff.gz"), ".stats")
def computeRepeatsCounts(infile, outfile):
'''count number and type of repeats.'''
pass
# %_repeats_counts.stats: ucsc_%_repeats.table.gz
# $(PRELOG)
# @gunzip < $< | pe "s/#//" |\
# csv_cut genoName genoStart genoEnd repName repClass repFamily |\
# awk '/genoName/ {printf("%s\t%s\n", $$5, "length"); next;} {printf("%s\t%i\n", $$5, $$3-$$2); } ' |\
# t2t --group=1 --group-function=stats > $@
# $(EPILOG)
########################################################
########################################################
########################################################
@transform(mergeRepeats,
suffix("_merged.gff.gz"),
"_repeats_sizes.stats")
def buildRepeatDistribution(infile, outfile):
'''count size and distance distribution of repeats.'''
statement = '''gunzip
< %(infile)s
| cgat gff2histogram
--output-filename-pattern="%(outfile)s.%%s"
--method=all
> %(outfile)s
'''
P.run()
########################################################
########################################################
########################################################
@files(buildRepeatsRates, PARAMS["interface_rates_query_gff"])
def exportRatesAsGFF(infile, outfile):
'''export gff file with rate as score.'''
statement = '''gunzip
< %(infile)s
| cgat csv_cut qName qStart qEnd distance converged
| awk '!/qName/ && $5 {printf("%%s\\tancestral_repeat\\texon\\t%%s\\t%%s\\t%%s\\t+\\t.\\t.\\n", $1, $2, $3, $4);}'
| gzip
> %(outfile)s
'''
P.run()
@follows(importRepeats,
mergeRepeats,
buildAlignedRepeats,
buildRepeatsRates,
buildRepeatDistribution,
computeAlignmentStats,
computeRepeatsCounts,
exportRatesAsGFF,
)
def full():
pass
###################################################################
###################################################################
###################################################################
# primary targets
###################################################################
@follows(mkdir("report"))
def build_report():
'''build report from scratch.'''
E.info("starting report build process from scratch")
P.run_report(clean=True)
@follows(mkdir("report"))
def update_report():
'''update report.'''
E.info("updating report")
P.run_report(clean=False)
@follows(update_report)
def publish_report():
'''publish report.'''
E.info("publishing report")
P.publish_report()
if __name__ == "__main__":
sys.exit(P.main(sys.argv))
|
|
#!/usr/bin/python
import urllib
import urllib2
from optparse import OptionParser
import json
import os.path
import time
import re
from datetime import datetime, timedelta
import sys
BASEurl = "https://api.xforce.ibmcloud.com/"
sys.path.append('./')
import iprep_conf as IC
yesterday = datetime.now() - timedelta(days=1)
YEST = yesterday.strftime('%Y-%m-%dT00:00:00Z')
headers = {"Authorization": "Basic %s " % IC.xfex_cred,
"Accept": "application/json",
'User-Agent': 'Mozilla 5.0'}
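# Illustrative sketch of the imported iprep_conf module (assumed layout:
# the Authorization header above is HTTP Basic auth, so xfex_cred is
# expected to be the base64 encoding of "<api_key>:<api_password>"):
#
#   # iprep_conf.py
#   import base64
#   xfex_cred = base64.b64encode("YOUR_API_KEY:YOUR_API_PASSWORD")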
def getPAM(text):
furl = BASEurl + "signatures/fulltext?q=%s" % text.strip()
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
return data2["rows"][0]["pamid"].strip()
def getXFD_fromCVE(cve):
furl = BASEurl + "vulnerabilities/search/%s" % cve.strip()
request = urllib2.Request(furl, None, headers)
try:
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
return data2[0]["xfdbid"]
except:
return "Not found"
def getXFD(pamid):
furl = BASEurl + "signatures/%s" % pamid
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
return data2["protects_against"]["xfdbid"]
def getFull(xfid):
if xfid == "Not found":
return xfid
furl = BASEurl + "vulnerabilities/%s" % xfid
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
    return [data2[u"description"], data2[u"risk_level"], data2[u"platforms_affected"], data2[u"stdcode"]]
def getCase_attachments(Caseid):
furl = BASEurl + "casefiles/%s/attachments" % Caseid
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
return data2
def getTrend(ip):
#try:
furl = BASEurl + "ipr/%s" % ip
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
date = []
score = []
for item in data2["history"]:
date.append(item["created"])
score.append(item["score"])
return (date, score)
#except:
#return [str(data2), "Ups", "Ups", "ups"]
def getip(ip):
try:
furl = BASEurl + "ipr/%s" % ip
furl2 = BASEurl + "ipr/malware/%s" % ip
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
request = urllib2.Request(furl2, None, headers)
data = urllib2.urlopen(request)
data3 = json.loads(data.read())
merged_dict = {key: value for (key, value) in (data2.items() + data3.items())}
return merged_dict
#return str(data2)
#return [data2[u"history"][0]["geo"]["country"], data2[u"score"], data2[u"reason"], data2[u"categoryDescriptions"]]
    except Exception as e:
        return [str(e), "Ups", "Ups", "ups"]
def ixf_IPtoWeb(ip):
dataset = getip(ip)
return '''<ul><b> %s </b>
<li>Description: %s</li>
<li>Score: %s</li>
<li>Geo Location: %s</li></ul>''' % (ip, str(dataset[3]), str(dataset[1]), str(dataset[0]))
def ixf_m(text):
return 0
def ixf_cve_forWeb(cve):
result = getFull(getXFD_fromCVE(cve))
print result
return '''<ul><b> %s </b>
<li>Description: %s</li>
<li>Risk_level: %s</li>
<li>Affected: %s</li>
<li>STDcode: %s</li></ul>''' % (cve, result[0], result[1] , str(", ".join(result[2])),str(", ".join(result[3])) )
def ixf_s(text):
return getFull(getXFD(getPAM(text)))
def ixf_forWeb(text):
result = ixf_s(text.strip(","))
return '''<ul><b> %s </b>
<li>Description: %s</li>
<li>Risk_level: %s</li>
<li>Affected: %s</li>
<li>STDcode: %s</li></ul>''' % (text, result[0], result[1] , str(", ".join(result[2])),str(", ".join(result[3])) )
def getVulnr():
try:
furl = BASEurl + "vulnerabilities"
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
return data2
except:
return {"IBM XForce Exchange" : "No Data"}
def getURL(url):
try:
furl = BASEurl + "url/%s" % url
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
return data2
except:
return {"IBM XForce Exchange" : "No Data"}
def getURLm(url):
try:
furl = BASEurl + "url/malware/%s" % url
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
return data2
except:
return {"IBM XForce Exchange" : "No Data"}
def getCase_Shared():
furl = BASEurl + "casefiles/shared"
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
return data2
def getCase_Public():
furl = BASEurl + "casefiles/public"
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
return data2
def getXPUdir():
furl = BASEurl + "signatures/xpu/directory"
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
return data2
def getCase_by_Group(groupID):
furl = BASEurl + "user/groups/%s/casefiles" % groupID
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
return data2
def getxfid_fromMS(msid):
furl = BASEurl + "vulnerabilities/msid/%s" % msid
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
return data2
def getmsid(msid):
try:
furl = BASEurl + "vulnerabilities/msid/%s" % msid
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
return data2
except:
return {"IBM XForce Exchange" : "No Data"}
def getMalw(hash):
try:
furl = BASEurl + "malware/%s" % hash
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
return data2
except:
return {"IBM XForce Exchange" : "No Data"}
def getDAMN(id):
try:
furl = BASEurl + "casefiles/%s/attachments" % id
request = urllib2.Request(furl, None, headers)
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
iplist = set()
for item in data2["attachments"]:
if "IP" in item["report"]["type"]:
iplist.add(item["report"]["title"])
return iplist
except:
return {"IBM XForce Exchange" : "No Data"}
#- Spam
#- Anonymisation Services
#- Scanning IPs
#- Dynamic IPs
#- Malware
#- Bots
#- Botnet Command and Control Server
def intrIPs(cat=None):
dcata = { "1" : "Spam",
"2" : "Anonymisation Services",
"3" : "Scanning IPs",
"4" : "Dynamic IPs",
"5" : "Malware",
"6" : "Bots",
"7" : "Botnet Command and Control Server"}
if cat is None:
cata = dcata["7"]
size = 45
else:
cata = dcata[cat]
size = 45
datar = dict()
furl = BASEurl + "ipr?category=%s&startDate=%s&limit=%s" % (urllib.quote_plus(cata), YEST, size)
request = urllib2.Request(furl, None, headers)
try:
data = urllib2.urlopen(request)
data2 = json.loads(data.read())
        datar.update(data2)
except:
return {"IBM XForce Exchange" : "No Data"}
return datar
def extractIP(text):
ip = re.compile(r"\b(?:(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(?:25[0-5]|2[0-4]\d|[01]?\d\d?)\b")
return ip.findall(text)[0]
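# Illustrative sketch (not part of the original script; the category key and
# sample text are assumptions):
#
#   malware_ips = intrIPs("5")                              # "Malware" category
#   first_ip = extractIP("beacon seen at 203.0.113.7")      # -> "203.0.113.7"
#   print ixf_IPtoWeb(first_ip)                             # HTML summary block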
parser = OptionParser()
parser.add_option("-c", "--coll", dest="coll" , default=None,
help="Get a file of Public Collection IPS", metavar="filename")
parser.add_option("-i", "--ip", dest="ip" , default=None,
help="get IP intel", metavar="IP_Address")
parser.add_option("-s", "--sstr", dest="s_string" , default=None,
help="Get a file of Public Collection IPS", metavar="filename")
parser.add_option("-m", "--multiple", dest="s_PAMFILE" , default=None,
help="file of signature list, one PAM_Name per line", metavar="filename")
parser.add_option("-p", "--pam", dest="s_PAMSTRING" , default=None,
help="PAM string to be checked", metavar="single PAM_Name")
(options, args) = parser.parse_args()
HOMEfolder = os.path.dirname(os.path.realpath(__file__))
url = "https://api.xforce.ibmcloud.com"
if( options.s_string is not None ):
result = ixf_s(options.s_string.strip(","))
print "PAM_Name: %s\nDescription: %s\nRisk_level: %s \n\nAffected: %s\n\nSTDcode: %s\n" % (options.s_string.strip(","), result[0], result[1] ,
str(", ".join(result[2])), str(",".join(result[3])))
elif options.coll is not None:
outfile = open(options.coll,"wb")
outfile.write("## "+str(datetime.datetime.utcnow())+"\n")
    outfile.write(json.dumps(getCase_Public()))
outfile.close()
print "XForce Public Collections IP list updated and saved as %s" % options.coll
elif options.ip is not None:
print getip(options.ip)
elif ( options.s_PAMFILE is not None ):
    fili = open(options.s_PAMFILE, "rb")
    ofili = open(options.s_PAMFILE + "_OUT", "wb")
    ofili.write("PAM_Name, Description, Risk_Score, Affected_Systems, STDcode\n")
    for line in fili.readlines():
        pam = line.strip().strip(",")
        print pam
        result = ixf_s(pam)
        ofili.write("%s,%s,%s,%s,%s\n" % (pam, result[0].replace(",", " "), result[1], str(";".join(result[2])), str("; ".join(result[3]))))
        time.sleep(5)
    fili.close()
    ofili.close()
elif( options.s_PAMSTRING is not None ):
    result = ixf_s(options.s_PAMSTRING.strip(","))
    print "PAM_Name: %s\nDescription: %s\nRisk_level: %s \n\nAffected: %s\n\nSTDcode: %s\n" % (options.s_PAMSTRING.strip(","), result[0], result[1], str(", ".join(result[2])), str(",".join(result[3])))
|
|
# -*- coding: utf-8 -*-
import pytest
from thriftpy.thrift import TType
from thriftpy.parser import load
from thriftpy.parser.exc import ThriftParserError, ThriftGrammerError
def test_comments():
load('parser-cases/comments.thrift')
def test_constants():
thrift = load('parser-cases/constants.thrift')
assert thrift.int16 == 3
assert thrift.int32 == 800
assert thrift.int64 == 123456789
assert thrift.tstr == 'hello world'
assert thrift.integer32 == 900
assert thrift.tdouble == 1.3
assert thrift.tlist == [1, 2, 3]
assert thrift.tset == set([1, 2, 3])
assert thrift.tmap1 == {'key': 'val'}
assert thrift.tmap2 == {'key': 32}
assert thrift.my_country == 4
assert thrift.tom == thrift.Person(name='tom')
assert thrift.country_map == {1: 'US', 2: 'UK', 3: 'CA', 4: 'CN'}
def test_include():
thrift = load('parser-cases/include.thrift', include_dirs=[
'./parser-cases'])
assert thrift.datetime == 1422009523
def test_tutorial():
thrift = load('parser-cases/tutorial.thrift', include_dirs=[
'./parser-cases'])
assert thrift.INT32CONSTANT == 9853
assert thrift.MAPCONSTANT == {'hello': 'world', 'goodnight': 'moon'}
assert thrift.Operation.ADD == 1 and thrift.Operation.SUBTRACT == 2 \
and thrift.Operation.MULTIPLY == 3 and thrift.Operation.DIVIDE == 4
work = thrift.Work()
assert work.num1 == 0 and work.num2 is None and work.op is None \
and work.comment is None
assert set(thrift.Calculator.thrift_services) == set([
'ping', 'add', 'calculate', 'zip', 'getStruct'])
def test_e_type_error():
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_type_error_0.thrift')
assert 'Type error' in str(excinfo.value)
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_type_error_1.thrift')
assert 'Type error' in str(excinfo.value)
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_type_error_2.thrift')
assert 'Type error' in str(excinfo.value)
def test_value_ref():
thrift = load('parser-cases/value_ref.thrift')
assert thrift.container == {'key': [1, 2, 3]}
assert thrift.lst == [39, 899, 123]
def test_type_ref():
thrift = load('parser-cases/type_ref.thrift')
assert thrift.jerry == thrift.type_ref_shared.Writer(
name='jerry', age=26, country=thrift.type_ref_shared.Country.US)
assert thrift.book == thrift.type_ref_shared.Book(name='Hello World',
writer=thrift.jerry)
def test_e_value_ref():
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_value_ref_0.thrift')
assert excinfo.value
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_value_ref_1.thrift')
assert str(excinfo.value) == ('Couldn\'t find a named value in enum Lang '
'for value 3')
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_value_ref_2.thrift')
assert str(excinfo.value) == \
'No enum value or constant found named \'Cookbook\''
def test_enums():
thrift = load('parser-cases/enums.thrift')
assert thrift.Lang.C == 0
assert thrift.Lang.Go == 1
assert thrift.Lang.Java == 2
assert thrift.Lang.Javascript == 3
assert thrift.Lang.PHP == 4
assert thrift.Lang.Python == 5
assert thrift.Lang.Ruby == 6
assert thrift.Country.US == 1
assert thrift.Country.UK == 2
assert thrift.Country.CN == 3
assert thrift.OS.OSX == 0
assert thrift.OS.Win == 3
assert thrift.OS.Linux == 4
def test_structs():
thrift = load('parser-cases/structs.thrift')
assert thrift.Person.thrift_spec == {
1: (TType.STRING, 'name', False),
2: (TType.STRING, 'address', False)
}
assert thrift.Person.default_spec == [
('name', None), ('address', None)
]
assert thrift.Email.thrift_spec == {
1: (TType.STRING, 'subject', False),
2: (TType.STRING, 'content', False),
3: (TType.STRUCT, 'sender', thrift.Person, False),
4: (TType.STRUCT, 'recver', thrift.Person, True),
}
assert thrift.Email.default_spec == [
('subject', 'Subject'), ('content', None),
('sender', None), ('recver', None)
]
assert thrift.email == thrift.Email(
subject='Hello',
content='Long time no see',
sender=thrift.Person(name='jack', address='[email protected]'),
recver=thrift.Person(name='chao', address='[email protected]')
)
def test_e_structs():
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_structs_0.thrift')
assert str(excinfo.value) == \
'Field \'name\' was required to create constant for type \'User\''
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_structs_1.thrift')
assert str(excinfo.value) == \
'No field named \'avatar\' was found in struct of type \'User\''
def test_service():
thrift = load('parser-cases/service.thrift')
assert thrift.EmailService.thrift_services == ['ping', 'send']
assert thrift.EmailService.ping_args.thrift_spec == {}
assert thrift.EmailService.ping_args.default_spec == []
assert thrift.EmailService.ping_result.thrift_spec == {
1: (TType.STRUCT, 'network_error', thrift.NetworkError, False)
}
assert thrift.EmailService.ping_result.default_spec == [
('network_error', None)
]
assert thrift.EmailService.send_args.thrift_spec == {
1: (TType.STRUCT, 'recver', thrift.User, False),
2: (TType.STRUCT, 'sender', thrift.User, False),
3: (TType.STRUCT, 'email', thrift.Email, False),
}
assert thrift.EmailService.send_args.default_spec == [
('recver', None), ('sender', None), ('email', None)
]
assert thrift.EmailService.send_result.thrift_spec == {
0: (TType.BOOL, 'success', False),
1: (TType.STRUCT, 'network_error', thrift.NetworkError, False)
}
assert thrift.EmailService.send_result.default_spec == [
('success', None), ('network_error', None)
]
def test_service_extends():
thrift = load('parser-cases/service_extends.thrift')
assert thrift.PingService.thrift_services == ['ping', 'getStruct']
def test_e_service_extends():
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_service_extends_0.thrift')
assert 'Can\'t find service' in str(excinfo.value)
def test_e_dead_include():
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_dead_include_0.thrift')
assert 'Dead including' in str(excinfo.value)
def test_e_grammer_error_at_eof():
with pytest.raises(ThriftGrammerError) as excinfo:
load('parser-cases/e_grammer_error_at_eof.thrift')
assert str(excinfo.value) == 'Grammer error at EOF'
def test_e_use_thrift_reserved_keywords():
with pytest.raises(ThriftParserError) as excinfo:
load('parser-cases/e_use_thrift_reserved_keywords.thrift')
assert 'Cannot use reserved language keyword' in str(excinfo.value)
def test_e_duplicate_field_id_or_name():
with pytest.raises(ThriftGrammerError) as excinfo:
load('parser-cases/e_duplicate_field_id.thrift')
assert 'field identifier/name has already been used' in str(excinfo.value)
with pytest.raises(ThriftGrammerError) as excinfo:
load('parser-cases/e_duplicate_field_name.thrift')
assert 'field identifier/name has already been used' in str(excinfo.value)
def test_thrift_meta():
thrift = load('parser-cases/tutorial.thrift')
meta = thrift.__thrift_meta__
assert meta['consts'] == [thrift.INT32CONSTANT, thrift.MAPCONSTANT]
assert meta['enums'] == [thrift.Operation]
assert meta['structs'] == [thrift.Work]
assert meta['exceptions'] == [thrift.InvalidOperation]
assert meta['services'] == [thrift.Calculator]
assert meta['includes'] == [thrift.shared]
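# Illustrative sketch (not one of the parser cases exercised above): load()
# exposes the parsed IDL as attributes on a module-like object, which is what
# the assertions in these tests rely on. The values below are assumptions.
#
#   ab = load('parser-cases/structs.thrift')
#   person = ab.Person(name='alice', address='[email protected]')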
|
|
from __future__ import print_function
from collections import defaultdict
import datetime
import fnmatch
import glob
import json
import os
import random
import re
import shlex
import textwrap
import networkx as nx
from django.conf import settings
from django.core.management import call_command as django_call_command
from gcutils.storage import Client as StorageClient
from openprescribing.slack import notify_slack
from openprescribing.utils import find_files
from .models import TaskLog
class Source(object):
def __init__(self, name, attrs):
self.name = name
self.title = attrs['title']
self.data_dir = os.path.join(
settings.PIPELINE_DATA_BASEDIR,
attrs.get('data_dir', name)
)
self.publisher = attrs.get('publisher')
self.publication_schedule = attrs.get('publication_schedule')
self.publication_lag = attrs.get('publication_lag')
self.notes = attrs.get('notes')
self.index_url = attrs.get('index_url')
self.urls = attrs.get('urls')
self.tasks = TaskCollection()
def add_task(self, task):
self.tasks.add(task)
def tasks_that_use_raw_source_data(self):
tasks = self.tasks.by_type('convert')
if not tasks:
tasks = self.tasks.by_type('import')
return tasks
class SourceCollection(object):
def __init__(self, source_data):
self._sources = {
name: Source(name, attrs)
for name, attrs in source_data.items()
}
def __getitem__(self, name):
return self._sources[name]
class Task(object):
def __init__(self, name, attrs):
self.name = name
self.task_type = attrs['type']
if self.task_type == 'post_process':
self.source_id = None
else:
self.source_id = attrs['source_id']
if self.task_type != 'manual_fetch':
self.command = attrs['command']
if self.task_type not in ['manual_fetch', 'auto_fetch']:
self.dependency_names = attrs['dependencies']
else:
self.dependency_names = []
def set_source(self, source):
self.source = source
def resolve_dependencies(self, task_collection):
self.dependencies = [
task_collection[name]
for name in self.dependency_names
]
def filename_pattern(self):
'''Return pattern that matches the part of the task's command that
should be substituted for the task's input filename.'''
filename_flags = [
'filename',
'ccg',
'epraccur',
'chem_file',
'hscic_address',
'month_from_prescribing_filename',
'zip_path',
]
cmd_parts = shlex.split(self.command.encode('unicode-escape'))
filename_idx = None
for flag in filename_flags:
try:
filename_idx = cmd_parts.index("--%s" % flag) + 1
except ValueError:
pass
assert filename_idx is not None
return cmd_parts[filename_idx]
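    # Illustrative sketch (hypothetical command, not taken from the metadata
    # files): for a task whose command is
    #     import_foo --filename prescribing_*.csv
    # filename_pattern() returns 'prescribing_*.csv', the glob that
    # imported_paths() and input_paths() below match file paths against.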
def imported_paths(self):
        '''Return a list of paths to all data files already imported for this
        task, ordered by import time.'''
records = load_import_records()
records_for_source = records[self.source.name]
pattern = self.filename_pattern()
matched_records = [
record for record in records_for_source
if path_matches_pattern(record['imported_file'], pattern)
]
sorted_records = sorted(
matched_records,
key=lambda record: record['imported_at']
)
return [record['imported_file'] for record in sorted_records]
def input_paths(self):
        '''Return a list of paths to input files for this task.'''
paths = glob.glob("%s/*/*" % self.source.data_dir)
return sorted(
path for path in paths
if path_matches_pattern(path, self.filename_pattern())
)
def set_last_imported_path(self, path):
'''Set the path of the most recently imported data for this source.'''
now = datetime.datetime.now().replace(microsecond=0).isoformat()
records = load_import_records()
records[self.source.name].append({
'imported_file': path,
'imported_at': now,
})
dump_import_records(records)
def unimported_paths(self):
        '''Return a list of paths to input files for this task that have not
        yet been imported.'''
        imported_paths = self.imported_paths()
return [
path for path in self.input_paths()
if path not in imported_paths
]
class ManualFetchTask(Task):
def run(self, year, month):
print('Running manual fetch task {}'.format(self.name))
instructions = self.manual_fetch_instructions()
print(instructions)
paths_before = find_files(self.source.data_dir)
raw_input('Press return when done, or to skip this step')
paths_after = find_files(self.source.data_dir)
new_paths = [path for path in paths_after if path not in paths_before]
if new_paths:
print('The following files have been manually fetched:')
for path in new_paths:
print(' * {}'.format(path))
else:
print('No new files were found at {}'.format(self.source.data_dir))
raw_input('Press return to confirm, or Ctrl+C to cancel '
'and resolve any problems')
def manual_fetch_instructions(self):
source = self.source
expected_location = os.path.join(
settings.PIPELINE_DATA_BASEDIR,
source.name,
'YYYY_MM',
)
output = []
output.append('~' * 80)
output.append('You should now locate the latest data for %s, if '
'available' % source.name)
output.append('You should save it at:')
output.append(' %s' % expected_location)
if source.index_url:
output.append('Where to look:')
output.append(' %s' % source.index_url)
if source.urls:
output.append('Previous data has been found at:')
for k, v in source.urls.items():
output.append(' %s: %s' % (k, v))
if source.publication_schedule:
output.append('Publication frequency:')
output.append(' %s' % source.publication_schedule)
if source.notes:
output.append('Notes:')
for line in textwrap.wrap(source.notes):
output.append(' %s' % line)
output.append('The last imported data can be found at:')
for task in source.tasks_that_use_raw_source_data():
paths = task.imported_paths()
if paths:
path = paths[-1]
else:
path = '<never imported>'
output.append(' %s' % path)
return '\n'.join(output)
class AutoFetchTask(Task):
def run(self, year, month):
print('Running auto fetch task {}'.format(self.name))
command = self.command.format(year=year, month=month)
tokens = shlex.split(command)
call_command(*tokens)
class ConvertTask(Task):
def run(self, year, month):
# For now, year and month are ignored
print('Running convert task {}'.format(self.name))
unimported_paths = self.unimported_paths()
for path in unimported_paths:
command = self.command.replace(self.filename_pattern(), path)
tokens = shlex.split(command)
call_command(*tokens)
self.set_last_imported_path(path)
class ImportTask(Task):
def run(self, year, month):
# For now, year and month are ignored
print('Running import task {}'.format(self.name))
unimported_paths = self.unimported_paths()
for path in unimported_paths:
command = self.command.replace(self.filename_pattern(), path)
tokens = shlex.split(command)
call_command(*tokens)
self.set_last_imported_path(path)
class PostProcessTask(Task):
def run(self, year, month, last_imported):
# For now, year and month are ignored
command = self.command.format(last_imported=last_imported)
tokens = shlex.split(command)
call_command(*tokens)
class TaskCollection(object):
task_type_to_cls = {
'manual_fetch': ManualFetchTask,
'auto_fetch': AutoFetchTask,
'convert': ConvertTask,
'import': ImportTask,
'post_process': PostProcessTask,
}
def __init__(self, task_data=None, ordered=False, task_type=None):
self._tasks = {}
if isinstance(task_data, dict):
for name, attrs in task_data.items():
cls = self.task_type_to_cls[attrs['type']]
task = cls(name, attrs)
self.add(task)
elif isinstance(task_data, list):
for task in task_data:
self.add(task)
self._ordered = ordered
self._type = task_type
def add(self, task):
self._tasks[task.name] = task
def __getitem__(self, name):
return self._tasks[name]
def __iter__(self):
if self._ordered:
graph = nx.DiGraph()
for task in self._tasks.values():
graph.add_node(task)
for dependency in task.dependencies:
graph.add_node(dependency)
graph.add_edge(dependency, task)
tasks = nx.topological_sort(graph)
else:
tasks = [task for _, task in sorted(self._tasks.items())]
for task in tasks:
if self._type is None:
yield task
else:
if self._type == task.task_type:
yield task
def __nonzero__(self):
if self._type:
return any(task for task in self if task.task_type == self._type)
else:
return bool(self._tasks)
def by_type(self, task_type):
return TaskCollection(list(self), ordered=self._ordered,
task_type=task_type)
def ordered(self):
return TaskCollection(list(self), ordered=True, task_type=self._type)
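# Rough usage sketch (the year/month values are assumptions): by_type() filters
# the collection and ordered() sorts a filtered collection topologically so
# dependencies run first, which is how run_all() below drives the pipeline.
#
#   tasks = load_tasks()
#   for task in tasks.by_type('import').ordered():
#       run_task(task, 2017, 1)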
def load_tasks():
metadata_path = settings.PIPELINE_METADATA_DIR
with open(os.path.join(metadata_path, 'sources.json')) as f:
source_data = json.load(f)
sources = SourceCollection(source_data)
with open(os.path.join(metadata_path, 'tasks.json')) as f:
task_data = json.load(f)
tasks = TaskCollection(task_data)
for task in tasks:
if task.source_id is None:
task.set_source(None)
else:
source = sources[task.source_id]
task.set_source(source)
source.add_task(task)
task.resolve_dependencies(tasks)
return tasks
def load_import_records():
with open(settings.PIPELINE_IMPORT_LOG_PATH) as f:
log_data = json.load(f)
return defaultdict(list, log_data)
def dump_import_records(records):
with open(settings.PIPELINE_IMPORT_LOG_PATH, 'w') as f:
json.dump(records, f, indent=2, separators=(',', ': '))
def upload_all_to_storage(tasks):
for task in tasks.by_type('convert'):
upload_task_input_files(task)
for task in tasks.by_type('import'):
upload_task_input_files(task)
def upload_task_input_files(task):
storage_client = StorageClient()
bucket = storage_client.get_bucket()
for path in task.input_paths():
assert path[0] == '/'
assert settings.PIPELINE_DATA_BASEDIR[-1] == '/'
name = 'hscic' + path.replace(settings.PIPELINE_DATA_BASEDIR, '/')
blob = bucket.blob(name)
if blob.exists():
print("Skipping %s, already uploaded" % name)
continue
print("Uploading %s to %s" % (path, name))
with open(path) as f:
blob.upload_from_file(f)
def path_matches_pattern(path, pattern):
return fnmatch.fnmatch(os.path.basename(path), pattern)
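# Minimal sketch (the path below is an assumption): only the basename is
# compared, so the directory layout does not matter.
#
#   path_matches_pattern('/data/source/2017_01/extract_2017.csv', 'extract_*.csv')
#   # -> True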
def call_command(*args):
print('call_command {}'.format(args))
return django_call_command(*args)
def run_task(task, year, month, **kwargs):
if TaskLog.objects.filter(
year=year,
month=month,
task_name=task.name,
status=TaskLog.SUCCESSFUL,
).exists():
# This task has already been run successfully
return
task_log = TaskLog.objects.create(
year=year,
month=month,
task_name=task.name,
)
try:
task.run(year, month, **kwargs)
task_log.mark_succeeded()
except:
# We want to catch absolutely every error here, including things that
# wouldn't be caught by `except Exception` (like `KeyboardInterrupt`),
# since we want to log that the task didn't complete.
import traceback
task_log.mark_failed(formatted_tb=traceback.format_exc())
msg = 'Importing data for {}_{} has failed when running {}.'.format(
year, month, task.name)
notify_slack(msg)
raise
def run_all(year, month, under_test=False):
tasks = load_tasks()
if not under_test:
for task in tasks.by_type('manual_fetch'):
run_task(task, year, month)
for task in tasks.by_type('auto_fetch'):
run_task(task, year, month)
upload_all_to_storage(tasks)
for task in tasks.by_type('convert').ordered():
run_task(task, year, month)
for task in tasks.by_type('import').ordered():
run_task(task, year, month)
prescribing_path = tasks['import_hscic_prescribing'].imported_paths()[-1]
last_imported = re.findall(r'/(\d{4}_\d{2})/', prescribing_path)[0]
for task in tasks.by_type('post_process').ordered():
if under_test and 'smoketest' in task.name:
# Smoketests run against live site, so we should skip when running
# under test
continue
run_task(task, year, month, last_imported=last_imported)
TaskLog.objects.create(
year=year,
month=month,
task_name='fetch_and_import',
status=TaskLog.SUCCESSFUL,
)
activity = random.choice([
'Put the kettle on',
'Have a glass of wine',
'Get up and stretch',
])
msg = '''
Importing data for {}_{} complete!
You should now:
* Tweet about it
* Commit the changes to the smoke tests
* {}
(Details: https://github.com/ebmdatalab/openprescribing/wiki/Importing-data)
'''.strip().format(year, month, activity)
if not under_test:
notify_slack(msg)
def in_progress():
try:
current_year, current_month = TaskLog.objects.\
values_list('year', 'month').\
distinct().\
order_by('-year', '-month')[0]
except IndexError:
# In development there might be no TaskLog objects.
return False
return not TaskLog.objects.filter(
year=current_year,
month=current_month,
task_name='fetch_and_import',
).exists()
|
|
#!/usr/bin/env python
"""
search_and_DOI_utilities.py
Python utilities for searching for PLOS papers, retrieving them, and obtaining the DOIs cited therein.
External dependencies: beautifulsoup4, lxml, requests
All of these packages can be easily installed using pip.
"""
from bs4 import BeautifulSoup
from itertools import chain, compress
import requests
from urllib2 import quote
import json
import re
import codecs
from copy import copy
def plos_search(query='*', query_parameters=None, extra_parameters=None, rows=20, fq='''doc_type:full AND article_type:"Research Article"''',
output="json", verbose=False, api_key='...'):
'''
Accesses the PLOS search API.
INPUTS
--------
query : str
A keyword to search for. This can be '*' if you want to return all results and search w/ query_parameters
query_parameters : (dict)
A dictionary of 'query_type: query' pairs. E.g.: {'author': 'Eisen'}
NOTE: Currently, it seems that 'author' is the only keyword you can use.
For all other keywords, use 'extra_parameters'
extra_parameters : (dict)
Extra parameters to pass. key-value pairs are parameter names and values for the search api.
rows : (int)
maximum number of results to return.
fq : (dict)
Determines what kind of results are returned.
Set by default to return only full documents that are research articles (almost always what you want).
output: (string)
determines output type. Set to JSON by default,
XML is also possible, along with a few others.
OUTPUT
--------
doc_json : (json)
A list of query results. Each item is a json entry representing an article.
'''
PLOS_SEARCH_URL = "http://api.plos.org/search"
# Create dictionaries we'll use in the query
if isinstance(query_parameters, dict):
query_strings = ["{0}:'{1}'".format(key, value)
for key, value in query_parameters.iteritems()]
print query_strings
else:
query_strings = []
query_dict = {'q': ' AND '.join(query_strings)}
query_dict.update({'fq': fq,
'wt': output,
'rows': str(rows),
'api_key': api_key})
if extra_parameters is not None:
query_dict.update(extra_parameters)
headers = {'Content-Type': 'application/' + output}
# Build the final query and post a GET request
r = requests.get(PLOS_SEARCH_URL, params=query_dict, headers=headers)
r.encoding = "UTF-8" # just to be sure
print r.url
doc_json = r.json()["response"]["docs"]
if verbose:
print query_dict['q']
print r.url
if len(doc_json) == 0:
print('No results found')
return doc_json
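# Illustrative call (the author name is an assumption); returns a list of JSON
# docs whose 'id' fields are DOIs, which plos_dois() below extracts:
#
#   docs = plos_search(query_parameters={'author': 'Eisen'}, rows=5)
#   dois = plos_dois(docs)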
def plos_dois(search_results):
'''Turns search results from plos_search into a list of DOIs.'''
return [paper["id"] for paper in search_results]
def remote_XML_retrieval(doi, destination=None):
'''
Given the DOI of a PLOS paper, downloads the XML and parses it using Beautiful Soup.
If you'd like to save the XML as a file, set destination to a filename.
'''
DOI_URL = "http://www.plosone.org/article/fetchObjectAttachment.action?uri=info:doi/"
headers = {"Content-Type": "application/xml"}
r = requests.get(DOI_URL + doi + "&representation=XML")
# Doesn't matter whether it's a PLOS ONE article or not -- this will work for any article in any PLOS journal.
r.encoding = "UTF-8" # This is needed to keep the encoding on the papers correct.
if destination:
with codecs.open(destination, "w", "utf-8") as f:
f.write(r.text)
soup = BeautifulSoup(r.text, features="xml")
return soup
def local_XML_parsing(filename):
'''Opens the given XML file, parses it using Beautiful Soup, and returns the output.'''
f = codecs.open(filename, "r", "utf-8")
soup = BeautifulSoup(f, features = "xml")
f.close()
return soup
def paper_doi(paper):
'''Given a soupified PLOS XML paper, returns that paper's DOI.'''
paper_doi = paper.find("article-id", attrs={"pub-id-type":"doi"}).text
return paper_doi
def dois_of_references(paper, crossref = False):
'''
Returns all the resolvable DOIs for all the references in one paper.
It searches for a DOI inline, then looks elsewhere if that fails.
By default, this function looks at the inline HTML DOIs on the PLOS website for the DOIs if they can't be found inline.
If crossref=True, it uses CrossRef instead.
CrossRef is generally slower, which is why crossref = False by default.
'''
# Get the doi of the given paper.
    source_paper_doi = paper_doi(paper)  # DOI of the citing paper
# Find all the references.
references = paper.find_all("ref")
max_ref_num = len(references)
ref_nums = range(1, max_ref_num + 1)
refs = {i:r.text for i, r in zip(ref_nums, references)}
dois = {}
cr_queries = {}
# Try searching for inline DOIs first.
for i, ref in refs.iteritems():
doimatch = re.search(r"\sdoi:|\sDOI:|\sDoi:|\.doi\.|\.DOI\.", ref)
if doimatch:
rawdoi = ref[doimatch.start():]
try:
doi = rawdoi[rawdoi.index("10."):]
# all DOI's start with 10., see reference here: http://www.doi.org/doi_handbook/2_Numbering.html#2.2
except ValueError:
# if a ValueError is raised, that means the DOI doesn't contain the string '10.' -- which means it's not a valid DOI.
cr_queries[i] = ref
continue # jump to the next reference
# Removing whitespace and anything afterwards.
space = re.search(r"\s", doi)
if space:
doi = doi[:space.start()]
# Removing trailing periods.
if doi[-1] == ".":
doi = doi[:-1]
dois[i] = doi
else:
cr_queries[i] = ref
if crossref:
# Now search for the DOIs on Crossref.
url = "http://search.crossref.org/links"
data = json.dumps(cr_queries.values())
headers = {"Content-Type":"application/json"}
r = requests.post(url, data = data, headers = headers)
if r.json()["query_ok"]:
results = r.json()["results"]
else:
print "There's a problem with the CrossRef DOI search. Check your internet connection and confirm the original paper was properly formatted in the PLOS XML style, then try again."
return None
for i, result in zip(cr_queries.keys(), results):
if result["match"]:
rawdoi = result["doi"]
doi = rawdoi[rawdoi.index("10"):] # CrossRef returns DOIs of the form http://dx.doi/org/10.<whatever>
dois[i] = doi
else:
dois[i] = None
else:
paper_url = "http://www.plosone.org/article/info:doi/" + paper_doi
paper_request = requests.get(paper_url)
paper_html = BeautifulSoup(paper_request.content)
html_reflist = paper_html.find(attrs={"class":"references"})
refnums = html_reflist.findChildren("span", attrs={"class":"label"})
html_references = [r.next_sibling.next_sibling for r in refnums]
for i in cr_queries.iterkeys():
ref = html_references[i-1]
doimatch = re.search(r"\sdoi:|\sDOI:|\sDoi:|\.doi\.|\.DOI\.", ref)
if doimatch:
rawdoi = ref[doimatch.start():]
try:
doi = rawdoi[rawdoi.index("10."):]
# all DOI's start with 10., see reference here: http://www.doi.org/doi_handbook/2_Numbering.html#2.2
except ValueError:
# if a ValueError is raised, that means the DOI doesn't contain the string '10.' -- which means it's not a valid DOI.
dois[i] = None
continue # jump to the next reference
# Removing whitespace and anything afterwards.
space = re.search(r"\s", doi)
if space:
doi = doi[:space.start()]
# Removing trailing periods.
if doi[-1] == ".":
doi = doi[:-1]
dois[i] = doi
else:
dois[i] = None
return dois
# Example usage:
#
# SEARCH_SUBJECT = "circadian rhythms"
# MAX_PAPERS = 500
# search_results = plos_search(SEARCH_SUBJECT, rows = MAX_PAPERS)
# print "Retrieving " + str(len(search_results)) + " papers from PLOS journals..."
# dois = plos_dois(search_results)
# papers = [remote_XML_retrieval(doi) for doi in dois]
# first_paper = papers[0]
# cited_dois = dois_of_references(first_paper)
|
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import random
import unittest
import numpy as np
from PIL import Image, ImageOps
import os
# Pin the job to a single GPU to eliminate the influence of other tasks.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import to_variable, declarative, ProgramTranslator
from paddle.fluid.dygraph.nn import Conv2D, Conv2DTranspose, BatchNorm
# Note: Set True to eliminate randomness.
# 1. For one operation, cuDNN has several algorithms,
# some algorithm results are non-deterministic, like convolution algorithms.
# 2. If the model includes BatchNorm, set `use_global_stats=True` to avoid using
# cudnnBatchNormalizationBackward which is non-deterministic.
if fluid.is_compiled_with_cuda():
fluid.set_flags({'FLAGS_cudnn_deterministic': True})
# set False to speed up training.
use_cudnn = False
step_per_epoch = 10
lambda_A = 10.0
lambda_B = 10.0
lambda_identity = 0.5
# TODO(Aurelius84): Modify it into 256 when we move ut into CE platform.
# It will lead to timeout if set 256 in CI.
IMAGE_SIZE = 64
SEED = 2020
program_translator = ProgramTranslator()
class Cycle_Gan(fluid.dygraph.Layer):
def __init__(self, input_channel, istrain=True):
super(Cycle_Gan, self).__init__()
self.build_generator_resnet_9blocks_a = build_generator_resnet_9blocks(
input_channel)
self.build_generator_resnet_9blocks_b = build_generator_resnet_9blocks(
input_channel)
if istrain:
self.build_gen_discriminator_a = build_gen_discriminator(
input_channel)
self.build_gen_discriminator_b = build_gen_discriminator(
input_channel)
@declarative
def forward(self, input_A, input_B):
"""
Generator of GAN model.
"""
fake_B = self.build_generator_resnet_9blocks_a(input_A)
fake_A = self.build_generator_resnet_9blocks_b(input_B)
cyc_A = self.build_generator_resnet_9blocks_b(fake_B)
cyc_B = self.build_generator_resnet_9blocks_a(fake_A)
diff_A = fluid.layers.abs(
fluid.layers.elementwise_sub(
x=input_A, y=cyc_A))
diff_B = fluid.layers.abs(
fluid.layers.elementwise_sub(
x=input_B, y=cyc_B))
cyc_A_loss = fluid.layers.reduce_mean(diff_A) * lambda_A
cyc_B_loss = fluid.layers.reduce_mean(diff_B) * lambda_B
cyc_loss = cyc_A_loss + cyc_B_loss
fake_rec_A = self.build_gen_discriminator_a(fake_B)
g_A_loss = fluid.layers.reduce_mean(fluid.layers.square(fake_rec_A - 1))
fake_rec_B = self.build_gen_discriminator_b(fake_A)
g_B_loss = fluid.layers.reduce_mean(fluid.layers.square(fake_rec_B - 1))
G = g_A_loss + g_B_loss
idt_A = self.build_generator_resnet_9blocks_a(input_B)
idt_loss_A = fluid.layers.reduce_mean(
fluid.layers.abs(fluid.layers.elementwise_sub(
x=input_B, y=idt_A))) * lambda_B * lambda_identity
idt_B = self.build_generator_resnet_9blocks_b(input_A)
idt_loss_B = fluid.layers.reduce_mean(
fluid.layers.abs(fluid.layers.elementwise_sub(
x=input_A, y=idt_B))) * lambda_A * lambda_identity
idt_loss = fluid.layers.elementwise_add(idt_loss_A, idt_loss_B)
g_loss = cyc_loss + G + idt_loss
return fake_A, fake_B, cyc_A, cyc_B, g_A_loss, g_B_loss, idt_loss_A, idt_loss_B, cyc_A_loss, cyc_B_loss, g_loss
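    # Loss composition implemented above (LSGAN-style): the cycle-consistency
    # terms use an L1 distance scaled by lambda_A/lambda_B, the adversarial
    # terms penalise (D(fake) - 1)^2, and the identity terms keep G(x) close
    # to x, scaled additionally by lambda_identity.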
@declarative
def discriminatorA(self, input_A, input_B):
"""
Discriminator A of GAN model.
"""
rec_B = self.build_gen_discriminator_a(input_A)
fake_pool_rec_B = self.build_gen_discriminator_a(input_B)
return rec_B, fake_pool_rec_B
@declarative
def discriminatorB(self, input_A, input_B):
"""
Discriminator B of GAN model.
"""
rec_A = self.build_gen_discriminator_b(input_A)
fake_pool_rec_A = self.build_gen_discriminator_b(input_B)
return rec_A, fake_pool_rec_A
class build_resnet_block(fluid.dygraph.Layer):
def __init__(self, dim, use_bias=False):
super(build_resnet_block, self).__init__()
self.conv0 = conv2d(
num_channels=dim,
num_filters=dim,
filter_size=3,
stride=1,
stddev=0.02,
use_bias=False)
self.conv1 = conv2d(
num_channels=dim,
num_filters=dim,
filter_size=3,
stride=1,
stddev=0.02,
relu=False,
use_bias=False)
self.dim = dim
def forward(self, inputs):
out_res = fluid.layers.pad2d(inputs, [1, 1, 1, 1], mode="reflect")
out_res = self.conv0(out_res)
out_res = fluid.layers.pad2d(out_res, [1, 1, 1, 1], mode="reflect")
out_res = self.conv1(out_res)
return out_res + inputs
class build_generator_resnet_9blocks(fluid.dygraph.Layer):
def __init__(self, input_channel):
super(build_generator_resnet_9blocks, self).__init__()
self.conv0 = conv2d(
num_channels=input_channel,
num_filters=32,
filter_size=7,
stride=1,
padding=0,
stddev=0.02)
self.conv1 = conv2d(
num_channels=32,
num_filters=64,
filter_size=3,
stride=2,
padding=1,
stddev=0.02)
self.conv2 = conv2d(
num_channels=64,
num_filters=128,
filter_size=3,
stride=2,
padding=1,
stddev=0.02)
self.build_resnet_block_list = []
dim = 128
for i in range(9):
Build_Resnet_Block = self.add_sublayer("generator_%d" % (i + 1),
build_resnet_block(dim))
self.build_resnet_block_list.append(Build_Resnet_Block)
self.deconv0 = DeConv2D(
num_channels=dim,
num_filters=32 * 2,
filter_size=3,
stride=2,
stddev=0.02,
padding=[1, 1],
outpadding=[0, 1, 0, 1], )
self.deconv1 = DeConv2D(
num_channels=32 * 2,
num_filters=32,
filter_size=3,
stride=2,
stddev=0.02,
padding=[1, 1],
outpadding=[0, 1, 0, 1])
self.conv3 = conv2d(
num_channels=32,
num_filters=input_channel,
filter_size=7,
stride=1,
stddev=0.02,
padding=0,
relu=False,
norm=False,
use_bias=True)
def forward(self, inputs):
pad_input = fluid.layers.pad2d(inputs, [3, 3, 3, 3], mode="reflect")
y = self.conv0(pad_input)
y = self.conv1(y)
y = self.conv2(y)
for build_resnet_block_i in self.build_resnet_block_list:
y = build_resnet_block_i(y)
y = self.deconv0(y)
y = self.deconv1(y)
y = fluid.layers.pad2d(y, [3, 3, 3, 3], mode="reflect")
y = self.conv3(y)
y = fluid.layers.tanh(y)
return y
class build_gen_discriminator(fluid.dygraph.Layer):
def __init__(self, input_channel):
super(build_gen_discriminator, self).__init__()
self.conv0 = conv2d(
num_channels=input_channel,
num_filters=64,
filter_size=4,
stride=2,
stddev=0.02,
padding=1,
norm=False,
use_bias=True,
relufactor=0.2)
self.conv1 = conv2d(
num_channels=64,
num_filters=128,
filter_size=4,
stride=2,
stddev=0.02,
padding=1,
relufactor=0.2)
self.conv2 = conv2d(
num_channels=128,
num_filters=IMAGE_SIZE,
filter_size=4,
stride=2,
stddev=0.02,
padding=1,
relufactor=0.2)
self.conv3 = conv2d(
num_channels=IMAGE_SIZE,
num_filters=512,
filter_size=4,
stride=1,
stddev=0.02,
padding=1,
relufactor=0.2)
self.conv4 = conv2d(
num_channels=512,
num_filters=1,
filter_size=4,
stride=1,
stddev=0.02,
padding=1,
norm=False,
relu=False,
use_bias=True)
def forward(self, inputs):
y = self.conv0(inputs)
y = self.conv1(y)
y = self.conv2(y)
y = self.conv3(y)
y = self.conv4(y)
return y
class conv2d(fluid.dygraph.Layer):
"""docstring for Conv2D"""
def __init__(self,
num_channels,
num_filters=64,
filter_size=7,
stride=1,
stddev=0.02,
padding=0,
norm=True,
relu=True,
relufactor=0.0,
use_bias=False):
super(conv2d, self).__init__()
if use_bias == False:
con_bias_attr = False
else:
con_bias_attr = fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.0))
self.conv = Conv2D(
num_channels=num_channels,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
use_cudnn=use_cudnn,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=stddev)),
bias_attr=con_bias_attr)
# Note(Aurelius84): The calculation of GPU kernel in BN is non-deterministic,
        # failure rate is about 1/100 in dev and appears to be higher on the CE platform.
# If on GPU, we disable BN temporarily.
if fluid.is_compiled_with_cuda():
norm = False
if norm:
self.bn = BatchNorm(
use_global_stats=True, # set True to use deterministic algorithm
num_channels=num_filters,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.NormalInitializer(1.0, 0.02)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.0)),
trainable_statistics=True)
self.relufactor = relufactor
self.use_bias = use_bias
self.norm = norm
self.relu = relu
def forward(self, inputs):
conv = self.conv(inputs)
if self.norm:
conv = self.bn(conv)
if self.relu:
conv = fluid.layers.leaky_relu(conv, alpha=self.relufactor)
return conv
class DeConv2D(fluid.dygraph.Layer):
def __init__(self,
num_channels,
num_filters=64,
filter_size=7,
stride=1,
stddev=0.02,
padding=[0, 0],
outpadding=[0, 0, 0, 0],
relu=True,
norm=True,
relufactor=0.0,
use_bias=False):
super(DeConv2D, self).__init__()
if use_bias == False:
de_bias_attr = False
else:
de_bias_attr = fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.0))
self._deconv = Conv2DTranspose(
num_channels,
num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
use_cudnn=use_cudnn,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.NormalInitializer(
loc=0.0, scale=stddev)),
bias_attr=de_bias_attr)
if fluid.is_compiled_with_cuda():
norm = False
if norm:
self.bn = BatchNorm(
use_global_stats=True, # set True to use deterministic algorithm
num_channels=num_filters,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.NormalInitializer(1.0, 0.02)),
bias_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(0.0)),
trainable_statistics=True)
self.outpadding = outpadding
self.relufactor = relufactor
self.use_bias = use_bias
self.norm = norm
self.relu = relu
def forward(self, inputs):
conv = self._deconv(inputs)
conv = fluid.layers.pad2d(
conv, paddings=self.outpadding, mode='constant', pad_value=0.0)
if self.norm:
conv = self.bn(conv)
if self.relu:
conv = fluid.layers.leaky_relu(conv, alpha=self.relufactor)
return conv
class ImagePool(object):
def __init__(self, pool_size=50):
self.pool = []
self.count = 0
self.pool_size = pool_size
def pool_image(self, image):
if self.count < self.pool_size:
self.pool.append(image)
self.count += 1
return image
else:
p = np.random.rand()
if p > 0.5:
random_id = np.random.randint(0, self.pool_size - 1)
temp = self.pool[random_id]
self.pool[random_id] = image
return temp
else:
return image
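# Rough behaviour sketch (not executed here): the pool returns the new fake
# image until it holds `pool_size` images; after that it either returns the
# new image or swaps it for a randomly chosen stored one, each with roughly
# 0.5 probability. train() below uses it to feed the discriminators a mix of
# current and historical fakes.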
def reader_creater():
def reader():
while True:
fake_image = np.uint8(
np.random.random((IMAGE_SIZE + 30, IMAGE_SIZE + 30, 3)) * 255)
image = Image.fromarray(fake_image)
# Resize
image = image.resize((286, 286), Image.BICUBIC)
# RandomCrop
i = np.random.randint(0, 30)
j = np.random.randint(0, 30)
image = image.crop((i, j, i + IMAGE_SIZE, j + IMAGE_SIZE))
# RandomHorizontalFlip
sed = np.random.rand()
if sed > 0.5:
image = ImageOps.mirror(image)
# ToTensor
image = np.array(image).transpose([2, 0, 1]).astype('float32')
image = image / 255.0
# Normalize, mean=[0.5,0.5,0.5], std=[0.5,0.5,0.5]
image = (image - 0.5) / 0.5
yield image
return reader
class Args(object):
epoch = 1
batch_size = 4
image_shape = [3, IMAGE_SIZE, IMAGE_SIZE]
max_images_num = step_per_epoch
log_step = 1
train_step = 3
def optimizer_setting(parameters):
lr = 0.0002
optimizer = fluid.optimizer.Adam(
learning_rate=fluid.layers.piecewise_decay(
boundaries=[
100 * step_per_epoch, 120 * step_per_epoch,
140 * step_per_epoch, 160 * step_per_epoch, 180 * step_per_epoch
],
values=[lr, lr * 0.8, lr * 0.6, lr * 0.4, lr * 0.2, lr * 0.1]),
parameter_list=parameters,
beta1=0.5)
return optimizer
def train(args, to_static):
place = fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() \
else fluid.CPUPlace()
program_translator.enable(to_static)
with fluid.dygraph.guard(place):
max_images_num = args.max_images_num
data_shape = [-1] + args.image_shape
random.seed(SEED)
np.random.seed(SEED)
fluid.default_startup_program().random_seed = SEED
fluid.default_main_program().random_seed = SEED
A_pool = ImagePool()
B_pool = ImagePool()
A_reader = paddle.batch(reader_creater(), args.batch_size)()
B_reader = paddle.batch(reader_creater(), args.batch_size)()
cycle_gan = Cycle_Gan(input_channel=data_shape[1], istrain=True)
t_time = 0
vars_G = cycle_gan.build_generator_resnet_9blocks_a.parameters(
) + cycle_gan.build_generator_resnet_9blocks_b.parameters()
vars_da = cycle_gan.build_gen_discriminator_a.parameters()
vars_db = cycle_gan.build_gen_discriminator_b.parameters()
optimizer1 = optimizer_setting(vars_G)
optimizer2 = optimizer_setting(vars_da)
optimizer3 = optimizer_setting(vars_db)
loss_data = []
for epoch in range(args.epoch):
for batch_id in range(max_images_num):
data_A = next(A_reader)
data_B = next(B_reader)
s_time = time.time()
data_A = np.array(
[data_A[0].reshape(3, IMAGE_SIZE, IMAGE_SIZE)]).astype(
"float32")
data_B = np.array(
[data_B[0].reshape(3, IMAGE_SIZE, IMAGE_SIZE)]).astype(
"float32")
data_A = to_variable(data_A)
data_B = to_variable(data_B)
# optimize the g_A network
fake_A, fake_B, cyc_A, cyc_B, g_A_loss, g_B_loss, idt_loss_A, idt_loss_B, cyc_A_loss, cyc_B_loss, g_loss = cycle_gan(
data_A, data_B)
g_loss.backward()
optimizer1.minimize(g_loss)
cycle_gan.clear_gradients()
fake_pool_B = B_pool.pool_image(fake_B).numpy()
fake_pool_B = np.array(
[fake_pool_B[0].reshape(3, IMAGE_SIZE, IMAGE_SIZE)]).astype(
"float32")
fake_pool_B = to_variable(fake_pool_B)
fake_pool_A = A_pool.pool_image(fake_A).numpy()
fake_pool_A = np.array(
[fake_pool_A[0].reshape(3, IMAGE_SIZE, IMAGE_SIZE)]).astype(
"float32")
fake_pool_A = to_variable(fake_pool_A)
# optimize the d_A network
rec_B, fake_pool_rec_B = cycle_gan.discriminatorA(data_B,
fake_pool_B)
d_loss_A = (fluid.layers.square(fake_pool_rec_B) +
fluid.layers.square(rec_B - 1)) / 2.0
d_loss_A = fluid.layers.reduce_mean(d_loss_A)
d_loss_A.backward()
optimizer2.minimize(d_loss_A)
cycle_gan.clear_gradients()
# optimize the d_B network
rec_A, fake_pool_rec_A = cycle_gan.discriminatorB(data_A,
fake_pool_A)
d_loss_B = (fluid.layers.square(fake_pool_rec_A) +
fluid.layers.square(rec_A - 1)) / 2.0
d_loss_B = fluid.layers.reduce_mean(d_loss_B)
d_loss_B.backward()
optimizer3.minimize(d_loss_B)
cycle_gan.clear_gradients()
# Log generator loss and discriminator loss
cur_batch_loss = [
g_loss, d_loss_A, d_loss_B, g_A_loss, cyc_A_loss,
idt_loss_A, g_B_loss, cyc_B_loss, idt_loss_B
]
cur_batch_loss = [x.numpy()[0] for x in cur_batch_loss]
batch_time = time.time() - s_time
t_time += batch_time
if batch_id % args.log_step == 0:
print(
"batch: {}\t Batch_time_cost: {}\n g_loss: {}\t d_A_loss: {}\t d_B_loss:{}\n g_A_loss: {}\t g_A_cyc_loss: {}\t g_A_idt_loss: {}\n g_B_loss: {}\t g_B_cyc_loss: {}\t g_B_idt_loss: {}".
format(batch_id, batch_time, *cur_batch_loss))
if batch_id > args.train_step:
break
loss_data.append(cur_batch_loss)
return np.array(loss_data)
class TestCycleGANModel(unittest.TestCase):
def setUp(self):
self.args = Args()
def train(self, to_static):
out = train(self.args, to_static)
return out
def test_train(self):
st_out = self.train(to_static=True)
dy_out = self.train(to_static=False)
assert_func = np.allclose
# Note(Aurelius84): Because we disable BN on GPU,
# but here we enhance the check on CPU by `np.array_equal`
# which means the dy_out and st_out shall be exactly same.
if not fluid.is_compiled_with_cuda():
assert_func = np.array_equal
self.assertTrue(
assert_func(dy_out, st_out),
msg="dy_out:\n {}\n st_out:\n{}".format(dy_out, st_out))
if __name__ == "__main__":
unittest.main()
|
|
# -*- test-case-name: twisted.scripts.test.test_tap2deb -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
tap2deb creates Debian packages which wrap .tap files.
"""
import os
import sys
import shutil
import subprocess
from email.utils import formatdate as now
from twisted.python import usage
from twisted.python.filepath import FilePath
class MyOptions(usage.Options):
optFlags = [["unsigned", "u"]]
optParameters = [["tapfile", "t", "twistd.tap"],
["maintainer", "m", "",
"The maintainer's name and email in a specific format: "
"'John Doe <[email protected]>'"],
["protocol", "p", ""],
["description", "e", ""],
["long_description", "l", ""],
["set-version", "V", "1.0"],
["debfile", "d", None],
["type", "y", "tap", "Type of configuration: 'tap', 'xml', "
"'source' or 'python' for .tac files"]]
compData = usage.Completions(
optActions={
"type": usage.CompleteList(["tap", "xml", "source", "python"]),
"debfile": usage.CompleteFiles("*.deb")}
)
def postOptions(self):
if not self["maintainer"]:
raise usage.UsageError("maintainer must be specified.")
type_dict = {
'tap': 'file',
'python': 'python',
'source': 'source',
'xml': 'xml',
}
def run(args=None):
"""
Parses the configuration options in C{args} and runs C{dpkg-buildpackage}
to create a .deb file.
@param args: List of strings representing the C{tap2deb} configuration
options.
@type args: L{list}
"""
try:
config = MyOptions()
config.parseOptions(args)
except usage.error as ue:
sys.exit("%s: %s" % (sys.argv[0], ue))
tapFile = config['tapfile']
baseTapFile = os.path.basename(config['tapfile'])
protocol = (config['protocol'] or os.path.splitext(baseTapFile)[0])
debFile = config['debfile'] or 'twisted-' + protocol
version = config['set-version']
maintainer = config['maintainer']
description = config['description'] or (
'A Twisted-based server for %(protocol)s' % vars())
longDescription = config['long_description'] or\
'Automatically created by tap2deb'
twistdOption = type_dict[config['type']]
date = now()
directory = debFile + '-' + version
pythonVersion = '%s.%s' % sys.version_info[:2]
buildDir = FilePath('.build').child(directory)
if buildDir.exists():
buildDir.remove()
debianDir = buildDir.child('debian')
debianDir.child('source').makedirs()
shutil.copy(tapFile, buildDir.path)
debianDir.child('README.Debian').setContent(
'''This package was auto-generated by tap2deb\n''')
debianDir.child('conffiles').setContent(
'''\
/etc/init.d/%(debFile)s
/etc/default/%(debFile)s
/etc/%(baseTapFile)s
''' % vars())
debianDir.child('default').setContent(
'''\
pidfile=/var/run/%(debFile)s.pid
rundir=/var/lib/%(debFile)s/
file=/etc/%(tapFile)s
logfile=/var/log/%(debFile)s.log
''' % vars())
debianDir.child('init.d').setContent(
'''\
#!/bin/sh
PATH=/sbin:/bin:/usr/sbin:/usr/bin
pidfile=/var/run/%(debFile)s.pid \
rundir=/var/lib/%(debFile)s/ \
file=/etc/%(tapFile)s \
logfile=/var/log/%(debFile)s.log
[ -r /etc/default/%(debFile)s ] && . /etc/default/%(debFile)s
test -x /usr/bin/twistd || exit 0
test -r $file || exit 0
test -r /usr/share/%(debFile)s/package-installed || exit 0
case "$1" in
start)
echo -n "Starting %(debFile)s: twistd"
start-stop-daemon --start --quiet --exec /usr/bin/twistd -- \
--pidfile=$pidfile \
--rundir=$rundir \
--%(twistdOption)s=$file \
--logfile=$logfile
echo "."
;;
stop)
echo -n "Stopping %(debFile)s: twistd"
start-stop-daemon --stop --quiet \
--pidfile $pidfile
echo "."
;;
restart)
$0 stop
$0 start
;;
force-reload)
$0 restart
;;
*)
echo "Usage: /etc/init.d/%(debFile)s {start|stop|restart|force-reload}" >&2
exit 1
;;
esac
exit 0
''' % vars())
debianDir.child('init.d').chmod(0755)
debianDir.child('postinst').setContent(
'''\
#!/bin/sh
update-rc.d %(debFile)s defaults >/dev/null
invoke-rc.d %(debFile)s start
#DEBHELPER#
''' % vars())
debianDir.child('prerm').setContent(
'''\
#!/bin/sh
invoke-rc.d %(debFile)s stop
#DEBHELPER#
''' % vars())
debianDir.child('postrm').setContent(
'''\
#!/bin/sh
if [ "$1" = purge ]; then
update-rc.d %(debFile)s remove >/dev/null
fi
#DEBHELPER#
''' % vars())
debianDir.child('changelog').setContent(
'''\
%(debFile)s (%(version)s) unstable; urgency=low
* Created by tap2deb
-- %(maintainer)s %(date)s
''' % vars())
debianDir.child('control').setContent(
'''\
Source: %(debFile)s
Section: net
Priority: extra
Maintainer: %(maintainer)s
Build-Depends-Indep: debhelper, python (>= 2.6.5-7)
Standards-Version: 3.8.4
XS-Python-Version: current
Package: %(debFile)s
Architecture: all
Depends: ${python:Depends}, python-twisted-core
XB-Python-Version: ${python:Versions}
Description: %(description)s
%(longDescription)s
''' % vars())
debianDir.child('copyright').setContent(
'''\
This package was auto-debianized by %(maintainer)s on
%(date)s
It was auto-generated by tap2deb
Upstream Author(s):
Moshe Zadka <[email protected]> -- tap2deb author
Copyright:
Insert copyright here.
''' % vars())
debianDir.child('dirs').setContent(
'''\
etc/init.d
etc/default
var/lib/%(debFile)s
usr/share/doc/%(debFile)s
usr/share/%(debFile)s
''' % vars())
debianDir.child('rules').setContent(
'''\
#!/usr/bin/make -f
export DH_COMPAT=5
build: build-stamp
build-stamp:
dh_testdir
touch build-stamp
clean:
dh_testdir
dh_testroot
rm -f build-stamp install-stamp
dh_clean
install: install-stamp
install-stamp: build-stamp
dh_testdir
dh_testroot
dh_clean -k
dh_installdirs
# Add here commands to install the package into debian/tmp.
cp %(baseTapFile)s debian/tmp/etc/
cp debian/init.d debian/tmp/etc/init.d/%(debFile)s
cp debian/default debian/tmp/etc/default/%(debFile)s
cp debian/copyright debian/tmp/usr/share/doc/%(debFile)s/
cp debian/README.Debian debian/tmp/usr/share/doc/%(debFile)s/
touch debian/tmp/usr/share/%(debFile)s/package-installed
touch install-stamp
binary-arch: build install
binary-indep: build install
dh_testdir
dh_testroot
dh_strip
dh_compress
dh_installchangelogs
dh_python2
dh_fixperms
dh_installdeb
dh_gencontrol
dh_md5sums
dh_builddeb
source diff:
@echo >&2 'source and diff are obsolete - use dpkg-source -b'; false
binary: binary-indep binary-arch
.PHONY: build clean binary-indep binary-arch binary install
''' % vars())
debianDir.child('rules').chmod(0755)
args = ["dpkg-buildpackage", "-rfakeroot"]
if config['unsigned']:
args = args + ['-uc', '-us']
# Build deb
job = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, cwd=buildDir.path)
stdout, _ = job.communicate()
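# Illustrative invocation sketch (argument values are assumptions):
#
#   run(["--tapfile", "myservice.tap",
#        "--maintainer", "Jane Doe <[email protected]>",
#        "--unsigned"])
#
# This stages the package tree under .build/twisted-myservice-1.0/debian/ and
# runs `dpkg-buildpackage -rfakeroot -uc -us` in that directory.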
|
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
IDLNamespace for PPAPI
This file defines the behavior of the AST namespace which allows for resolving
a symbol as one or more AST nodes given a release or range of releases.
"""
from __future__ import print_function
import sys
from idl_option import GetOption, Option, ParseOptions
from idl_log import ErrOut, InfoOut, WarnOut
from idl_release import IDLRelease, IDLReleaseList
Option('label', 'Use the specified label blocks.', default='Chrome')
Option('namespace_debug', 'Print debug output when adding nodes to a namespace.')
#
# IDLNamespace
#
# IDLNamespace provides a mapping between a symbol name and an IDLReleaseList
# which contains IDLRelease objects. It provides an interface for fetching
# one or more IDLNodes based on a release or range of releases.
#
class IDLNamespace(object):
def __init__(self, parent):
self._name_to_releases = {}
self._parent = parent
def Dump(self):
for name in self._name_to_releases:
InfoOut.Log('NAME=%s' % name)
for cver in self._name_to_releases[name].GetReleases():
InfoOut.Log(' %s' % cver)
InfoOut.Log('')
def FindRelease(self, name, release):
verlist = self._name_to_releases.get(name, None)
if verlist == None:
if self._parent:
return self._parent.FindRelease(name, release)
else:
return None
return verlist.FindRelease(release)
def FindRange(self, name, rmin, rmax):
verlist = self._name_to_releases.get(name, None)
if verlist == None:
if self._parent:
return self._parent.FindRange(name, rmin, rmax)
else:
return []
return verlist.FindRange(rmin, rmax)
def FindList(self, name):
verlist = self._name_to_releases.get(name, None)
if verlist == None:
if self._parent:
return self._parent.FindList(name)
return verlist
def AddNode(self, node):
name = node.GetName()
verlist = self._name_to_releases.setdefault(name,IDLReleaseList())
if GetOption('namespace_debug'):
print("Adding to namespace: %s" % node)
return verlist.AddNode(node)
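#
# Resolution sketch (hypothetical names and releases): after adding a node
# named 'foo' covering releases [1.0, None), FindRelease('foo', 1.5) returns
# that node, FindRange('foo', 0.0, 1.0) returns every node overlapping the
# range, and lookups that miss locally are delegated to the parent namespace.
# The test code below exercises exactly these paths.
#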
#
# Testing Code
#
#
# MockNode
#
# Mocks the IDLNode to support error, warning handling, and string functions.
#
class MockNode(IDLRelease):
def __init__(self, name, rmin, rmax):
self.name = name
self.rmin = rmin
self.rmax = rmax
self.errors = []
self.warns = []
self.properties = {
'NAME': name,
'release': rmin,
'deprecate' : rmax
}
def __str__(self):
return '%s (%s : %s)' % (self.name, self.rmin, self.rmax)
def GetName(self):
return self.name
def Error(self, msg):
if GetOption('release_debug'):
print('Error: %s' % msg)
self.errors.append(msg)
def Warn(self, msg):
if GetOption('release_debug'):
print('Warn: %s' % msg)
self.warns.append(msg)
def GetProperty(self, name):
return self.properties.get(name, None)
errors = 0
#
# DumpFailure
#
# Dumps all the information relevant to an add failure.
def DumpFailure(namespace, node, msg):
global errors
print('\n******************************')
print('Failure: %s %s' % (node, msg))
for warn in node.warns:
print(' WARN: %s' % warn)
for err in node.errors:
print(' ERROR: %s' % err)
print('\n')
namespace.Dump()
print('******************************\n')
errors += 1
# Add expecting no errors or warnings
def AddOkay(namespace, node):
okay = namespace.AddNode(node)
if not okay or node.errors or node.warns:
DumpFailure(namespace, node, 'Expected success')
# Add expecting a specific warning
def AddWarn(namespace, node, msg):
okay = namespace.AddNode(node)
if not okay or node.errors or not node.warns:
DumpFailure(namespace, node, 'Expected warnings')
if msg not in node.warns:
DumpFailure(namespace, node, 'Expected warning: %s' % msg)
# Add expecting a specific error and any number of warnings
def AddError(namespace, node, msg):
okay = namespace.AddNode(node)
if okay or not node.errors:
DumpFailure(namespace, node, 'Expected errors')
if msg not in node.errors:
DumpFailure(namespace, node, 'Expected error: %s' % msg)
print(">>%s<<\n>>%s<<\n" % (node.errors[0], msg))
# Verify that a FindRelease call on the namespace returns the expected node.
def VerifyFindOne(namespace, name, release, node):
global errors
if (namespace.FindRelease(name, release) != node):
print("Failed to find %s as release %f of %s" % (node, release, name))
namespace.Dump()
print("\n")
errors += 1
# Verify that a FindRange call on the namespace returns the set of expected nodes.
def VerifyFindAll(namespace, name, rmin, rmax, nodes):
global errors
out = namespace.FindRange(name, rmin, rmax)
if (out != nodes):
print("Found [%s] instead of[%s] for releases %f to %f of %s" % (' '.join([
str(x) for x in out
]), ' '.join([str(x) for x in nodes]), rmin, rmax, name))
namespace.Dump()
print("\n")
errors += 1
def Main(args):
global errors
ParseOptions(args)
InfoOut.SetConsole(True)
namespace = IDLNamespace(None)
FooXX = MockNode('foo', None, None)
Foo1X = MockNode('foo', 1.0, None)
Foo2X = MockNode('foo', 2.0, None)
Foo3X = MockNode('foo', 3.0, None)
# Verify we succeed with undeprecated adds
AddOkay(namespace, FooXX)
AddOkay(namespace, Foo1X)
AddOkay(namespace, Foo3X)
# Verify we fail to add a node between undeprecated releases
AddError(namespace, Foo2X,
'Overlap in releases: 3.0 vs 2.0 when adding foo (2.0 : None)')
BarXX = MockNode('bar', None, None)
Bar12 = MockNode('bar', 1.0, 2.0)
Bar23 = MockNode('bar', 2.0, 3.0)
Bar34 = MockNode('bar', 3.0, 4.0)
# Verify we succeed with fully qualified releases
namespace = IDLNamespace(namespace)
AddOkay(namespace, BarXX)
AddOkay(namespace, Bar12)
# Verify we warn when detecting a gap
AddWarn(namespace, Bar34, 'Gap in release numbers.')
# Verify we fail when inserting into this gap
# (NOTE: while this could be legal, it is sloppy so we disallow it)
AddError(namespace, Bar23, 'Declarations out of order.')
# Verify local namespace
VerifyFindOne(namespace, 'bar', 0.0, BarXX)
VerifyFindAll(namespace, 'bar', 0.5, 1.5, [BarXX, Bar12])
# Verify the correct release of the object is found recursively
VerifyFindOne(namespace, 'foo', 0.0, FooXX)
VerifyFindOne(namespace, 'foo', 0.5, FooXX)
VerifyFindOne(namespace, 'foo', 1.0, Foo1X)
VerifyFindOne(namespace, 'foo', 1.5, Foo1X)
VerifyFindOne(namespace, 'foo', 3.0, Foo3X)
VerifyFindOne(namespace, 'foo', 100.0, Foo3X)
# Verify the correct range of objects is found
VerifyFindAll(namespace, 'foo', 0.0, 1.0, [FooXX])
VerifyFindAll(namespace, 'foo', 0.5, 1.0, [FooXX])
VerifyFindAll(namespace, 'foo', 1.0, 1.1, [Foo1X])
VerifyFindAll(namespace, 'foo', 0.5, 1.5, [FooXX, Foo1X])
VerifyFindAll(namespace, 'foo', 0.0, 3.0, [FooXX, Foo1X])
VerifyFindAll(namespace, 'foo', 3.0, 100.0, [Foo3X])
FooBar = MockNode('foobar', 1.0, 2.0)
namespace = IDLNamespace(namespace)
AddOkay(namespace, FooBar)
if errors:
print('Test failed with %d errors.' % errors)
else:
print('Passed.')
return errors
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
|
|
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from collections import OrderedDict, defaultdict
from datetime import datetime
from sqlalchemy import Date, Time
from sqlalchemy.event import listens_for
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import joinedload
from sqlalchemy.sql import cast
from werkzeug.datastructures import OrderedMultiDict
from indico.core import signals
from indico.core.db import db
from indico.core.db.sqlalchemy.custom import PyIntEnum
from indico.core.db.sqlalchemy.custom.utcdatetime import UTCDateTime
from indico.core.db.sqlalchemy.links import LinkMixin, LinkType
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.core.db.sqlalchemy.util.queries import limit_groups
from indico.core.errors import NoReportError
from indico.modules.rb.models.reservation_edit_logs import ReservationEditLog
from indico.modules.rb.models.reservation_occurrences import ReservationOccurrence, ReservationOccurrenceState
from indico.modules.rb.models.room_nonbookable_periods import NonBookablePeriod
from indico.modules.rb.notifications.reservations import (notify_cancellation, notify_confirmation, notify_creation,
notify_modification, notify_rejection, notify_reset_approval)
from indico.modules.rb.util import rb_is_admin
from indico.util.date_time import format_date, format_time, now_utc
from indico.util.i18n import _
from indico.util.serializer import Serializer
from indico.util.string import format_repr, return_ascii, to_unicode
from indico.util.struct.enum import IndicoEnum
from indico.web.flask.util import url_for
class ConflictingOccurrences(Exception):
pass
class RepeatFrequency(int, IndicoEnum):
NEVER = 0
DAY = 1
WEEK = 2
MONTH = 3
class RepeatMapping(object):
mapping = {
(RepeatFrequency.NEVER, 0): ('Single reservation', None, 'none'),
(RepeatFrequency.DAY, 1): ('Repeat daily', 0, 'daily'),
(RepeatFrequency.WEEK, 1): ('Repeat once a week', 1, 'weekly'),
(RepeatFrequency.WEEK, 2): ('Repeat once every two weeks', 2, 'everyTwoWeeks'),
(RepeatFrequency.WEEK, 3): ('Repeat once every three weeks', 3, 'everyThreeWeeks'),
(RepeatFrequency.MONTH, 1): ('Repeat every month', 4, 'monthly')
}
@classmethod
def get_message(cls, repeat_frequency, repeat_interval):
# XXX: move this somewhere else
# not translated since it's only used in log messages + emails now
if repeat_frequency == RepeatFrequency.NEVER:
return u'single booking'
elif repeat_frequency == RepeatFrequency.DAY:
return u'daily booking'
elif repeat_frequency == RepeatFrequency.WEEK:
return u'weekly' if repeat_interval == 1 else u'every {} weeks'.format(repeat_interval)
elif repeat_frequency == RepeatFrequency.MONTH:
return u'monthly' if repeat_interval == 1 else u'every {} months'.format(repeat_interval)
@classmethod
def get_short_name(cls, repeat_frequency, repeat_interval):
# for the API
try:
return cls.mapping[(repeat_frequency, repeat_interval)][2]
except KeyError:
# XXX: this is ugly, let's remove it from the API
return 'periodically'
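# Illustrative examples, derived from the mapping above (a sketch, not executed here):
#   RepeatMapping.get_message(RepeatFrequency.WEEK, 2)    -> u'every 2 weeks'
#   RepeatMapping.get_short_name(RepeatFrequency.DAY, 1)  -> 'daily'
#   RepeatMapping.get_short_name(RepeatFrequency.WEEK, 5) -> 'periodically'  (interval not in the mapping)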
class ReservationState(int, IndicoEnum):
pending = 1
accepted = 2
cancelled = 3
rejected = 4
class ReservationLink(LinkMixin, db.Model):
__tablename__ = 'reservation_links'
@declared_attr
def __table_args__(cls):
return auto_table_args(cls, schema='roombooking')
allowed_link_types = {LinkType.event, LinkType.contribution, LinkType.session_block}
events_backref_name = 'all_room_reservation_links'
link_backref_name = 'room_reservation_links'
id = db.Column(
db.Integer,
primary_key=True
)
def __repr__(self):
return format_repr(self, 'id', _rawtext=self.link_repr)
# relationship backrefs:
# - reservation (Reservation.link)
ReservationLink.register_link_events()
class Reservation(Serializer, db.Model):
__tablename__ = 'reservations'
__api_public__ = [
'id', ('start_dt', 'startDT'), ('end_dt', 'endDT'), 'repeat_frequency', 'repeat_interval',
('booked_for_name', 'bookedForName'), ('external_details_url', 'bookingUrl'), ('booking_reason', 'reason'),
('is_accepted', 'isConfirmed'), ('is_accepted', 'isValid'), 'is_cancelled',
'is_rejected', ('location_name', 'location'), ('contact_email', 'booked_for_user_email')
]
@declared_attr
def __table_args__(cls):
return (db.Index('ix_reservations_start_dt_date', cast(cls.start_dt, Date)),
db.Index('ix_reservations_end_dt_date', cast(cls.end_dt, Date)),
db.Index('ix_reservations_start_dt_time', cast(cls.start_dt, Time)),
db.Index('ix_reservations_end_dt_time', cast(cls.end_dt, Time)),
db.CheckConstraint("rejection_reason != ''", 'rejection_reason_not_empty'),
{'schema': 'roombooking'})
id = db.Column(
db.Integer,
primary_key=True
)
created_dt = db.Column(
UTCDateTime,
nullable=False,
default=now_utc
)
start_dt = db.Column(
db.DateTime,
nullable=False,
index=True
)
end_dt = db.Column(
db.DateTime,
nullable=False,
index=True
)
repeat_frequency = db.Column(
PyIntEnum(RepeatFrequency),
nullable=False,
default=RepeatFrequency.NEVER
    ) # never, day, week, month
repeat_interval = db.Column(
db.SmallInteger,
nullable=False,
default=0
) # 1, 2, 3, etc.
booked_for_id = db.Column(
db.Integer,
db.ForeignKey('users.users.id'),
index=True,
nullable=True,
# Must be nullable for legacy data :(
)
booked_for_name = db.Column(
db.String,
nullable=False
)
created_by_id = db.Column(
db.Integer,
db.ForeignKey('users.users.id'),
index=True,
nullable=True,
# Must be nullable for legacy data :(
)
room_id = db.Column(
db.Integer,
db.ForeignKey('roombooking.rooms.id'),
nullable=False,
index=True
)
state = db.Column(
PyIntEnum(ReservationState),
nullable=False,
default=ReservationState.accepted
)
booking_reason = db.Column(
db.Text,
nullable=False
)
rejection_reason = db.Column(
db.String,
nullable=True
)
link_id = db.Column(
db.Integer,
db.ForeignKey('roombooking.reservation_links.id'),
nullable=True,
index=True
)
end_notification_sent = db.Column(
db.Boolean,
nullable=False,
default=False
)
edit_logs = db.relationship(
'ReservationEditLog',
backref='reservation',
cascade='all, delete-orphan',
lazy='dynamic'
)
occurrences = db.relationship(
'ReservationOccurrence',
backref='reservation',
cascade='all, delete-orphan',
lazy='dynamic'
)
#: The user this booking was made for.
#: Assigning a user here also updates `booked_for_name`.
booked_for_user = db.relationship(
'User',
lazy=False,
foreign_keys=[booked_for_id],
backref=db.backref(
'reservations_booked_for',
lazy='dynamic'
)
)
#: The user who created this booking.
created_by_user = db.relationship(
'User',
lazy=False,
foreign_keys=[created_by_id],
backref=db.backref(
'reservations',
lazy='dynamic'
)
)
link = db.relationship(
'ReservationLink',
lazy=True,
backref=db.backref(
'reservation',
uselist=False
)
)
# relationship backrefs:
# - room (Room.reservations)
@hybrid_property
def is_pending(self):
return self.state == ReservationState.pending
@hybrid_property
def is_accepted(self):
return self.state == ReservationState.accepted
@hybrid_property
def is_cancelled(self):
return self.state == ReservationState.cancelled
@hybrid_property
def is_rejected(self):
return self.state == ReservationState.rejected
@hybrid_property
def is_archived(self):
return self.end_dt < datetime.now()
@hybrid_property
def is_repeating(self):
return self.repeat_frequency != RepeatFrequency.NEVER
@property
def contact_email(self):
return self.booked_for_user.email if self.booked_for_user else None
@property
def external_details_url(self):
return url_for('rb.booking_link', booking_id=self.id, _external=True)
@property
def location_name(self):
return self.room.location_name
@property
def repetition(self):
return self.repeat_frequency, self.repeat_interval
@property
def linked_object(self):
return self.link.object if self.link else None
@linked_object.setter
def linked_object(self, obj):
assert self.link is None
self.link = ReservationLink(object=obj)
@property
def event(self):
return self.link.event if self.link else None
@return_ascii
def __repr__(self):
return format_repr(self, 'id', 'room_id', 'start_dt', 'end_dt', 'state', _text=self.booking_reason)
@classmethod
def create_from_data(cls, room, data, user, prebook=None, ignore_admin=False):
"""Creates a new reservation.
:param room: The Room that's being booked.
:param data: A dict containing the booking data, usually from a :class:`NewBookingConfirmForm` instance
:param user: The :class:`.User` who creates the booking.
:param prebook: Instead of determining the booking type from the user's
permissions, always use the given mode.
"""
populate_fields = ('start_dt', 'end_dt', 'repeat_frequency', 'repeat_interval', 'room_id', 'booking_reason')
if data['repeat_frequency'] == RepeatFrequency.NEVER and data['start_dt'].date() != data['end_dt'].date():
raise ValueError('end_dt != start_dt for non-repeating booking')
if prebook is None:
prebook = not room.can_book(user, allow_admin=(not ignore_admin))
if prebook and not room.can_prebook(user, allow_admin=(not ignore_admin)):
raise NoReportError(u'You cannot book this room')
room.check_advance_days(data['end_dt'].date(), user)
room.check_bookable_hours(data['start_dt'].time(), data['end_dt'].time(), user)
reservation = cls()
for field in populate_fields:
if field in data:
setattr(reservation, field, data[field])
reservation.room = room
reservation.booked_for_user = data.get('booked_for_user') or user
reservation.booked_for_name = reservation.booked_for_user.full_name
reservation.state = ReservationState.pending if prebook else ReservationState.accepted
reservation.created_by_user = user
reservation.create_occurrences(True)
if not any(occ.is_valid for occ in reservation.occurrences):
raise NoReportError(_(u'Reservation has no valid occurrences'))
db.session.flush()
signals.rb.booking_created.send(reservation)
notify_creation(reservation)
return reservation
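    # Illustrative call (a sketch; ``room``, ``user`` and the datetime values are
    # assumed to exist and are not defined here). Note that for
    # RepeatFrequency.NEVER the start and end must fall on the same date:
    #
    #   Reservation.create_from_data(room, {
    #       'start_dt': start_dt, 'end_dt': end_dt,
    #       'repeat_frequency': RepeatFrequency.NEVER, 'repeat_interval': 0,
    #       'booking_reason': u'Project meeting',
    #   }, user)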
@staticmethod
def get_with_data(*args, **kwargs):
filters = kwargs.pop('filters', None)
limit = kwargs.pop('limit', None)
offset = kwargs.pop('offset', 0)
order = kwargs.pop('order', Reservation.start_dt)
limit_per_room = kwargs.pop('limit_per_room', False)
occurs_on = kwargs.pop('occurs_on')
if kwargs:
raise ValueError('Unexpected kwargs: {}'.format(kwargs))
query = Reservation.query.options(joinedload(Reservation.room))
if filters:
query = query.filter(*filters)
if occurs_on:
query = query.filter(
Reservation.id.in_(db.session.query(ReservationOccurrence.reservation_id)
.filter(ReservationOccurrence.date.in_(occurs_on),
ReservationOccurrence.is_valid))
)
if limit_per_room and (limit or offset):
query = limit_groups(query, Reservation, Reservation.room_id, order, limit, offset)
query = query.order_by(order, Reservation.created_dt)
if not limit_per_room:
if limit:
query = query.limit(limit)
if offset:
query = query.offset(offset)
result = OrderedDict((r.id, {'reservation': r}) for r in query)
if 'occurrences' in args:
occurrence_data = OrderedMultiDict(db.session.query(ReservationOccurrence.reservation_id,
ReservationOccurrence)
.filter(ReservationOccurrence.reservation_id.in_(result.iterkeys()))
.order_by(ReservationOccurrence.start_dt))
for id_, data in result.iteritems():
data['occurrences'] = occurrence_data.getlist(id_)
return result.values()
@staticmethod
def find_overlapping_with(room, occurrences, skip_reservation_id=None):
return Reservation.find(Reservation.room == room,
Reservation.id != skip_reservation_id,
ReservationOccurrence.is_valid,
ReservationOccurrence.filter_overlap(occurrences),
_join=ReservationOccurrence)
def accept(self, user):
self.state = ReservationState.accepted
self.add_edit_log(ReservationEditLog(user_name=user.full_name, info=['Reservation accepted']))
notify_confirmation(self)
signals.rb.booking_state_changed.send(self)
valid_occurrences = self.occurrences.filter(ReservationOccurrence.is_valid).all()
pre_occurrences = ReservationOccurrence.find_overlapping_with(self.room, valid_occurrences, self.id).all()
for occurrence in pre_occurrences:
if not occurrence.is_valid:
continue
occurrence.reject(user, u'Rejected due to collision with a confirmed reservation')
def reset_approval(self, user):
self.state = ReservationState.pending
notify_reset_approval(self)
self.add_edit_log(ReservationEditLog(user_name=user.full_name, info=['Requiring new approval due to change']))
def cancel(self, user, reason=None, silent=False):
self.state = ReservationState.cancelled
self.rejection_reason = reason or None
criteria = (ReservationOccurrence.is_valid, ReservationOccurrence.is_within_cancel_grace_period)
self.occurrences.filter(*criteria).update({
ReservationOccurrence.state: ReservationOccurrenceState.cancelled,
ReservationOccurrence.rejection_reason: reason
}, synchronize_session='fetch')
signals.rb.booking_state_changed.send(self)
if not silent:
notify_cancellation(self)
        log_msg = u'Reservation cancelled: {}'.format(reason) if reason else u'Reservation cancelled'
self.add_edit_log(ReservationEditLog(user_name=user.full_name, info=[log_msg]))
def reject(self, user, reason, silent=False):
self.state = ReservationState.rejected
self.rejection_reason = reason or None
self.occurrences.filter_by(is_valid=True).update({
ReservationOccurrence.state: ReservationOccurrenceState.rejected,
ReservationOccurrence.rejection_reason: reason
}, synchronize_session='fetch')
signals.rb.booking_state_changed.send(self)
if not silent:
notify_rejection(self)
log_msg = u'Reservation rejected: {}'.format(reason)
self.add_edit_log(ReservationEditLog(user_name=user.full_name, info=[log_msg]))
def add_edit_log(self, edit_log):
self.edit_logs.append(edit_log)
db.session.flush()
def can_accept(self, user, allow_admin=True):
if user is None:
return False
return self.is_pending and self.room.can_moderate(user, allow_admin=allow_admin)
def can_reject(self, user, allow_admin=True):
if user is None:
return False
if self.is_rejected or self.is_cancelled:
return False
return self.room.can_moderate(user, allow_admin=allow_admin)
def can_cancel(self, user, allow_admin=True):
if user is None:
return False
if self.is_rejected or self.is_cancelled or self.is_archived:
return False
is_booked_or_owned_by_user = self.is_owned_by(user) or self.is_booked_for(user)
return is_booked_or_owned_by_user or (allow_admin and rb_is_admin(user))
def can_edit(self, user, allow_admin=True):
if user is None:
return False
if self.is_rejected or self.is_cancelled:
return False
if self.is_archived and not (allow_admin and rb_is_admin(user)):
return False
return self.is_owned_by(user) or self.is_booked_for(user) or self.room.can_manage(user, allow_admin=allow_admin)
def can_delete(self, user, allow_admin=True):
if user is None:
return False
return allow_admin and rb_is_admin(user) and (self.is_cancelled or self.is_rejected)
def create_occurrences(self, skip_conflicts, user=None):
ReservationOccurrence.create_series_for_reservation(self)
db.session.flush()
if user is None:
user = self.created_by_user
# Check for conflicts with nonbookable periods
if not rb_is_admin(user) and not self.room.can_manage(user, permission='override'):
nonbookable_periods = self.room.nonbookable_periods.filter(NonBookablePeriod.end_dt > self.start_dt)
for occurrence in self.occurrences:
if not occurrence.is_valid:
continue
for nbd in nonbookable_periods:
if nbd.overlaps(occurrence.start_dt, occurrence.end_dt):
if not skip_conflicts:
raise ConflictingOccurrences()
occurrence.cancel(user, u'Skipped due to nonbookable date', silent=True, propagate=False)
break
# Check for conflicts with blockings
blocked_rooms = self.room.get_blocked_rooms(*(occurrence.start_dt for occurrence in self.occurrences))
for br in blocked_rooms:
blocking = br.blocking
if blocking.can_override(user, room=self.room):
continue
for occurrence in self.occurrences:
if occurrence.is_valid and blocking.is_active_at(occurrence.start_dt.date()):
# Cancel OUR occurrence
msg = u'Skipped due to collision with a blocking ({})'
occurrence.cancel(user, msg.format(blocking.reason), silent=True, propagate=False)
# Check for conflicts with other occurrences
conflicting_occurrences = self.get_conflicting_occurrences()
for occurrence, conflicts in conflicting_occurrences.iteritems():
if not occurrence.is_valid:
continue
if conflicts['confirmed']:
if not skip_conflicts:
raise ConflictingOccurrences()
# Cancel OUR occurrence
msg = u'Skipped due to collision with {} reservation(s)'
occurrence.cancel(user, msg.format(len(conflicts['confirmed'])), silent=True, propagate=False)
elif conflicts['pending'] and self.is_accepted:
# Reject OTHER occurrences
for conflict in conflicts['pending']:
conflict.reject(user, u'Rejected due to collision with a confirmed reservation')
def find_excluded_days(self):
return self.occurrences.filter(~ReservationOccurrence.is_valid)
def find_overlapping(self):
occurrences = self.occurrences.filter(ReservationOccurrence.is_valid).all()
return Reservation.find_overlapping_with(self.room, occurrences, self.id)
def get_conflicting_occurrences(self):
valid_occurrences = self.occurrences.filter(ReservationOccurrence.is_valid).all()
colliding_occurrences = ReservationOccurrence.find_overlapping_with(self.room, valid_occurrences, self.id).all()
conflicts = defaultdict(lambda: dict(confirmed=[], pending=[]))
for occurrence in valid_occurrences:
for colliding in colliding_occurrences:
if occurrence.overlaps(colliding):
key = 'confirmed' if colliding.reservation.is_accepted else 'pending'
conflicts[occurrence][key].append(colliding)
return conflicts
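    # Shape of the returned mapping (sketch): each valid occurrence of this
    # reservation maps to the colliding occurrences, grouped by the state of the
    # other reservation, e.g.
    #   {<ReservationOccurrence>: {'confirmed': [<occurrence>, ...], 'pending': [...]}}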
def is_booked_for(self, user):
return user is not None and self.booked_for_user == user
def is_owned_by(self, user):
return self.created_by_user == user
def modify(self, data, user):
"""Modifies an existing reservation.
        :param data: A dict containing the booking data, usually from a :class:`ModifyBookingForm` instance.
:param user: The :class:`.User` who modifies the booking.
"""
populate_fields = ('start_dt', 'end_dt', 'repeat_frequency', 'repeat_interval', 'booked_for_user',
'booking_reason')
# fields affecting occurrences
occurrence_fields = {'start_dt', 'end_dt', 'repeat_frequency', 'repeat_interval'}
# fields where date and time are compared separately
date_time_fields = {'start_dt', 'end_dt'}
# fields for the repetition
repetition_fields = {'repeat_frequency', 'repeat_interval'}
# pretty names for logging
field_names = {
'start_dt/date': u"start date",
'end_dt/date': u"end date",
'start_dt/time': u"start time",
'end_dt/time': u"end time",
'repetition': u"booking type",
'booked_for_user': u"'Booked for' user",
'booking_reason': u"booking reason",
}
self.room.check_advance_days(data['end_dt'].date(), user)
self.room.check_bookable_hours(data['start_dt'].time(), data['end_dt'].time(), user)
changes = {}
update_occurrences = False
old_repetition = self.repetition
for field in populate_fields:
if field not in data:
continue
old = getattr(self, field)
new = data[field]
converter = unicode
if old != new:
# Booked for user updates the (redundant) name
if field == 'booked_for_user':
old = self.booked_for_name
new = self.booked_for_name = data[field].full_name
# Apply the change
setattr(self, field, data[field])
# If any occurrence-related field changed we need to recreate the occurrences
if field in occurrence_fields:
update_occurrences = True
# Record change for history entry
if field in date_time_fields:
# The date/time fields create separate entries for the date and time parts
if old.date() != new.date():
changes[field + '/date'] = {'old': old.date(), 'new': new.date(), 'converter': format_date}
if old.time() != new.time():
changes[field + '/time'] = {'old': old.time(), 'new': new.time(), 'converter': format_time}
elif field in repetition_fields:
# Repetition needs special handling since it consists of two fields but they are tied together
# We simply update it whenever we encounter such a change; after the last change we end up with
# the correct change data
changes['repetition'] = {'old': old_repetition,
'new': self.repetition,
'converter': lambda x: RepeatMapping.get_message(*x)}
else:
changes[field] = {'old': old, 'new': new, 'converter': converter}
if not changes:
return False
# Create a verbose log entry for the modification
log = [u'Booking modified']
for field, change in changes.iteritems():
field_title = field_names.get(field, field)
converter = change['converter']
old = to_unicode(converter(change['old']))
new = to_unicode(converter(change['new']))
if not old:
log.append(u"The {} was set to '{}'".format(field_title, new))
elif not new:
log.append(u"The {} was cleared".format(field_title))
else:
log.append(u"The {} was changed from '{}' to '{}'".format(field_title, old, new))
self.edit_logs.append(ReservationEditLog(user_name=user.full_name, info=log))
# Recreate all occurrences if necessary
if update_occurrences:
cols = [col.name for col in ReservationOccurrence.__table__.columns
if not col.primary_key and col.name not in {'start_dt', 'end_dt'}]
old_occurrences = {occ.date: occ for occ in self.occurrences}
self.occurrences.delete(synchronize_session='fetch')
self.create_occurrences(True, user)
db.session.flush()
# Restore rejection data etc. for recreated occurrences
for occurrence in self.occurrences:
old_occurrence = old_occurrences.get(occurrence.date)
# Copy data from old occurrence UNLESS the new one is invalid (e.g. because of collisions)
# Otherwise we'd end up with valid occurrences ignoring collisions!
if old_occurrence and occurrence.is_valid:
for col in cols:
setattr(occurrence, col, getattr(old_occurrence, col))
# Don't cause new notifications for the entire booking in case of daily repetition
if self.repeat_frequency == RepeatFrequency.DAY and all(occ.notification_sent
for occ in old_occurrences.itervalues()):
for occurrence in self.occurrences:
occurrence.notification_sent = True
# Sanity check so we don't end up with an "empty" booking
if not any(occ.is_valid for occ in self.occurrences):
raise NoReportError(_(u'Reservation has no valid occurrences'))
notify_modification(self, changes)
return True
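# Illustrative use of Reservation.modify (a sketch; ``booking``, ``user`` and the
# datetime values are assumed to exist):
#   changed = booking.modify({'start_dt': start_dt, 'end_dt': end_dt,
#                             'booking_reason': u'Updated reason'}, user)
#   # ``changed`` is True if any passed field differed, False otherwise.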
@listens_for(Reservation.booked_for_user, 'set')
def _booked_for_user_set(target, user, *unused):
target.booked_for_name = user.full_name if user else ''
|
|
import unittest
from cartouche.nodes import (Node, Arg, Raises, Except, Returns, Warning,
Note, Yields, Attribute, Usage)
__author__ = 'Robert Smallshire'
class NodeTests(unittest.TestCase):
def test_create_default_node(self):
node = Node()
self.assertEqual(node.indent, 0)
self.assertEqual(node.lines, [])
self.assertIsNone(node.parent)
def test_create_with_indent(self):
node = Node(indent=4)
self.assertEqual(node.indent, 4)
self.assertEqual(node.lines, [])
self.assertIsNone(node.parent)
def test_create_with_lines(self):
node = Node(lines= ['First', 'Second', 'Third'])
self.assertEqual(node.indent, 0)
self.assertEqual(node.lines, ['First', 'Second', 'Third'])
self.assertIsNone(node.parent)
def test_repr(self):
node = Node(5, ['One', 'Two', 'Three'])
actual = repr(node)
expected = "Node(5, ['One', 'Two', 'Three'], children=[])"
self.assertEqual(expected, actual)
def test_add_one_child(self):
node = Node()
child = Node(parent=node)
node.add_child(child)
self.assertIs(node.children[0], child)
def test_add_two_children(self):
node = Node()
child0 = Node(parent=node)
child1 = Node(parent=node)
node.add_child(child0)
node.add_child(child1)
self.assertIs(node.children[0], child0)
self.assertIs(node.children[1], child1)
def test_render_rst_empty(self):
node = Node()
rst = node.render_rst()
self.assertEqual(len(rst), 0)
def test_render_rst_indent(self):
node = Node(indent=4)
rst = node.render_rst()
self.assertEqual(len(rst), 0)
def test_render_rst_lines(self):
node = Node(lines= ['First',
'Second',
'Third'])
rst = node.render_rst()
self.assertEqual(rst, ['First',
'Second',
'Third'])
def test_render_rst_indented_lines(self):
node = Node(indent=3, lines= ['First',
'Second',
'Third'])
rst = node.render_rst()
self.assertEqual(rst, [' First',
' Second',
' Third'])
def test_render_rst_with_child(self):
node = Node(indent=4, lines=["Parent"])
child = Node(indent=8, lines=["Child"], parent=node)
node.add_child(child)
rst = node.render_rst()
self.assertEqual(rst, [' Parent',
' Child'])
def test_render_rst_with_children(self):
node = Node(indent=4, lines=["Parent"])
child_a = Node(indent=8, lines=["ChildA"], parent=node)
node.add_child(child_a)
child_b = Node(indent=6, lines=["ChildB"], parent=node)
node.add_child(child_b)
rst = node.render_rst()
self.assertEqual(rst, [' Parent',
' ChildA',
' ChildB'])
class ArgTests(unittest.TestCase):
def test_create(self):
node = Arg(5, 'foo')
self.assertEqual(node.indent, 5)
self.assertEqual(node.name, 'foo')
self.assertEqual(node.lines, [])
self.assertIsNone(node.parent)
def test_set_type(self):
node = Arg(5, 'foo')
node.type = 'str'
self.assertEqual(node.type, 'str')
def test_add_one_child(self):
node = Arg(5, 'foo')
child = Node(parent=node)
node.add_child(child)
self.assertIs(node.children[0], child)
def test_add_two_children(self):
node = Arg(5, 'foo')
child0 = Node(parent=node)
child1 = Node(parent=node)
node.add_child(child0)
node.add_child(child1)
self.assertIs(node.children[0], child0)
self.assertIs(node.children[1], child1)
def test_repr(self):
node = Arg(5, 'foo')
actual = repr(node)
expected = "Arg('foo', None, children=[])"
self.assertEqual(expected, actual)
def test_render_rst_empty(self):
node = Arg(5, 'bar')
rst = node.render_rst()
self.assertEqual(rst, [' :param bar: ',
''])
def test_render_rst_with_child(self):
node = Arg(5, 'bar')
child = Node(indent=10, lines=["Description"], parent=node)
node.add_child(child)
rst = node.render_rst()
self.assertEqual(rst, [' :param bar: Description',
''])
def test_render_rst_with_children(self):
node = Arg(5, 'bar')
child_a = Node(indent=10, lines=["ChildA"], parent=node)
node.add_child(child_a)
child_b = Node(indent=10, lines=["ChildB"], parent=node)
node.add_child(child_b)
rst = node.render_rst()
self.assertEqual(rst, [' :param bar: ChildA',
' ChildB',
''])
def test_render_rst_with_type(self):
node = Arg(5, 'bar')
node.type = 'str'
rst = node.render_rst()
self.assertEqual(rst, [' :param bar: ',
' :type bar: str',
''])
class RaisesTests(unittest.TestCase):
def test_create_default_node(self):
node = Raises()
self.assertEqual(node.indent, 0)
self.assertEqual(node.lines, [])
self.assertIsNone(node.parent)
def test_create_with_indent(self):
node = Raises(indent=4)
self.assertEqual(node.indent, 4)
self.assertEqual(node.lines, [])
self.assertIsNone(node.parent)
def test_repr(self):
node = Raises(5)
actual = repr(node)
expected = "Raises(5, children=[])"
self.assertEqual(expected, actual)
def test_add_one_child(self):
node = Raises()
child = Node(parent=node)
node.add_child(child)
self.assertIs(node.children[0], child)
def test_add_two_children(self):
node = Raises()
child0 = Node(parent=node)
child1 = Node(parent=node)
node.add_child(child0)
node.add_child(child1)
self.assertIs(node.children[0], child0)
self.assertIs(node.children[1], child1)
def test_render_rst_empty(self):
node = Raises()
rst = node.render_rst()
self.assertEqual(rst, [':raises:',
''])
def test_render_rst_indent(self):
node = Raises(indent=5)
rst = node.render_rst()
self.assertEqual(rst, [' :raises:',
''])
def test_render_rst_with_child(self):
node = Raises(5)
child = Node(indent=10, lines=["Description"], parent=node)
node.add_child(child)
rst = node.render_rst()
self.assertEqual(rst, [' :raises:',
' Description',
''])
def test_render_rst_with_children(self):
node = Raises(5)
child_a = Node(indent=10, lines=["ChildA"], parent=node)
node.add_child(child_a)
child_b = Node(indent=10, lines=["ChildB"], parent=node)
node.add_child(child_b)
rst = node.render_rst()
self.assertEqual(rst, [' :raises:',
' ChildA',
' ChildB',
''])
class ExceptTests(unittest.TestCase):
def test_create(self):
node = Except(5, 'FooError')
self.assertEqual(node.indent, 5)
self.assertEqual(node.type, 'FooError')
self.assertEqual(node.lines, [])
self.assertIsNone(node.parent)
def test_add_one_child(self):
node = Except(5, 'FooError')
child = Node(parent=node)
node.add_child(child)
self.assertIs(node.children[0], child)
def test_add_two_children(self):
node = Except(5, 'FooError')
child0 = Node(parent=node)
child1 = Node(parent=node)
node.add_child(child0)
node.add_child(child1)
self.assertIs(node.children[0], child0)
self.assertIs(node.children[1], child1)
def test_repr(self):
node = Except(5,'FooError')
actual = repr(node)
expected = "Except('FooError', children=[])"
self.assertEqual(expected, actual)
def test_render_rst_empty(self):
node = Except(5, 'FooError')
rst = node.render_rst()
self.assertEqual(rst, [' * FooError - ',
''])
def test_render_rst_indent(self):
node = Except(5, 'FooError')
rst = node.render_rst()
self.assertEqual(rst, [' * FooError - ',
''])
def test_render_rst_with_child(self):
node = Except(5, 'FooError')
child = Node(indent=10, lines=["Description"], parent=node)
node.add_child(child)
rst = node.render_rst()
self.assertEqual(rst, [' * FooError - Description',
''])
def test_render_rst_with_children(self):
node = Except(5, 'FooError')
child_a = Node(indent=10, lines=["ChildA"], parent=node)
node.add_child(child_a)
child_b = Node(indent=10, lines=["ChildB"], parent=node)
node.add_child(child_b)
rst = node.render_rst()
self.assertEqual(rst, [' * FooError - ChildA',
' ChildB',
''])
class ReturnsTests(unittest.TestCase):
def test_create(self):
node = Returns(5)
self.assertEqual(node.indent, 5)
self.assertEqual(node.lines, [])
self.assertIsNone(node.parent)
def test_add_one_child(self):
node = Returns(5)
child = Node(parent=node)
node.add_child(child)
self.assertIs(node.children[0], child)
def test_add_two_children(self):
node = Returns(5)
child0 = Node(parent=node)
child1 = Node(parent=node)
node.add_child(child0)
node.add_child(child1)
self.assertIs(node.children[0], child0)
self.assertIs(node.children[1], child1)
def test_repr(self):
node = Returns(5)
actual = repr(node)
expected = "Returns(5, children=[])"
self.assertEqual(expected, actual)
def test_render_rst_empty(self):
node = Returns(indent=4)
rst = node.render_rst()
self.assertEqual(rst, [' :returns: ',
''])
def test_render_rst_indent(self):
node = Returns(indent=5)
rst = node.render_rst()
self.assertEqual(rst, [' :returns: ',
''])
def test_render_rst_with_child(self):
node = Returns(indent=5)
child = Node(indent=10, lines=["Description"], parent=node)
node.add_child(child)
rst = node.render_rst()
self.assertEqual(rst, [' :returns: Description',
''])
def test_render_rst_with_children(self):
node = Returns(indent=5)
child_a = Node(indent=10, lines=["ChildA"], parent=node)
node.add_child(child_a)
child_b = Node(indent=10, lines=["ChildB"], parent=node)
node.add_child(child_b)
rst = node.render_rst()
self.assertEqual(rst, [' :returns: ChildA',
' ChildB',
''])
class YieldsTests(unittest.TestCase):
def test_create(self):
node = Yields(5)
self.assertEqual(node.indent, 5)
self.assertEqual(node.lines, [])
self.assertIsNone(node.parent)
def test_add_one_child(self):
node = Yields(5)
child = Node(parent=node)
node.add_child(child)
self.assertIs(node.children[0], child)
def test_add_two_children(self):
node = Yields(5)
child0 = Node(parent=node)
child1 = Node(parent=node)
node.add_child(child0)
node.add_child(child1)
self.assertIs(node.children[0], child0)
self.assertIs(node.children[1], child1)
def test_repr(self):
node = Yields(5)
actual = repr(node)
expected = "Yields(5, children=[])"
self.assertEqual(expected, actual)
def test_render_rst_empty(self):
node = Yields(indent=4)
rst = node.render_rst()
self.assertEqual(rst, [' :returns: ',
''])
def test_render_rst_indent(self):
node = Yields(indent=5)
rst = node.render_rst()
self.assertEqual(rst, [' :returns: ',
''])
def test_render_rst_with_child(self):
node = Yields(indent=5)
child = Node(indent=10, lines=["Description"], parent=node)
node.add_child(child)
rst = node.render_rst()
self.assertEqual(rst, [' :returns: Description',
''])
def test_render_rst_with_children(self):
node = Yields(indent=5)
child_a = Node(indent=10, lines=["ChildA"], parent=node)
node.add_child(child_a)
child_b = Node(indent=10, lines=["ChildB"], parent=node)
node.add_child(child_b)
rst = node.render_rst()
self.assertEqual(rst, [' :returns: ChildA',
' ChildB',
''])
class WarningTests(unittest.TestCase):
def test_create(self):
node = Warning(5)
self.assertEqual(node.indent, 5)
self.assertEqual(node.lines, [])
self.assertIsNone(node.parent)
# TODO: test when setting node.line
def test_add_one_child(self):
node = Warning(5)
child = Node(parent=node)
node.add_child(child)
self.assertIs(node.children[0], child)
def test_add_two_children(self):
node = Warning(5)
child0 = Node(parent=node)
child1 = Node(parent=node)
node.add_child(child0)
node.add_child(child1)
self.assertIs(node.children[0], child0)
self.assertIs(node.children[1], child1)
def test_repr(self):
node = Warning(5)
actual = repr(node)
expected = "Warning(5, children=[])"
self.assertEqual(expected, actual)
def test_render_rst_empty(self):
node = Warning(indent=4)
rst = node.render_rst()
self.assertEqual(rst, [' .. warning::',
' '])
def test_render_rst_indent(self):
node = Warning(indent=5)
rst = node.render_rst()
self.assertEqual(rst, [' .. warning::',
' '])
def test_render_rst_with_child(self):
node = Warning(indent=5)
child = Node(indent=10, lines=["Description"], parent=node)
node.add_child(child)
rst = node.render_rst()
self.assertEqual(rst, [' .. warning::',
' ',
' Description',
''])
def test_render_rst_with_children(self):
node = Warning(indent=5)
child_a = Node(indent=10, lines=["ChildA"], parent=node)
node.add_child(child_a)
child_b = Node(indent=12, lines=["ChildB"], parent=node)
node.add_child(child_b)
rst = node.render_rst()
self.assertEqual(rst, [' .. warning::',
' ',
' ChildA',
' ChildB',
''])
class NoteTests(unittest.TestCase):
def test_create(self):
node = Note(5)
self.assertEqual(node.indent, 5)
self.assertEqual(node.lines, [])
self.assertIsNone(node.parent)
# TODO: test when setting node.line
def test_add_one_child(self):
node = Note(5)
child = Node(parent=node)
node.add_child(child)
self.assertIs(node.children[0], child)
def test_add_two_children(self):
node = Note(5)
child0 = Node(parent=node)
child1 = Node(parent=node)
node.add_child(child0)
node.add_child(child1)
self.assertIs(node.children[0], child0)
self.assertIs(node.children[1], child1)
def test_repr(self):
node = Note(5)
actual = repr(node)
expected = "Note(5, children=[])"
self.assertEqual(expected, actual)
def test_render_rst_empty(self):
node = Note(indent=4)
rst = node.render_rst()
self.assertEqual(rst, [' .. note::',
' '])
def test_render_rst_indent(self):
node = Note(indent=5)
rst = node.render_rst()
self.assertEqual(rst, [' .. note::',
' '])
def test_render_rst_with_child(self):
node = Note(indent=5)
child = Node(indent=10, lines=["Description"], parent=node)
node.add_child(child)
rst = node.render_rst()
self.assertEqual(rst, [' .. note::',
' ',
' Description',
''])
def test_render_rst_with_children(self):
node = Note(indent=5)
child_a = Node(indent=10, lines=["ChildA"], parent=node)
node.add_child(child_a)
child_b = Node(indent=12, lines=["ChildB"], parent=node)
node.add_child(child_b)
rst = node.render_rst()
self.assertEqual(rst, [' .. note::',
' ',
' ChildA',
' ChildB',
''])
class AttributeTests(unittest.TestCase):
def test_create(self):
node = Attribute(5, 'foo')
self.assertEqual(node.indent, 5)
self.assertEqual(node.name, 'foo')
self.assertEqual(node.lines, [])
self.assertIsNone(node.parent)
def test_set_type(self):
node = Attribute(5, 'foo')
node.type = 'str'
self.assertEqual(node.type, 'str')
def test_add_one_child(self):
node = Attribute(5, 'foo')
child = Node(parent=node)
node.add_child(child)
self.assertIs(node.children[0], child)
def test_add_two_children(self):
node = Attribute(5, 'foo')
child0 = Node(parent=node)
child1 = Node(parent=node)
node.add_child(child0)
node.add_child(child1)
self.assertIs(node.children[0], child0)
self.assertIs(node.children[1], child1)
def test_repr(self):
node = Attribute(5, 'foo')
actual = repr(node)
expected = "Attribute('foo', None, children=[])"
self.assertEqual(expected, actual)
def test_render_rst_empty(self):
node = Attribute(5, 'bar')
rst = node.render_rst()
self.assertEqual(rst, ['.. py:attribute:: bar',
''])
def test_render_rst_with_child(self):
node = Attribute(5, 'bar')
child = Node(indent=10, lines=["Description"], parent=node)
node.add_child(child)
rst = node.render_rst()
self.assertEqual(rst, ['.. py:attribute:: bar',
'',
' Description',
''])
def test_render_rst_with_children(self):
node = Attribute(5, 'bar')
child_a = Node(indent=10, lines=["ChildA"], parent=node)
node.add_child(child_a)
child_b = Node(indent=10, lines=["ChildB"], parent=node)
node.add_child(child_b)
rst = node.render_rst()
self.assertEqual(rst, ['.. py:attribute:: bar',
'',
' ChildA',
' ChildB',
''])
def test_render_rst_with_type(self):
node = Attribute(5, 'bar')
node.type = 'str'
rst = node.render_rst()
self.assertEqual(rst, ['.. py:attribute:: bar',
'',
' (str)',
''])
def test_render_rst_with_type_and_description(self):
node = Attribute(5, 'bar')
node.type = 'str'
child = Node(indent=10, lines=["Description"], parent=node)
node.add_child(child)
rst = node.render_rst()
self.assertEqual(rst, ['.. py:attribute:: bar',
'',
' (str) Description',
''])
def test_render_rst_with_type_and_multi_line_description(self):
node = Attribute(5, 'bar')
node.type = 'str'
child = Node(indent=10, lines=["Description1", "Description2"], parent=node)
node.add_child(child)
rst = node.render_rst()
self.assertEqual(rst, ['.. py:attribute:: bar',
'',
' (str) Description1',
' Description2',
''])
class UsageTests(unittest.TestCase):
def test_create(self):
node = Usage(5)
self.assertEqual(node.indent, 5)
self.assertEqual(node.lines, [])
self.assertIsNone(node.parent)
def test_add_one_child(self):
node = Usage(5)
child = Node(parent=node)
node.add_child(child)
self.assertIs(node.children[0], child)
def test_add_two_children(self):
node = Usage(5)
child0 = Node(parent=node)
child1 = Node(parent=node)
node.add_child(child0)
node.add_child(child1)
self.assertIs(node.children[0], child0)
self.assertIs(node.children[1], child1)
def test_repr(self):
node = Usage(5)
actual = repr(node)
expected = "Usage(5)"
self.assertEqual(expected, actual)
def test_render_rst_empty(self):
node = Usage(5)
rst = node.render_rst()
self.assertEqual(rst, [])
def test_render_rst_with_code(self):
node = Usage(7)
child_a = Node(indent=10, lines=['print("Hello, World!")'], parent=node)
node.add_child(child_a)
child_b = Node(indent=10, lines=['print("Reticulating splines!")'], parent=node)
node.add_child(child_b)
rst = node.render_rst()
self.assertEqual(rst, [' .. rubric:: Usage:',
'',
' .. code-block:: python',
'',
' print("Hello, World!")',
' print("Reticulating splines!")',
''])
def test_render_rst_with_indented_code(self):
node = Usage(5)
child_a = Node(indent=10, lines=['for i in range(100):'], parent=node)
node.add_child(child_a)
child_b = Node(indent=14, lines=['print(i)'], parent=node)
node.add_child(child_b)
rst = node.render_rst()
self.assertEqual(rst, [' .. rubric:: Usage:',
'',
' .. code-block:: python',
'',
' for i in range(100):',
' print(i)',
''])
|
|
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os
from unittest import mock
import ddt
from oslo_utils import timeutils
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.tests.unit import utils as test_utils
from cinder.volume.drivers import remotefs
from cinder.volume.drivers.windows import smbfs
@ddt.ddt
class WindowsSmbFsTestCase(test.TestCase):
_FAKE_SHARE = '//1.2.3.4/share1'
_FAKE_SHARE_HASH = 'db0bf952c1734092b83e8990bd321131'
_FAKE_MNT_BASE = r'c:\openstack\mnt'
_FAKE_MNT_POINT = os.path.join(_FAKE_MNT_BASE, _FAKE_SHARE_HASH)
_FAKE_VOLUME_ID = '4f711859-4928-4cb7-801a-a50c37ceaccc'
_FAKE_VOLUME_NAME = 'volume-%s.vhdx' % _FAKE_VOLUME_ID
_FAKE_SNAPSHOT_ID = '50811859-4928-4cb7-801a-a50c37ceacba'
_FAKE_SNAPSHOT_NAME = 'volume-%s-%s.vhdx' % (_FAKE_VOLUME_ID,
_FAKE_SNAPSHOT_ID)
_FAKE_SNAPSHOT_PATH = os.path.join(_FAKE_MNT_POINT,
_FAKE_SNAPSHOT_NAME)
_FAKE_VOLUME_SIZE = 1
_FAKE_TOTAL_SIZE = 2048
_FAKE_TOTAL_AVAILABLE = 1024
_FAKE_TOTAL_ALLOCATED = 1024
_FAKE_SHARE_OPTS = '-o username=Administrator,password=12345'
_FAKE_VOLUME_PATH = os.path.join(_FAKE_MNT_POINT,
_FAKE_VOLUME_NAME)
@mock.patch.object(smbfs, 'utilsfactory')
@mock.patch.object(smbfs, 'remotefs_brick')
def setUp(self, mock_remotefs, mock_utilsfactory):
super(WindowsSmbFsTestCase, self).setUp()
self.context = context.get_admin_context()
self._FAKE_SMBFS_CONFIG = mock.MagicMock(
smbfs_shares_config=mock.sentinel.share_config_file,
smbfs_default_volume_format='vhdx',
nas_volume_prov_type='thin')
self._smbfs_driver = smbfs.WindowsSmbfsDriver(
configuration=self._FAKE_SMBFS_CONFIG)
self._smbfs_driver._delete = mock.Mock()
self._smbfs_driver._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
self._smbfs_driver.base = self._FAKE_MNT_BASE
self._diskutils = self._smbfs_driver._diskutils
self._vhdutils = self._smbfs_driver._vhdutils
self.volume = self._simple_volume()
self.snapshot = self._simple_snapshot(volume=self.volume)
self._context = context.get_admin_context()
self.updated_at = timeutils.utcnow()
def _simple_volume(self, **kwargs):
updates = {'id': self._FAKE_VOLUME_ID,
'size': self._FAKE_VOLUME_SIZE,
'provider_location': self._FAKE_SHARE}
updates.update(kwargs)
ctxt = context.get_admin_context()
volume = test_utils.create_volume(ctxt, **updates)
return volume
def _simple_snapshot(self, **kwargs):
volume = kwargs.pop('volume', None) or self._simple_volume()
ctxt = context.get_admin_context()
updates = {'id': self._FAKE_SNAPSHOT_ID,
'volume_id': volume.id}
updates.update(kwargs)
snapshot = test_utils.create_snapshot(ctxt, **updates)
return snapshot
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_check_os_platform')
@mock.patch.object(remotefs.RemoteFSSnapDriverDistributed, 'do_setup')
@mock.patch('os.path.exists')
@mock.patch('os.path.isabs')
@mock.patch.object(image_utils, 'check_qemu_img_version')
def _test_setup(self, mock_check_qemu_img_version,
mock_is_abs, mock_exists,
mock_remotefs_do_setup,
mock_check_os_platform,
config, share_config_exists=True):
mock_exists.return_value = share_config_exists
fake_ensure_mounted = mock.MagicMock()
self._smbfs_driver._ensure_shares_mounted = fake_ensure_mounted
self._smbfs_driver._setup_pool_mappings = mock.Mock()
self._smbfs_driver.configuration = config
if not (config.smbfs_shares_config and share_config_exists):
self.assertRaises(smbfs.SmbfsException,
self._smbfs_driver.do_setup,
mock.sentinel.context)
else:
self._smbfs_driver.do_setup(mock.sentinel.context)
mock_check_qemu_img_version.assert_called_once_with(
self._smbfs_driver._MINIMUM_QEMU_IMG_VERSION)
mock_is_abs.assert_called_once_with(self._smbfs_driver.base)
self.assertEqual({}, self._smbfs_driver.shares)
fake_ensure_mounted.assert_called_once_with()
self._smbfs_driver._setup_pool_mappings.assert_called_once_with()
self.assertTrue(self._smbfs_driver._thin_provisioning_support)
mock_check_os_platform.assert_called_once_with()
def test_setup_pools(self):
pool_mappings = {
'//ip/share0': 'pool0',
'//ip/share1': 'pool1',
}
self._smbfs_driver.configuration.smbfs_pool_mappings = pool_mappings
self._smbfs_driver.shares = {
'//ip/share0': None,
'//ip/share1': None,
'//ip/share2': None
}
expected_pool_mappings = pool_mappings.copy()
expected_pool_mappings['//ip/share2'] = 'share2'
self._smbfs_driver._setup_pool_mappings()
self.assertEqual(expected_pool_mappings,
self._smbfs_driver._pool_mappings)
def test_setup_pool_duplicates(self):
self._smbfs_driver.configuration.smbfs_pool_mappings = {
'share0': 'pool0',
'share1': 'pool0'
}
self.assertRaises(smbfs.SmbfsException,
self._smbfs_driver._setup_pool_mappings)
def test_initialize_connection(self):
self._smbfs_driver.get_active_image_from_info = mock.Mock(
return_value=self._FAKE_VOLUME_NAME)
self._smbfs_driver._get_mount_point_base = mock.Mock(
return_value=self._FAKE_MNT_BASE)
self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
self._smbfs_driver.get_volume_format = mock.Mock(
return_value=mock.sentinel.format)
fake_data = {'export': self._FAKE_SHARE,
'format': mock.sentinel.format,
'name': self._FAKE_VOLUME_NAME,
'options': self._FAKE_SHARE_OPTS}
expected = {
'driver_volume_type': 'smbfs',
'data': fake_data,
'mount_point_base': self._FAKE_MNT_BASE}
ret_val = self._smbfs_driver.initialize_connection(
self.volume, None)
self.assertEqual(expected, ret_val)
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_snapshot_backing_file')
@mock.patch.object(smbfs.WindowsSmbfsDriver, 'get_volume_format')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_mount_point_base')
def test_initialize_connection_snapshot(self, mock_get_mount_base,
mock_get_volume_format,
mock_get_snap_by_backing_file):
self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
mock_get_snap_by_backing_file.return_value = self._FAKE_VOLUME_NAME
mock_get_volume_format.return_value = 'vhdx'
mock_get_mount_base.return_value = self._FAKE_MNT_BASE
exp_data = {'export': self._FAKE_SHARE,
'format': 'vhdx',
'name': self._FAKE_VOLUME_NAME,
'options': self._FAKE_SHARE_OPTS,
'access_mode': 'ro'}
expected = {
'driver_volume_type': 'smbfs',
'data': exp_data,
'mount_point_base': self._FAKE_MNT_BASE}
ret_val = self._smbfs_driver.initialize_connection_snapshot(
self.snapshot, mock.sentinel.connector)
self.assertEqual(expected, ret_val)
mock_get_snap_by_backing_file.assert_called_once_with(self.snapshot)
mock_get_volume_format.assert_called_once_with(self.snapshot.volume)
mock_get_mount_base.assert_called_once_with()
def test_setup(self):
self._test_setup(config=self._FAKE_SMBFS_CONFIG)
def test_setup_missing_shares_config_option(self):
fake_config = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_config.smbfs_shares_config = None
self._test_setup(config=fake_config,
share_config_exists=False)
def test_setup_missing_shares_config_file(self):
self._test_setup(config=self._FAKE_SMBFS_CONFIG,
share_config_exists=False)
@mock.patch.object(smbfs, 'context')
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_get_pool_name_from_share')
def test_get_total_allocated(self, mock_get_pool_name, mock_ctxt):
fake_pool_name = 'pool0'
fake_host_name = 'fake_host@fake_backend'
fake_vol_sz_sum = 5
mock_db = mock.Mock()
mock_db.volume_data_get_for_host.return_value = [
mock.sentinel.vol_count, fake_vol_sz_sum]
self._smbfs_driver.host = fake_host_name
self._smbfs_driver.db = mock_db
mock_get_pool_name.return_value = fake_pool_name
allocated = self._smbfs_driver._get_total_allocated(
mock.sentinel.share)
self.assertEqual(fake_vol_sz_sum << 30,
allocated)
mock_get_pool_name.assert_called_once_with(mock.sentinel.share)
mock_db.volume_data_get_for_host.assert_called_once_with(
context=mock_ctxt.get_admin_context.return_value,
host='fake_host@fake_backend#pool0')
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_get_local_volume_path_template')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_lookup_local_volume_path')
@mock.patch.object(smbfs.WindowsSmbfsDriver, 'get_volume_format')
def _test_get_volume_path(self, mock_get_volume_format, mock_lookup_volume,
mock_get_path_template, volume_exists=True):
drv = self._smbfs_driver
(mock_get_path_template.return_value,
ext) = os.path.splitext(self._FAKE_VOLUME_PATH)
volume_format = ext.strip('.')
mock_lookup_volume.return_value = (
self._FAKE_VOLUME_PATH if volume_exists else None)
mock_get_volume_format.return_value = volume_format
ret_val = drv.local_path(self.volume)
if volume_exists:
self.assertFalse(mock_get_volume_format.called)
else:
mock_get_volume_format.assert_called_once_with(self.volume)
self.assertEqual(self._FAKE_VOLUME_PATH, ret_val)
def test_get_existing_volume_path(self):
self._test_get_volume_path()
def test_get_new_volume_path(self):
self._test_get_volume_path(volume_exists=False)
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_local_volume_dir')
def test_get_local_volume_path_template(self, mock_get_local_dir):
mock_get_local_dir.return_value = self._FAKE_MNT_POINT
ret_val = self._smbfs_driver._get_local_volume_path_template(
self.volume)
exp_template = os.path.splitext(self._FAKE_VOLUME_PATH)[0]
self.assertEqual(exp_template, ret_val)
@mock.patch('os.path.exists')
def test_lookup_local_volume_path(self, mock_exists):
expected_path = self._FAKE_VOLUME_PATH + '.vhdx'
mock_exists.side_effect = lambda x: x == expected_path
ret_val = self._smbfs_driver._lookup_local_volume_path(
self._FAKE_VOLUME_PATH)
extensions = [
".%s" % ext
for ext in self._smbfs_driver._VALID_IMAGE_EXTENSIONS]
possible_paths = [self._FAKE_VOLUME_PATH + ext
for ext in extensions]
mock_exists.assert_has_calls(
[mock.call(path) for path in possible_paths])
self.assertEqual(expected_path, ret_val)
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_get_local_volume_path_template')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_lookup_local_volume_path')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_volume_format_spec')
def _test_get_volume_format(self, mock_get_format_spec,
mock_lookup_volume, mock_get_path_template,
qemu_format=False, volume_format='vhdx',
expected_vol_fmt=None,
volume_exists=True):
expected_vol_fmt = expected_vol_fmt or volume_format
vol_path = '%s.%s' % (os.path.splitext(self._FAKE_VOLUME_PATH)[0],
volume_format)
mock_get_path_template.return_value = vol_path
mock_lookup_volume.return_value = (
vol_path if volume_exists else None)
mock_get_format_spec.return_value = volume_format
supported_fmts = self._smbfs_driver._SUPPORTED_IMAGE_FORMATS
if volume_format.lower() not in supported_fmts:
self.assertRaises(smbfs.SmbfsException,
self._smbfs_driver.get_volume_format,
self.volume,
qemu_format)
else:
ret_val = self._smbfs_driver.get_volume_format(self.volume,
qemu_format)
if volume_exists:
self.assertFalse(mock_get_format_spec.called)
else:
mock_get_format_spec.assert_called_once_with(self.volume)
self.assertEqual(expected_vol_fmt, ret_val)
def test_get_volume_format_invalid_extension(self):
self._test_get_volume_format(volume_format='fake')
def test_get_existing_vhdx_volume_format(self):
self._test_get_volume_format()
def test_get_new_vhd_volume_format(self):
fmt = 'vhd'
self._test_get_volume_format(volume_format=fmt,
volume_exists=False,
expected_vol_fmt=fmt)
def test_get_new_vhd_legacy_volume_format(self):
img_fmt = 'vhd'
expected_fmt = 'vpc'
self._test_get_volume_format(volume_format=img_fmt,
volume_exists=False,
qemu_format=True,
expected_vol_fmt=expected_fmt)
@ddt.data([False, False],
[True, True],
[False, True])
@ddt.unpack
def test_get_volume_format_spec(self,
volume_meta_contains_fmt,
volume_type_contains_fmt):
self._smbfs_driver.configuration = copy.copy(self._FAKE_SMBFS_CONFIG)
fake_vol_meta_fmt = 'vhd'
fake_vol_type_fmt = 'vhdx'
volume_metadata = {}
volume_type_extra_specs = {}
if volume_meta_contains_fmt:
volume_metadata['volume_format'] = fake_vol_meta_fmt
elif volume_type_contains_fmt:
volume_type_extra_specs['smbfs:volume_format'] = fake_vol_type_fmt
volume_type = fake_volume.fake_volume_type_obj(self.context)
volume = fake_volume.fake_volume_obj(self.context)
        # Optional arguments are not set in _from_db_object,
        # so they have to be set explicitly here
volume.volume_type = volume_type
volume.metadata = volume_metadata
# Same for extra_specs and VolumeType
volume_type.extra_specs = volume_type_extra_specs
resulted_fmt = self._smbfs_driver._get_volume_format_spec(volume)
if volume_meta_contains_fmt:
expected_fmt = fake_vol_meta_fmt
elif volume_type_contains_fmt:
expected_fmt = fake_vol_type_fmt
else:
expected_fmt = self._FAKE_SMBFS_CONFIG.smbfs_default_volume_format
self.assertEqual(expected_fmt, resulted_fmt)
@mock.patch.object(remotefs.RemoteFSSnapDriverDistributed,
'create_volume')
def test_create_volume_base(self, mock_create_volume):
self._smbfs_driver.create_volume(self.volume)
mock_create_volume.assert_called_once_with(self.volume)
@mock.patch('os.path.exists')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_vhd_type')
def _test_create_volume(self, mock_get_vhd_type, mock_exists,
volume_exists=False, volume_format='vhdx'):
mock_exists.return_value = volume_exists
self._smbfs_driver.create_vhd = mock.MagicMock()
fake_create = self._smbfs_driver._vhdutils.create_vhd
self._smbfs_driver.get_volume_format = mock.Mock(
return_value=volume_format)
if volume_exists or volume_format not in ('vhd', 'vhdx'):
self.assertRaises(exception.InvalidVolume,
self._smbfs_driver._do_create_volume,
self.volume)
else:
fake_vol_path = self._FAKE_VOLUME_PATH
self._smbfs_driver._do_create_volume(self.volume)
fake_create.assert_called_once_with(
fake_vol_path, mock_get_vhd_type.return_value,
max_internal_size=self.volume.size << 30,
guid=self.volume.id)
def test_create_volume(self):
self._test_create_volume()
def test_create_existing_volume(self):
        self._test_create_volume(volume_exists=True)
def test_create_volume_invalid_volume(self):
self._test_create_volume(volume_format="qcow")
def test_delete_volume(self):
drv = self._smbfs_driver
fake_vol_info = self._FAKE_VOLUME_PATH + '.info'
drv._ensure_share_mounted = mock.MagicMock()
fake_ensure_mounted = drv._ensure_share_mounted
drv._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
drv.get_active_image_from_info = mock.Mock(
return_value=self._FAKE_VOLUME_NAME)
drv._delete = mock.Mock()
drv._local_path_volume_info = mock.Mock(
return_value=fake_vol_info)
with mock.patch('os.path.exists', lambda x: True):
drv.delete_volume(self.volume)
fake_ensure_mounted.assert_called_once_with(self._FAKE_SHARE)
drv._delete.assert_any_call(
self._FAKE_VOLUME_PATH)
drv._delete.assert_any_call(fake_vol_info)
def test_ensure_mounted(self):
self._smbfs_driver.shares = {self._FAKE_SHARE: self._FAKE_SHARE_OPTS}
self._smbfs_driver._ensure_share_mounted(self._FAKE_SHARE)
self._smbfs_driver._remotefsclient.mount.assert_called_once_with(
self._FAKE_SHARE, self._FAKE_SHARE_OPTS)
def test_get_capacity_info(self):
self._diskutils.get_disk_capacity.return_value = (
self._FAKE_TOTAL_SIZE, self._FAKE_TOTAL_AVAILABLE)
self._smbfs_driver._get_mount_point_for_share = mock.Mock(
return_value=mock.sentinel.mnt_point)
self._smbfs_driver._get_total_allocated = mock.Mock(
return_value=self._FAKE_TOTAL_ALLOCATED)
ret_val = self._smbfs_driver._get_capacity_info(self._FAKE_SHARE)
expected_ret_val = [int(x) for x in [self._FAKE_TOTAL_SIZE,
self._FAKE_TOTAL_AVAILABLE,
self._FAKE_TOTAL_ALLOCATED]]
self.assertEqual(expected_ret_val, ret_val)
self._smbfs_driver._get_mount_point_for_share.assert_called_once_with(
self._FAKE_SHARE)
self._diskutils.get_disk_capacity.assert_called_once_with(
mock.sentinel.mnt_point)
self._smbfs_driver._get_total_allocated.assert_called_once_with(
self._FAKE_SHARE)
def _test_get_img_info(self, backing_file=None):
self._smbfs_driver._vhdutils.get_vhd_parent_path.return_value = (
backing_file)
image_info = self._smbfs_driver._qemu_img_info(self._FAKE_VOLUME_PATH)
self.assertEqual(self._FAKE_VOLUME_NAME,
image_info.image)
backing_file_name = backing_file and os.path.basename(backing_file)
self.assertEqual(backing_file_name, image_info.backing_file)
def test_get_img_info_without_backing_file(self):
self._test_get_img_info()
def test_get_snapshot_info(self):
self._test_get_img_info(self._FAKE_VOLUME_PATH)
@ddt.data('attached', 'detached')
def test_create_snapshot(self, attach_status):
self.snapshot.volume.attach_status = attach_status
self.snapshot.volume.save()
self._smbfs_driver._vhdutils.create_differencing_vhd = (
mock.Mock())
self._smbfs_driver._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
fake_create_diff = (
self._smbfs_driver._vhdutils.create_differencing_vhd)
self._smbfs_driver._do_create_snapshot(
self.snapshot,
os.path.basename(self._FAKE_VOLUME_PATH),
self._FAKE_SNAPSHOT_PATH)
if attach_status != 'attached':
fake_create_diff.assert_called_once_with(self._FAKE_SNAPSHOT_PATH,
self._FAKE_VOLUME_PATH)
else:
fake_create_diff.assert_not_called()
self.assertEqual(os.path.basename(self._FAKE_VOLUME_PATH),
self.snapshot.metadata['backing_file'])
# Ensure that the changes have been saved.
self.assertFalse(bool(self.snapshot.obj_what_changed()))
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_check_extend_volume_support')
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_local_path_active_image')
def test_extend_volume(self, mock_get_active_img,
mock_check_ext_support):
volume = fake_volume.fake_volume_obj(self.context)
new_size = volume.size + 1
self._smbfs_driver.extend_volume(volume, new_size)
mock_check_ext_support.assert_called_once_with(volume, new_size)
mock_get_active_img.assert_called_once_with(volume)
self._vhdutils.resize_vhd.assert_called_once_with(
mock_get_active_img.return_value,
new_size * units.Gi,
is_file_max_size=False)
@ddt.data({'snapshots_exist': True},
{'vol_fmt': smbfs.WindowsSmbfsDriver._DISK_FORMAT_VHD,
'snapshots_exist': True,
'expected_exc': exception.InvalidVolume})
@ddt.unpack
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'get_volume_format')
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_snapshots_exist')
def test_check_extend_support(self, mock_snapshots_exist,
mock_get_volume_format,
vol_fmt=None, snapshots_exist=False,
share_eligible=True,
expected_exc=None):
vol_fmt = vol_fmt or self._smbfs_driver._DISK_FORMAT_VHDX
volume = fake_volume.fake_volume_obj(
self.context, provider_location='fake_provider_location')
new_size = volume.size + 1
mock_snapshots_exist.return_value = snapshots_exist
mock_get_volume_format.return_value = vol_fmt
if expected_exc:
self.assertRaises(expected_exc,
self._smbfs_driver._check_extend_volume_support,
volume, new_size)
else:
self._smbfs_driver._check_extend_volume_support(volume, new_size)
mock_get_volume_format.assert_called_once_with(volume)
mock_snapshots_exist.assert_called_once_with(volume)
@ddt.data({},
{'delete_latest': True},
{'attach_status': 'detached'},
{'snap_info_contains_snap_id': False})
@ddt.unpack
@mock.patch.object(remotefs.RemoteFSSnapDriverDistributed,
'_delete_snapshot')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_local_volume_dir')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_local_path_volume_info')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_write_info_file')
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_read_info_file')
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_nova_assisted_vol_snap_delete')
@mock.patch.object(smbfs.WindowsSmbfsDriver,
'_get_snapshot_by_backing_file')
def test_delete_snapshot(self, mock_get_snap_by_backing_file,
mock_nova_assisted_snap_del,
mock_read_info_file, mock_write_info_file,
mock_local_path_volume_info,
mock_get_local_dir,
mock_remotefs_snap_delete,
attach_status='attached',
snap_info_contains_snap_id=True,
delete_latest=False):
self.snapshot.volume.attach_status = attach_status
self.snapshot.metadata['backing_file'] = os.path.basename(
self._FAKE_VOLUME_PATH)
higher_snapshot = self._simple_snapshot(id=None,
volume=self.volume)
fake_snap_file = 'snap_file'
fake_snap_parent_path = os.path.join(self._FAKE_MNT_POINT,
'snap_file_parent')
active_img = 'active_img' if not delete_latest else fake_snap_file
snap_info = dict(active=active_img)
if snap_info_contains_snap_id:
snap_info[self.snapshot.id] = fake_snap_file
mock_get_snap_by_backing_file.return_value = (
higher_snapshot if not delete_latest else None)
mock_info_path = mock_local_path_volume_info.return_value
mock_read_info_file.return_value = snap_info
mock_get_local_dir.return_value = self._FAKE_MNT_POINT
self._vhdutils.get_vhd_parent_path.return_value = (
fake_snap_parent_path)
expected_delete_info = {'file_to_merge': fake_snap_file,
'volume_id': self.snapshot.volume.id}
self._smbfs_driver._delete_snapshot(self.snapshot)
if attach_status != 'attached':
mock_remotefs_snap_delete.assert_called_once_with(self.snapshot)
elif snap_info_contains_snap_id:
mock_local_path_volume_info.assert_called_once_with(
self.snapshot.volume)
mock_read_info_file.assert_called_once_with(
mock_info_path, empty_if_missing=True)
mock_nova_assisted_snap_del.assert_called_once_with(
self.snapshot._context, self.snapshot, expected_delete_info)
exp_merged_img_path = os.path.join(self._FAKE_MNT_POINT,
fake_snap_file)
self._smbfs_driver._delete.assert_called_once_with(
exp_merged_img_path)
if delete_latest:
self._vhdutils.get_vhd_parent_path.assert_called_once_with(
exp_merged_img_path)
exp_active = os.path.basename(fake_snap_parent_path)
else:
exp_active = active_img
self.assertEqual(exp_active, snap_info['active'])
            self.assertNotIn(self.snapshot.id, snap_info)
mock_write_info_file.assert_called_once_with(mock_info_path,
snap_info)
if attach_status != 'attached' or not snap_info_contains_snap_id:
mock_nova_assisted_snap_del.assert_not_called()
mock_write_info_file.assert_not_called()
if not delete_latest and snap_info_contains_snap_id:
self.assertEqual(os.path.basename(self._FAKE_VOLUME_PATH),
higher_snapshot.metadata['backing_file'])
self.assertFalse(bool(higher_snapshot.obj_what_changed()))
@ddt.data(True, False)
def test_get_snapshot_by_backing_file(self, metadata_set):
backing_file = 'fake_backing_file'
if metadata_set:
self.snapshot.metadata['backing_file'] = backing_file
self.snapshot.save()
for idx in range(2):
# We're adding a few other snapshots.
self._simple_snapshot(id=None,
volume=self.volume)
snapshot = self._smbfs_driver._get_snapshot_by_backing_file(
self.volume, backing_file)
if metadata_set:
self.assertEqual(self.snapshot.id, snapshot.id)
else:
self.assertIsNone(snapshot)
@ddt.data(True, False)
@mock.patch.object(remotefs.RemoteFSSnapDriverDistributed,
'_get_snapshot_backing_file')
def test_get_snapshot_backing_file_md_set(self, md_set,
remotefs_get_backing_file):
backing_file = 'fake_backing_file'
if md_set:
self.snapshot.metadata['backing_file'] = backing_file
ret_val = self._smbfs_driver._get_snapshot_backing_file(
self.snapshot)
# If the metadata is not set, we expect the super class method to
# be used, which is supposed to query the image.
if md_set:
self.assertEqual(backing_file, ret_val)
else:
self.assertEqual(remotefs_get_backing_file.return_value,
ret_val)
remotefs_get_backing_file.assert_called_once_with(
self.snapshot)
def test_create_volume_from_unavailable_snapshot(self):
self.snapshot.status = fields.SnapshotStatus.ERROR
self.assertRaises(
exception.InvalidSnapshot,
self._smbfs_driver.create_volume_from_snapshot,
self.volume, self.snapshot)
@ddt.data(True, False)
def test_copy_volume_to_image(self, has_parent=False):
drv = self._smbfs_driver
volume = test_utils.create_volume(
self._context, volume_type_id=fake.VOLUME_TYPE_ID,
updated_at=self.updated_at)
extra_specs = {
'image_service:store_id': 'fake-store'
}
test_utils.create_volume_type(self._context.elevated(),
id=fake.VOLUME_TYPE_ID, name="test_type",
extra_specs=extra_specs)
fake_image_meta = {'id': 'fake-image-id'}
fake_img_format = self._smbfs_driver._DISK_FORMAT_VHDX
if has_parent:
fake_volume_path = self._FAKE_SNAPSHOT_PATH
fake_parent_path = self._FAKE_VOLUME_PATH
else:
fake_volume_path = self._FAKE_VOLUME_PATH
fake_parent_path = None
fake_active_image = os.path.basename(fake_volume_path)
drv.get_active_image_from_info = mock.Mock(
return_value=fake_active_image)
drv._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
drv.get_volume_format = mock.Mock(
return_value=fake_img_format)
drv._vhdutils.get_vhd_parent_path.return_value = (
fake_parent_path)
with mock.patch.object(image_utils, 'upload_volume') as (
fake_upload_volume):
drv.copy_volume_to_image(
mock.sentinel.context, volume,
mock.sentinel.image_service, fake_image_meta)
if has_parent:
fake_temp_image_name = '%s.temp_image.%s.%s' % (
volume.id,
fake_image_meta['id'],
fake_img_format)
fake_temp_image_path = os.path.join(
self._FAKE_MNT_POINT,
fake_temp_image_name)
fake_active_image_path = os.path.join(
self._FAKE_MNT_POINT,
fake_active_image)
upload_path = fake_temp_image_path
drv._vhdutils.convert_vhd.assert_called_once_with(
fake_active_image_path,
fake_temp_image_path)
drv._delete.assert_called_once_with(
fake_temp_image_path)
else:
upload_path = fake_volume_path
fake_upload_volume.assert_called_once_with(
mock.sentinel.context, mock.sentinel.image_service,
fake_image_meta, upload_path, volume_format=fake_img_format,
store_id='fake-store', base_image_ref=None, compress=True,
run_as_root=True)
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_vhd_type')
def test_copy_image_to_volume(self, mock_get_vhd_type):
drv = self._smbfs_driver
drv.get_volume_format = mock.Mock(
return_value=mock.sentinel.volume_format)
drv.local_path = mock.Mock(
return_value=self._FAKE_VOLUME_PATH)
drv.configuration = mock.MagicMock()
drv.configuration.volume_dd_blocksize = mock.sentinel.block_size
with mock.patch.object(image_utils,
'fetch_to_volume_format') as fake_fetch:
drv.copy_image_to_volume(
mock.sentinel.context, self.volume,
mock.sentinel.image_service,
mock.sentinel.image_id)
fake_fetch.assert_called_once_with(
mock.sentinel.context,
mock.sentinel.image_service,
mock.sentinel.image_id,
self._FAKE_VOLUME_PATH, mock.sentinel.volume_format,
mock.sentinel.block_size,
mock_get_vhd_type.return_value)
drv._vhdutils.resize_vhd.assert_called_once_with(
self._FAKE_VOLUME_PATH,
self.volume.size * units.Gi,
is_file_max_size=False)
drv._vhdutils.set_vhd_guid.assert_called_once_with(
self._FAKE_VOLUME_PATH,
self.volume.id)
@mock.patch.object(smbfs.WindowsSmbfsDriver, '_get_vhd_type')
def test_copy_volume_from_snapshot(self, mock_get_vhd_type):
drv = self._smbfs_driver
drv._get_snapshot_backing_file = mock.Mock(
return_value=self._FAKE_VOLUME_NAME)
drv._local_volume_dir = mock.Mock(
return_value=self._FAKE_MNT_POINT)
drv.local_path = mock.Mock(
return_value=mock.sentinel.new_volume_path)
drv._copy_volume_from_snapshot(self.snapshot,
self.volume, self.volume.size)
drv._get_snapshot_backing_file.assert_called_once_with(
self.snapshot)
drv._delete.assert_called_once_with(mock.sentinel.new_volume_path)
drv._vhdutils.convert_vhd.assert_called_once_with(
self._FAKE_VOLUME_PATH,
mock.sentinel.new_volume_path,
vhd_type=mock_get_vhd_type.return_value)
drv._vhdutils.set_vhd_guid.assert_called_once_with(
mock.sentinel.new_volume_path,
self.volume.id)
drv._vhdutils.resize_vhd.assert_called_once_with(
mock.sentinel.new_volume_path,
self.volume.size * units.Gi,
is_file_max_size=False)
def test_copy_encrypted_volume_from_snapshot(self):
        # We expect an exception to be raised if an encryption
        # key is provided since we don't support encrypted volumes
        # for the time being.
self.assertRaises(exception.NotSupportedOperation,
self._smbfs_driver._copy_volume_from_snapshot,
self.snapshot, self.volume,
self.volume.size,
mock.sentinel.src_key,
mock.sentinel.dest_key)
def test_rebase_img(self):
drv = self._smbfs_driver
drv._rebase_img(
self._FAKE_SNAPSHOT_PATH,
self._FAKE_VOLUME_NAME, 'vhdx')
drv._vhdutils.reconnect_parent_vhd.assert_called_once_with(
self._FAKE_SNAPSHOT_PATH, self._FAKE_VOLUME_PATH)
def test_copy_volume_image(self):
self._smbfs_driver._copy_volume_image(mock.sentinel.src,
mock.sentinel.dest)
self._smbfs_driver._pathutils.copy.assert_called_once_with(
mock.sentinel.src, mock.sentinel.dest)
def test_get_pool_name_from_share(self):
self._smbfs_driver._pool_mappings = {
mock.sentinel.share: mock.sentinel.pool}
pool = self._smbfs_driver._get_pool_name_from_share(
mock.sentinel.share)
self.assertEqual(mock.sentinel.pool, pool)
def test_get_share_from_pool_name(self):
self._smbfs_driver._pool_mappings = {
mock.sentinel.share: mock.sentinel.pool}
share = self._smbfs_driver._get_share_from_pool_name(
mock.sentinel.pool)
self.assertEqual(mock.sentinel.share, share)
    def test_get_share_from_pool_name_exception(self):
self._smbfs_driver._pool_mappings = {}
self.assertRaises(smbfs.SmbfsException,
self._smbfs_driver._get_share_from_pool_name,
mock.sentinel.pool)
def test_get_vhd_type(self):
drv = self._smbfs_driver
mock_type = drv._get_vhd_type(qemu_subformat=True)
self.assertEqual(mock_type, 'dynamic')
mock_type = drv._get_vhd_type(qemu_subformat=False)
self.assertEqual(mock_type, 3)
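        # 'dynamic' and 3 describe the same thin-provisioned disk type: the
        # former is the qemu subformat name, the latter the numeric constant
        # used by the vhdutils API. Switching the NAS provisioning type to
        # 'thick' below is expected to map to 'fixed' instead.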
self._smbfs_driver.configuration.nas_volume_prov_type = (
'thick')
mock_type = drv._get_vhd_type(qemu_subformat=True)
self.assertEqual(mock_type, 'fixed')
def test_get_managed_vol_expected_path(self):
self._vhdutils.get_vhd_format.return_value = 'vhdx'
vol_location = dict(vol_local_path=mock.sentinel.image_path,
mountpoint=self._FAKE_MNT_POINT)
path = self._smbfs_driver._get_managed_vol_expected_path(
self.volume, vol_location)
self.assertEqual(self._FAKE_VOLUME_PATH, path)
self._vhdutils.get_vhd_format.assert_called_once_with(
mock.sentinel.image_path)
@mock.patch.object(remotefs.RemoteFSManageableVolumesMixin,
'manage_existing')
def test_manage_existing(self, remotefs_manage):
model_update = dict(provider_location=self._FAKE_SHARE)
remotefs_manage.return_value = model_update
self._smbfs_driver.local_path = mock.Mock(
return_value=mock.sentinel.vol_path)
# Let's make sure that the provider location gets set.
# It's needed by self.local_path.
self.volume.provider_location = None
ret_val = self._smbfs_driver.manage_existing(
self.volume, mock.sentinel.ref)
self.assertEqual(model_update, ret_val)
self.assertEqual(self._FAKE_SHARE, self.volume.provider_location)
self._vhdutils.set_vhd_guid.assert_called_once_with(
mock.sentinel.vol_path,
self.volume.id)
self._smbfs_driver.local_path.assert_called_once_with(self.volume)
remotefs_manage.assert_called_once_with(self.volume, mock.sentinel.ref)
|
|
"""
This module tests the interaction of Kafka with Zookeeper with authentication enabled
"""
import logging
import uuid
import pytest
import shakedown
import sdk_auth
import sdk_cmd
import sdk_hosts
import sdk_install
import sdk_marathon
import sdk_repository
import sdk_utils
from tests import auth
from tests import config
from tests import test_utils
log = logging.getLogger(__name__)
@pytest.fixture(scope='module', autouse=True)
def kafka_principals():
fqdn = "{service_name}.{host_suffix}".format(service_name=config.SERVICE_NAME,
host_suffix=sdk_hosts.AUTOIP_HOST_SUFFIX)
brokers = [
"kafka-0-broker",
"kafka-1-broker",
"kafka-2-broker",
]
principals = []
for b in brokers:
principals.append("kafka/{instance}.{domain}@{realm}".format(
instance=b,
domain=fqdn,
realm=sdk_auth.REALM))
principals.append("client@{realm}".format(realm=sdk_auth.REALM))
yield principals
def get_node_principals():
"""Get a list of zookeeper principals for the agent nodes in the cluster"""
principals = []
agent_ips = shakedown.get_private_agents()
agent_dashed_ips = list(map(
lambda ip: "ip-{dashed_ip}".format(dashed_ip="-".join(ip.split("."))), agent_ips))
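    # e.g. "10.0.3.1" becomes "ip-10-0-3-1", the host part of the agent's
    # internal EC2 hostname.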
for b in agent_dashed_ips:
principals.append("zookeeper/{instance}.{domain}@{realm}".format(
instance=b,
# TODO(elezar) we need to infer the region too
domain="us-west-2.compute.internal",
realm=sdk_auth.REALM))
return principals
@pytest.fixture(scope='module', autouse=True)
def zookeeper_principals():
zk_fqdn = "{service_name}.{host_suffix}".format(service_name="kafka-zookeeper",
host_suffix=sdk_hosts.AUTOIP_HOST_SUFFIX)
zk_ensemble = [
"zookeeper-0-server",
"zookeeper-1-server",
"zookeeper-2-server",
]
principals = []
for b in zk_ensemble:
principals.append("zookeeper/{instance}.{domain}@{realm}".format(
instance=b,
domain=zk_fqdn,
realm=sdk_auth.REALM))
principals.extend(get_node_principals())
yield principals
@pytest.fixture(scope='module', autouse=True)
def kerberos(configure_security, kafka_principals, zookeeper_principals):
try:
principals = []
principals.extend(kafka_principals)
principals.extend(zookeeper_principals)
kerberos_env = sdk_auth.KerberosEnvironment()
kerberos_env.add_principals(principals)
kerberos_env.finalize()
yield kerberos_env
finally:
kerberos_env.cleanup()
@pytest.fixture(scope='module')
def zookeeper_server(kerberos):
service_kerberos_options = {
"service": {
"name": "kafka-zookeeper",
"security": {
"kerberos": {
"enabled": True,
"kdc_host_name": kerberos.get_host(),
"kdc_host_port": int(kerberos.get_port()),
"keytab_secret": kerberos.get_keytab_path(),
}
}
}
}
try:
sdk_install.uninstall("beta-kafka-zookeeper", "kafka-zookeeper")
sdk_install.install(
"beta-kafka-zookeeper",
"kafka-zookeeper",
6,
additional_options=service_kerberos_options,
timeout_seconds=30 * 60)
yield {**service_kerberos_options, **{"package_name": "beta-kafka-zookeeper"}}
finally:
sdk_install.uninstall("beta-kafka-zookeeper", "kafka-zookeeper")
@pytest.fixture(scope='module', autouse=True)
def kafka_server(kerberos, zookeeper_server):
# Get the zookeeper DNS values
zookeeper_dns = sdk_cmd.svc_cli(zookeeper_server["package_name"],
zookeeper_server["service"]["name"],
"endpoint clientport", json=True)["dns"]
service_kerberos_options = {
"service": {
"name": config.SERVICE_NAME,
"security": {
"kerberos": {
"enabled": True,
"enabled_for_zookeeper": True,
"kdc": {
"hostname": kerberos.get_host(),
"port": int(kerberos.get_port())
},
"keytab_secret": kerberos.get_keytab_path(),
}
}
},
"kafka": {
"kafka_zookeeper_uri": ",".join(zookeeper_dns)
}
}
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
try:
sdk_install.install(
config.PACKAGE_NAME,
config.SERVICE_NAME,
config.DEFAULT_BROKER_COUNT,
additional_options=service_kerberos_options,
timeout_seconds=30 * 60)
yield {**service_kerberos_options, **{"package_name": config.PACKAGE_NAME}}
finally:
sdk_install.uninstall(config.PACKAGE_NAME, config.SERVICE_NAME)
@pytest.fixture(scope='module', autouse=True)
def kafka_client(kerberos, kafka_server):
brokers = sdk_cmd.svc_cli(
kafka_server["package_name"],
kafka_server["service"]["name"],
"endpoint broker", json=True)["dns"]
try:
client_id = "kafka-client"
client = {
"id": client_id,
"mem": 512,
"container": {
"type": "MESOS",
"docker": {
"image": "elezar/kafka-client:latest",
"forcePullImage": True
},
"volumes": [
{
"containerPath": "/tmp/kafkaconfig/kafka-client.keytab",
"secret": "kafka_keytab"
}
]
},
"secrets": {
"kafka_keytab": {
"source": kerberos.get_keytab_path(),
}
},
"networks": [
{
"mode": "host"
}
],
"env": {
"JVM_MaxHeapSize": "512",
"KAFKA_CLIENT_MODE": "test",
"KAFKA_TOPIC": "securetest",
"KAFKA_BROKER_LIST": ",".join(brokers)
}
}
sdk_marathon.install_app(client)
yield {**client, **{"brokers": list(map(lambda x: x.split(':')[0], brokers))}}
finally:
sdk_marathon.destroy_app(client_id)
@pytest.mark.dcos_min_version('1.10')
@sdk_utils.dcos_ee_only
@pytest.mark.sanity
def test_client_can_read_and_write(kafka_client, kafka_server):
client_id = kafka_client["id"]
auth.wait_for_brokers(kafka_client["id"], kafka_client["brokers"])
topic_name = "authn.test"
sdk_cmd.svc_cli(kafka_server["package_name"], kafka_server["service"]["name"],
"topic create {}".format(topic_name),
json=True)
test_utils.wait_for_topic(kafka_server["package_name"], kafka_server["service"]["name"], topic_name)
message = str(uuid.uuid4())
assert write_to_topic("client", client_id, topic_name, message)
assert message in read_from_topic("client", client_id, topic_name, 1)
def write_to_topic(cn: str, task: str, topic: str, message: str) -> bool:
return auth.write_to_topic(cn, task, topic, message,
auth.get_kerberos_client_properties(ssl_enabled=False),
auth.setup_env(cn, task))
def read_from_topic(cn: str, task: str, topic: str, messages: int) -> str:
    return auth.read_from_topic(cn, task, topic, messages,
auth.get_kerberos_client_properties(ssl_enabled=False),
auth.setup_env(cn, task))
|
|
import datetime
import json
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange, Range
from django.contrib.postgres import forms, lookups
from django.db import models
from .utils import AttributeSetter
__all__ = [
'RangeField', 'IntegerRangeField', 'BigIntegerRangeField',
'DecimalRangeField', 'DateTimeRangeField', 'DateRangeField',
'FloatRangeField',
'RangeBoundary', 'RangeOperators',
]
class RangeBoundary(models.Expression):
"""A class that represents range boundaries."""
def __init__(self, inclusive_lower=True, inclusive_upper=False):
self.lower = '[' if inclusive_lower else '('
self.upper = ']' if inclusive_upper else ')'
def as_sql(self, compiler, connection):
return "'%s%s'" % (self.lower, self.upper), []
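    # With the defaults this renders as "'[)'": an inclusive lower bound and an
    # exclusive upper bound, which is how PostgreSQL spells range bounds.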
class RangeOperators:
# https://www.postgresql.org/docs/current/functions-range.html#RANGE-OPERATORS-TABLE
EQUAL = '='
NOT_EQUAL = '<>'
CONTAINS = '@>'
CONTAINED_BY = '<@'
OVERLAPS = '&&'
FULLY_LT = '<<'
FULLY_GT = '>>'
NOT_LT = '&>'
NOT_GT = '&<'
ADJACENT_TO = '-|-'
class RangeField(models.Field):
empty_strings_allowed = False
def __init__(self, *args, **kwargs):
# Initializing base_field here ensures that its model matches the model for self.
if hasattr(self, 'base_field'):
self.base_field = self.base_field()
super().__init__(*args, **kwargs)
@property
def model(self):
try:
return self.__dict__['model']
except KeyError:
raise AttributeError("'%s' object has no attribute 'model'" % self.__class__.__name__)
@model.setter
def model(self, model):
self.__dict__['model'] = model
self.base_field.model = model
def get_prep_value(self, value):
if value is None:
return None
elif isinstance(value, Range):
return value
elif isinstance(value, (list, tuple)):
return self.range_type(value[0], value[1])
return value
def to_python(self, value):
if isinstance(value, str):
# Assume we're deserializing
vals = json.loads(value)
for end in ('lower', 'upper'):
if end in vals:
vals[end] = self.base_field.to_python(vals[end])
value = self.range_type(**vals)
elif isinstance(value, (list, tuple)):
value = self.range_type(value[0], value[1])
return value
def set_attributes_from_name(self, name):
super().set_attributes_from_name(name)
self.base_field.set_attributes_from_name(name)
def value_to_string(self, obj):
value = self.value_from_object(obj)
if value is None:
return None
if value.isempty:
return json.dumps({"empty": True})
base_field = self.base_field
result = {"bounds": value._bounds}
for end in ('lower', 'upper'):
val = getattr(value, end)
if val is None:
result[end] = None
else:
obj = AttributeSetter(base_field.attname, val)
result[end] = base_field.value_to_string(obj)
return json.dumps(result)
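    # For example, a NumericRange(1, 5) on an IntegerRangeField serializes to
    # '{"bounds": "[)", "lower": "1", "upper": "5"}'; to_python() above turns
    # such a JSON string back into a range object.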
def formfield(self, **kwargs):
kwargs.setdefault('form_class', self.form_field)
return super().formfield(**kwargs)
class IntegerRangeField(RangeField):
base_field = models.IntegerField
range_type = NumericRange
form_field = forms.IntegerRangeField
def db_type(self, connection):
return 'int4range'
class BigIntegerRangeField(RangeField):
base_field = models.BigIntegerField
range_type = NumericRange
form_field = forms.IntegerRangeField
def db_type(self, connection):
return 'int8range'
class DecimalRangeField(RangeField):
base_field = models.DecimalField
range_type = NumericRange
form_field = forms.DecimalRangeField
def db_type(self, connection):
return 'numrange'
class FloatRangeField(RangeField):
system_check_deprecated_details = {
'msg': (
'FloatRangeField is deprecated and will be removed in Django 3.1.'
),
'hint': 'Use DecimalRangeField instead.',
'id': 'fields.W902',
}
base_field = models.FloatField
range_type = NumericRange
form_field = forms.FloatRangeField
def db_type(self, connection):
return 'numrange'
class DateTimeRangeField(RangeField):
base_field = models.DateTimeField
range_type = DateTimeTZRange
form_field = forms.DateTimeRangeField
def db_type(self, connection):
return 'tstzrange'
class DateRangeField(RangeField):
base_field = models.DateField
range_type = DateRange
form_field = forms.DateRangeField
def db_type(self, connection):
return 'daterange'
RangeField.register_lookup(lookups.DataContains)
RangeField.register_lookup(lookups.ContainedBy)
RangeField.register_lookup(lookups.Overlap)
class DateTimeRangeContains(lookups.PostgresSimpleLookup):
"""
Lookup for Date/DateTimeRange containment to cast the rhs to the correct
type.
"""
lookup_name = 'contains'
operator = RangeOperators.CONTAINS
def process_rhs(self, compiler, connection):
# Transform rhs value for db lookup.
if isinstance(self.rhs, datetime.date):
output_field = models.DateTimeField() if isinstance(self.rhs, datetime.datetime) else models.DateField()
value = models.Value(self.rhs, output_field=output_field)
self.rhs = value.resolve_expression(compiler.query)
return super().process_rhs(compiler, connection)
def as_sql(self, compiler, connection):
sql, params = super().as_sql(compiler, connection)
# Cast the rhs if needed.
cast_sql = ''
if (
isinstance(self.rhs, models.Expression) and
self.rhs._output_field_or_none and
# Skip cast if rhs has a matching range type.
not isinstance(self.rhs._output_field_or_none, self.lhs.output_field.__class__)
):
cast_internal_type = self.lhs.output_field.base_field.get_internal_type()
cast_sql = '::{}'.format(connection.data_types.get(cast_internal_type))
return '%s%s' % (sql, cast_sql), params
DateRangeField.register_lookup(DateTimeRangeContains)
DateTimeRangeField.register_lookup(DateTimeRangeContains)
class RangeContainedBy(lookups.PostgresSimpleLookup):
lookup_name = 'contained_by'
type_mapping = {
'integer': 'int4range',
'bigint': 'int8range',
'double precision': 'numrange',
'date': 'daterange',
'timestamp with time zone': 'tstzrange',
}
operator = RangeOperators.CONTAINED_BY
def process_rhs(self, compiler, connection):
rhs, rhs_params = super().process_rhs(compiler, connection)
cast_type = self.type_mapping[self.lhs.output_field.db_type(connection)]
return '%s::%s' % (rhs, cast_type), rhs_params
def process_lhs(self, compiler, connection):
lhs, lhs_params = super().process_lhs(compiler, connection)
if isinstance(self.lhs.output_field, models.FloatField):
lhs = '%s::numeric' % lhs
return lhs, lhs_params
def get_prep_lookup(self):
return RangeField().get_prep_value(self.rhs)
models.DateField.register_lookup(RangeContainedBy)
models.DateTimeField.register_lookup(RangeContainedBy)
models.IntegerField.register_lookup(RangeContainedBy)
models.BigIntegerField.register_lookup(RangeContainedBy)
models.FloatField.register_lookup(RangeContainedBy)
@RangeField.register_lookup
class FullyLessThan(lookups.PostgresSimpleLookup):
lookup_name = 'fully_lt'
operator = RangeOperators.FULLY_LT
@RangeField.register_lookup
class FullGreaterThan(lookups.PostgresSimpleLookup):
lookup_name = 'fully_gt'
operator = RangeOperators.FULLY_GT
@RangeField.register_lookup
class NotLessThan(lookups.PostgresSimpleLookup):
lookup_name = 'not_lt'
operator = RangeOperators.NOT_LT
@RangeField.register_lookup
class NotGreaterThan(lookups.PostgresSimpleLookup):
lookup_name = 'not_gt'
operator = RangeOperators.NOT_GT
@RangeField.register_lookup
class AdjacentToLookup(lookups.PostgresSimpleLookup):
lookup_name = 'adjacent_to'
operator = RangeOperators.ADJACENT_TO
@RangeField.register_lookup
class RangeStartsWith(models.Transform):
lookup_name = 'startswith'
function = 'lower'
@property
def output_field(self):
return self.lhs.output_field.base_field
@RangeField.register_lookup
class RangeEndsWith(models.Transform):
lookup_name = 'endswith'
function = 'upper'
@property
def output_field(self):
return self.lhs.output_field.base_field
@RangeField.register_lookup
class IsEmpty(models.Transform):
lookup_name = 'isempty'
function = 'isempty'
output_field = models.BooleanField()
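# A rough usage sketch for the lookups and transforms registered above; the
# "Event" model and the filter values are illustrative and not part of this
# module:
#
#   class Event(models.Model):
#       period = DateTimeRangeField()
#
#   Event.objects.filter(period__contains=some_datetime)    # '@>' with a cast
#   Event.objects.filter(period__contained_by=wider_range)  # '<@'
#   Event.objects.filter(period__startswith=some_datetime)  # lower(period)
#   Event.objects.filter(period__isempty=True)               # isempty(period)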
|
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import csv
import json
import os
import shutil
import chrome_cache
import common_util
import emulation
import sandwich_metrics
import sandwich_misc
import sandwich_runner
import task_manager
def NetworkSimulationTransformer(network_condition):
"""Creates a function that accepts a SandwichRunner as a parameter and sets
network emulation options on it.
Args:
network_condition: The network condition to apply to the sandwich runner.
Returns:
    A callback that applies the network condition to the SandwichRunner it
    receives.
"""
assert network_condition in emulation.NETWORK_CONDITIONS
def Transformer(runner):
assert isinstance(runner, sandwich_runner.SandwichRunner)
runner.network_condition = network_condition
return Transformer
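# A minimal usage sketch; the condition name is illustrative and must be one
# of emulation.NETWORK_CONDITIONS:
#
#   transformer = NetworkSimulationTransformer('Regular3G')
#   runner = sandwich_runner.SandwichRunner()
#   transformer(runner)  # runner.network_condition is now 'Regular3G'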
class SandwichTaskBuilder(task_manager.Builder):
  """A builder for a graph of tasks, each of which prepares or invokes a
  SandwichRunner.
"""
def __init__(self, output_directory, android_device, job_path):
"""Constructor.
Args:
output_directory: As in task_manager.Builder.__init__
android_device: The android DeviceUtils to run sandwich on or None to run
it locally.
job_path: Path of the sandwich's job.
"""
task_manager.Builder.__init__(self, output_directory)
self._android_device = android_device
self._job_path = job_path
self._default_final_tasks = []
self._original_wpr_task = None
self._patched_wpr_task = None
self._reference_cache_task = None
self._subresources_for_urls_run_task = None
self._subresources_for_urls_task = None
@property
def default_final_tasks(self):
return self._default_final_tasks
def _CreateSandwichRunner(self):
"""Create a runner for non benchmark purposes."""
runner = sandwich_runner.SandwichRunner()
runner.LoadJob(self._job_path)
runner.android_device = self._android_device
return runner
  def OverridePathToWprArchive(self, original_wpr_path):
    """Sets the original WPR archive path to be used.
Args:
original_wpr_path: Path of the original WPR archive to be used.
"""
self._original_wpr_task = \
self.CreateStaticTask('common/webpages.wpr', original_wpr_path)
def PopulateWprRecordingTask(self):
"""Records the original WPR archive."""
@self.RegisterTask('common/webpages.wpr')
def BuildOriginalWpr():
common_util.EnsureParentDirectoryExists(BuildOriginalWpr.path)
runner = self._CreateSandwichRunner()
runner.wpr_archive_path = BuildOriginalWpr.path
runner.wpr_record = True
runner.Run()
self._original_wpr_task = BuildOriginalWpr
def PopulateCommonPipelines(self):
"""Creates necessary tasks to produce initial cache archive.
Also creates a task for producing a json file with a mapping of URLs to
    subresources (subresources-for-urls.json).
Here is the full dependency tree for the returned task:
common/patched-cache-validation.log
depends on: common/patched-cache.zip
depends on: common/original-cache.zip
depends on: common/webpages-patched.wpr
depends on: common/webpages.wpr
      depends on: common/subresources-for-urls.json
        depends on: common/subresources-for-urls-run/
depends on: common/webpages.wpr
Returns:
The last task of the pipeline.
"""
@self.RegisterTask('common/webpages-patched.wpr', [self._original_wpr_task])
def BuildPatchedWpr():
common_util.EnsureParentDirectoryExists(BuildPatchedWpr.path)
shutil.copyfile(self._original_wpr_task.path, BuildPatchedWpr.path)
sandwich_misc.PatchWpr(BuildPatchedWpr.path)
@self.RegisterTask('common/original-cache.zip', [BuildPatchedWpr])
def BuildOriginalCache():
runner = self._CreateSandwichRunner()
runner.wpr_archive_path = BuildPatchedWpr.path
runner.cache_archive_path = BuildOriginalCache.path
runner.cache_operation = 'save'
runner.trace_output_directory = BuildOriginalCache.run_path
runner.Run()
BuildOriginalCache.run_path = BuildOriginalCache.path[:-4] + '-run'
@self.RegisterTask('common/patched-cache.zip', [BuildOriginalCache])
def BuildPatchedCache():
sandwich_misc.PatchCacheArchive(BuildOriginalCache.path,
os.path.join(BuildOriginalCache.run_path, '0', 'trace.json'),
BuildPatchedCache.path)
@self.RegisterTask('common/subresources-for-urls-run/',
dependencies=[self._original_wpr_task])
def UrlsResourcesRun():
runner = self._CreateSandwichRunner()
runner.wpr_archive_path = self._original_wpr_task.path
runner.cache_operation = 'clear'
runner.trace_output_directory = UrlsResourcesRun.path
runner.Run()
@self.RegisterTask('common/subresources-for-urls.json', [UrlsResourcesRun])
def ListUrlsResources():
json_content = sandwich_misc.ReadSubresourceMapFromBenchmarkOutput(
UrlsResourcesRun.path)
with open(ListUrlsResources.path, 'w') as output:
json.dump(json_content, output)
@self.RegisterTask('common/patched-cache-validation.log',
[BuildPatchedCache, ListUrlsResources])
def ValidatePatchedCache():
json_content = json.load(open(ListUrlsResources.path))
ref_urls = set()
for urls in json_content.values():
ref_urls.update(set(urls))
sandwich_misc.ValidateCacheArchiveContent(
ref_urls, BuildPatchedCache.path)
self._patched_wpr_task = BuildPatchedWpr
self._reference_cache_task = BuildPatchedCache
self._subresources_for_urls_run_task = UrlsResourcesRun
self._subresources_for_urls_task = ListUrlsResources
self._default_final_tasks.append(ValidatePatchedCache)
return ValidatePatchedCache
def PopulateLoadBenchmark(self, subresource_discoverer,
transformer_list_name, transformer_list):
"""Populate benchmarking tasks from its setup tasks.
Args:
subresource_discoverer: Name of a subresources discoverer.
transformer_list_name: A string describing the transformers, will be used
in Task names (prefer names without spaces and special characters).
transformer_list: An ordered list of function that takes an instance of
SandwichRunner as parameter, would be applied immediately before
SandwichRunner.Run() in the given order.
    Here is the full dependency tree added for the returned task:
<transformer_list_name>/<subresource_discoverer>-metrics.csv
depends on: <transformer_list_name>/<subresource_discoverer>-run/
depends on: common/<subresource_discoverer>-cache.zip
depends on: some tasks saved by PopulateCommonPipelines()
depends on: common/<subresource_discoverer>-setup.json
depends on: some tasks saved by PopulateCommonPipelines()
Returns:
task_manager.Task for
<transformer_list_name>/<subresource_discoverer>-metrics.csv
"""
assert subresource_discoverer in sandwich_misc.SUBRESOURCE_DISCOVERERS
assert 'common' not in sandwich_misc.SUBRESOURCE_DISCOVERERS
shared_task_prefix = os.path.join('common', subresource_discoverer)
task_prefix = os.path.join(transformer_list_name, subresource_discoverer)
@self.RegisterTask(shared_task_prefix + '-setup.json', merge=True,
dependencies=[self._subresources_for_urls_task])
def SetupBenchmark():
trace_path = os.path.join(self._subresources_for_urls_run_task.path, '0',
sandwich_runner.TRACE_FILENAME)
whitelisted_urls = sandwich_misc.ExtractDiscoverableUrls(
trace_path, subresource_discoverer)
urls_resources = json.load(open(self._subresources_for_urls_task.path))
# TODO(gabadie): Implement support for multiple URLs in this Task.
assert len(urls_resources) == 1
url = urls_resources.keys()[0]
url_resources = urls_resources[url]
common_util.EnsureParentDirectoryExists(SetupBenchmark.path)
with open(SetupBenchmark.path, 'w') as output:
json.dump({
'cache_whitelist': [url for url in whitelisted_urls],
'subresource_discoverer': subresource_discoverer,
'url_resources': url_resources,
}, output)
@self.RegisterTask(shared_task_prefix + '-cache.zip', merge=True,
dependencies=[
SetupBenchmark, self._reference_cache_task])
def BuildBenchmarkCacheArchive():
setup = json.load(open(SetupBenchmark.path))
chrome_cache.ApplyUrlWhitelistToCacheArchive(
cache_archive_path=self._reference_cache_task.path,
whitelisted_urls=setup['cache_whitelist'],
output_cache_archive_path=BuildBenchmarkCacheArchive.path)
@self.RegisterTask(task_prefix + '-run/',
dependencies=[BuildBenchmarkCacheArchive])
def RunBenchmark():
runner = self._CreateSandwichRunner()
for transformer in transformer_list:
transformer(runner)
runner.wpr_archive_path = self._patched_wpr_task.path
runner.wpr_out_log_path = os.path.join(RunBenchmark.path, 'wpr.log')
runner.cache_archive_path = BuildBenchmarkCacheArchive.path
runner.cache_operation = 'push'
runner.trace_output_directory = RunBenchmark.path
runner.Run()
@self.RegisterTask(task_prefix + '-metrics.csv',
dependencies=[RunBenchmark])
def ExtractMetrics():
sandwich_misc.VerifyBenchmarkOutputDirectory(
SetupBenchmark.path, RunBenchmark.path)
trace_metrics_list = \
sandwich_metrics.ExtractMetricsFromRunnerOutputDirectory(
SetupBenchmark.path, RunBenchmark.path)
trace_metrics_list.sort(key=lambda e: e['repeat_id'])
with open(ExtractMetrics.path, 'w') as csv_file:
writer = csv.DictWriter(csv_file,
fieldnames=sandwich_metrics.CSV_FIELD_NAMES)
writer.writeheader()
for trace_metrics in trace_metrics_list:
writer.writerow(trace_metrics)
self._default_final_tasks.append(ExtractMetrics)
return ExtractMetrics
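# A rough sketch of how this builder is typically driven; the argument values
# (including the subresource discoverer name) are illustrative:
#
#   builder = SandwichTaskBuilder(output_directory='out', android_device=None,
#                                 job_path='path/to/job')
#   builder.OverridePathToWprArchive('path/to/archive.wpr')
#   builder.PopulateCommonPipelines()
#   builder.PopulateLoadBenchmark('parser', 'no-network-emulation',
#                                 transformer_list=[])
#   # builder.default_final_tasks now lists the tasks to execute.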
|
|
"""The Channel class provides a wrapper for interacting with RabbitMQ
implementing the methods and behaviors for an AMQP Channel.
"""
import collections
import logging
import warnings
import uuid
from servicebus.pika import frame
from servicebus.pika import exceptions
from servicebus.pika import spec
from servicebus.pika.utils import is_callable
from servicebus.pika.compat import unicode_type, dictkeys, as_bytes
LOGGER = logging.getLogger(__name__)
MAX_CHANNELS = 32768
class Channel(object):
"""A Channel is the primary communication method for interacting with
RabbitMQ. It is recommended that you do not directly invoke
the creation of a channel object in your application code but rather
    construct a channel by calling the active connection's channel()
method.
"""
CLOSED = 0
OPENING = 1
OPEN = 2
CLOSING = 3
_ON_CHANNEL_CLEANUP_CB_KEY = '_on_channel_cleanup'
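    # A minimal sketch of the pattern recommended in the class docstring above;
    # the connection object and callback are illustrative, and the exact
    # Connection.channel() signature depends on the surrounding pika version:
    #
    #   def on_channel_open(channel):
    #       channel.basic_qos(prefetch_count=1)
    #
    #   connection.channel(on_channel_open)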
def __init__(self, connection, channel_number, on_open_callback=None):
"""Create a new instance of the Channel
:param pika.connection.Connection connection: The connection
:param int channel_number: The channel number for this instance
:param method on_open_callback: The method to call on channel open
"""
if not isinstance(channel_number, int):
raise exceptions.InvalidChannelNumber
self.channel_number = channel_number
self.callbacks = connection.callbacks
self.connection = connection
# The frame-handler changes depending on the type of frame processed
self.frame_dispatcher = ContentFrameDispatcher()
self._blocked = collections.deque(list())
self._blocking = None
self._has_on_flow_callback = False
self._cancelled = set()
self._consumers = dict()
self._consumers_with_noack = set()
self._on_flowok_callback = None
self._on_getok_callback = None
self._on_openok_callback = on_open_callback
self._pending = dict()
self._state = self.CLOSED
# opaque cookie value set by wrapper layer (e.g., BlockingConnection)
# via _set_cookie
self._cookie = None
def __int__(self):
"""Return the channel object as its channel number
:rtype: int
"""
return self.channel_number
    def add_callback(self, callback, replies, one_shot=True):
        """Pass in a callback handler and a list of replies from the
RabbitMQ broker which you'd like the callback notified of. Callbacks
should allow for the frame parameter to be passed in.
:param method callback: The method to call
:param list replies: The replies to get a callback for
:param bool one_shot: Only handle the first type callback
"""
for reply in replies:
self.callbacks.add(self.channel_number, reply, callback, one_shot)
def add_on_cancel_callback(self, callback):
"""Pass a callback function that will be called when the basic_cancel
is sent by the server. The callback function should receive a frame
parameter.
:param method callback: The method to call on callback
"""
self.callbacks.add(self.channel_number, spec.Basic.Cancel, callback,
False)
def add_on_close_callback(self, callback):
"""Pass a callback function that will be called when the channel is
closed. The callback function will receive the channel, the
        reply_code (int) and the reply_text (str) sent by the server describing
why the channel was closed.
:param method callback: The method to call on callback
"""
self.callbacks.add(self.channel_number, '_on_channel_close', callback,
False, self)
def add_on_flow_callback(self, callback):
"""Pass a callback function that will be called when Channel.Flow is
called by the remote server. Note that newer versions of RabbitMQ
will not issue this but instead use TCP backpressure
:param method callback: The method to call on callback
"""
self._has_on_flow_callback = True
self.callbacks.add(self.channel_number, spec.Channel.Flow, callback,
False)
    def add_on_return_callback(self, callback):
        """Pass a callback function that will be called when basic_publish has
sent a message that has been rejected and returned by the server.
:param method callback: The method to call on callback with the
signature callback(channel, method, properties,
body), where
channel: pika.Channel
method: pika.spec.Basic.Return
properties: pika.spec.BasicProperties
body: str, unicode, or bytes (python 3.x)
"""
self.callbacks.add(self.channel_number, '_on_return', callback, False)
def basic_ack(self, delivery_tag=0, multiple=False):
"""Acknowledge one or more messages. When sent by the client, this
method acknowledges one or more messages delivered via the Deliver or
        Get-Ok methods. When sent by the server, this method acknowledges one or
more messages published with the Publish method on a channel in
confirm mode. The acknowledgement can be for a single message or a
set of messages up to and including a specific message.
        :param int delivery_tag: The server-assigned delivery tag
:param bool multiple: If set to True, the delivery tag is treated as
"up to and including", so that multiple messages
can be acknowledged with a single method. If set
to False, the delivery tag refers to a single
message. If the multiple field is 1, and the
delivery tag is zero, this indicates
acknowledgement of all outstanding messages.
"""
if not self.is_open:
raise exceptions.ChannelClosed()
return self._send_method(spec.Basic.Ack(delivery_tag, multiple))
def basic_cancel(self, callback=None, consumer_tag='', nowait=False):
"""This method cancels a consumer. This does not affect already
delivered messages, but it does mean the server will not send any more
messages for that consumer. The client may receive an arbitrary number
of messages in between sending the cancel method and receiving the
cancel-ok reply. It may also be sent from the server to the client in
the event of the consumer being unexpectedly cancelled (i.e. cancelled
for any reason other than the server receiving the corresponding
basic.cancel from the client). This allows clients to be notified of
the loss of consumers due to events such as queue deletion.
:param method callback: Method to call for a Basic.CancelOk response
:param str consumer_tag: Identifier for the consumer
:param bool nowait: Do not expect a Basic.CancelOk response
:raises: ValueError
"""
self._validate_channel_and_callback(callback)
if consumer_tag not in self.consumer_tags:
return
if callback:
if nowait is True:
raise ValueError('Can not pass a callback if nowait is True')
self.callbacks.add(self.channel_number, spec.Basic.CancelOk,
callback)
self._cancelled.add(consumer_tag)
self._rpc(spec.Basic.Cancel(consumer_tag=consumer_tag,
nowait=nowait), self._on_cancelok,
[(spec.Basic.CancelOk, {'consumer_tag': consumer_tag})] if
nowait is False else [])
def basic_consume(self, consumer_callback,
queue='',
no_ack=False,
exclusive=False,
consumer_tag=None,
arguments=None):
"""Sends the AMQP command Basic.Consume to the broker and binds messages
for the consumer_tag to the consumer callback. If you do not pass in
a consumer_tag, one will be automatically generated for you. Returns
the consumer tag.
For more information on basic_consume, see:
http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.consume
:param method consumer_callback: The method to callback when consuming
with the signature consumer_callback(channel, method, properties,
body), where
channel: pika.Channel
method: pika.spec.Basic.Deliver
properties: pika.spec.BasicProperties
body: str, unicode, or bytes (python 3.x)
:param queue: The queue to consume from
:type queue: str or unicode
:param bool no_ack: Tell the broker to not expect a response
:param bool exclusive: Don't allow other consumers on the queue
:param consumer_tag: Specify your own consumer tag
:type consumer_tag: str or unicode
:param dict arguments: Custom key/value pair arguments for the consume
:rtype: str
"""
self._validate_channel_and_callback(consumer_callback)
# If a consumer tag was not passed, create one
if not consumer_tag:
consumer_tag = self._generate_consumer_tag()
if consumer_tag in self._consumers or consumer_tag in self._cancelled:
raise exceptions.DuplicateConsumerTag(consumer_tag)
if no_ack:
self._consumers_with_noack.add(consumer_tag)
self._consumers[consumer_tag] = consumer_callback
self._pending[consumer_tag] = list()
self._rpc(spec.Basic.Consume(queue=queue,
consumer_tag=consumer_tag,
no_ack=no_ack,
exclusive=exclusive,
arguments=arguments or dict()),
self._on_eventok, [(spec.Basic.ConsumeOk,
{'consumer_tag': consumer_tag})])
return consumer_tag
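    # A minimal consumer sketch built on basic_consume() above; the queue name
    # and handler are illustrative:
    #
    #   def on_message(channel, method, properties, body):
    #       channel.basic_ack(delivery_tag=method.delivery_tag)
    #
    #   consumer_tag = channel.basic_consume(on_message, queue='work-queue')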
def _generate_consumer_tag(self):
"""Generate a consumer tag
NOTE: this protected method may be called by derived classes
:returns: consumer tag
:rtype: str
"""
return 'ctag%i.%s' % (self.channel_number,
uuid.uuid4().hex)
def basic_get(self, callback=None, queue='', no_ack=False):
"""Get a single message from the AMQP broker. If you want to
be notified of Basic.GetEmpty, use the Channel.add_callback method
adding your Basic.GetEmpty callback which should expect only one
parameter, frame. For more information on basic_get and its
parameters, see:
http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.get
:param method callback: The method to callback with a message that has
the signature callback(channel, method, properties, body), where:
channel: pika.Channel
method: pika.spec.Basic.GetOk
properties: pika.spec.BasicProperties
body: str, unicode, or bytes (python 3.x)
:param queue: The queue to get a message from
:type queue: str or unicode
:param bool no_ack: Tell the broker to not expect a reply
"""
self._validate_channel_and_callback(callback)
self._on_getok_callback = callback
self._send_method(spec.Basic.Get(queue=queue, no_ack=no_ack))
def basic_nack(self, delivery_tag=None, multiple=False, requeue=True):
"""This method allows a client to reject one or more incoming messages.
It can be used to interrupt and cancel large incoming messages, or
return untreatable messages to their original queue.
        :param int delivery_tag: The server-assigned delivery tag
:param bool multiple: If set to True, the delivery tag is treated as
"up to and including", so that multiple messages
can be acknowledged with a single method. If set
to False, the delivery tag refers to a single
message. If the multiple field is 1, and the
delivery tag is zero, this indicates
acknowledgement of all outstanding messages.
:param bool requeue: If requeue is true, the server will attempt to
requeue the message. If requeue is false or the
requeue attempt fails the messages are discarded or
dead-lettered.
"""
if not self.is_open:
raise exceptions.ChannelClosed()
return self._send_method(spec.Basic.Nack(delivery_tag, multiple,
requeue))
def basic_publish(self, exchange, routing_key, body,
properties=None,
mandatory=False,
immediate=False):
"""Publish to the channel with the given exchange, routing key and body.
For more information on basic_publish and what the parameters do, see:
http://www.rabbitmq.com/amqp-0-9-1-reference.html#basic.publish
:param exchange: The exchange to publish to
:type exchange: str or unicode
:param routing_key: The routing key to bind on
:type routing_key: str or unicode
:param body: The message body
:type body: str or unicode
:param pika.spec.BasicProperties properties: Basic.properties
:param bool mandatory: The mandatory flag
:param bool immediate: The immediate flag
"""
if not self.is_open:
raise exceptions.ChannelClosed()
if immediate:
LOGGER.warning('The immediate flag is deprecated in RabbitMQ')
if isinstance(body, unicode_type):
body = body.encode('utf-8')
properties = properties or spec.BasicProperties()
self._send_method(spec.Basic.Publish(exchange=exchange,
routing_key=routing_key,
mandatory=mandatory,
immediate=immediate),
(properties, body))
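    # A minimal publish sketch using basic_publish() above; the exchange,
    # routing key and body are illustrative:
    #
    #   channel.basic_publish(exchange='',
    #                         routing_key='work-queue',
    #                         body='hello',
    #                         properties=spec.BasicProperties(delivery_mode=2))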
def basic_qos(self,
callback=None,
prefetch_size=0,
prefetch_count=0,
all_channels=False):
"""Specify quality of service. This method requests a specific quality
of service. The QoS can be specified for the current channel or for all
channels on the connection. The client can request that messages be sent
in advance so that when the client finishes processing a message, the
following message is already held locally, rather than needing to be
sent down the channel. Prefetching gives a performance improvement.
:param method callback: The method to callback for Basic.QosOk response
:param int prefetch_size: This field specifies the prefetch window
size. The server will send a message in
advance if it is equal to or smaller in size
than the available prefetch size (and also
falls into other prefetch limits). May be set
to zero, meaning "no specific limit",
although other prefetch limits may still
apply. The prefetch-size is ignored if the
no-ack option is set.
:param int prefetch_count: Specifies a prefetch window in terms of whole
messages. This field may be used in
combination with the prefetch-size field; a
message will only be sent in advance if both
prefetch windows (and those at the channel
and connection level) allow it. The
prefetch-count is ignored if the no-ack
option is set.
:param bool all_channels: Should the QoS apply to all channels
"""
self._validate_channel_and_callback(callback)
return self._rpc(spec.Basic.Qos(prefetch_size, prefetch_count,
all_channels), callback,
[spec.Basic.QosOk])
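    # Illustrative usage sketch: limit unacknowledged deliveries to one message
    # at a time on this channel; `on_qos_ok` is a hypothetical callback that
    # receives the Basic.QosOk frame.
    #
    #     channel.basic_qos(callback=on_qos_ok, prefetch_count=1)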
def basic_reject(self, delivery_tag, requeue=True):
"""Reject an incoming message. This method allows a client to reject a
message. It can be used to interrupt and cancel large incoming messages,
or return untreatable messages to their original queue.
        :param int delivery_tag: The server-assigned delivery tag
:param bool requeue: If requeue is true, the server will attempt to
requeue the message. If requeue is false or the
requeue attempt fails the messages are discarded or
dead-lettered.
:raises: TypeError
"""
if not self.is_open:
raise exceptions.ChannelClosed()
if not isinstance(delivery_tag, int):
raise TypeError('delivery_tag must be an integer')
return self._send_method(spec.Basic.Reject(delivery_tag, requeue))
def basic_recover(self, callback=None, requeue=False):
"""This method asks the server to redeliver all unacknowledged messages
on a specified channel. Zero or more messages may be redelivered. This
method replaces the asynchronous Recover.
:param method callback: Method to call when receiving Basic.RecoverOk
:param bool requeue: If False, the message will be redelivered to the
original recipient. If True, the server will
attempt to requeue the message, potentially then
delivering it to an alternative subscriber.
"""
self._validate_channel_and_callback(callback)
return self._rpc(spec.Basic.Recover(requeue), callback,
[spec.Basic.RecoverOk])
def close(self, reply_code=0, reply_text="Normal Shutdown"):
"""Will invoke a clean shutdown of the channel with the AMQP Broker.
:param int reply_code: The reply code to close the channel with
:param str reply_text: The reply text to close the channel with
"""
if not self.is_open:
raise exceptions.ChannelClosed()
LOGGER.debug('Channel.close(%s, %s)', reply_code, reply_text)
if self._consumers:
LOGGER.debug('Cancelling %i consumers', len(self._consumers))
for consumer_tag in dictkeys(self._consumers):
self.basic_cancel(consumer_tag=consumer_tag)
self._set_state(self.CLOSING)
self._rpc(spec.Channel.Close(reply_code, reply_text, 0, 0),
self._on_closeok, [spec.Channel.CloseOk])
def confirm_delivery(self, callback=None, nowait=False):
"""Turn on Confirm mode in the channel. Pass in a callback to be
notified by the Broker when a message has been confirmed as received or
rejected (Basic.Ack, Basic.Nack) from the broker to the publisher.
For more information see:
http://www.rabbitmq.com/extensions.html#confirms
:param method callback: The callback for delivery confirmations
:param bool nowait: Do not send a reply frame (Confirm.SelectOk)
"""
self._validate_channel_and_callback(callback)
        if (self.connection.publisher_confirms is False or
                self.connection.basic_nack is False):
raise exceptions.MethodNotImplemented('Not Supported on Server')
# Add the ack and nack callbacks
if callback is not None:
self.callbacks.add(self.channel_number, spec.Basic.Ack, callback,
False)
self.callbacks.add(self.channel_number, spec.Basic.Nack, callback,
False)
# Send the RPC command
self._rpc(spec.Confirm.Select(nowait), self._on_selectok,
[spec.Confirm.SelectOk] if nowait is False else [])
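    # Illustrative usage sketch: enable publisher confirms and route Basic.Ack /
    # Basic.Nack frames to a hypothetical `on_delivery_confirmation` callback.
    #
    #     channel.confirm_delivery(callback=on_delivery_confirmation)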
@property
def consumer_tags(self):
"""Property method that returns a list of currently active consumers
:rtype: list
"""
return dictkeys(self._consumers)
def exchange_bind(self,
callback=None,
destination=None,
source=None,
routing_key='',
nowait=False,
arguments=None):
"""Bind an exchange to another exchange.
:param method callback: The method to call on Exchange.BindOk
:param destination: The destination exchange to bind
:type destination: str or unicode
:param source: The source exchange to bind to
:type source: str or unicode
:param routing_key: The routing key to bind on
:type routing_key: str or unicode
:param bool nowait: Do not wait for an Exchange.BindOk
:param dict arguments: Custom key/value pair arguments for the binding
"""
self._validate_channel_and_callback(callback)
return self._rpc(spec.Exchange.Bind(0, destination, source, routing_key,
nowait, arguments or dict()),
callback, [spec.Exchange.BindOk] if nowait is False
else [])
def exchange_declare(self,
callback=None,
exchange=None,
exchange_type='direct',
passive=False,
durable=False,
auto_delete=False,
internal=False,
nowait=False,
arguments=None,
type=None):
"""This method creates an exchange if it does not already exist, and if
the exchange exists, verifies that it is of the correct and expected
class.
        If passive is set, the server will reply with Declare-Ok if the exchange
        already exists with the same name, and raise an error if not. If the
        exchange does not already exist, the server MUST raise a channel
        exception with reply code 404 (not found).
:param method callback: Call this method on Exchange.DeclareOk
        :param exchange: The exchange name consists of a non-empty sequence of
                         these characters: letters, digits, hyphen, underscore,
                         period, or colon.
        :type exchange: str or unicode
:param str exchange_type: The exchange type to use
:param bool passive: Perform a declare or just check to see if it exists
:param bool durable: Survive a reboot of RabbitMQ
:param bool auto_delete: Remove when no more queues are bound to it
:param bool internal: Can only be published to by other exchanges
:param bool nowait: Do not expect an Exchange.DeclareOk response
:param dict arguments: Custom key/value pair arguments for the exchange
:param str type: The deprecated exchange type parameter
"""
self._validate_channel_and_callback(callback)
if type is not None:
warnings.warn('type is deprecated, use exchange_type instead',
DeprecationWarning)
if exchange_type == 'direct' and type != exchange_type:
exchange_type = type
return self._rpc(spec.Exchange.Declare(0, exchange, exchange_type,
passive, durable, auto_delete,
internal, nowait,
arguments or dict()), callback,
[spec.Exchange.DeclareOk] if nowait is False else [])
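    # Illustrative usage sketch: declare a durable fanout exchange. The exchange
    # name and callback are hypothetical examples.
    #
    #     channel.exchange_declare(callback=on_exchange_declareok,
    #                              exchange='logs',
    #                              exchange_type='fanout',
    #                              durable=True)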
def exchange_delete(self,
callback=None,
exchange=None,
if_unused=False,
nowait=False):
"""Delete the exchange.
:param method callback: The method to call on Exchange.DeleteOk
:param exchange: The exchange name
:type exchange: str or unicode
:param bool if_unused: only delete if the exchange is unused
:param bool nowait: Do not wait for an Exchange.DeleteOk
"""
self._validate_channel_and_callback(callback)
return self._rpc(spec.Exchange.Delete(0, exchange, if_unused, nowait),
callback, [spec.Exchange.DeleteOk] if nowait is False
else [])
def exchange_unbind(self,
callback=None,
destination=None,
source=None,
routing_key='',
nowait=False,
arguments=None):
"""Unbind an exchange from another exchange.
:param method callback: The method to call on Exchange.UnbindOk
:param destination: The destination exchange to unbind
:type destination: str or unicode
:param source: The source exchange to unbind from
:type source: str or unicode
:param routing_key: The routing key to unbind
:type routing_key: str or unicode
:param bool nowait: Do not wait for an Exchange.UnbindOk
:param dict arguments: Custom key/value pair arguments for the binding
"""
self._validate_channel_and_callback(callback)
return self._rpc(spec.Exchange.Unbind(0, destination, source,
routing_key, nowait, arguments),
callback, [spec.Exchange.UnbindOk] if nowait is False
else [])
def flow(self, callback, active):
"""Turn Channel flow control off and on. Pass a callback to be notified
of the response from the server. active is a bool. Callback should
expect a bool in response indicating channel flow state. For more
information, please reference:
http://www.rabbitmq.com/amqp-0-9-1-reference.html#channel.flow
:param method callback: The callback method
:param bool active: Turn flow on or off
"""
self._validate_channel_and_callback(callback)
self._on_flowok_callback = callback
self._rpc(spec.Channel.Flow(active), self._on_flowok,
[spec.Channel.FlowOk])
@property
def is_closed(self):
"""Returns True if the channel is closed.
:rtype: bool
"""
return self._state == self.CLOSED
@property
def is_closing(self):
"""Returns True if the channel is closing.
:rtype: bool
"""
return self._state == self.CLOSING
@property
def is_open(self):
"""Returns True if the channel is open.
:rtype: bool
"""
return self._state == self.OPEN
def open(self):
"""Open the channel"""
self._set_state(self.OPENING)
self._add_callbacks()
self._rpc(spec.Channel.Open(), self._on_openok, [spec.Channel.OpenOk])
def queue_bind(self, callback, queue, exchange,
routing_key=None,
nowait=False,
arguments=None):
"""Bind the queue to the specified exchange
:param method callback: The method to call on Queue.BindOk
:param queue: The queue to bind to the exchange
:type queue: str or unicode
:param exchange: The source exchange to bind to
:type exchange: str or unicode
:param routing_key: The routing key to bind on
:type routing_key: str or unicode
:param bool nowait: Do not wait for a Queue.BindOk
:param dict arguments: Custom key/value pair arguments for the binding
"""
self._validate_channel_and_callback(callback)
replies = [spec.Queue.BindOk] if nowait is False else []
if routing_key is None:
routing_key = queue
return self._rpc(spec.Queue.Bind(0, queue, exchange, routing_key,
nowait, arguments or dict()), callback,
replies)
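    # Illustrative usage sketch: bind a queue to an exchange with an explicit
    # routing key. All names are hypothetical examples.
    #
    #     channel.queue_bind(callback=on_bindok, queue='task_queue',
    #                        exchange='logs', routing_key='info')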
def queue_declare(self, callback,
queue='',
passive=False,
durable=False,
exclusive=False,
auto_delete=False,
nowait=False,
arguments=None):
"""Declare queue, create if needed. This method creates or checks a
queue. When creating a new queue the client can specify various
properties that control the durability of the queue and its contents,
and the level of sharing for the queue.
        Leave the queue name empty for an auto-named queue in RabbitMQ
:param method callback: The method to call on Queue.DeclareOk
:param queue: The queue name
:type queue: str or unicode
:param bool passive: Only check to see if the queue exists
:param bool durable: Survive reboots of the broker
:param bool exclusive: Only allow access by the current connection
:param bool auto_delete: Delete after consumer cancels or disconnects
:param bool nowait: Do not wait for a Queue.DeclareOk
:param dict arguments: Custom key/value arguments for the queue
"""
if queue:
condition = (spec.Queue.DeclareOk,
{'queue': queue})
else:
condition = spec.Queue.DeclareOk
replies = [condition] if nowait is False else []
self._validate_channel_and_callback(callback)
return self._rpc(spec.Queue.Declare(0, queue, passive, durable,
exclusive, auto_delete, nowait,
arguments or dict()), callback,
replies)
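    # Illustrative usage sketch: declare a broker-named, exclusive, auto-deleted
    # queue (empty queue name), as described in the docstring above;
    # `on_declareok` is a hypothetical callback receiving the Queue.DeclareOk frame.
    #
    #     channel.queue_declare(callback=on_declareok, queue='',
    #                           exclusive=True, auto_delete=True)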
def queue_delete(self,
callback=None,
queue='',
if_unused=False,
if_empty=False,
nowait=False):
"""Delete a queue from the broker.
:param method callback: The method to call on Queue.DeleteOk
:param queue: The queue to delete
:type queue: str or unicode
:param bool if_unused: only delete if it's unused
:param bool if_empty: only delete if the queue is empty
:param bool nowait: Do not wait for a Queue.DeleteOk
"""
replies = [spec.Queue.DeleteOk] if nowait is False else []
self._validate_channel_and_callback(callback)
return self._rpc(spec.Queue.Delete(0, queue, if_unused, if_empty,
nowait), callback, replies)
def queue_purge(self, callback=None, queue='', nowait=False):
"""Purge all of the messages from the specified queue
:param method callback: The method to call on Queue.PurgeOk
:param queue: The queue to purge
:type queue: str or unicode
:param bool nowait: Do not expect a Queue.PurgeOk response
"""
replies = [spec.Queue.PurgeOk] if nowait is False else []
self._validate_channel_and_callback(callback)
return self._rpc(spec.Queue.Purge(0, queue, nowait), callback, replies)
def queue_unbind(self,
callback=None,
queue='',
exchange=None,
routing_key=None,
arguments=None):
"""Unbind a queue from an exchange.
:param method callback: The method to call on Queue.UnbindOk
:param queue: The queue to unbind from the exchange
:type queue: str or unicode
:param exchange: The source exchange to bind from
:type exchange: str or unicode
:param routing_key: The routing key to unbind
:type routing_key: str or unicode
:param dict arguments: Custom key/value pair arguments for the binding
"""
self._validate_channel_and_callback(callback)
if routing_key is None:
routing_key = queue
return self._rpc(spec.Queue.Unbind(0, queue, exchange, routing_key,
arguments or dict()), callback,
[spec.Queue.UnbindOk])
def tx_commit(self, callback=None):
"""Commit a transaction
:param method callback: The callback for delivery confirmations
"""
self._validate_channel_and_callback(callback)
return self._rpc(spec.Tx.Commit(), callback, [spec.Tx.CommitOk])
def tx_rollback(self, callback=None):
"""Rollback a transaction.
:param method callback: The callback for delivery confirmations
"""
self._validate_channel_and_callback(callback)
return self._rpc(spec.Tx.Rollback(), callback, [spec.Tx.RollbackOk])
def tx_select(self, callback=None):
"""Select standard transaction mode. This method sets the channel to use
standard transactions. The client must use this method at least once on
a channel before using the Commit or Rollback methods.
:param method callback: The callback for delivery confirmations
"""
self._validate_channel_and_callback(callback)
return self._rpc(spec.Tx.Select(), callback, [spec.Tx.SelectOk])
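    # Illustrative usage sketch of the transaction flow described above: select
    # transaction mode once, then commit (or roll back) the published work. The
    # callback names are hypothetical examples.
    #
    #     channel.tx_select(callback=on_selectok)
    #     # ... basic_publish one or more messages ...
    #     channel.tx_commit(callback=on_commitok)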
# Internal methods
def _add_callbacks(self):
"""Callbacks that add the required behavior for a channel when
connecting and connected to a server.
"""
# Add a callback for Basic.GetEmpty
self.callbacks.add(self.channel_number, spec.Basic.GetEmpty,
self._on_getempty, False)
# Add a callback for Basic.Cancel
self.callbacks.add(self.channel_number, spec.Basic.Cancel,
self._on_cancel, False)
# Deprecated in newer versions of RabbitMQ but still register for it
self.callbacks.add(self.channel_number, spec.Channel.Flow,
self._on_flow, False)
# Add a callback for when the server closes our channel
self.callbacks.add(self.channel_number, spec.Channel.Close,
self._on_close, True)
def _add_on_cleanup_callback(self, callback):
"""For internal use only (e.g., Connection needs to remove closed
channels from its channel container). Pass a callback function that will
        be called when the channel is being cleaned up, after all channel-close
        callbacks have run.
:param method callback: The method to call on callback with the
signature: callback(channel)
"""
self.callbacks.add(self.channel_number, self._ON_CHANNEL_CLEANUP_CB_KEY,
callback, one_shot=True, only_caller=self)
def _add_pending_msg(self, consumer_tag, method_frame, header_frame, body):
"""Add the received message to the pending message stack.
:param str consumer_tag: The consumer tag for the message
:param pika.frame.Method method_frame: The received method frame
:param pika.frame.Header header_frame: The received header frame
:param body: The message body
:type body: str or unicode
"""
self._pending[consumer_tag].append((self, method_frame.method,
header_frame.properties, body))
def _cleanup(self):
"""Remove all consumers and any callbacks for the channel."""
self.callbacks.process(self.channel_number,
self._ON_CHANNEL_CLEANUP_CB_KEY, self,
self)
self._consumers = dict()
self.callbacks.cleanup(str(self.channel_number))
self._cookie = None
def _cleanup_consumer_ref(self, consumer_tag):
"""Remove any references to the consumer tag in internal structures
for consumer state.
:param str consumer_tag: The consumer tag to cleanup
"""
if consumer_tag in self._consumers_with_noack:
self._consumers_with_noack.remove(consumer_tag)
if consumer_tag in self._consumers:
del self._consumers[consumer_tag]
if consumer_tag in self._pending:
del self._pending[consumer_tag]
self._cancelled.discard(consumer_tag)
def _get_cookie(self):
"""Used by the wrapper implementation (e.g., `BlockingChannel`) to
retrieve the cookie that it set via `_set_cookie`
:returns: opaque cookie value that was set via `_set_cookie`
"""
return self._cookie
def _get_pending_msg(self, consumer_tag):
"""Get a pending message for the consumer tag from the stack.
:param str consumer_tag: The consumer tag to get a message from
:rtype: tuple(pika.frame.Header, pika.frame.Method, str|unicode)
"""
return self._pending[consumer_tag].pop(0)
def _handle_content_frame(self, frame_value):
"""This is invoked by the connection when frames that are not registered
with the CallbackManager have been found. This should only be the case
when the frames are related to content delivery.
The frame_dispatcher will be invoked which will return the fully formed
message in three parts when all of the body frames have been received.
:param pika.amqp_object.Frame frame_value: The frame to deliver
"""
try:
response = self.frame_dispatcher.process(frame_value)
except exceptions.UnexpectedFrameError:
return self._unexpected_frame(frame_value)
if response:
if isinstance(response[0].method, spec.Basic.Deliver):
self._on_deliver(*response)
elif isinstance(response[0].method, spec.Basic.GetOk):
self._on_getok(*response)
elif isinstance(response[0].method, spec.Basic.Return):
self._on_return(*response)
def _has_content(self, method_frame):
"""Return a bool if it's a content method as defined by the spec
:param pika.amqp_object.Method method_frame: The method frame received
"""
return spec.has_content(method_frame.INDEX)
def _on_cancel(self, method_frame):
"""When the broker cancels a consumer, delete it from our internal
dictionary.
:param pika.frame.Method method_frame: The method frame received
"""
if method_frame.method.consumer_tag in self._cancelled:
# User-initiated cancel is waiting for Cancel-ok
return
self._cleanup_consumer_ref(method_frame.method.consumer_tag)
def _on_cancelok(self, method_frame):
"""Called in response to a frame from the Broker when the
client sends Basic.Cancel
:param pika.frame.Method method_frame: The method frame received
"""
self._cleanup_consumer_ref(method_frame.method.consumer_tag)
def _on_close(self, method_frame):
"""Handle the case where our channel has been closed for us
:param pika.frame.Method method_frame: The close frame
"""
LOGGER.debug('%s', method_frame)
LOGGER.warning('Received remote Channel.Close (%s): %s',
method_frame.method.reply_code,
method_frame.method.reply_text)
if self.connection.is_open:
self._send_method(spec.Channel.CloseOk())
self._set_state(self.CLOSED)
self.callbacks.process(self.channel_number, '_on_channel_close', self,
self, method_frame.method.reply_code,
method_frame.method.reply_text)
self._cleanup()
def _on_closeok(self, method_frame):
"""Invoked when RabbitMQ replies to a Channel.Close method
:param pika.frame.Method method_frame: The CloseOk frame
"""
self._set_state(self.CLOSED)
self.callbacks.process(self.channel_number, '_on_channel_close', self,
self, 0, '')
self._cleanup()
def _on_deliver(self, method_frame, header_frame, body):
"""Cope with reentrancy. If a particular consumer is still active when
another delivery appears for it, queue the deliveries up until it
finally exits.
:param pika.frame.Method method_frame: The method frame received
:param pika.frame.Header header_frame: The header frame received
:param body: The body received
:type body: str or unicode
"""
consumer_tag = method_frame.method.consumer_tag
if consumer_tag in self._cancelled:
if self.is_open and consumer_tag not in self._consumers_with_noack:
self.basic_reject(method_frame.method.delivery_tag)
return
if consumer_tag not in self._consumers:
return self._add_pending_msg(consumer_tag, method_frame,
header_frame, body)
while self._pending[consumer_tag]:
self._consumers[consumer_tag](*self._get_pending_msg(consumer_tag))
self._consumers[consumer_tag](self, method_frame.method,
header_frame.properties, body)
def _on_eventok(self, method_frame):
"""Generic events that returned ok that may have internal callbacks.
We keep a list of what we've yet to implement so that we don't silently
drain events that we don't support.
:param pika.frame.Method method_frame: The method frame received
"""
LOGGER.debug('Discarding frame %r', method_frame)
def _on_flow(self, method_frame_unused):
"""Called if the server sends a Channel.Flow frame.
:param pika.frame.Method method_frame_unused: The Channel.Flow frame
"""
if self._has_on_flow_callback is False:
LOGGER.warning('Channel.Flow received from server')
def _on_flowok(self, method_frame):
"""Called in response to us asking the server to toggle on Channel.Flow
:param pika.frame.Method method_frame: The method frame received
"""
self.flow_active = method_frame.method.active
if self._on_flowok_callback:
self._on_flowok_callback(method_frame.method.active)
self._on_flowok_callback = None
else:
LOGGER.warning('Channel.FlowOk received with no active callbacks')
def _on_getempty(self, method_frame):
"""When we receive an empty reply do nothing but log it
:param pika.frame.Method method_frame: The method frame received
"""
LOGGER.debug('Received Basic.GetEmpty: %r', method_frame)
def _on_getok(self, method_frame, header_frame, body):
"""Called in reply to a Basic.Get when there is a message.
:param pika.frame.Method method_frame: The method frame received
:param pika.frame.Header header_frame: The header frame received
:param body: The body received
:type body: str or unicode
"""
if self._on_getok_callback is not None:
callback = self._on_getok_callback
self._on_getok_callback = None
callback(self, method_frame.method, header_frame.properties, body)
else:
LOGGER.error('Basic.GetOk received with no active callback')
def _on_openok(self, frame_unused):
"""Called by our callback handler when we receive a Channel.OpenOk and
subsequently calls our _on_openok_callback which was passed into the
Channel constructor. The reason we do this is because we want to make
sure that the on_open_callback parameter passed into the Channel
constructor is not the first callback we make.
:param pika.frame.Method frame_unused: Unused Channel.OpenOk frame
"""
self._set_state(self.OPEN)
if self._on_openok_callback is not None:
self._on_openok_callback(self)
def _on_return(self, method_frame, header_frame, body):
"""Called if the server sends a Basic.Return frame.
:param pika.frame.Method method_frame: The Basic.Return frame
:param pika.frame.Header header_frame: The content header frame
:param body: The message body
:type body: str or unicode
"""
if not self.callbacks.process(self.channel_number, '_on_return', self,
self,
method_frame.method,
header_frame.properties,
body):
LOGGER.warning('Basic.Return received from server (%r, %r)',
method_frame.method, header_frame.properties)
def _on_selectok(self, method_frame):
"""Called when the broker sends a Confirm.SelectOk frame
:param pika.frame.Method method_frame: The method frame received
"""
LOGGER.debug("Confirm.SelectOk Received: %r", method_frame)
def _on_synchronous_complete(self, method_frame_unused):
"""This is called when a synchronous command is completed. It will undo
the blocking state and send all the frames that stacked up while we
were in the blocking state.
:param pika.frame.Method method_frame_unused: The method frame received
"""
LOGGER.debug('%i blocked frames', len(self._blocked))
self._blocking = None
while len(self._blocked) > 0 and self._blocking is None:
self._rpc(*self._blocked.popleft())
def _rpc(self, method_frame, callback=None, acceptable_replies=None):
"""Shortcut wrapper to the Connection's rpc command using its callback
stack, passing in our channel number.
:param pika.amqp_object.Method method_frame: The method frame to call
:param method callback: The callback for the RPC response
:param list acceptable_replies: The replies this RPC call expects
"""
# Make sure the channel is open
if self.is_closed:
            raise exceptions.ChannelClosed()
# If the channel is blocking, add subsequent commands to our stack
if self._blocking:
return self._blocked.append([method_frame, callback,
acceptable_replies])
# Validate we got None or a list of acceptable_replies
if acceptable_replies and not isinstance(acceptable_replies, list):
raise TypeError("acceptable_replies should be list or None")
# Validate the callback is callable
if callback and not is_callable(callback):
raise TypeError("callback should be None, a function or method.")
# Block until a response frame is received for synchronous frames
if method_frame.synchronous:
self._blocking = method_frame.NAME
# If acceptable replies are set, add callbacks
if acceptable_replies:
for reply in acceptable_replies or list():
if isinstance(reply, tuple):
reply, arguments = reply
else:
arguments = None
LOGGER.debug('Adding in on_synchronous_complete callback')
self.callbacks.add(self.channel_number, reply,
self._on_synchronous_complete,
arguments=arguments)
if callback:
LOGGER.debug('Adding passed in callback')
self.callbacks.add(self.channel_number, reply, callback,
arguments=arguments)
self._send_method(method_frame)
def _send_method(self, method_frame, content=None):
"""Shortcut wrapper to send a method through our connection, passing in
the channel number
        :param pika.amqp_object.Method method_frame: The method frame to send
        :param tuple content: If set, the frame carries content and is a tuple
                              of (properties, body).
"""
self.connection._send_method(self.channel_number, method_frame, content)
def _set_cookie(self, cookie):
"""Used by wrapper layer (e.g., `BlockingConnection`) to link the
channel implementation back to the proxy. See `_get_cookie`.
:param cookie: an opaque value; typically a proxy channel implementation
instance (e.g., `BlockingChannel` instance)
"""
self._cookie = cookie
def _set_state(self, connection_state):
"""Set the channel connection state to the specified state value.
:param int connection_state: The connection_state value
"""
self._state = connection_state
def _unexpected_frame(self, frame_value):
"""Invoked when a frame is received that is not setup to be processed.
:param pika.frame.Frame frame_value: The frame received
"""
LOGGER.warning('Unexpected frame: %r', frame_value)
def _validate_channel_and_callback(self, callback):
if not self.is_open:
raise exceptions.ChannelClosed()
if callback is not None and not is_callable(callback):
raise ValueError('callback must be a function or method')
class ContentFrameDispatcher(object):
"""Handle content related frames, building a message and return the message
back in three parts upon receipt.
"""
def __init__(self):
"""Create a new instance of the Dispatcher passing in the callback
manager.
"""
self._method_frame = None
self._header_frame = None
self._seen_so_far = 0
self._body_fragments = list()
def process(self, frame_value):
"""Invoked by the Channel object when passed frames that are not
setup in the rpc process and that don't have explicit reply types
defined. This includes Basic.Publish, Basic.GetOk and Basic.Return
:param Method|Header|Body frame_value: The frame to process
"""
if (isinstance(frame_value, frame.Method) and spec.has_content(frame_value.method.INDEX)):
self._method_frame = frame_value
elif isinstance(frame_value, frame.Header):
self._header_frame = frame_value
if frame_value.body_size == 0:
return self._finish()
elif isinstance(frame_value, frame.Body):
return self._handle_body_frame(frame_value)
else:
raise exceptions.UnexpectedFrameError(frame_value)
def _finish(self):
"""Invoked when all of the message has been received
:rtype: tuple(pika.frame.Method, pika.frame.Header, str)
"""
content = (self._method_frame, self._header_frame,
b''.join(self._body_fragments))
self._reset()
return content
def _handle_body_frame(self, body_frame):
"""Receive body frames and append them to the stack. When the body size
matches, call the finish method.
:param Body body_frame: The body frame
:raises: pika.exceptions.BodyTooLongError
:rtype: tuple(pika.frame.Method, pika.frame.Header, str)|None
"""
self._seen_so_far += len(body_frame.fragment)
self._body_fragments.append(body_frame.fragment)
if self._seen_so_far == self._header_frame.body_size:
return self._finish()
elif self._seen_so_far > self._header_frame.body_size:
raise exceptions.BodyTooLongError(self._seen_so_far,
self._header_frame.body_size)
return None
def _reset(self):
"""Reset the values for processing frames"""
self._method_frame = None
self._header_frame = None
self._seen_so_far = 0
self._body_fragments = list()
|
|
# Copyright 2017-present Kensho Technologies, LLC.
"""Functions that ensure the IR generated by the front-end satisfies all invariants."""
from typing import Dict, List
from funcy import pairwise
from .blocks import (
Backtrack,
CoerceType,
ConstructResult,
Filter,
Fold,
MarkLocation,
OutputSource,
QueryRoot,
Recurse,
Traverse,
Unfold,
)
from .compiler_entities import BasicBlock
from .helpers import FoldScopeLocation
from .ir_lowering_common.common import extract_folds_from_ir_blocks
from .metadata import QueryMetadataTable
def self_consistency_check_ir_blocks_from_frontend(
ir_blocks: List[BasicBlock], query_metadata_table: QueryMetadataTable
) -> None:
"""Assert that IR blocks originating from the frontend do not have nonsensical structure.
Args:
ir_blocks: list of BasicBlocks representing the IR to self-consistency check.
query_metadata_table: QueryMetadataTable object that captures information about the query.
Raises:
AssertionError, if the IR has unexpected structure. If the IR produced by the front-end
cannot be successfully and correctly used to generate MATCH or Gremlin due to a bug,
this is the method that should catch the problem.
"""
if not ir_blocks:
raise AssertionError("Received no ir_blocks: {}".format(ir_blocks))
_assert_fold_scope_locations_are_unique(ir_blocks)
_assert_no_nested_folds(ir_blocks)
_assert_query_root_block(ir_blocks)
_assert_output_source_follower_blocks(ir_blocks)
_assert_block_pairwise_constraints(ir_blocks)
_assert_mark_location_preceding_optional_traverse(ir_blocks)
_assert_every_location_is_marked(ir_blocks)
_assert_coerce_type_outside_of_fold(ir_blocks)
_assert_all_marked_locations_are_registered(ir_blocks, query_metadata_table)
_assert_registered_locations_parent_locations(query_metadata_table)
def _assert_registered_locations_parent_locations(
query_metadata_table: QueryMetadataTable,
) -> None:
"""Assert that all registered locations' parent locations are also registered."""
for location, location_info in query_metadata_table.registered_locations:
if (
location != query_metadata_table.root_location
and not query_metadata_table.root_location.is_revisited_at(location)
):
# If the location is not the root location and is not a revisit of the root,
# then it must have a parent location.
if location_info.parent_location is None:
raise AssertionError(
"Found a location that is not the root location of the query "
"or a revisit of the root, but does not have a parent: "
"{} {}".format(location, location_info)
)
if location_info.parent_location is not None:
# Make sure the parent_location is also registered.
# If the location is not registered, the following line will raise an error.
query_metadata_table.get_location_info(location_info.parent_location)
def _assert_all_marked_locations_are_registered(
ir_blocks: List[BasicBlock], query_metadata_table: QueryMetadataTable
) -> None:
"""Assert that all locations in MarkLocation blocks have registered and valid metadata."""
# Grab all the registered locations, then make sure that:
# - Any location that appears in a MarkLocation block is also registered.
# - There are no registered locations that do not appear in a MarkLocation block.
registered_locations = {location for location, _ in query_metadata_table.registered_locations}
ir_encountered_locations = {
block.location for block in ir_blocks if isinstance(block, MarkLocation)
}
unregistered_locations = ir_encountered_locations - registered_locations
unencountered_locations = registered_locations - ir_encountered_locations
if unregistered_locations:
raise AssertionError(
"IR blocks unexpectedly contain locations not registered in the "
"QueryMetadataTable: {}".format(unregistered_locations)
)
if unencountered_locations:
raise AssertionError(
"QueryMetadataTable unexpectedly contains registered locations that "
"never appear in the IR blocks: {}".format(unencountered_locations)
)
def _assert_fold_scope_locations_are_unique(ir_blocks: List[BasicBlock]) -> None:
"""Assert that every FoldScopeLocation that exists on a Fold block is unique."""
observed_locations: Dict[FoldScopeLocation, Fold] = dict()
for block in ir_blocks:
if isinstance(block, Fold):
alternate = observed_locations.get(block.fold_scope_location, None)
if alternate is not None:
raise AssertionError(
"Found two Fold blocks with identical FoldScopeLocations: "
"{} {} {}".format(alternate, block, ir_blocks)
)
observed_locations[block.fold_scope_location] = block
def _assert_no_nested_folds(ir_blocks: List[BasicBlock]) -> None:
"""Assert that there are no nested Fold contexts, and that every Fold has a matching Unfold."""
fold_seen = False
for block in ir_blocks:
if isinstance(block, Fold):
if fold_seen:
raise AssertionError("Found a nested Fold contexts: {}".format(ir_blocks))
else:
fold_seen = True
elif isinstance(block, Unfold):
if not fold_seen:
raise AssertionError(
"Found an Unfold block without a matching Fold: {}".format(ir_blocks)
)
else:
fold_seen = False
def _assert_query_root_block(ir_blocks: List[BasicBlock]) -> None:
"""Assert that QueryRoot is always the first block, and only the first block."""
if not isinstance(ir_blocks[0], QueryRoot):
raise AssertionError("The first block was not QueryRoot: {}".format(ir_blocks))
for block in ir_blocks[1:]:
if isinstance(block, QueryRoot):
raise AssertionError("Found QueryRoot after the first block: {}".format(ir_blocks))
def _self_consistency_check_construct_result_block(ir_blocks: List[BasicBlock]) -> None:
"""Assert that ConstructResult is always the last block, and only the last block."""
if not isinstance(ir_blocks[-1], ConstructResult):
raise AssertionError("The last block was not ConstructResult: {}".format(ir_blocks))
for block in ir_blocks[:-1]:
if isinstance(block, ConstructResult):
raise AssertionError(
"Found ConstructResult before the last block: {}".format(ir_blocks)
)
def _assert_output_source_follower_blocks(ir_blocks: List[BasicBlock]) -> None:
"""Ensure there are no Traverse / Backtrack / Recurse blocks after an OutputSource block."""
seen_output_source = False
for block in ir_blocks:
if isinstance(block, OutputSource):
seen_output_source = True
elif seen_output_source:
if isinstance(block, (Backtrack, Traverse, Recurse)):
raise AssertionError(
"Found Backtrack / Traverse / Recurse "
"after OutputSource block: "
"{}".format(ir_blocks)
)
def _assert_block_pairwise_constraints(ir_blocks: List[BasicBlock]) -> None:
"""Assert that adjacent blocks obey all invariants."""
for first_block, second_block in pairwise(ir_blocks):
# Always Filter before MarkLocation, never after.
if isinstance(first_block, MarkLocation) and isinstance(second_block, Filter):
raise AssertionError("Found Filter after MarkLocation block: {}".format(ir_blocks))
# There's no point in marking the same location twice in a row.
if isinstance(first_block, MarkLocation) and isinstance(second_block, MarkLocation):
raise AssertionError("Found consecutive MarkLocation blocks: {}".format(ir_blocks))
# Traverse blocks with optional=True are immediately followed
# by a MarkLocation, CoerceType or Filter block.
if isinstance(first_block, Traverse) and first_block.optional:
if not isinstance(second_block, (MarkLocation, CoerceType, Filter)):
raise AssertionError(
"Expected MarkLocation, CoerceType or Filter after Traverse "
"with optional=True. Found: {}".format(ir_blocks)
)
# Backtrack blocks with optional=True are immediately followed by a MarkLocation block.
if isinstance(first_block, Backtrack) and first_block.optional:
if not isinstance(second_block, MarkLocation):
raise AssertionError(
"Expected MarkLocation after Backtrack with optional=True, "
"but none was found: {}".format(ir_blocks)
)
# Recurse blocks are immediately preceded by a MarkLocation or Backtrack block.
if isinstance(second_block, Recurse):
if not (isinstance(first_block, MarkLocation) or isinstance(first_block, Backtrack)):
raise AssertionError(
"Expected MarkLocation or Backtrack before Recurse, but none "
"was found: {}".format(ir_blocks)
)
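# Note on the pairwise() helper used above (from funcy): it yields neighboring
# pairs, e.g. pairwise([a, b, c]) produces (a, b), (b, c), which is what lets
# these adjacency invariants be checked in a single pass over ir_blocks.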
def _assert_mark_location_preceding_optional_traverse(
ir_blocks: List[BasicBlock],
) -> None:
"""Assert that optional Traverse blocks are preceded by a MarkLocation."""
# Once all fold blocks are removed, each optional Traverse must have
# a MarkLocation block immediately before it.
_, new_ir_blocks = extract_folds_from_ir_blocks(ir_blocks)
for first_block, second_block in pairwise(new_ir_blocks):
# Traverse blocks with optional=True are immediately preceded by a MarkLocation block.
if isinstance(second_block, Traverse) and second_block.optional:
if not isinstance(first_block, MarkLocation):
raise AssertionError(
"Expected MarkLocation before Traverse with optional=True, "
"but none was found: {}".format(ir_blocks)
)
def _assert_every_location_is_marked(ir_blocks: List[BasicBlock]) -> None:
"""Ensure that every new location is marked with a MarkLocation block."""
# Exactly one MarkLocation block is found between any block that starts an interval of blocks
# that all affect the same query position, and the first subsequent block that affects a
# different position in the query. Such intervals include the following examples:
# - from Fold to Unfold
# - from QueryRoot to Traverse/Recurse
# - from one Traverse to the next Traverse
# - from Traverse to Backtrack
found_start_block = False
mark_location_blocks_count = 0
start_interval_types = (QueryRoot, Traverse, Recurse, Fold)
end_interval_types = (Backtrack, ConstructResult, Recurse, Traverse, Unfold)
for block in ir_blocks:
# Terminate started intervals before opening new ones.
if isinstance(block, end_interval_types) and found_start_block:
found_start_block = False
if mark_location_blocks_count != 1:
raise AssertionError(
"Expected 1 MarkLocation block between traversals, found: "
"{} {}".format(mark_location_blocks_count, ir_blocks)
)
# Now consider opening new intervals or processing MarkLocation blocks.
if isinstance(block, MarkLocation):
mark_location_blocks_count += 1
elif isinstance(block, start_interval_types):
found_start_block = True
mark_location_blocks_count = 0
def _assert_coerce_type_outside_of_fold(ir_blocks: List[BasicBlock]) -> None:
"""Ensure that CoerceType not in a @fold are followed by a MarkLocation or Filter block."""
is_in_fold = False
for first_block, second_block in pairwise(ir_blocks):
if isinstance(first_block, Fold):
is_in_fold = True
if not is_in_fold and isinstance(first_block, CoerceType):
if not isinstance(second_block, (MarkLocation, Filter)):
raise AssertionError(
"Expected MarkLocation or Filter after CoerceType, "
"but none was found: {}".format(ir_blocks)
)
if isinstance(second_block, Unfold):
is_in_fold = False
|
|
# -*- coding: utf-8 -*-
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.serialization import jsonutils
import yaml
from nailgun import consts
from nailgun import objects
from nailgun.orchestrator.deployment_graph import DeploymentGraph
from nailgun.test.base import BaseIntegrationTest
from nailgun.utils import reverse
class BaseGraphTasksTests(BaseIntegrationTest):
def setUp(self):
super(BaseGraphTasksTests, self).setUp()
self.env.create()
self.cluster = self.env.clusters[0]
def get_correct_tasks(self):
yaml_tasks = """
- id: primary-controller
type: group
role: [primary-controller]
required_for: [deploy]
parameters:
strategy:
type: one_by_one
- id: controller
type: group
role: [primary-controller]
requires: [primary-controller]
required_for: [deploy]
parameters:
strategy:
type: parallel
amount: 2
"""
return yaml.load(yaml_tasks)
def get_corrupted_tasks(self):
yaml_tasks = """
- id: primary-controller
required_for: [deploy]
parameters:
strategy:
type: one_by_one
"""
return yaml.load(yaml_tasks)
def get_tasks_with_cycles(self):
yaml_tasks = """
- id: primary-controller
type: role
requires: [controller]
- id: controller
type: role
requires: [primary-controller]
"""
return yaml.load(yaml_tasks)
class TestReleaseGraphHandler(BaseGraphTasksTests):
def test_get_deployment_tasks(self):
resp = self.app.get(
reverse('ReleaseDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.release_id}),
headers=self.default_headers,
)
release_tasks = objects.Release.get_deployment_tasks(
self.cluster.release)
self.assertEqual(resp.json, release_tasks)
def test_upload_deployment_tasks(self):
tasks = self.get_correct_tasks()
resp = self.app.put(
reverse('ReleaseDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.release_id}),
params=jsonutils.dumps(tasks),
headers=self.default_headers,
)
release_tasks = objects.Release.get_deployment_tasks(
self.cluster.release)
self.assertEqual(release_tasks, resp.json)
def test_upload_tasks_without_type(self):
tasks = self.get_corrupted_tasks()
resp = self.app.put(
reverse('ReleaseDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.release_id}),
params=jsonutils.dumps(tasks),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(resp.status_code, 400)
def test_upload_tasks_with_cycles(self):
tasks = self.get_tasks_with_cycles()
resp = self.app.put(
reverse('ReleaseDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.release_id}),
params=jsonutils.dumps(tasks),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(resp.status_code, 400)
def test_post_tasks(self):
resp = self.app.post(
reverse('ReleaseDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.release_id}),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(resp.status_code, 405)
def test_delete_tasks(self):
resp = self.app.delete(
reverse('ReleaseDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.release_id}),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(resp.status_code, 405)
class TestClusterGraphHandler(BaseGraphTasksTests):
def test_get_deployment_tasks(self):
resp = self.app.get(
reverse('ClusterDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.id}),
headers=self.default_headers,
)
cluster_tasks = objects.Cluster.get_deployment_tasks(self.cluster)
self.assertEqual(resp.json, cluster_tasks)
def test_deployment_tasks_equals_to_release(self):
resp = self.app.get(
reverse('ClusterDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.id}),
headers=self.default_headers,
)
release_tasks = objects.Release.get_deployment_tasks(
self.cluster.release)
self.assertEqual(resp.json, release_tasks)
def test_upload_deployment_tasks(self):
tasks = self.get_correct_tasks()
resp = self.app.put(
reverse('ClusterDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.id}),
params=jsonutils.dumps(tasks),
headers=self.default_headers,
)
cluster_tasks = objects.Cluster.get_deployment_tasks(self.cluster)
self.assertEqual(cluster_tasks, resp.json)
def test_upload_tasks_without_type(self):
tasks = self.get_corrupted_tasks()
resp = self.app.put(
reverse('ClusterDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.id}),
params=jsonutils.dumps(tasks),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(resp.status_code, 400)
def test_upload_tasks_with_cycles(self):
tasks = self.get_tasks_with_cycles()
resp = self.app.put(
reverse('ClusterDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.id}),
params=jsonutils.dumps(tasks),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(resp.status_code, 400)
def test_post_tasks(self):
resp = self.app.post(
reverse('ClusterDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.id}),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(resp.status_code, 405)
def test_delete_tasks(self):
resp = self.app.delete(
reverse('ClusterDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.id}),
headers=self.default_headers,
expect_errors=True
)
self.assertEqual(resp.status_code, 405)
class TestStartEndTaskPassedCorrectly(BaseGraphTasksTests):
def assert_passed_correctly(self, url, **kwargs):
with mock.patch.object(DeploymentGraph,
'find_subgraph') as mfind_subgraph:
resp = self.app.get(
url,
params=kwargs,
headers=self.default_headers,
)
self.assertEqual(resp.status_code, 200)
defaults = {'start': None, 'end': None}
defaults.update(kwargs)
mfind_subgraph.assert_called_with(**defaults)
def test_end_passed_correctly_for_cluster(self):
self.assert_passed_correctly(
reverse('ClusterDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.id}), end='task')
def test_end_passed_correctly_for_release(self):
self.assert_passed_correctly(
reverse('ReleaseDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.release.id}), end='task')
def test_start_passed_correctly_release(self):
self.assert_passed_correctly(
reverse('ReleaseDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.release.id}), start='task')
def test_start_passed_correctly_cluster(self):
self.assert_passed_correctly(
reverse('ClusterDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.id}), end='task')
def test_start_end_passed_correctly_cluster(self):
self.assert_passed_correctly(
reverse('ClusterDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.id}),
end='task', start='another_task')
def test_start_end_passed_correctly_release(self):
self.assert_passed_correctly(
reverse('ReleaseDeploymentTasksHandler',
kwargs={'obj_id': self.cluster.release.id}),
end='task', start='another_task')
@mock.patch.object(objects.Cluster, 'get_deployment_tasks')
class TestTaskDeployGraph(BaseGraphTasksTests):
content_type = 'text/vnd.graphviz'
def setUp(self):
super(TestTaskDeployGraph, self).setUp()
self.env.create()
self.cluster = self.env.clusters[0]
self.tasks = [
{'id': 'pre_deployment', 'type': 'stage'},
{'id': 'deploy', 'type': 'stage'},
{'id': 'post_deployment', 'type': 'stage'},
{'id': 'pre-A', 'required_for': ['pre_deployment'],
'type': 'puppet'},
{'id': 'pre-B', 'required_for': ['pre_deployment'],
'type': 'puppet', 'requires': ['pre-A']},
{'id': 'pre-C', 'required_for': ['pre_deployment'],
'type': 'puppet', 'requires': ['pre-A', 'pre-D']},
{'id': 'pre-D', 'required_for': ['pre_deployment'],
'type': 'puppet'},
]
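        # For reference, the dependency edges implied by the tasks above (and
        # exercised by the assertions below) are: pre-A -> pre-B, pre-A -> pre-C,
        # pre-D -> pre-C, and each pre-* task -> pre_deployment.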
def test_get_all_tasks(self, m_get_tasks):
m_get_tasks.return_value = self.tasks
resp = self.app.get(
reverse('TaskDeployGraph', kwargs={'cluster_id': self.cluster.id})
)
self.assertEqual(resp.content_type, self.content_type)
self.assertIn('"pre-A" -> pre_deployment', resp.body)
self.assertIn('"pre-A" -> "pre-B"', resp.body)
self.assertIn('"pre-A" -> "pre-C"', resp.body)
def test_use_certain_tasks(self, m_get_tasks):
m_get_tasks.return_value = self.tasks
resp = self.app.get(
reverse('TaskDeployGraph', kwargs={
'cluster_id': self.cluster.id,
}) + '?tasks=pre-A,pre-C',
)
self.assertEqual(resp.content_type, self.content_type)
self.assertIn('"pre-A" -> "pre-B"', resp.body)
self.assertIn('"pre-A" -> "pre-C"', resp.body)
def test_error_raised_on_non_existent_tasks(self, m_get_tasks):
m_get_tasks.return_value = self.tasks
resp = self.app.get(
reverse('TaskDeployGraph', kwargs={
'cluster_id': self.cluster.id,
}) + '?tasks=nonexistent',
expect_errors=True,
)
self.assertEqual(resp.status_code, 400)
self.assertIn('Tasks nonexistent are not present in deployment graph',
resp.body)
def test_use_single_task(self, m_get_tasks):
m_get_tasks.return_value = self.tasks
resp = self.app.get(
reverse('TaskDeployGraph', kwargs={
'cluster_id': self.cluster.id,
}) + '?parents_for=pre-B',
)
self.assertEqual(resp.content_type, self.content_type)
self.assertIn('"pre-A" -> "pre-B"', resp.body)
self.assertNotIn('pre_deployment', resp.body)
self.assertNotIn('pre-C', resp.body)
    def test_error_raised_on_non_existent_single_task(self, m_get_tasks):
m_get_tasks.return_value = self.tasks
resp = self.app.get(
reverse('TaskDeployGraph', kwargs={
'cluster_id': self.cluster.id,
}) + '?parents_for=nonexistent',
expect_errors=True,
)
self.assertEqual(resp.status_code, 400)
self.assertIn('Task nonexistent is not present in graph', resp.body)
def test_single_task_from_tasks_subset(self, m_get_tasks):
"""If only pre-B and pre-A tasks will be executed,
what requirements pre-C will have?
"""
m_get_tasks.return_value = self.tasks
resp = self.app.get(
reverse('TaskDeployGraph', kwargs={
'cluster_id': self.cluster.id,
}) + '?tasks=pre-B,pre-A&parents_for=pre-C',
)
self.assertEqual(resp.content_type, self.content_type)
self.assertIn('"pre-A" -> "pre-C"', resp.body)
self.assertIn('"pre-D" -> "pre-C"', resp.body)
self.assertNotIn('pre_deployment', resp.body)
self.assertNotIn('pre-B', resp.body)
def test_remove_tasks_by_type(self, m_get_tasks):
tasks = []
for task_type in consts.INTERNAL_TASKS:
tasks.append({
'id': 'task-{0}'.format(task_type),
'type': task_type,
})
m_get_tasks.return_value = tasks
resp = self.app.get(
reverse('TaskDeployGraph', kwargs={
'cluster_id': self.cluster.id,
}) + '?remove={0}'.format(
','.join(consts.INTERNAL_TASKS)),
)
for task in tasks:
self.assertNotIn(task['id'], resp.body)
def test_remove_non_existent_type(self, m_get_tasks):
m_get_tasks.return_value = self.tasks
resp = self.app.get(
reverse('TaskDeployGraph', kwargs={
'cluster_id': self.cluster.id,
}) + '?remove=nonexistent',
expect_errors=True,
)
self.assertEqual(resp.status_code, 400)
self.assertIn('Task types nonexistent do not exist', resp.body)
|
|
from zerver.lib.test_classes import WebhookTestCase
TOPIC = "sandbox"
TOPIC_BRANCH_EVENTS = "sandbox / {branch}"
class Bitbucket3HookTests(WebhookTestCase):
STREAM_NAME = "bitbucket3"
URL_TEMPLATE = "/api/v1/external/bitbucket3?stream={stream}&api_key={api_key}"
FIXTURE_DIR_NAME = "bitbucket3"
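    # For reference, URL_TEMPLATE expands to something like
    # /api/v1/external/bitbucket3?stream=bitbucket3&api_key=<api key>
    # when the test harness builds the webhook URL for STREAM_NAME.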
# Diagnostics events:
def test_ping(self) -> None:
expected_message = "Congratulations! The Bitbucket Server webhook was configured successfully!"
self.check_webhook("diagnostics_ping", "Bitbucket Server Ping", expected_message)
def test_ping_with_user_defined_topic(self) -> None:
self.url = self.build_webhook_url(topic="my topic")
expected_message = "Congratulations! The Bitbucket Server webhook was configured successfully!"
self.check_webhook("diagnostics_ping", "my topic", expected_message)
# Core repo events:
def test_commit_comment_added(self) -> None:
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) commented on [508d1b6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/commits/508d1b67f1f8f3a25f543a030a7a178894aa9907):\n~~~ quote\nJust an arbitrary comment on a commit.\n~~~"""
self.check_webhook("commit_comment_added", TOPIC, expected_message)
def test_commit_comment_edited(self) -> None:
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) edited their comment on [508d1b6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/commits/508d1b67f1f8f3a25f543a030a7a178894aa9907):\n~~~ quote\nJust an arbitrary comment on a commit. Nothing to see here...\n~~~"""
self.check_webhook("commit_comment_edited", TOPIC, expected_message)
def test_commit_comment_deleted(self) -> None:
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) deleted their comment on [508d1b6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/commits/508d1b67f1f8f3a25f543a030a7a178894aa9907):\n~~~ quote\n~~Just an arbitrary comment on a commit. Nothing to see here...~~\n~~~"""
self.check_webhook("commit_comment_deleted", TOPIC, expected_message)
def test_bitbucket3_repo_forked(self) -> None:
expected_message = """User Hemanth V. Alluri(login: [hypro999](http://139.59.64.214:7990/users/hypro999)) forked the repository into [sandbox fork](http://139.59.64.214:7990/users/hypro999/repos/sandbox-fork/browse)."""
self.check_webhook("repo_forked", TOPIC, expected_message)
def test_bitbucket3_repo_modified(self) -> None:
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) changed the name of the **sandbox** repo from **sandbox** to **sandbox v2**."""
expected_topic = "sandbox v2"
self.check_webhook("repo_modified", expected_topic, expected_message)
# Repo push events:
def test_push_add_branch(self) -> None:
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) created branch2 branch."""
expected_topic = TOPIC_BRANCH_EVENTS.format(branch="branch2")
self.check_webhook("repo_push_add_branch", expected_topic, expected_message)
def test_push_add_tag(self) -> None:
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) pushed tag newtag."""
self.check_webhook("repo_push_add_tag", TOPIC, expected_message)
def test_push_delete_branch(self) -> None:
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) deleted branch branch2."""
expected_topic = TOPIC_BRANCH_EVENTS.format(branch="branch2")
self.check_webhook("repo_push_delete_branch", expected_topic, expected_message)
def test_push_delete_tag(self) -> None:
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) removed tag test-tag."""
self.check_webhook("repo_push_delete_tag", TOPIC, expected_message)
def test_push_update_single_branch(self) -> None:
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) pushed to branch master. Head is now e68c981ef53dbab0a5ca320a2d8d80e216c70528."""
expected_topic = TOPIC_BRANCH_EVENTS.format(branch="master")
self.check_webhook("repo_push_update_single_branch", expected_topic, expected_message)
def test_push_update_multiple_branches(self) -> None:
branch1_content = """[hypro999](http://139.59.64.214:7990/users/hypro999) pushed to branch branch1. Head is now 3980c2be32a7e23c795741d5dc1a2eecb9b85d6d."""
master_content = """[hypro999](http://139.59.64.214:7990/users/hypro999) pushed to branch master. Head is now fc43d13cff1abb28631196944ba4fc4ad06a2cf2."""
self.subscribe(self.test_user, self.STREAM_NAME)
payload = self.get_body("repo_push_update_multiple_branches")
msg = self.send_webhook_payload(
self.test_user,
self.url,
payload,
content_type="application/json",
)
msg = self.get_second_to_last_message()
self.assert_stream_message(
message=msg,
stream_name=self.STREAM_NAME,
topic_name=TOPIC_BRANCH_EVENTS.format(branch="branch1"),
content=branch1_content,
)
msg = self.get_last_message()
self.assert_stream_message(
message=msg,
stream_name=self.STREAM_NAME,
topic_name=TOPIC_BRANCH_EVENTS.format(branch="master"),
content=master_content,
)
def test_push_update_multiple_branches_with_branch_filter(self) -> None:
self.url = self.build_webhook_url(branches='master')
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) pushed to branch master. Head is now fc43d13cff1abb28631196944ba4fc4ad06a2cf2."""
expected_topic = TOPIC_BRANCH_EVENTS.format(branch="master")
self.check_webhook("repo_push_update_multiple_branches", expected_topic, expected_message)
self.url = self.build_webhook_url(branches='branch1')
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) pushed to branch branch1. Head is now 3980c2be32a7e23c795741d5dc1a2eecb9b85d6d."""
expected_topic = TOPIC_BRANCH_EVENTS.format(branch="branch1")
self.check_webhook("repo_push_update_multiple_branches", expected_topic, expected_message)
# Core PR events:
def test_pr_opened_without_reviewers(self) -> None:
expected_topic = "sandbox / PR #1 Branch1"
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1) from `branch1` to `master`:\n\n~~~ quote\n* Add file2.txt\r\n* Add file3.txt\n~~~"""
self.check_webhook(
"pull_request_opened_without_reviewers", expected_topic, expected_message
)
def test_pr_opened_without_description(self) -> None:
expected_topic = "sandbox / PR #2 Add notes feature."
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #2](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/2) from `master` to `master`."""
self.check_webhook(
"pull_request_opened_without_description", expected_topic, expected_message
)
def test_pr_opened_with_two_reviewers(self) -> None:
expected_topic = "sandbox / PR #5 Add Notes Feature"
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #5](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/5) from `master` to `master` (assigned to [shimura](http://139.59.64.214:7990/users/shimura) and [sougo](http://139.59.64.214:7990/users/sougo) for review)."""
self.check_webhook(
"pull_request_opened_with_two_reviewers", expected_topic, expected_message
)
def test_pr_opened_with_two_reviewers_and_user_defined_topic(self) -> None:
expected_topic = "sandbox / PR #5 Add Notes Feature"
expected_topic = "custom_topic"
self.url = self.build_webhook_url(topic='custom_topic')
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #5 Add Notes Feature](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/5) from `master` to `master` (assigned to [shimura](http://139.59.64.214:7990/users/shimura) and [sougo](http://139.59.64.214:7990/users/sougo) for review)."""
self.check_webhook(
"pull_request_opened_with_two_reviewers", expected_topic, expected_message
)
    def test_pr_opened_with_multiple_reviewers(self) -> None:
expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) opened [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) from `master` to `master` (assigned to [sougo](http://139.59.64.214:7990/users/sougo), [zura](http://139.59.64.214:7990/users/zura) and [shimura](http://139.59.64.214:7990/users/shimura) for review):\n\n~~~ quote\nAdd a simple text file for further testing purposes.\n~~~"""
self.check_webhook(
"pull_request_opened_with_multiple_reviewers", expected_topic, expected_message
)
def test_pr_modified(self) -> None:
expected_topic = "sandbox / PR #1 Branch1"
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) modified [PR #1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1) from `branch1` to `master` (assigned to [shimura](http://139.59.64.214:7990/users/shimura) for review):\n\n~~~ quote\n* Add file2.txt\n* Add file3.txt\nBoth of these files would be important additions to the project!\n~~~"""
self.check_webhook("pull_request_modified", expected_topic, expected_message)
def test_pr_modified_with_include_title(self) -> None:
expected_topic = "custom_topic"
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) modified [PR #1 Branch1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1) from `branch1` to `master` (assigned to [shimura](http://139.59.64.214:7990/users/shimura) for review):\n\n~~~ quote\n* Add file2.txt\n* Add file3.txt\nBoth of these files would be important additions to the project!\n~~~"""
self.url = self.build_webhook_url(topic='custom_topic')
self.check_webhook("pull_request_modified", expected_topic, expected_message)
def test_pr_deleted(self) -> None:
expected_topic = "sandbox / PR #2 Add notes feature."
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) deleted [PR #2](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/2)."""
self.check_webhook("pull_request_deleted", expected_topic, expected_message)
def test_pr_deleted_with_include_title(self) -> None:
expected_topic = "custom_topic"
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) deleted [PR #2 Add notes feature.](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/2)"""
self.url = self.build_webhook_url(topic='custom_topic')
self.check_webhook("pull_request_deleted", expected_topic, expected_message)
def test_pr_declined(self) -> None:
expected_topic = "sandbox / PR #7 Crazy Idea"
expected_message = """[zura](http://139.59.64.214:7990/users/zura) declined [PR #7](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/7)."""
self.check_webhook("pull_request_declined", expected_topic, expected_message)
def test_pr_merged(self) -> None:
expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
expected_message = """[zura](http://139.59.64.214:7990/users/zura) merged [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6)."""
self.check_webhook("pull_request_merged", expected_topic, expected_message)
# PR reviewer events:
def test_pr_approved(self) -> None:
expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
expected_message = """[zura](http://139.59.64.214:7990/users/zura) approved [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6)."""
self.check_webhook("pull_request_approved", expected_topic, expected_message)
def test_pr_unapproved(self) -> None:
expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
expected_message = """[zura](http://139.59.64.214:7990/users/zura) unapproved [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6)."""
self.check_webhook("pull_request_unapproved", expected_topic, expected_message)
def test_pr_marked_as_needs_review(self) -> None:
expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
expected_message = """[zura](http://139.59.64.214:7990/users/zura) marked [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) as \"needs work\"."""
self.check_webhook("pull_request_needs_work", expected_topic, expected_message)
def test_pr_marked_as_needs_review_and_include_title(self) -> None:
expected_topic = "custom_topic"
expected_message = """[zura](http://139.59.64.214:7990/users/zura) marked [PR #6 sample_file: Add sample_file.txt.](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6) as \"needs work\"."""
self.url = self.build_webhook_url(topic='custom_topic')
self.check_webhook("pull_request_needs_work", expected_topic, expected_message)
def test_pull_request_reviewer_added(self) -> None:
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) reassigned [PR #1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1) to [shimura](http://139.59.64.214:7990/users/shimura)."""
expected_topic = "sandbox / PR #1 Branch1"
self.check_webhook("pull_request_add_reviewer", expected_topic, expected_message)
def test_pull_request_reviewer_added_and_include_title(self) -> None:
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) reassigned [PR #1 Branch1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1) to [shimura](http://139.59.64.214:7990/users/shimura)."""
expected_topic = "custom_topic"
self.url = self.build_webhook_url(topic='custom_topic')
self.check_webhook("pull_request_add_reviewer", expected_topic, expected_message)
def test_pull_request_reviewers_added(self) -> None:
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) reassigned [PR #1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1) to [shimura](http://139.59.64.214:7990/users/shimura) and [sougo](http://139.59.64.214:7990/users/sougo)."""
expected_topic = "sandbox / PR #1 Branch1"
self.check_webhook("pull_request_add_two_reviewers", expected_topic, expected_message)
def test_pull_request_remove_all_reviewers(self) -> None:
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) removed all reviewers from [PR #1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1)."""
expected_topic = "sandbox / PR #1 Branch1"
self.check_webhook("pull_request_remove_reviewer", expected_topic, expected_message)
def test_pull_request_remove_all_reviewers_with_title(self) -> None:
expected_message = """[hypro999](http://139.59.64.214:7990/users/hypro999) removed all reviewers from [PR #1 Branch1](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/1)."""
expected_topic = "sandbox / PR #1 Branch1"
expected_topic = "custom_topic"
self.url = self.build_webhook_url(topic='custom_topic')
self.check_webhook("pull_request_remove_reviewer", expected_topic, expected_message)
# PR comment events:
def test_pull_request_comment_added(self) -> None:
expected_message = """[zura](http://139.59.64.214:7990/users/zura) commented on [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6):\n\n~~~ quote\nThis seems like a pretty good idea.\n~~~"""
expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
self.check_webhook("pull_request_comment_added", expected_topic, expected_message)
def test_pull_request_comment_edited(self) -> None:
expected_message = """[zura](http://139.59.64.214:7990/users/zura) edited their comment on [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6):\n\n~~~ quote\nThis seems like a pretty good idea. @shimura what do you think?\n~~~"""
expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
self.check_webhook("pull_request_comment_edited", expected_topic, expected_message)
def test_pull_request_comment_deleted(self) -> None:
expected_message = """[zura](http://139.59.64.214:7990/users/zura) deleted their comment on [PR #6](http://139.59.64.214:7990/projects/SBOX/repos/sandbox/pull-requests/6):\n\n~~~ quote\n~~This seems like a pretty good idea. @shimura what do you think?~~\n~~~"""
expected_topic = "sandbox / PR #6 sample_file: Add sample_file.txt."
self.check_webhook("pull_request_comment_deleted", expected_topic, expected_message)
|
|
# file: layers.py
# brief: A number of objects to wrap caffe layers for conversion
# author: Andrea Vedaldi
from collections import OrderedDict
from math import floor, ceil
from operator import mul
import numpy as np
from numpy import array
import scipy
import scipy.io
import scipy.misc
import copy
import collections
import sys
# Recent Caffes just pass a string as a type; this is used for legacy support
layers_type = {}
layers_type[0] = 'none'
layers_type[1] = 'accuracy'
layers_type[2] = 'bnll'
layers_type[3] = 'concat'
layers_type[4] = 'conv'
layers_type[5] = 'data'
layers_type[6] = 'dropout'
layers_type[7] = 'euclidean_loss'
layers_type[8] = 'flatten'
layers_type[9] = 'hdf5_data'
layers_type[10] = 'hdf5_output'
layers_type[28] = 'hinge_loss'
layers_type[11] = 'im2col'
layers_type[12] = 'image_data'
layers_type[13] = 'infogain_loss'
layers_type[14] = 'inner_product'
layers_type[15] = 'lrn'
layers_type[25] = 'eltwise'
layers_type[29] = 'memory_data'
layers_type[16] = 'multinomial_logistic_loss'
layers_type[17] = 'pool'
layers_type[26] = 'power'
layers_type[18] = 'relu'
layers_type[19] = 'sigmoid'
layers_type[27] = 'sigmoid_cross_entropy_loss'
layers_type[20] = 'softmax'
layers_type[21] = 'softmax_loss'
layers_type[22] = 'split'
layers_type[23] = 'tanh'
layers_type[24] = 'window_data'
layers_type[39] = 'deconvolution'
layers_type[40] = 'crop'
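# Note (added for clarity): the helpers below compute filter geometry. Throughout this
# file, pad appears to be stored per spatial dimension as
# [dim0 begin, dim0 end, dim1 begin, dim1 end] and kernel_size/stride as a pair of
# spatial sizes; getFilterOutputSize() implements the usual formula
#   floor((input + padBegin + padEnd - kernel) / stride) + 1
# for each of the two spatial dimensions.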
def getFilterOutputSize(size, kernelSize, stride, pad):
return [floor((size[0] + pad[0]+pad[1] - kernelSize[0]) / stride[0]) + 1., \
floor((size[1] + pad[2]+pad[3] - kernelSize[1]) / stride[1]) + 1.]
def getFilterTransform(ks, stride, pad):
    y1 = 1. - pad[0]
    y2 = 1. - pad[0] + ks[0] - 1
    x1 = 1. - pad[2]
    x2 = 1. - pad[2] + ks[1] - 1
    h = y2 - y1 + 1.
    w = x2 - x1 + 1.
return CaffeTransform([h, w], stride, [(y1+y2)/2, (x1+x2)/2])
def reorder(aList, order):
return [aList[i] for i in order]
def row(x):
return np.array(x,dtype=float).reshape(1,-1)
def rowarray(x):
return x.reshape(1,-1)
def rowcell(x):
return np.array(x,dtype=object).reshape(1,-1)
def dictToMatlabStruct(d):
if not d:
return np.zeros((0,))
dt = []
for x in d.keys():
pair = (x,object)
if isinstance(d[x], np.ndarray): pair = (x,type(d[x]))
dt.append(pair)
y = np.empty((1,),dtype=dt)
for x in d.keys():
y[x][0] = d[x]
return y
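# Note (added for clarity): dictToMatlabStruct() builds a one-element NumPy structured
# array whose fields are the dictionary keys; when saved (presumably via
# scipy.io.savemat, given the scipy.io import above) such an array maps to a MATLAB
# struct. An empty dict is returned as an empty array.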
# --------------------------------------------------------------------
# MatConvNet in NumPy
# --------------------------------------------------------------------
mlayerdt = [('name',object),
('type',object),
('inputs',object),
('outputs',object),
('params',object),
('block',object)]
mparamdt = [('name',object),
('value',object)]
minputdt = [('name',object),
('size',object)]
# --------------------------------------------------------------------
# Vars and params
# --------------------------------------------------------------------
class CaffeBlob(object):
def __init__(self, name):
self.name = name
self.shape = None
self.value = np.zeros(shape=(0,0), dtype='float32')
self.bgrInput = False
self.transposable = True # first two dimensions are spatial
def transpose(self):
if self.shape: self.shape = [self.shape[k] for k in [1,0,2,3]]
def toMatlab(self):
mparam = np.empty(shape=[1,], dtype=mparamdt)
mparam['name'][0] = self.name
mparam['value'][0] = self.value
return mparam
def toMatlabSimpleNN(self):
return self.value
def hasValue(self):
return reduce(mul, self.value.shape, 1) > 0
class CaffeTransform(object):
def __init__(self, size, stride, offset):
self.shape = size
self.stride = stride
self.offset = offset
def __str__(self):
return "<%s %s %s>" % (self.shape, self.stride, self.offset)
def composeTransforms(a, b):
size = [0.,0.]
stride = [0.,0.]
offset = [0.,0.]
for i in [0,1]:
size[i] = a.stride[i] * (b.shape[i] - 1) + a.shape[i]
stride[i] = a.stride[i] * b.stride[i]
offset[i] = a.stride[i] * (b.offset[i] - 1) + a.offset[i]
c = CaffeTransform(size, stride, offset)
return c
def transposeTransform(a):
size = [0.,0.]
stride = [0.,0.]
offset = [0.,0.]
for i in [0,1]:
size[i] = (a.shape[i] + a.stride[i] - 1.0) / a.stride[i]
stride[i] = 1.0/a.stride[i]
offset[i] = (1.0 + a.stride[i] - a.offset[i]) / a.stride[i]
c = CaffeTransform(size, stride, offset)
return c
# --------------------------------------------------------------------
# Errors
# --------------------------------------------------------------------
class ConversionError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
# --------------------------------------------------------------------
# Basic Layers
# --------------------------------------------------------------------
class CaffeLayer(object):
def __init__(self, name, inputs, outputs):
self.name = name
self.inputs = inputs
self.outputs = outputs
self.params = []
self.model = None
def reshape(self, model):
pass
def display(self):
print "Layer \'{}\'".format(self.name)
print " +- type: %s" % (self.__class__.__name__)
print " +- inputs: %s" % (self.inputs,)
print " +- outputs: %s" % (self.outputs,)
print " +- params: %s" % (self.params,)
    def getTransforms(self, model):
        transforms = []
        for i in range(len(self.inputs)):
            row = []
            for j in range(len(self.outputs)):
                row.append(CaffeTransform([1.,1.], [1.,1.], [1.,1.]))
            transforms.append(row)
        return transforms
def transpose(self, model):
pass
def setBlob(self, model, i, blob):
assert(False)
def toMatlab(self):
mlayer = np.empty(shape=[1,],dtype=mlayerdt)
mlayer['name'][0] = self.name
mlayer['type'][0] = None
mlayer['inputs'][0] = rowcell(self.inputs)
mlayer['outputs'][0] = rowcell(self.outputs)
mlayer['params'][0] = rowcell(self.params)
mlayer['block'][0] = dictToMatlabStruct({})
return mlayer
def toMatlabSimpleNN(self):
        mparam = collections.OrderedDict()
mparam['name'] = self.name
mparam['type'] = None
return mparam
class CaffeElementWise(CaffeLayer):
def reshape(self, model):
for i in range(len(self.inputs)):
model.vars[self.outputs[i]].shape = \
model.vars[self.inputs[i]].shape
class CaffeReLU(CaffeElementWise):
def __init__(self, name, inputs, outputs):
super(CaffeReLU, self).__init__(name, inputs, outputs)
def toMatlab(self):
mlayer = super(CaffeReLU, self).toMatlab()
mlayer['type'][0] = u'dagnn.ReLU'
mlayer['block'][0] = dictToMatlabStruct(
{'leak': float(0.0) })
# todo: leak factor
return mlayer
def toMatlabSimpleNN(self):
mlayer = super(CaffeReLU, self).toMatlabSimpleNN()
mlayer['type'] = u'relu'
mlayer['leak'] = float(0.0)
return mlayer
class CaffeLRN(CaffeElementWise):
def __init__(self, name, inputs, outputs,
local_size,
alpha,
beta,
norm_region,
kappa):
super(CaffeLRN, self).__init__(name, inputs, outputs)
self.local_size = local_size
self.alpha = alpha
self.beta = beta
self.norm_region = norm_region
self.kappa = kappa
assert(norm_region == 'across_channels')
def toMatlab(self):
mlayer = super(CaffeLRN, self).toMatlab()
mlayer['type'][0] = u'dagnn.LRN'
mlayer['block'][0] = dictToMatlabStruct(
{'param': row([self.local_size,
self.kappa,
self.alpha / self.local_size,
self.beta])})
return mlayer
def toMatlabSimpleNN(self):
mlayer = super(CaffeLRN, self).toMatlabSimpleNN()
mlayer['type'] = u'lrn'
mlayer['param'] = row([self.local_size,
self.kappa,
self.alpha / self.local_size,
self.beta])
return mlayer
class CaffeSoftMax(CaffeElementWise):
def __init__(self, name, inputs, outputs):
super(CaffeSoftMax, self).__init__(name, inputs, outputs)
def toMatlab(self):
mlayer = super(CaffeSoftMax, self).toMatlab()
mlayer['type'][0] = u'dagnn.SoftMax'
return mlayer
def toMatlabSimpleNN(self):
mlayer = super(CaffeSoftMax, self).toMatlabSimpleNN()
mlayer['type'] = u'softmax'
return mlayer
class CaffeSoftMaxLoss(CaffeElementWise):
def __init__(self, name, inputs, outputs):
super(CaffeSoftMaxLoss, self).__init__(name, inputs, outputs)
def toMatlab(self):
mlayer = super(CaffeSoftMaxLoss, self).toMatlab()
mlayer['type'][0] = u'dagnn.SoftMaxLoss'
return mlayer
def toMatlabSimpleNN(self):
mlayer = super(CaffeSoftMaxLoss, self).toMatlabSimpleNN()
mlayer['type'] = u'softmax'
return mlayer
class CaffeDropout(CaffeElementWise):
def __init__(self, name, inputs, outputs, ratio):
super(CaffeDropout, self).__init__(name, inputs, outputs)
self.ratio = ratio
def toMatlab(self):
mlayer = super(CaffeDropout, self).toMatlab()
mlayer['type'][0] = u'dagnn.DropOut'
mlayer['block'][0] = dictToMatlabStruct({'rate': float(self.ratio)})
return mlayer
def toMatlabSimpleNN(self):
mlayer = super(CaffeDropout, self).toMatlabSimpleNN()
mlayer['type'] = u'dropout'
mlayer['rate'] = float(self.ratio)
return mlayer
def display(self):
super(CaffeDropout, self).display()
print " c- ratio (dropout rate):", self.ratio
class CaffeData(CaffeLayer):
def __init__(self, name, inputs, outputs):
super(CaffeData, self).__init__(name, inputs, outputs)
def reshape(self, model):
        # todo: complete other cases
shape = [layer.transform_param.crop_size,
layer.transform_param.crop_size,
3,
layer.batch_size]
model.vars[self.outputs[0]].shape = shape
def toMatlab(self):
return None
def toMatlabSimpleNN(self):
return None
# --------------------------------------------------------------------
# Convolution
# --------------------------------------------------------------------
class CaffeConv(CaffeLayer):
def __init__(self, name, inputs, outputs,
num_output,
bias_term,
pad,
kernel_size,
stride,
dilation,
group):
super(CaffeConv, self).__init__(name, inputs, outputs)
if len(kernel_size) == 1 : kernel_size = kernel_size * 2
if len(stride) == 1 : stride = stride * 2
if len(pad) == 1 : pad = pad * 4
elif len(pad) == 2 : pad = [pad[0], pad[0], pad[1], pad[1]]
self.num_output = num_output
self.bias_term = bias_term
self.pad = pad
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.group = group
self.params = [name + '_filter']
if bias_term: self.params.append(name + '_bias')
self.filter_depth = None
def display(self):
super(CaffeConv, self).display()
print " +- filter dimension:", self.filter_depth
print " c- num_output (num filters): %s" % self.num_output
print " c- bias_term: %s" % self.bias_term
print " c- pad: %s" % (self.pad,)
print " c- kernel_size: %s" % self.kernel_size
print " c- stride: %s" % (self.stride,)
print " c- dilation: %s" % (self.dilation,)
print " c- group: %s" % (self.group,)
def reshape(self, model):
varin = model.vars[self.inputs[0]]
varout = model.vars[self.outputs[0]]
if not varin.shape: return
varout.shape = getFilterOutputSize(varin.shape[0:2],
self.kernel_size,
self.stride,
self.pad) \
+ [self.num_output, varin.shape[3]]
self.filter_depth = varin.shape[2] / self.group
def getTransforms(self, model):
return [[getFilterTransform(self.kernel_size, self.stride, self.pad)]]
def setBlob(self, model, i, blob):
assert(i < 2)
if i == 0:
assert(blob.shape[0] == self.kernel_size[0])
assert(blob.shape[1] == self.kernel_size[1])
assert(blob.shape[3] == self.num_output)
self.filter_depth = blob.shape[2]
elif i == 1:
assert(blob.shape[0] == self.num_output)
model.params[self.params[i]].value = blob
model.params[self.params[i]].shape = blob.shape
def transpose(self, model):
self.kernel_size = reorder(self.kernel_size, [1,0])
self.stride = reorder(self.stride, [1,0])
self.pad = reorder(self.pad, [2,3,0,1])
if model.params[self.params[0]].hasValue():
print "Layer %s: transposing filters" % self.name
param = model.params[self.params[0]]
param.value = param.value.transpose([1,0,2,3])
if model.vars[self.inputs[0]].bgrInput:
print "Layer %s: BGR to RGB conversion" % self.name
param.value = param.value[:,:,: : -1,:]
def toMatlab(self):
size = self.kernel_size + [self.filter_depth, self.num_output]
mlayer = super(CaffeConv, self).toMatlab()
mlayer['type'][0] = u'dagnn.Conv'
mlayer['block'][0] = dictToMatlabStruct(
{'hasBias': self.bias_term,
'size': row(size),
'pad': row(self.pad),
'stride': row(self.stride)})
return mlayer
def toMatlabSimpleNN(self):
size = self.kernel_size + [self.filter_depth, self.num_output]
mlayer = super(CaffeConv, self).toMatlabSimpleNN()
mlayer['type'] = u'conv'
mlayer['weights'] = np.empty([1,len(self.params)], dtype=np.object)
mlayer['size'] = row(size)
mlayer['pad'] = row(self.pad)
mlayer['stride'] = row(self.stride)
for p, name in enumerate(self.params):
mlayer['weights'][0,p] = self.model.params[name].toMatlabSimpleNN()
return mlayer
# --------------------------------------------------------------------
# InnerProduct
# --------------------------------------------------------------------
# special case: inner product
class CaffeInnerProduct(CaffeConv):
def __init__(self, name, inputs, outputs, num_output, bias_term, axis):
super(CaffeInnerProduct, self).__init__(name, inputs, outputs,
num_output = num_output,
bias_term = bias_term,
pad = [0, 0, 0, 0],
kernel_size = [1, 1],
stride = [1, 1],
dilation = [],
group = 1)
self.axis = axis
assert(axis == 1)
def setBlob(self, model, i, blob):
assert(i < 1 + self.bias_term)
if i == 0:
self.filter_depth = blob.shape[0]
assert(blob.shape[1] == self.num_output)
blob = blob.reshape([1, 1, self.filter_depth, self.num_output])
elif i == 1:
assert(blob.shape[0] == self.num_output)
model.params[self.params[i]].value = blob
model.params[self.params[i]].shape = blob.shape
def reshape(self, model):
if not model.vars[self.inputs[0]].shape: return
s = model.vars[self.inputs[0]].shape
self.kernel_size = [s[0], s[1], s[2], self.num_output]
print "Layer %s: inner product converted to filter bank of shape %s" \
% (self.name, self.kernel_size)
param = model.params[self.params[0]]
if param.hasValue():
print "Layer %s: reshaping inner product paramters of shape %s into a filter bank" % (self.name, param.value.shape)
param.value = param.value.reshape(self.kernel_size, order='F')
super(CaffeInnerProduct, self).reshape(model)
# --------------------------------------------------------------------
# Deconvolution
# --------------------------------------------------------------------
class CaffeDeconvolution(CaffeConv):
def __init__(self, name, inputs, outputs,
num_output,
bias_term,
pad,
kernel_size,
stride,
dilation,
group):
super(CaffeDeconvolution, self).__init__(name, inputs, outputs,
num_output = num_output,
bias_term = bias_term,
pad = pad,
kernel_size = kernel_size,
stride = stride,
dilation = dilation,
group = group)
def setBlob(self, model, i, blob):
assert(i < 2)
if i == 0:
assert(blob.shape[0] == self.kernel_size[0])
assert(blob.shape[1] == self.kernel_size[1])
assert(blob.shape[2] == self.num_output)
self.filter_depth = blob.shape[3]
elif i == 1:
assert(blob.shape[0] == self.num_output)
model.params[self.params[i]].value = blob
model.params[self.params[i]].shape = blob.shape
def reshape(self, model):
inshape = model.vars[self.inputs[0]].shape
if not inshape: return
model.vars[self.outputs[0]].shape = \
getFilterOutputSize(inshape[0:2],
self.kernel_size, self.stride, self.pad) + \
[self.num_output, inshape[3]]
self.filter_depth = inshape[2]
def getTransforms(self, model):
t = getFilterTransform(self.kernel_size, self.stride, self.pad)
t = transposeTransform(t)
return [[t]]
def transpose(self, model):
self.kernel_size = reorder(self.kernel_size, [1,0])
self.stride = reorder(self.stride, [1,0])
self.pad = reorder(self.pad, [2,3,0,1])
if model.params[self.params[0]].hasValue():
print "Layer %s transposing filters" % self.name
param = model.params[self.params[0]]
param.value = param.value.transpose([1,0,2,3])
if model.vars[self.inputs[0]].bgrInput:
print "Layer %s BGR to RGB conversion" % self.name
param.value = param.value[:,:,:,: : -1]
def toMatlab(self):
size = self.kernel_size + [self.num_output, self.filter_depth / self.group]
mlayer = super(CaffeDeconvolution, self).toMatlab()
mlayer['type'][0] = u'dagnn.ConvTranspose'
mlayer['block'][0] = dictToMatlabStruct(
{'hasBias': self.bias_term,
'size': row(size),
'upsample': row(self.stride),
'crop': row(self.pad)})
return mlayer
def toMatlabSimpleNN(self):
size = self.kernel_size + [self.num_output, self.filter_depth / self.group]
mlayer = super(CaffeDeconvolution, self).toMatlabSimpleNN()
mlayer['type'] = u'convt'
mlayer['weights'] = np.empty([1,len(self.params)], dtype=np.object)
mlayer['size'] = row(size)
mlayer['upsample'] = row(self.stride)
mlayer['crop'] = row(self.pad)
for p, name in enumerate(self.params):
mlayer['weights'][0,p] = self.model.params[name].toMatlabSimpleNN()
return mlayer
# --------------------------------------------------------------------
# Pooling
# --------------------------------------------------------------------
class CaffePooling(CaffeLayer):
def __init__(self, name, inputs, outputs,
method,
pad,
kernel_size,
stride):
super(CaffePooling, self).__init__(name, inputs, outputs)
if len(kernel_size) == 1 : kernel_size = kernel_size * 2
if len(stride) == 1 : stride = stride * 2
if len(pad) == 1 : pad = pad * 4
elif len(pad) == 2 : pad = [pad[0], pad[0], pad[1], pad[1]]
self.method = method
self.pad = pad
self.kernel_size = kernel_size
self.stride = stride
self.pad_corrected = None
def display(self):
super(CaffePooling, self).display()
print " +- pad_corrected: %s" % (self.pad_corrected,)
print " c- method: ", self.method
print " c- pad: %s" % (self.pad,)
print " c- kernel_size: %s" % (self.kernel_size,)
print " c- stride: %s" % (self.stride,)
def reshape(self, model):
shape = model.vars[self.inputs[0]].shape
if not shape: return
        # MatConvNet uses a slightly different definition of padding, which we think
        # is the correct one (it corresponds to the filters)
self.pad_corrected = copy.deepcopy(self.pad)
for i in [0, 1]:
self.pad_corrected[1 + i*2] = min(
self.pad[1 + i*2] + self.stride[i] - 1,
self.kernel_size[i] - 1)
model.vars[self.outputs[0]].shape = \
getFilterOutputSize(shape[0:2],
self.kernel_size,
self.stride,
self.pad_corrected) + shape[2:5]
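        # Worked example (hypothetical values): with kernel_size = [3, 3], stride = [2, 2]
        # and pad = [0, 0, 0, 0], the corrected bottom/right padding becomes
        # min(0 + 2 - 1, 3 - 1) = 1, so a 7x7 input yields
        # floor((7 + 0 + 1 - 3) / 2) + 1 = 3 outputs per spatial dimension.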
def getTransforms(self, model):
return [[getFilterTransform(self.kernel_size, self.stride, self.pad)]]
def transpose(self, model):
self.kernel_size = reorder(self.kernel_size, [1,0])
self.stride = reorder(self.stride, [1,0])
self.pad = reorder(self.pad, [2,3,0,1])
if self.pad_corrected:
self.pad_corrected = reorder(self.pad_corrected, [2,3,0,1])
def toMatlab(self):
mlayer = super(CaffePooling, self).toMatlab()
mlayer['type'][0] = u'dagnn.Pooling'
mlayer['block'][0] = dictToMatlabStruct(
{'method': self.method,
'poolSize': row(self.kernel_size),
'stride': row(self.stride),
'pad': row(self.pad_corrected)})
if not self.pad_corrected:
print "Warning: pad correction for layer %s could not be computed because the layer input shape could not be determined" % (self.name)
return mlayer
def toMatlabSimpleNN(self):
mlayer = super(CaffePooling, self).toMatlabSimpleNN()
mlayer['type'] = u'pool'
mlayer['method'] = self.method
mlayer['pool'] = row(self.kernel_size)
mlayer['stride'] = row(self.stride)
mlayer['pad'] = row(self.pad_corrected)
if not self.pad_corrected:
print "Warning: pad correction for layer %s could not be computed because the layer input shape could not be determined" % (self.name)
return mlayer
# --------------------------------------------------------------------
# ROIPooling
# --------------------------------------------------------------------
class CaffeROIPooling(CaffeLayer):
def __init__(self, name, inputs, outputs,
pooled_w,
pooled_h,
spatial_scale):
super(CaffeROIPooling, self).__init__(name, inputs, outputs)
self.pooled_w = pooled_w
self.pooled_h = pooled_h
self.spatial_scale = spatial_scale
self.flatten = True
def display(self):
super(CaffeROIPooling, self).display()
print " c- pooled_w: %s" % (self.pooled_w,)
print " c- pooled_h: %s" % (self.pooled_h,)
print " c- spatial_scale: %s" % (self.spatial_scale,)
print " c- flatten: %s" % (self.flatten,)
def reshape(self, model):
shape1 = model.vars[self.inputs[0]].shape
shape2 = model.vars[self.inputs[1]].shape
if not shape1 or not shape2: return
numChannels = shape1[2]
numROIs = reduce(mul, shape2, 1) / 5
if self.flatten:
oshape = [1,
1,
self.pooled_w * self.pooled_h * numChannels,
numROIs]
else:
oshape = [self.pooled_w,
self.pooled_h,
numChannels,
numROIs]
model.vars[self.outputs[0]].shape = oshape
def getTransforms(self, model):
# no transform
return [[CaffeTransform([1.,1.], [1.,1.], [1.,1.])]]
def transpose(self, model):
assert(not self.flatten)
tmp = self.pooled_w
self.pooled_w = self.pooled_h
self.pooled_h = tmp
def toMatlab(self):
mlayer = super(CaffeROIPooling, self).toMatlab()
mlayer['type'][0] = u'dagnn.ROIPooling'
mlayer['block'][0] = dictToMatlabStruct(
{'subdivisions':row([self.pooled_w, self.pooled_h]),
'transform':self.spatial_scale,
'flatten':self.flatten})
return mlayer
def toMatlabSimpleNN(self):
mlayer = super(CaffeROIPooling, self).toMatlabSimpleNN()
mlayer['type'] = u'roipool'
mlayer['subdivisions'] = row([self.pooled_w, self.pooled_h])
mlayer['transform'] = self.spatial_scale
mlayer['flatten'] = self.flatten
return mlayer
# --------------------------------------------------------------------
# Scale
# --------------------------------------------------------------------
class CaffeScale(CaffeLayer):
def __init__(self, name, inputs, outputs,
axis,
num_axes,
bias_term):
super(CaffeScale, self).__init__(name, inputs, outputs)
self.axis = axis
self.num_axes = num_axes
self.bias_term = bias_term
if len(self.inputs) == 1:
self.params.append(name + '_mult')
if len(self.inputs) < 2 and self.bias_term:
self.params.append(name + '_bias')
self.mult_size = [0, 0, 0, 0]
def display(self):
super(CaffeScale, self).display()
print " +- mult_size: %s" % (self.mult_size,)
print " c- axis: %s" % (self.axis,)
print " c- num_axes: %s" % (self.num_axes,)
print " c- bias_term: %s" % (self.bias_term,)
def reshape(self, model):
model.vars[self.outputs[0]].shape = model.vars[self.inputs[0]].shape
def setBlob(self, model, i, blob):
assert(i < self.bias_term + 1)
# Caffe *ends* with WIDTH, we start with it, blobs are already swapped here
k = 3 - self.axis
# This means that the MULT dimensions are aligned to the INPUT
# dimensions such that MULT[end] <-> INPUT[k]. For MatConvNet,
        # we simply add singleton dimensions at the beginning of MULT
# to achieve this effect. BIAS is the same.
mshape = tuple([1] * (k - len(blob.shape) + 1) + list(blob.shape))
blob = blob.reshape(mshape)
model.params[self.params[i]].value = blob
model.params[self.params[i]].shape = blob.shape
if i == 0: self.mult_size = blob.shape
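        # Example (hypothetical shapes): with axis = 1 and a 1-D Caffe blob of shape (C,),
        # k = 3 - 1 = 2 and mshape = (1, 1, C), i.e. the multiplier is broadcast along the
        # two leading (spatial) MatConvNet dimensions.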
def getTransforms(self, model):
        # The second input can be either a variable or a parameter; in
        # both cases, there is no transform for it
return [[CaffeTransform([1.,1.], [1.,1.], [1.,1.])]]
def transpose(self, model):
if len(self.inputs) == 1:
# we only need to transpose if the scale is a parameter, not an input
for i in range(1 + self.bias_term):
param = model.params[self.params[i]]
n = len(param.shape)
if n >= 2:
order = range(n)
order[0] = 1
order[1] = 0
param.value = param.value.transpose(order)
def toMatlab(self):
mlayer = super(CaffeScale, self).toMatlab()
mlayer['type'][0] = u'dagnn.Scale'
mlayer['block'][0] = dictToMatlabStruct(
{'size': row(self.mult_size),
'hasBias': self.bias_term})
return mlayer
def toMatlabSimpleNN(self):
mlayer = super(CaffeScale, self).toMatlabSimpleNN()
# SimpleNN works only if the scaling blob is a parameter (and not a variable)
mlayer['type'] = u'scale'
mlayer['size'] = row(self.mult_size)
mlayer['hasBias'] = self.bias_term
return mlayer
# --------------------------------------------------------------------
# BatchNorm
# --------------------------------------------------------------------
class CaffeBatchNorm(CaffeLayer):
def __init__(self, name, inputs, outputs, use_global_stats, moving_average_fraction, eps):
super(CaffeBatchNorm, self).__init__(name, inputs, outputs)
self.use_global_stats = use_global_stats
self.moving_average_fraction = moving_average_fraction
self.eps = eps
self.params = [name + u'_mean',
name + u'_variance',
name + u'_scale_factor']
def display(self):
super(CaffeBatchNorm, self).display()
print " c- use_global_stats: %s" % (self.use_global_stats,)
print " c- moving_average_fraction: %s" % (self.moving_average_fraction,)
print " c- eps: %s" % (self.eps)
def setBlob(self, model, i, blob):
assert(i < 3)
model.params[self.params[i]].value = blob
model.params[self.params[i]].shape = blob.shape
def reshape(self, model):
shape = model.vars[self.inputs[0]].shape
mean = model.params[self.params[0]].value
variance = model.params[self.params[1]].value
scale_factor = model.params[self.params[2]].value
for i in range(3): del model.params[self.params[i]]
self.params = [self.name + u'_mult',
self.name + u'_bias',
self.name + u'_moments']
model.addParam(self.params[0])
model.addParam(self.params[1])
model.addParam(self.params[2])
if shape:
mult = np.ones((shape[2],),dtype='float32')
bias = np.zeros((shape[2],),dtype='float32')
model.params[self.params[0]].value = mult
model.params[self.params[0]].shape = mult.shape
model.params[self.params[1]].value = bias
model.params[self.params[1]].shape = bias.shape
if mean.size:
moments = np.concatenate(
(mean.reshape(-1,1) / scale_factor,
np.sqrt(variance.reshape(-1,1) / scale_factor + self.eps)),
axis=1)
model.params[self.params[2]].value = moments
model.params[self.params[2]].shape = moments.shape
model.vars[self.outputs[0]].shape = shape
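        # Note (added for clarity): Caffe stores the running mean and variance scaled by a
        # scale factor, hence the division by scale_factor above; the resulting `moments`
        # parameter packs mean/scale_factor and sqrt(variance/scale_factor + eps)
        # column-wise, presumably the layout expected by dagnn.BatchNorm (see toMatlab below).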
def toMatlab(self):
mlayer = super(CaffeBatchNorm, self).toMatlab()
mlayer['type'][0] = u'dagnn.BatchNorm'
mlayer['block'][0] = dictToMatlabStruct(
{'epsilon': self.eps})
return mlayer
def toMatlabSimpleNN(self):
mlayer = super(CaffeBatchNorm, self).toMatlabSimpleNN()
mlayer['type'] = u'bnorm'
mlayer['epsilon'] = self.eps
return mlayer
# --------------------------------------------------------------------
# Concat
# --------------------------------------------------------------------
class CaffeConcat(CaffeLayer):
def __init__(self, name, inputs, outputs, concatDim):
super(CaffeConcat, self).__init__(name, inputs, outputs)
self.concatDim = concatDim
def transpose(self, model):
self.concatDim = [1, 0, 2, 3][self.concatDim]
def reshape(self, model):
sizes = [model.vars[x].shape for x in self.inputs]
osize = copy.deepcopy(sizes[0])
osize[self.concatDim] = 0
for thisSize in sizes:
for i in range(len(thisSize)):
if self.concatDim == i:
osize[i] = osize[i] + thisSize[i]
else:
if osize[i] != thisSize[i]:
print "Warning: concat layer: inconsistent input dimensions", sizes
model.vars[self.outputs[0]].shape = osize
def display(self):
super(CaffeConcat, self).display()
print " Concat Dim: ", self.concatDim
def toMatlab(self):
mlayer = super(CaffeConcat, self).toMatlab()
mlayer['type'][0] = u'dagnn.Concat'
mlayer['block'][0] = dictToMatlabStruct({'dim': float(self.concatDim) + 1})
return mlayer
def toMatlabSimpleNN(self):
raise ConversionError('Concat layers do not work in a SimpleNN network')
# --------------------------------------------------------------------
# EltWise (Sum, ...)
# --------------------------------------------------------------------
class CaffeEltWise(CaffeElementWise):
def __init__(self, name, inputs, outputs,
operation,
coeff,
stable_prod_grad):
super(CaffeEltWise, self).__init__(name, inputs, outputs)
self.operation = operation
self.coeff = coeff
self.stable_prod_grad = stable_prod_grad
def toMatlab(self):
mlayer = super(CaffeEltWise, self).toMatlab()
if self.operation == 'sum':
mlayer['type'][0] = u'dagnn.Sum'
else:
# not implemented
assert(False)
return mlayer
def display(self):
super(CaffeEltWise, self).display()
print " c- operation: ", self.operation
print " c- coeff: %s" % self.coeff
print " c- stable_prod_grad: %s" % self.stable_prod_grad
def reshape(self, model):
model.vars[self.outputs[0]].shape = \
model.vars[self.inputs[0]].shape
for i in range(1, len(self.inputs)):
assert(model.vars[self.inputs[0]].shape == model.vars[self.inputs[i]].shape)
def toMatlabSimpleNN(self):
raise ConversionError('EltWise (sum, ...) layers do not work in a SimpleNN network')
# --------------------------------------------------------------------
# Crop
# --------------------------------------------------------------------
class CaffeCrop(CaffeLayer):
def __init__(self, name, inputs, outputs):
super(CaffeCrop, self).__init__(name, inputs, outputs)
self.crop = []
def display(self):
super(CaffeCrop, self).display()
print " Crop: %s" % self.crop
def reshape(self, model):
# this is quite complex as we need to compute on the fly
# the geometry
tfs1 = model.getParentTransforms(self.inputs[0], self.name)
tfs2 = model.getParentTransforms(self.inputs[1], self.name)
print
print self.name, self.inputs[0]
for a,x in enumerate(tfs1): print "%10s %s" % (x,tfs1[x])
print self.name, self.inputs[1]
for a,x in enumerate(tfs2): print "%10s %s" % (x,tfs2[x])
# the goal is to crop inputs[0] to make it as big as inputs[1] and
# aligned to it; so now we find the map from inputs[0] to inputs[1]
tf = None
for name, tf2 in tfs2.items():
if tfs1.has_key(name):
tf1 = tfs1[name]
tf = composeTransforms(transposeTransform(tf2), tf1)
break
if tf is None:
print "Error: could not find common ancestor for inputs '%s' and '%s' of the CaffeCrop layer '%s'" % (self.inputs[0], self.inputs[1], self.name)
sys.exit(1)
print " Transformation %s -> %s = %s" % (self.inputs[0],
self.inputs[1], tf)
        # for this to make sense it should be tf.stride = 1
assert(tf.stride[0] == 1 and tf.stride[1] == 1)
# finally we can get the crops!
self.crop = [0.,0.]
for i in [0,1]:
# i' = alpha (i - 1) + beta + crop = 1 for i = 1
# crop = 1 - beta
self.crop[i] = round(1 - tf.offset[i])
print " Crop %s" % self.crop
# print
# print "resolved"
# tfs3 = model.getParentTransforms(self.outputs[0])
# for a,x in enumerate(tfs3): print "%10s %s" % (x,tfs3[x])
# now compute output variable size, which will be the size of the second input
model.vars[self.outputs[0]].shape = model.vars[self.inputs[1]].shape
def getTransforms(self, model):
t = CaffeTransform([1.,1.], [1.,1.], [1.+self.crop[0],1.+self.crop[1]])
return [[t],[None]]
def toMatlab(self):
mlayer = super(CaffeCrop, self).toMatlab()
mlayer['type'][0] = u'dagnn.Crop'
mlayer['block'][0] = dictToMatlabStruct({'crop': row(self.crop)})
return mlayer
def toMatlabSimpleNN(self):
# todo: simple 1 input crop layers should be supported though!
raise ConversionError('Crop layers do not work in a SimpleNN network')
# --------------------------------------------------------------------
# Caffe Model
# --------------------------------------------------------------------
class CaffeModel(object):
def __init__(self):
self.layers = OrderedDict()
self.vars = OrderedDict()
self.params = OrderedDict()
def addLayer(self, layer):
ename = layer.name
while self.layers.has_key(ename):
ename = ename + 'x'
if layer.name != ename:
print "Warning: a layer with name %s was already found, using %s instead" % \
(layer.name, ename)
layer.name = ename
for v in layer.inputs: self.addVar(v)
for v in layer.outputs: self.addVar(v)
for p in layer.params: self.addParam(p)
self.layers[layer.name] = layer
def addVar(self, name):
if not self.vars.has_key(name):
self.vars[name] = CaffeBlob(name)
def addParam(self, name):
if not self.params.has_key(name):
self.params[name] = CaffeBlob(name)
def renameLayer(self, old, new):
self.layers[old].name = new
# reinsert layer with new name -- this mess is to preserve the order
layers = OrderedDict([(new,v) if k==old else (k,v)
for k,v in self.layers.items()])
self.layers = layers
def renameVar(self, old, new, afterLayer=None):
self.vars[old].name = new
if afterLayer is not None:
start = self.layers.keys().index(afterLayer) + 1
else:
start = 0
# fix all references to the variable
for layer in self.layers.values()[start:-1]:
layer.inputs = [new if x==old else x for x in layer.inputs]
layer.outputs = [new if x==old else x for x in layer.outputs]
self.vars[new] = copy.deepcopy(self.vars[old])
        # check if we can delete the old one (for afterLayer != None)
stillUsed = False
for layer in self.layers.values():
stillUsed = stillUsed or old in layer.inputs or old in layer.outputs
if not stillUsed:
del self.vars[old]
def renameParam(self, old, new):
self.params[old].name = new
# fix all references to the variable
for layer in self.layers.itervalues():
layer.params = [new if x==old else x for x in layer.params]
var = self.params[old]
del self.params[old]
self.params[new] = var
def removeParam(self, name):
del self.params[name]
def removeLayer(self, name):
# todo: fix this stuff for weight sharing
layer = self.layers[name]
for paramName in layer.params:
self.removeParam(paramName)
del self.layers[name]
def getLayersWithOutput(self, varName):
layerNames = []
for layer in self.layers.itervalues():
if varName in layer.outputs:
layerNames.append(layer.name)
return layerNames
def getLayersWithInput(self, varName):
layerNames = []
for layer in self.layers.itervalues():
if varName in layer.inputs:
layerNames.append(layer.name)
return layerNames
def reshape(self):
for layer in self.layers.itervalues():
layer.reshape(self)
def display(self):
for layer in self.layers.itervalues():
layer.display()
for var in self.vars.itervalues():
print 'Variable \'{}\''.format(var.name)
print ' + shape (computed): %s' % (var.shape,)
for par in self.params.itervalues():
print 'Parameter \'{}\''.format(par.name)
print ' + data found: %s' % (par.shape is not None)
print ' + data shape: %s' % (par.shape,)
def transpose(self):
for var in self.vars.itervalues():
if var.transposable: var.transpose()
for layer in self.layers.itervalues():
layer.transpose(self)
def getParentTransforms(self, variableName, topLayerName=None):
layerNames = self.layers.keys()
if topLayerName:
layerIndex = layerNames.index(topLayerName)
else:
layerIndex = len(self.layers) + 1
transforms = OrderedDict()
transforms[variableName] = CaffeTransform([1.,1.], [1.,1.], [1.,1.])
for layerName in reversed(layerNames[0:layerIndex]):
layer = self.layers[layerName]
layerTfs = layer.getTransforms(self)
for i, inputName in enumerate(layer.inputs):
tfs = []
if transforms.has_key(inputName):
tfs.append(transforms[inputName])
for j, outputName in enumerate(layer.outputs):
if layerTfs[i][j] is None: continue
if transforms.has_key(outputName):
composed = composeTransforms(layerTfs[i][j], transforms[outputName])
tfs.append(composed)
if len(tfs) > 0:
# should resolve conflicts, not simply pick the first tf
transforms[inputName] = tfs[0]
return transforms
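    # Note on getParentTransforms() (added for clarity): it walks the layers backwards from
    # topLayerName (or from the end of the network) and accumulates, for every ancestor
    # variable, the composed CaffeTransform mapping it to variableName. CaffeCrop.reshape()
    # uses it to find a common ancestor of its two inputs and derive the crop offsets.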
|
|
__author__ = "Fabio Giuseppe Di Benedetto"
from rcmp_inter_communication import threading
from rcmp_command import logging, RCMP_LOGGER_NAME, CREATE_SERVICE_SPACE, START_SERVICE_LAUNCHER, \
START_SERVICE_NODE, DELETE_SERVICE_SPACE, KILL_SERVICE_LAUNCHER, KILL_SERVICE_NODE, \
DELETE_ALL_L_SERVICE_SPACE, KILL_ALL_L_SERVICE_NODE, KILL_ALL_L_SERVICE_LAUNCHER, RCMPCommandHandler
from rcmp_service_command import ServiceNode, ServiceLauncher, ServiceSpace
class MServiceManager:
postfix = "autogn"
def __init__(self):
self._logger = logging.getLogger(RCMP_LOGGER_NAME)
# we keep track of the services managed by the platform node instance
# (the running services on it)
self.ms = {}
# lock for synchronization around managed_services
self.ms_lock = threading.RLock()
    # For all the methods relating to managed services:
    # key_mask is a dict whose values are the keys used to access the
    # managed services (a mask of indexes): we re-use the params we
    # received with the request to accomplish this task; to
    # traverse key_mask we use km_traversal_mask, which is the list
    # of keys in key_mask whose values we need; the order in
    # which we pick the elements of km_traversal_mask defines the
    # level in this sort of tree
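    # For example (hypothetical values only), a START_SERVICE_NODE request could produce
    # a key_mask such as
    #   {RCMPCommandHandler.COMMAND_KEY: START_SERVICE_NODE,
    #    ServiceSpace.SS_ADDRESS_KEY: "127.0.0.1",
    #    ServiceSpace.SS_PORT_KEY: "11311",
    #    ServiceNode.SN_NAME_KEY: "my_node"}
    # and the corresponding node data would then live under
    #   self.ms["127.0.0.1"]["11311"][ServiceSpace.SN_LIST]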
def add_managed_service(self, key_mask, ms_data):
"""Add a managed service."""
try:
self.ms_lock.acquire()
is_new = False
            if ServiceSpace.SS_ADDRESS_KEY in key_mask and ServiceSpace.SS_PORT_KEY in key_mask:
                # the params needed to identify the service space are available
if key_mask[ServiceSpace.SS_ADDRESS_KEY] not in self.ms:
# the specified service space address doesn't exist
self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]] = {}
is_new = True
if key_mask[ServiceSpace.SS_PORT_KEY] not in self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]]:
# the specified service space port doesn't exist
self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]][key_mask[ServiceSpace.SS_PORT_KEY]] = {}
is_new = True
target = self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]][key_mask[ServiceSpace.SS_PORT_KEY]]
if is_new:
if key_mask[RCMPCommandHandler.COMMAND_KEY] == CREATE_SERVICE_SPACE:
if ServiceSpace.SS_NAME_KEY in key_mask:
target[ServiceSpace.SS_NAME_KEY] = key_mask[ServiceSpace.SS_NAME_KEY]
target[ServiceSpace.SS_OWNED] = True
target.update(ms_data)
target[ServiceSpace.SN_LIST] = []
target[ServiceSpace.SL_LIST] = []
if key_mask[RCMPCommandHandler.COMMAND_KEY] == START_SERVICE_NODE:
sn = {}
                    # when adding a node it can happen that the node already exists: the new running node
                    # kills the old one, but we must clean the data structure and put in the new info
idx = 0
for old_sn in target[ServiceSpace.SN_LIST]:
if old_sn[ServiceNode.SN_NAME_KEY] == key_mask[ServiceNode.SN_NAME_KEY]:
target[ServiceSpace.SN_LIST].pop(idx)
idx += 1
if ServiceNode.SN_PACKAGE_KEY in key_mask:
sn[ServiceNode.SN_PACKAGE_KEY] = key_mask[ServiceNode.SN_PACKAGE_KEY]
if ServiceNode.SN_TYPE_KEY in key_mask:
sn[ServiceNode.SN_TYPE_KEY] = key_mask[ServiceNode.SN_TYPE_KEY]
if ServiceNode.SN_NAME_KEY in key_mask:
sn[ServiceNode.SN_NAME_KEY] = key_mask[ServiceNode.SN_NAME_KEY]
if ServiceNode.SN_AUTO_GEN_NAME_KEY in key_mask:
sn[ServiceNode.SN_AUTO_GEN_NAME_KEY] = key_mask[ServiceNode.SN_AUTO_GEN_NAME_KEY]
if ServiceNode.SN_PARAMS_KEY in key_mask:
sn[ServiceNode.SN_PARAMS_KEY] = key_mask[ServiceNode.SN_PARAMS_KEY]
if ServiceSpace.SS_OWNED in target and target[ServiceSpace.SS_OWNED]:
sn[ServiceNode.SN_LOCAL_KEY] = True
sn.update(ms_data)
target[ServiceSpace.SN_LIST].append(sn)
if key_mask[RCMPCommandHandler.COMMAND_KEY] == START_SERVICE_LAUNCHER:
sl = {}
if ServiceLauncher.SL_PACKAGE_KEY in key_mask:
sl[ServiceLauncher.SL_PACKAGE_KEY] = key_mask[ServiceLauncher.SL_PACKAGE_KEY]
if ServiceLauncher.SL_F_LAUNCHER_KEY in key_mask:
sl[ServiceLauncher.SL_F_LAUNCHER_KEY] = key_mask[ServiceLauncher.SL_F_LAUNCHER_KEY]
if ServiceLauncher.SL_NAME_KEY in key_mask:
sl[ServiceLauncher.SL_NAME_KEY] = key_mask[ServiceLauncher.SL_NAME_KEY]
else:
idx = 0
for old_sl in target[ServiceSpace.SL_LIST]:
if ServiceLauncher.SL_PACKAGE_KEY in key_mask and \
old_sl[ServiceLauncher.SL_PACKAGE_KEY] == key_mask[ServiceLauncher.SL_PACKAGE_KEY]:
if ServiceLauncher.SL_F_LAUNCHER_KEY in key_mask and \
old_sl[ServiceLauncher.SL_F_LAUNCHER_KEY] == \
key_mask[ServiceLauncher.SL_F_LAUNCHER_KEY]:
if ServiceLauncher.SL_AUTO_GEN_NAME_KEY in old_sl and \
old_sl[ServiceLauncher.SL_AUTO_GEN_NAME_KEY]:
idx += 1
sl[ServiceLauncher.SL_NAME_KEY] = "%s_%s%d" % \
(key_mask[ServiceLauncher.SL_F_LAUNCHER_KEY], self.postfix, idx)
sl[ServiceLauncher.SL_AUTO_GEN_NAME_KEY] = True
if ServiceLauncher.SL_PARAMS_KEY in key_mask:
sl[ServiceLauncher.SL_PARAMS_KEY] = key_mask[ServiceLauncher.SL_PARAMS_KEY]
sl.update(ms_data)
target[ServiceSpace.SL_LIST].append(sl)
self._logger.debug("------ key_mask ------")
self._logger.debug(key_mask)
self._logger.debug("------ ms_data ------")
self._logger.debug(ms_data)
self._logger.debug("------ ms ------")
self._logger.debug(self.ms)
self._logger.debug("---------------")
finally:
self.ms_lock.release()
def get_managed_service_data(self, key_mask):
"""Get the data for a managed service. Return a dictionary with the process object running that service
or a list of dictionaries with the characteristics specified in key_mask. In case of list, the dictionaries
have the name of the node relative to that dictionary; in case of launcher the dictionary has the process
object and another field with the list of nodes managed by that launcher."""
try:
self.ms_lock.acquire()
self._logger.debug("------ key_mask ------")
self._logger.debug(key_mask)
self._logger.debug("------ ms ------")
self._logger.debug(self.ms)
self._logger.debug("---------------")
if key_mask[RCMPCommandHandler.COMMAND_KEY] == DELETE_ALL_L_SERVICE_SPACE:
ss_l = []
for ss_address in self.ms.keys():
for ss_port in self.ms[ss_address].keys():
target = {ServiceSpace.SS_ADDRESS_KEY: ss_address, ServiceSpace.SS_PORT_KEY: ss_port}
ss_l.append(target)
if ss_l:
return ss_l[0] if len(ss_l) == 1 else ss_l
else:
                if ServiceSpace.SS_ADDRESS_KEY in key_mask and ServiceSpace.SS_PORT_KEY in key_mask:
                    # the params needed to identify the service space are available
if key_mask[ServiceSpace.SS_ADDRESS_KEY] not in self.ms:
raise ValueError("%s not managed by this platform node instance" %
key_mask[ServiceSpace.SS_ADDRESS_KEY])
if key_mask[ServiceSpace.SS_PORT_KEY] not in self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]]:
raise ValueError("%s not managed by this platform node instance" %
key_mask[ServiceSpace.SS_PORT_KEY])
target = self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]][key_mask[ServiceSpace.SS_PORT_KEY]]
if key_mask[RCMPCommandHandler.COMMAND_KEY] == DELETE_SERVICE_SPACE:
return target
if key_mask[RCMPCommandHandler.COMMAND_KEY] == KILL_SERVICE_NODE:
if ServiceNode.SN_NAME_KEY in key_mask and key_mask[ServiceNode.SN_NAME_KEY]:
for sn in target[ServiceSpace.SN_LIST]:
if sn[ServiceNode.SN_NAME_KEY] == key_mask[ServiceNode.SN_NAME_KEY]:
return sn
elif ServiceNode.SN_PACKAGE_KEY in key_mask and key_mask[ServiceNode.SN_PACKAGE_KEY] and \
ServiceNode.SN_TYPE_KEY in key_mask and key_mask[ServiceNode.SN_TYPE_KEY]:
                            # we can use package and type only if the user doesn't know the name (it is
                            # auto-generated by the ROS platform): there can be more than one in case of the
                            # anonymous option (which adds a numeric postfix to keep names unique)
sn_l = []
for sn in target[ServiceSpace.SN_LIST]:
if sn[ServiceNode.SN_PACKAGE_KEY] == key_mask[ServiceNode.SN_PACKAGE_KEY] and \
sn[ServiceNode.SN_TYPE_KEY] == key_mask[ServiceNode.SN_TYPE_KEY] and \
ServiceNode.SN_AUTO_GEN_NAME_KEY in sn and sn[ServiceNode.SN_AUTO_GEN_NAME_KEY]:
sn_l.append(sn)
if sn_l:
return sn_l[0] if len(sn_l) == 1 else sn_l
                        # if we arrive here, it is because we haven't found a node with the specified name
if ServiceNode.SN_NAME_KEY in key_mask and key_mask[ServiceNode.SN_NAME_KEY]:
raise ValueError("Service node '%s' isn't managed by this platform node instance" %
key_mask[ServiceNode.SN_NAME_KEY])
else:
raise ValueError("Service node without user defined name isn't managed "
"by this platform node instance")
if key_mask[RCMPCommandHandler.COMMAND_KEY] == KILL_ALL_L_SERVICE_NODE:
sn_l = []
for sn in target[ServiceSpace.SN_LIST]:
sn_l.append(sn)
if sn_l:
return sn_l[0] if len(sn_l) == 1 else sn_l
if key_mask[RCMPCommandHandler.COMMAND_KEY] == KILL_SERVICE_LAUNCHER:
if ServiceLauncher.SL_NAME_KEY in key_mask and key_mask[ServiceLauncher.SL_NAME_KEY]:
for sl in target[ServiceSpace.SL_LIST]:
if sl[ServiceLauncher.SL_NAME_KEY] == key_mask[ServiceLauncher.SL_NAME_KEY]:
return sl
elif ServiceLauncher.SL_PACKAGE_KEY in key_mask and \
key_mask[ServiceLauncher.SL_PACKAGE_KEY] and \
ServiceLauncher.SL_F_LAUNCHER_KEY in key_mask and \
key_mask[ServiceLauncher.SL_F_LAUNCHER_KEY]:
                            # we can use the launcher file name only if the user doesn't know the name
                            # (it is auto-generated): there can be more than one
sl_l = []
for sl in target[ServiceSpace.SL_LIST]:
if sl[ServiceLauncher.SL_PACKAGE_KEY] == key_mask[ServiceLauncher.SL_PACKAGE_KEY] and \
sl[ServiceLauncher.SL_F_LAUNCHER_KEY] == \
key_mask[ServiceLauncher.SL_F_LAUNCHER_KEY] and \
ServiceLauncher.SL_AUTO_GEN_NAME_KEY in sl and \
sl[ServiceLauncher.SL_AUTO_GEN_NAME_KEY]:
sl_l.append(sl)
if sl_l:
return sl_l[0] if len(sl_l) == 1 else sl_l
                        # if we arrive here, it is because we haven't found a launcher with the specified name
if ServiceLauncher.SL_NAME_KEY in key_mask and key_mask[ServiceLauncher.SL_NAME_KEY]:
raise ValueError("Service launcher '%s' isn't managed by this platform node instance" %
key_mask[ServiceLauncher.SL_NAME_KEY])
else:
raise ValueError("Service launcher without user defined name isn't managed "
"by this platform node instance")
if key_mask[RCMPCommandHandler.COMMAND_KEY] == KILL_ALL_L_SERVICE_LAUNCHER:
sl_l = []
for sl in target[ServiceSpace.SL_LIST]:
sl_l.append(sl)
if sl_l:
return sl_l[0] if len(sl_l) == 1 else sl_l
finally:
self.ms_lock.release()
def get_managed_service(self, key_mask):
"""Get the branch of managed services"""
try:
self.ms_lock.acquire()
branch = None
            if ServiceSpace.SS_ADDRESS_KEY in key_mask and ServiceSpace.SS_PORT_KEY in key_mask:
                # the params needed to identify the service space are available
# if key_mask[ServiceSpace.SS_ADDRESS_KEY] not in self.ms:
# raise ValueError("%s not managed by this platform node instance" %
# key_mask[ServiceSpace.SS_ADDRESS_KEY])
# if key_mask[ServiceSpace.SS_PORT_KEY] not in self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]]:
# raise ValueError("%s not managed by this platform node instance" %
# key_mask[ServiceSpace.SS_PORT_KEY])
if key_mask[ServiceSpace.SS_ADDRESS_KEY] not in self.ms or \
key_mask[ServiceSpace.SS_PORT_KEY] not in self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]]:
self._logger.error("Trying to access unavailable services using service space %s:%s" %
(key_mask[ServiceSpace.SS_ADDRESS_KEY], key_mask[ServiceSpace.SS_PORT_KEY]))
self._logger.error("---- ms dump - begin ----")
self._logger.error(self.ms)
self._logger.error("---- ms dump - end ----")
else:
branch = self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]][key_mask[ServiceSpace.SS_PORT_KEY]]
return branch
finally:
self.ms_lock.release()
def delete_managed_service(self, key_mask, ms_data):
"""Delete a managed service."""
try:
self.ms_lock.acquire()
self._logger.debug("------ key_mask ------")
self._logger.debug(key_mask)
self._logger.debug("------ ms_data ------")
self._logger.debug(ms_data)
self._logger.debug("------ ms before delete------")
self._logger.debug(self.ms)
self._logger.debug("---------------")
            if ServiceSpace.SS_ADDRESS_KEY in key_mask and ServiceSpace.SS_PORT_KEY in key_mask:
                # the params needed to identify the service space are available
if key_mask[ServiceSpace.SS_ADDRESS_KEY] not in self.ms:
raise ValueError("%s not managed by this platform node instance" %
key_mask[ServiceSpace.SS_ADDRESS_KEY])
if key_mask[ServiceSpace.SS_PORT_KEY] not in self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]]:
raise ValueError("%s not managed by this platform node instance" %
key_mask[ServiceSpace.SS_PORT_KEY])
target = self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]][key_mask[ServiceSpace.SS_PORT_KEY]]
if key_mask[RCMPCommandHandler.COMMAND_KEY] == DELETE_SERVICE_SPACE:
if len(self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]].keys()) > 1:
                        # there are other service spaces at this address and they must be preserved
return self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]].pop(key_mask[ServiceSpace.SS_PORT_KEY])
else:
return self.ms.pop(key_mask[ServiceSpace.SS_ADDRESS_KEY])
if key_mask[RCMPCommandHandler.COMMAND_KEY] == KILL_SERVICE_NODE or \
key_mask[RCMPCommandHandler.COMMAND_KEY] == KILL_ALL_L_SERVICE_NODE:
if (ServiceNode.SN_NAME_KEY in key_mask and key_mask[ServiceNode.SN_NAME_KEY]) or \
(ServiceNode.SN_NAME_KEY in ms_data and ms_data[ServiceNode.SN_NAME_KEY]):
if len(target[ServiceSpace.SN_LIST]) == 1 and len(target[ServiceSpace.SL_LIST]) == 0 \
and not(ServiceSpace.SS_OWNED in target and target[ServiceSpace.SS_OWNED]):
                            # only sn_list has one element (presumably the one we want to delete) and this is a
                            # service space that the platform node instance doesn't own
if ServiceNode.SN_NAME_KEY in key_mask and key_mask[ServiceNode.SN_NAME_KEY]:
if target[ServiceSpace.SN_LIST][0][ServiceNode.SN_NAME_KEY] != \
key_mask[ServiceNode.SN_NAME_KEY]:
raise ValueError("Service node '%s' isn't managed by this platform node instance" %
key_mask[ServiceNode.SN_NAME_KEY])
elif ServiceNode.SN_NAME_KEY in ms_data and ms_data[ServiceNode.SN_NAME_KEY]:
if target[ServiceSpace.SN_LIST][0][ServiceNode.SN_NAME_KEY] != \
ms_data[ServiceNode.SN_NAME_KEY]:
raise ValueError("Service node '%s' isn't managed by this platform node instance" %
ms_data[ServiceNode.SN_NAME_KEY])
if len(self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]].keys()) > 1:
# there are other service spaces in this address and must be preserved
return self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]]\
.pop(key_mask[ServiceSpace.SS_PORT_KEY])
else:
return self.ms.pop(key_mask[ServiceSpace.SS_ADDRESS_KEY])
# in all other cases we must delete only the element
idx = 0
for sn in target[ServiceSpace.SN_LIST]:
if ServiceNode.SN_NAME_KEY in key_mask and key_mask[ServiceNode.SN_NAME_KEY]:
if sn[ServiceNode.SN_NAME_KEY] == key_mask[ServiceNode.SN_NAME_KEY]:
return target[ServiceSpace.SN_LIST].pop(idx)
elif ServiceNode.SN_NAME_KEY in ms_data and ms_data[ServiceNode.SN_NAME_KEY]:
if sn[ServiceNode.SN_NAME_KEY] == ms_data[ServiceNode.SN_NAME_KEY]:
return target[ServiceSpace.SN_LIST].pop(idx)
idx += 1
                        # if we arrive here it is because we haven't found a node with the specified name
                        missing_name = key_mask.get(ServiceNode.SN_NAME_KEY) or ms_data.get(ServiceNode.SN_NAME_KEY)
                        raise ValueError("Service node '%s' isn't managed by this platform node instance" %
                                         missing_name)
if key_mask[RCMPCommandHandler.COMMAND_KEY] == KILL_SERVICE_LAUNCHER or \
key_mask[RCMPCommandHandler.COMMAND_KEY] == KILL_ALL_L_SERVICE_LAUNCHER:
if (ServiceLauncher.SL_NAME_KEY in key_mask and key_mask[ServiceLauncher.SL_NAME_KEY]) or \
(ServiceLauncher.SL_NAME_KEY in ms_data and ms_data[ServiceLauncher.SL_NAME_KEY]):
if len(target[ServiceSpace.SL_LIST]) == 1 and len(target[ServiceSpace.SN_LIST]) == 0 \
and not(ServiceSpace.SS_OWNED in target and target[ServiceSpace.SS_OWNED]):
                            # only sl_list has one element (presumably the one we want to delete) and this is a
                            # service space that the platform node instance doesn't own
if ServiceLauncher.SL_NAME_KEY in key_mask and key_mask[ServiceLauncher.SL_NAME_KEY]:
if target[ServiceSpace.SL_LIST][0][ServiceLauncher.SL_NAME_KEY] != \
key_mask[ServiceLauncher.SL_NAME_KEY]:
raise ValueError("Service launcher '%s' isn't managed by this platform node instance" %
key_mask[ServiceLauncher.SL_NAME_KEY])
elif ServiceLauncher.SL_NAME_KEY in ms_data and ms_data[ServiceLauncher.SL_NAME_KEY]:
if target[ServiceSpace.SL_LIST][0][ServiceLauncher.SL_NAME_KEY] != \
ms_data[ServiceLauncher.SL_NAME_KEY]:
raise ValueError("Service launcher '%s' isn't managed by this platform node instance" %
ms_data[ServiceLauncher.SL_NAME_KEY])
if len(self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]].keys()) > 1:
# there are other service spaces in this address and must be preserved
return self.ms[key_mask[ServiceSpace.SS_ADDRESS_KEY]]\
.pop(key_mask[ServiceSpace.SS_PORT_KEY])
else:
return self.ms.pop(key_mask[ServiceSpace.SS_ADDRESS_KEY])
# in all other cases we must delete only the element
idx = 0
for sl in target[ServiceSpace.SL_LIST]:
if ServiceLauncher.SL_NAME_KEY in key_mask and key_mask[ServiceLauncher.SL_NAME_KEY]:
if sl[ServiceLauncher.SL_NAME_KEY] == key_mask[ServiceLauncher.SL_NAME_KEY]:
return target[ServiceSpace.SL_LIST].pop(idx)
elif ServiceLauncher.SL_NAME_KEY in ms_data and ms_data[ServiceLauncher.SL_NAME_KEY]:
if sl[ServiceLauncher.SL_NAME_KEY] == ms_data[ServiceLauncher.SL_NAME_KEY]:
return target[ServiceSpace.SL_LIST].pop(idx)
idx += 1
                        # if we arrive here it is because we haven't found a launcher with the specified name
                        missing_name = key_mask.get(ServiceLauncher.SL_NAME_KEY) or ms_data.get(ServiceLauncher.SL_NAME_KEY)
                        raise ValueError("Service launcher '%s' isn't managed by this platform node instance" %
                                         missing_name)
finally:
self.ms_lock.release()
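# Hedged editorial sketch (not part of the original module): the accessors above assume
# ``self.ms`` is a nested dict keyed first by service space address and then by port, with
# per-space lists of service nodes and launchers. The literal below only illustrates that
# assumed shape; the plain string keys stand in for the ServiceSpace/ServiceNode constants.
_EXAMPLE_MS_LAYOUT = {
    "10.0.0.1": {                                    # ServiceSpace.SS_ADDRESS_KEY value
        "11311": {                                   # ServiceSpace.SS_PORT_KEY value
            "sn_list": [{"sn_name": "talker"}],      # ServiceSpace.SN_LIST entries
            "sl_list": [],                           # ServiceSpace.SL_LIST entries
            "ss_owned": True,                        # ServiceSpace.SS_OWNED flag
        },
    },
}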
|
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
#
#
# Parts of this code are from IPyVolume (24.05.2017), used here under
# this copyright and license with permission from the author
# (see https://github.com/jupyter-widgets/ipywidgets/pull/1387)
"""
Functions for generating embeddable HTML/javascript of a widget.
"""
import json
from .widgets import Widget, DOMWidget
from .widgets.widget_link import Link
from ._version import __html_manager_version__
snippet_template = u"""
{load}
<script type="application/vnd.jupyter.widget-state+json">
{json_data}
</script>
{widget_views}
"""
load_template = u"""<script src="{embed_url}" crossorigin="anonymous"></script>"""
load_requirejs_template = u"""
<!-- Load require.js. Delete this if your page already loads require.js -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" crossorigin="anonymous"></script>
<script src="{embed_url}" crossorigin="anonymous"></script>
"""
requirejs_snippet_template = u"""
<script type="application/vnd.jupyter.widget-state+json">
{json_data}
</script>
{widget_views}
"""
html_template = u"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>{title}</title>
</head>
<body>
{snippet}
</body>
</html>
"""
widget_view_template = u"""<script type="application/vnd.jupyter.widget-view+json">
{view_spec}
</script>"""
DEFAULT_EMBED_SCRIPT_URL = u'https://unpkg.com/@jupyter-widgets/html-manager@%s/dist/embed.js'%__html_manager_version__
DEFAULT_EMBED_REQUIREJS_URL = u'https://unpkg.com/@jupyter-widgets/html-manager@%s/dist/embed-amd.js'%__html_manager_version__
def _find_widget_refs_by_state(widget, state):
"""Find references to other widgets in a widget's state"""
# Copy keys to allow changes to state during iteration:
keys = tuple(state.keys())
for key in keys:
value = getattr(widget, key)
# Trivial case: Direct references to other widgets:
if isinstance(value, Widget):
yield value
# Also check for buried references in known, JSON-able structures
# Note: This might miss references buried in more esoteric structures
elif isinstance(value, (list, tuple)):
for item in value:
if isinstance(item, Widget):
yield item
elif isinstance(value, dict):
for item in value.values():
if isinstance(item, Widget):
yield item
def _get_recursive_state(widget, store=None, drop_defaults=False):
"""Gets the embed state of a widget, and all other widgets it refers to as well"""
if store is None:
store = dict()
state = widget._get_embed_state(drop_defaults=drop_defaults)
store[widget.model_id] = state
# Loop over all values included in state (i.e. don't consider excluded values):
for ref in _find_widget_refs_by_state(widget, state['state']):
if ref.model_id not in store:
_get_recursive_state(ref, store, drop_defaults=drop_defaults)
return store
def add_resolved_links(store, drop_defaults):
"""Adds the state of any link models between two models in store"""
for widget_id, widget in Widget.widgets.items(): # go over all widgets
if isinstance(widget, Link) and widget_id not in store:
if widget.source[0].model_id in store and widget.target[0].model_id in store:
store[widget.model_id] = widget._get_embed_state(drop_defaults=drop_defaults)
def dependency_state(widgets, drop_defaults=True):
"""Get the state of all widgets specified, and their dependencies.
This uses a simple dependency finder, including:
- any widget directly referenced in the state of an included widget
- any widget in a list/tuple attribute in the state of an included widget
- any widget in a dict attribute in the state of an included widget
- any jslink/jsdlink between two included widgets
    What this algorithm does not do:
- Find widget references in nested list/dict structures
- Find widget references in other types of attributes
Note that this searches the state of the widgets for references, so if
a widget reference is not included in the serialized state, it won't
be considered as a dependency.
"""
# collect the state of all relevant widgets
if widgets is None:
# Get state of all widgets, no smart resolution needed.
state = Widget.get_manager_state(drop_defaults=drop_defaults, widgets=None)['state']
else:
try:
widgets[0]
except (IndexError, TypeError):
widgets = [widgets]
state = {}
for widget in widgets:
_get_recursive_state(widget, state, drop_defaults)
# Add any links between included widgets:
add_resolved_links(state, drop_defaults)
return state
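# Hedged editorial example (not part of the original module): dependency_state() is meant
# to produce a reduced ``state`` that is then handed to embed_data()/embed_snippet(). The
# helper below is an illustration only; FloatSlider and jslink are standard ipywidgets
# names, but using them here assumes the full ipywidgets package is importable.
def _example_dependency_state_usage():
    from ipywidgets import FloatSlider, jslink
    a, b = FloatSlider(), FloatSlider()
    jslink((a, 'value'), (b, 'value'))
    # Only a, b and the jslink between them end up in the reduced state.
    reduced = dependency_state([a, b], drop_defaults=True)
    return embed_snippet(views=[a, b], state=reduced)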
def embed_data(views, drop_defaults=True, state=None):
"""Gets data for embedding.
Use this to get the raw data for embedding if you have special
formatting needs.
Parameters
----------
views: widget or collection of widgets or None
The widgets to include views for. If None, all DOMWidgets are
included (not just the displayed ones).
drop_defaults: boolean
Whether to drop default values from the widget states.
state: dict or None (default)
The state to include. When set to None, the state of all widgets
        known to the widget manager is included. Otherwise it uses the
passed state directly. This allows for end users to include a
smaller state, under the responsibility that this state is
sufficient to reconstruct the embedded views.
Returns
-------
A dictionary with the following entries:
manager_state: dict of the widget manager state data
view_specs: a list of widget view specs
"""
if views is None:
views = [w for w in Widget.widgets.values() if isinstance(w, DOMWidget)]
else:
try:
views[0]
except (IndexError, TypeError):
views = [views]
if state is None:
# Get state of all known widgets
state = Widget.get_manager_state(drop_defaults=drop_defaults, widgets=None)['state']
# Rely on ipywidget to get the default values
json_data = Widget.get_manager_state(widgets=[])
# but plug in our own state
json_data['state'] = state
view_specs = [w.get_view_spec() for w in views]
return dict(manager_state=json_data, view_specs=view_specs)
def embed_snippet(views,
drop_defaults=True,
state=None,
indent=2,
embed_url=None,
requirejs=True
):
"""Return a snippet that can be embedded in an HTML file.
Parameters
----------
views: widget or collection of widgets or None
The widgets to include views for. If None, all DOMWidgets are
included (not just the displayed ones).
drop_defaults: boolean
Whether to drop default values from the widget states.
state: dict or None (default)
The state to include. When set to None, the state of all widgets
        known to the widget manager is included. Otherwise it uses the
passed state directly. This allows for end users to include a
smaller state, under the responsibility that this state is
sufficient to reconstruct the embedded views.
indent: integer, string or None
The indent to use for the JSON state dump. See `json.dumps` for
full description.
embed_url: string or None
Allows for overriding the URL used to fetch the widget manager
for the embedded code. This defaults (None) to an `unpkg` CDN url.
requirejs: boolean (True)
Enables the requirejs-based embedding, which allows for custom widgets.
If True, the embed_url should point to an AMD module.
Returns
-------
A unicode string with an HTML snippet containing several `<script>` tags.
"""
data = embed_data(views, drop_defaults=drop_defaults, state=state)
widget_views = u'\n'.join(
widget_view_template.format(**dict(view_spec=json.dumps(view_spec)))
for view_spec in data['view_specs']
)
if embed_url is None:
embed_url = DEFAULT_EMBED_REQUIREJS_URL if requirejs else DEFAULT_EMBED_SCRIPT_URL
load = load_requirejs_template if requirejs else load_template
values = {
'load': load.format(embed_url=embed_url),
'json_data': json.dumps(data['manager_state'], indent=indent),
'widget_views': widget_views,
}
return snippet_template.format(**values)
def embed_minimal_html(fp, views, title=u'IPyWidget export', template=None, **kwargs):
"""Write a minimal HTML file with widget views embedded.
Parameters
----------
fp: filename or file-like object
The file to write the HTML output to.
views: widget or collection of widgets or None
The widgets to include views for. If None, all DOMWidgets are
included (not just the displayed ones).
    title: title for the HTML page
    template: template string for the HTML
Further it accepts keyword args similar to `embed_snippet`.
"""
snippet = embed_snippet(views, **kwargs)
values = {
'title': title,
'snippet': snippet,
}
if template is None:
template = html_template
html_code = template.format(**values)
# Check if fp is writable:
if hasattr(fp, 'write'):
fp.write(html_code)
else:
# Assume fp is a filename:
with open(fp, "w") as f:
f.write(html_code)
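# Hedged editorial demo (not part of the original module): writing a standalone HTML page
# for a single widget. Guarded so it only runs when this file is executed directly; the
# output filename and FloatSlider usage are assumptions for illustration.
if __name__ == "__main__":
    from ipywidgets import FloatSlider
    slider = FloatSlider(value=4.0, description="demo")
    embed_minimal_html("export.html", views=[slider], title="Widget export")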
|
|
from jinja2 import FileSystemLoader, Environment
from shutil import copyfile
import datetime
import json
import numpy as np
import os
import pprint
import shutil
import time
import re
import vendors.google
import vendors.microsoft
import vendors.clarifai_
import vendors.ibm
import vendors.cloudsight_
import vendors.rekognition
SETTINGS = None
def settings(name):
"""Fetch a settings parameter."""
# Initialize settings if necessary.
global SETTINGS
if SETTINGS is None:
# Change this dict to suit your taste.
SETTINGS = {
'api_keys_filepath' : './api_keys.json',
'input_images_dir' : 'input_images',
'output_dir' : 'output',
'static_dir' : 'static',
'output_image_height' : 200,
'vendors' : {
'google' : vendors.google,
'msft' : vendors.microsoft,
'clarifai' : vendors.clarifai_,
'ibm' : vendors.ibm,
'cloudsight' : vendors.cloudsight_,
'rekognition' : vendors.rekognition,
},
'resize': True,
'statistics': [
'response_time',
'tags_count',
],
'tagged_images': False,
'tags_filepath': './tags.json',
}
if SETTINGS['tagged_images']:
SETTINGS['statistics'] += [
'matching_tags_count',
'matching_confidence'
]
# Load API keys
with open(SETTINGS['api_keys_filepath']) as data_file:
SETTINGS['api_keys'] = json.load(data_file)
return SETTINGS[name]
if settings('resize'):
from PIL import Image
def log_status(filepath, vendor_name, msg):
filename = os.path.basename(filepath)
print("%s -> %s" % ((filename + ", " + vendor_name).ljust(40), msg))
def resize_and_save(input_image_filepath, output_image_filepath):
image = Image.open(input_image_filepath)
    # PIL's Image.size is (width, height)
    width, height = image.size
aspect_ratio = float(width) / float(height)
new_height = settings('output_image_height')
new_width = int(aspect_ratio * new_height)
image.thumbnail((new_width, new_height))
image.save(output_image_filepath)
def render_from_template(directory, template_name, **kwargs):
loader = FileSystemLoader(directory)
env = Environment(loader=loader)
template = env.get_template(template_name)
return template.render(**kwargs)
def vendor_statistics(image_results):
vendor_stats = {}
if len(settings('statistics')) == 0:
return vendor_stats
for vendor in settings('vendors'):
vendor_results = []
for image_result in image_results:
for res in image_result['vendors']:
if res['vendor_name'] == vendor:
vendor_results.append(res)
vendor_stats[vendor] = []
for stat_key in settings('statistics'):
values = np.array([vr[stat_key] for vr in vendor_results])
vendor_stats[vendor].append({
'name': 'mean_' + stat_key,
'value': np.average(values)
})
vendor_stats[vendor].append({
'name': 'stdev_' + stat_key,
'value': np.std(values)
})
return vendor_stats
def find_matching_tags(tags, standardized_result):
matching_tags = set()
for tag in tags:
p = re.compile(tag, re.IGNORECASE)
for res_tag in standardized_result['tags']:
if p.search(res_tag[0]):
matching_tags.add(res_tag)
return list(matching_tags)
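# Hedged editorial example (not part of the original script): find_matching_tags() does a
# case-insensitive regex search of each expected tag against the vendor's (tag, confidence)
# pairs; the data below is made up for illustration.
def _example_find_matching_tags():
    standardized_result = {'tags': [('Cat', 0.98), ('dog', 0.90)]}
    # Returns [('Cat', 0.98)]: 'cat' matches case-insensitively, 'bird' matches nothing.
    return find_matching_tags(['cat', 'bird'], standardized_result)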
def process_all_images():
image_results = []
# Create the output directory
if not os.path.exists(settings('output_dir')):
os.makedirs(settings('output_dir'))
# Read image labels
if settings('tagged_images'):
with(open(settings('tags_filepath'), 'r')) as tags_file:
tags = json.loads(tags_file.read())
# Loop through all input images.
for filename in os.listdir(settings('input_images_dir')):
# Only process files that have these image extensions.
if not filename.endswith(('.png', '.jpg', '.jpeg', '.gif', '.bmp')):
continue
# Create a full path so we can read these files.
filepath = os.path.join(settings('input_images_dir'), filename)
# Read desired tags to compare against if specified
image_tags = []
if settings('tagged_images'):
image_tags = tags.get(filename, [])
# Create an output object for the image
image_result = {
'input_image_filepath' : filepath,
'output_image_filepath' : filename,
'vendors' : [],
'image_tags' : image_tags,
}
image_results.append(image_result)
# If there's no output file, then resize or copy the input file over
output_image_filepath = os.path.join(settings('output_dir'), filename)
if not(os.path.isfile(output_image_filepath)):
log_status(filepath, "", "writing output image in %s" % output_image_filepath)
if settings('resize'):
resize_and_save(filepath, output_image_filepath)
else:
copyfile(filepath, output_image_filepath)
# Walk through all vendor APIs to call.
for vendor_name, vendor_module in sorted(settings('vendors').iteritems(), reverse=True):
            # Figure out filename to store and retrieve cached JSON results.
output_json_filename = filename + "." + vendor_name + ".json"
output_json_path = os.path.join(settings('output_dir'), output_json_filename)
# Check if the call is already cached.
if os.path.isfile(output_json_path):
# If so, read the result from the .json file stored in the output dir.
log_status(filepath, vendor_name, "skipping API call, already cached")
with open(output_json_path, 'r') as infile:
api_result = json.loads(infile.read())
else:
# If not, make the API call for this particular vendor.
log_status(filepath, vendor_name, "calling API")
api_call_start = time.time()
api_result = vendor_module.call_vision_api(filepath, settings('api_keys'))
api_result['response_time'] = time.time() - api_call_start
# And cache the result in a .json file
log_status(filepath, vendor_name, "success, storing result in %s" % output_json_path)
with open(output_json_path, 'w') as outfile:
api_result_str = json.dumps(api_result, sort_keys=True, indent=4, separators=(',', ': '))
outfile.write(api_result_str)
# Sleep so we avoid hitting throttling limits
time.sleep(1)
# Parse the JSON result we fetched (via API call or from cache)
standardized_result = vendor_module.get_standardized_result(api_result)
# Sort tags if found
if 'tags' in standardized_result:
standardized_result['tags'].sort(key=lambda tup: tup[1], reverse=True)
# If expected tags are provided, calculate accuracy
tags_count = 0
matching_tags = []
matching_confidence = 0
if 'tags' in standardized_result:
tags_count = len(standardized_result['tags'])
if settings('tagged_images'):
matching_tags = find_matching_tags(image_tags, standardized_result)
if len(matching_tags) > 0:
matching_confidence = sum([t[1] for t in matching_tags]) / len(matching_tags)
image_result['vendors'].append({
'api_result' : api_result,
'vendor_name' : vendor_name,
'standardized_result' : standardized_result,
'output_json_filename' : output_json_filename,
'response_time' : api_result['response_time'],
'tags_count' : tags_count,
'matching_tags' : matching_tags,
'matching_tags_count' : len(matching_tags),
'matching_confidence' : matching_confidence,
})
# Compute global statistics for each vendor
vendor_stats = vendor_statistics(image_results)
# Sort image_results output by filename (so that future runs produce comparable output)
image_results.sort(key=lambda image_result: image_result['output_image_filepath'])
# Render HTML file with all results.
output_html = render_from_template(
'.',
os.path.join(settings('static_dir'), 'template.html'),
image_results=image_results,
vendor_stats=vendor_stats,
process_date=datetime.datetime.today()
)
# Write HTML output.
output_html_filepath = os.path.join(settings('output_dir'), 'output.html')
with open(output_html_filepath, 'w') as output_html_file:
output_html_file.write(output_html.encode('utf-8'))
if __name__ == "__main__":
process_all_images()
|
|
# Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sys
import json
import time
import requests
from uai.utils.utils import _verfy_ac
from uai.utils.logger import uai_logger
from uaitrain.arch_conf.base_conf import *
from uai.utils.retcode_checker import *
MAX_POLL_STEPS = 200
DEPLOY_ID_FILE = './deploy_id.log'
UCLOUD_API_URL = 'http://api.ucloud.cn'
PARAMS_DEFAULT_REGION = 'cn-bj2'
PARAMS_DEFAULT_ZONE = 'cn-bj2-04'
PARAMS_DEFAULT_BUSINESSGROUP = "Default"
PACKAGE_TYPE = {'os':'OS', 'language':'Python', 'ai_arch_v':'AIFrame', 'accelerator':'Accelerator'}
class UaiCmdTool(object):
""" The Base Create Tool Class with UAI
"""
def __init__(self, parser):
self.parser = parser
self.conf_params = {}
self.cmd_params = {}
self._add_args()
def _add_args(self):
self.config = ArchJsonConf('', self.parser)
def cmd(self):
""" Create the task of specified task id
"""
# if self.conf_params:
self._load_args()
self._format_request_param()
self._cmd_request()
def _load_args(self):
self.config.load_params()
self.conf_params = self.config.params
def _format_request_param(self):
self._format_account_param()
if self.conf_params['commands'] == 'checkbase':
self._format_checkbase_param()
self.cmd_url = UCLOUD_API_URL
        elif self.conf_params['commands'] == 'create':
self._format_create_param()
self.cmd_url = UCLOUD_API_URL
else:
uai_logger.error("Command is not valid: {0} ".format(self.conf_params['commands']))
raise RuntimeError("Command is not valid: {0} ".format(self.conf_params['commands']))
def _format_account_param(self):
self.cmd_params['PublicKey'] = self.conf_params['public_key']
if self.conf_params['project_id']:
self.cmd_params['ProjectId'] = self.conf_params['project_id']
def _format_create_param(self):
self.cmd_params['Region'] = PARAMS_DEFAULT_REGION
self.cmd_params['Zone'] = PARAMS_DEFAULT_ZONE
self.cmd_params['TrainJobName'] = self.conf_params['job_name']
self.cmd_params['TrainPublicKey'] = self.conf_params['public_key']
self.cmd_params['TrainPrivateKey'] = self.conf_params['private_key']
self.cmd_params['TrainWorkId'] = self.conf_params['worker_id']
self.cmd_params['CodeUhubPath'] = self.conf_params['uhub_path']
self.cmd_params['DataUfilePath'] = self.conf_params['ufile_datapath']
self.cmd_params['OutputUfilePath'] = self.conf_params['ufile_outputpath']
self.cmd_params['DockerCmd'] = self.conf_params['docker_cmd']
self.cmd_params['MaxExecuteTime'] = self.conf_params['max_exectime']
self.cmd_params['Action'] = 'CreateUAITrainJob'
def _format_checkbase_param(self):
self.cmd_params['OSVersion'] = self.conf_params['os']
self.cmd_params['PythonVersion'] = self.conf_params['language']
self.cmd_params['AIFrameVersion'] = self.conf_params['ai_arch_v']
self.cmd_params['AcceleratorID'] = self.conf_params['accelerator']
# #Action must be applied at last
self.cmd_params['Action'] = 'CheckUAITrainBaseImgExists'
def _format_availableenv_param(self, type):
self.cmd_params['PkgType'] = PACKAGE_TYPE[type]
# #Action must be applied at last
self.cmd_params['Action'] = 'GetUAITrainEnvPkg'
def _cmd_request(self):
if self.conf_params['commands'] == 'availableenv':
self._cmd_writefile_package(self.conf_params['pkg_type'])
else:
self._cmd_common_request()
def _cmd_common_request(self):
        if 'Signature' in self.cmd_params:
self.cmd_params.pop('Signature')
self.cmd_params['Signature'] = _verfy_ac(self.conf_params['private_key'],
self.cmd_params)
uai_logger.info("Call http request: {0} ".format(get_request(self.cmd_url, params=self.cmd_params)))
r = requests.get(self.cmd_url, params=self.cmd_params)
self.rsp = json.loads(r.text, encoding='utf-8')
if self.rsp["RetCode"] != 0:
uai_logger.error("{0} Fail: [{1}]{2}".format(self.cmd_params["Action"], self.rsp["RetCode"], self.rsp["Message"].encode('utf-8')))
raise RuntimeError("{0} Fail: [{1}]{2}".format(self.cmd_params["Action"], self.rsp["RetCode"], self.rsp["Message"].encode('utf-8')))
else:
del self.rsp['Action']
uai_logger.info("{0} Success: {1}".format(self.cmd_params["Action"], get_response(self.rsp,0)))
def _cmd_writefile_package(self, filepath):
        if 'Signature' in self.cmd_params:
self.cmd_params.pop('Signature')
self.cmd_params['Signature'] = _verfy_ac(self.conf_params['private_key'],
self.cmd_params)
uai_logger.info("Call http request: {0} ".format(get_request(self.cmd_url, params=self.cmd_params)))
r = requests.get(self.cmd_url, params=self.cmd_params)
rsp = json.loads(r.text, encoding='utf-8')
if rsp["RetCode"] != 0:
uai_logger.error("{0} Fail: [{1}]{2}".format(self.cmd_params["Action"], rsp["RetCode"], rsp["Message"].encode('utf-8')))
raise RuntimeError(
"{0} Fail: [{1}]{2}".format(self.cmd_params["Action"], rsp["RetCode"], rsp["Message"].encode('utf-8')))
else:
with open(filepath, 'w') as f:
json.dump(rsp["PkgSet"], f)
def translate_pkg_params(self):
if self.conf_params['os'] and type(self.conf_params['os']) is str:
self.conf_params['os'] = \
self._translate_pkg_to_id('os', self.conf_params['os'].split(','))[0]
if self.conf_params['language'] and type(self.conf_params['language']) is str:
self.conf_params['language'] = \
self._translate_pkg_to_id('language', self.conf_params['language'].split(','))[0]
if self.conf_params['ai_arch_v'] and type(self.conf_params['ai_arch_v']) is str:
self.conf_params['ai_arch_v'] = \
self._translate_pkg_to_id('ai_arch_v', self.conf_params['ai_arch_v'].split(','))[0]
if self.conf_params['accelerator'] and type(self.conf_params['accelerator']) is str:
self.conf_params['accelerator'] = \
self._translate_pkg_to_id('accelerator', self.conf_params['accelerator'].split(','))[0]
def _translate_pkg_to_id(self, pkgtype, pkglist):
if not os.path.exists(pkgtype):
# raise RuntimeError("{0} file doesn't found, please download from github "
# "and put it under the same directory as deploy tool".format(pkgtype))
uai_logger.info("Start download {0} package info".format(pkgtype))
self.conf_params['pkg_type'] = pkgtype
self._format_account_param()
self._format_availableenv_param(pkgtype)
self.cmd_url = UCLOUD_API_URL
self._cmd_writefile_package(pkgtype)
resultlist = []
uai_logger.info("Start translate {0} package to their id, packages: {1}".format(pkgtype, pkglist))
for avpkg in json.load(open(pkgtype), 'utf-8'):
for pkg in pkglist:
if pkgtype == 'os' or pkgtype == 'language' or pkgtype == 'ai_arch_v':
versionsplit = pkg.rfind('-')
if versionsplit >= 0:
if avpkg["PkgName"] == pkg[:versionsplit] and (
avpkg["PkgVersion"] == "" or avpkg["PkgVersion"] == pkg[versionsplit + 1:]):
pkglist.remove(pkg)
resultlist.append(avpkg["PkgId"])
elif versionsplit < 0:
if avpkg["PkgName"] == pkg:
pkglist.remove(pkg)
resultlist.append(avpkg["PkgId"])
else:
if avpkg["PkgName"] == pkg:
pkglist.remove(pkg)
resultlist.append(avpkg["PkgId"])
if len(pkglist) != 0:
uai_logger.error("Some {0} package is not supported: {1}".format(pkgtype, pkglist))
raise RuntimeError("Some {0} package is not supported: {1}".format(pkgtype, pkglist))
uai_logger.info("End translate {0} package to their id, result: {1}".format(pkgtype, resultlist))
return resultlist
def get_base_image(self, conf_params):
self.conf_params = conf_params
self.conf_params["commands"] = "checkbase"
self._format_account_param()
self._format_checkbase_param()
self.cmd_url = UCLOUD_API_URL
self._cmd_common_request()
return self.rsp["BimgName"][0]
|
|
# Copyright 2014 Ahmed El-Hassany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from pox.lib.addresses import EthAddr
from pox.lib.addresses import IPAddr
from pox.lib.packet.ethernet import ethernet
from pox.lib.packet.arp import arp
from sts.entities.hosts import HostAbstractClass
from sts.entities.hosts import HostInterfaceAbstractClass
from sts.entities.hosts import HostInterface
from sts.entities.hosts import Host
from sts.entities.hosts import NamespaceHost
class HostAbstractClassTest(unittest.TestCase):
def get_concrete_class(self):
"""Simple mock for the abstract methods and properties"""
class HostTestImpl(HostAbstractClass):
def send(self, interface, packet):
return True
def receive(self, interface, packet):
return True
return HostTestImpl
def test_init(self):
interfaces = ["eth1"]
name = 'Host1'
hid = 1
host_cls = self.get_concrete_class()
h = host_cls(interfaces=interfaces, name=name, hid=hid)
self.assertEquals(interfaces, h.interfaces)
self.assertEquals(name, h.name)
self.assertEquals(hid, h.hid)
self.assertTrue(h.has_port(interfaces[0]))
self.assertFalse(h.has_port("fake_interface"))
class HostInterfaceAbstractClassTest(unittest.TestCase):
def get_concrete_class(self):
"""Simple mock for the abstract methods and properties"""
class HostInterfaceTestImpl(HostInterfaceAbstractClass):
@property
def port_no(self):
return 1
@property
def _hw_addr_hash(self):
return self.hw_addr.__hash__()
@property
def _ips_hashes(self):
return [ip.__hash__() for ip in self.ips]
return HostInterfaceTestImpl
def test_init(self):
hw_addr = 'ff:ee:dd:cc:bb:aa'
ip = '192.168.56.1'
ips = [ip]
name = "eth0"
iface_cls = self.get_concrete_class()
iface = iface_cls(hw_addr, ip, name)
self.assertEquals(hw_addr, iface.hw_addr)
self.assertEquals(ips, iface.ips)
self.assertEquals(name, iface.name)
self.assertTrue(iface.__hash__())
class HostInterfaceTest(unittest.TestCase):
def test_init(self):
# Arrange
hw_addr_str = "11:22:33:44:55:66"
hw_addr = EthAddr(hw_addr_str)
ip_str = "127.0.0.1"
ip = IPAddr(ip_str)
name = "eth0"
# Act
interface = HostInterface(hw_addr, ip, name=name)
# Assert
self.assertEquals(interface.hw_addr, hw_addr)
self.assertEquals(interface.ips, [ip])
self.assertEquals(interface.name, name)
def test_eq(self):
# Arrange
hw_addr_str = "11:22:33:44:55:66"
hw_addr_str2 = "66:55:44:33:22:11"
hw_addr = EthAddr(hw_addr_str)
hw_addr2 = EthAddr(hw_addr_str2)
ip_str = "127.0.0.1"
ip = IPAddr(ip_str)
name = "eth0"
# Act
interface1 = HostInterface(hw_addr, ip, name=name)
interface2 = HostInterface(hw_addr, ip, name=name)
interface3 = HostInterface(hw_addr2, ip, name=name)
# Assert
self.assertEquals(interface1, interface2)
self.assertNotEquals(interface1, interface3)
def test_to_json(self):
# Arrange
hw_addr_str = "11:22:33:44:55:66"
hw_addr = EthAddr(hw_addr_str)
ip_str = "127.0.0.1"
ip = IPAddr(ip_str)
name = "eth0"
expected = {'__type__': 'sts.entities.hosts.HostInterface',
'name': name,
'ips': [ip],
'hw_addr': hw_addr_str}
# Act
interface = HostInterface(hw_addr, ip, name=name)
json_str = interface.to_json()
# Assert
self.assertEquals(json_str, expected)
def test_from_json(self):
# Arrange
hw_addr_str = "11:22:33:44:55:66"
hw_addr = EthAddr(hw_addr_str)
ip_str = "127.0.0.1"
ip = IPAddr(ip_str)
name = "eth0"
input_json = {'__type__': 'sts.entities.hosts.HostInterface',
'name': name,
'ips': [ip],
'hw_addr': hw_addr_str}
# Act
interface = HostInterface.from_json(input_json)
# Assert
self.assertEquals(interface.hw_addr, hw_addr)
self.assertEquals(interface.ips, [ip])
self.assertEquals(interface.name, name)
class HostTest(unittest.TestCase):
def setUp(self):
# IP and MAC of requester host 1
self.H1_I1_IP = '1.1.1.1'
self.H1_I1_ETH = '\x01\x01\x01\x01\x01\x01'
# IP and MAC of receiver host 2, Interface 1
self.H2_I1_IP1 = '2.2.1.1'
self.H2_I1_ETH = '\x02\x02\x02\x02\x01\x01'
# Additional IP on the Interface 1
self.H2_I1_IP2 = '2.2.1.2'
# IP and MAC of receiver host 2, Interface 2
self.H2_I2_IP = '2.2.2.1'
self.H2_I2_ETH = '\x02\x02\x02\x02\x02\x01'
# IP and MAC of receiver host 3
self.H3_I3_IP = '3.3.3.1'
self.H3_I3_ETH = '\x03\x03\x03\x03\x03\x01'
def test_init(self):
# Arrange
interfaces = [mock.Mock()]
name = "test-host"
hid = 123
# Act
host = Host(interfaces, name, hid)
# Assert
self.assertEquals(host.interfaces, interfaces)
self.assertEquals(host.name, name)
self.assertEquals(host.hid, hid)
def test_send(self):
# Arrange
interfaces = [mock.Mock()]
name = "test-host"
hid = 123
Host.raiseEvent = mock.Mock(name="mock_interface")
pkt = ethernet()
# Act
host = Host(interfaces, name, hid)
host.send(interfaces[0], pkt)
# Assert
self.assertEquals(Host.raiseEvent.call_count, 1)
def test_none_arp(self):
"""Receive a non-ARP packet and ensure there is no reply"""
# Arrange
iface1 = HostInterface(EthAddr(self.H2_I1_ETH),
[IPAddr(self.H2_I1_IP1), IPAddr(self.H2_I1_IP2)])
iface2 = HostInterface(EthAddr(self.H2_I2_ETH), [IPAddr(self.H2_I2_IP)])
interfaces = [iface1, iface2]
h = Host(interfaces)
ether = ethernet()
ether.type = ethernet.IP_TYPE
ether.dst = EthAddr(self.H2_I1_ETH)
ether.src = EthAddr(self.H1_I1_ETH)
# Act
# Get the action and reply packet
reply_packet = h.receive(interfaces[0], ether)
# Assert
self.assertIsNone(reply_packet)
def test_invalid_arp(self):
"""Receive a ARP packet that isn't desinated to it and ensure there is no reply"""
# Arrange
iface1 = HostInterface(EthAddr(self.H2_I1_ETH),
[IPAddr(self.H2_I1_IP1), IPAddr(self.H2_I1_IP2)])
iface2 = HostInterface(EthAddr(self.H2_I2_ETH), [IPAddr(self.H2_I2_IP)])
interfaces = [iface1, iface2]
h = Host(interfaces)
arp_req = arp()
arp_req.hwsrc = EthAddr(self.H1_I1_ETH)
arp_req.hwdst = EthAddr(b"\xff\xff\xff\xff\xff\xff")
arp_req.opcode = arp.REQUEST
arp_req.protosrc = IPAddr(self.H1_I1_IP)
arp_req.protodst = IPAddr(self.H3_I3_IP)
ether = ethernet()
ether.type = ethernet.ARP_TYPE
ether.dst = EthAddr(b"\xff\xff\xff\xff\xff\xff")
ether.src = EthAddr(self.H1_I1_ETH)
ether.payload = arp_req
# Act
# Get the action and reply packet
reply_packet = h.receive(interfaces[0], ether)
# Assert
self.assertIsNone(reply_packet)
def test_arp_reply(self):
"""Receive a valid ARP packet and ensure the correct reply"""
# Arrange
iface1 = HostInterface(EthAddr(self.H2_I1_ETH),
[IPAddr(self.H2_I1_IP1), IPAddr(self.H2_I1_IP2)])
iface2 = HostInterface(EthAddr(self.H2_I2_ETH), [IPAddr(self.H2_I2_IP)])
interfaces = [iface1, iface2]
h = Host(interfaces)
arp_req = arp()
arp_req.hwsrc = EthAddr(self.H1_I1_ETH)
arp_req.hwdst = EthAddr(b"\xff\xff\xff\xff\xff\xff")
arp_req.opcode = arp.REQUEST
arp_req.protosrc = IPAddr(self.H1_I1_IP)
arp_req.protodst = IPAddr(self.H2_I1_IP1)
ether = ethernet()
ether.type = ethernet.ARP_TYPE
ether.dst = EthAddr(b"\xff\xff\xff\xff\xff\xff")
ether.src = EthAddr(self.H1_I1_ETH)
ether.payload = arp_req
# Act
# Get the action and arp reply packet
arp_reply = h.receive(interfaces[0], ether)
# Assert
self.assertIsNotNone(arp_reply)
self.assertEquals(arp_reply.src, EthAddr(self.H2_I1_ETH))
self.assertEquals(arp_reply.dst, EthAddr(self.H1_I1_ETH))
self.assertEquals(arp_reply.type, ethernet.ARP_TYPE)
reply_payload = arp_reply.payload
self.assertEquals(reply_payload.opcode, arp.REPLY)
self.assertEquals(reply_payload.hwsrc, EthAddr(self.H2_I1_ETH))
self.assertEquals(reply_payload.hwdst, EthAddr(self.H1_I1_ETH))
self.assertEquals(reply_payload.protosrc, self.H2_I1_IP1)
self.assertEquals(reply_payload.protodst, self.H1_I1_IP)
def test_to_json(self):
# Arrange
hw_addr_str = "11:22:33:44:55:66"
hw_addr = EthAddr(hw_addr_str)
ip_str = "127.0.0.1"
ip = IPAddr(ip_str)
ifname = "eth0"
interface = HostInterface(hw_addr, ip, name=ifname)
hname = "h1"
hid = 1
host = Host(interface, name=hname, hid=hid)
# Act
json_dict = host.to_json()
# Assert
self.assertEquals(json_dict['name'], hname)
self.assertEquals(json_dict['hid'], hid)
self.assertEquals(len(json_dict['interfaces']), 1)
self.assertEquals(json_dict['interfaces'][0], interface.to_json())
def test_from_json(self):
# Arrange
json_dict = {'hid': 1,
'__type__': 'sts.entities.hosts.Host',
'name': 'h1',
'interfaces': [
{'__type__': 'sts.entities.hosts.HostInterface',
'name': 'eth0',
'hw_addr': '11:22:33:44:55:66',
'ips': ['127.0.0.1'],
}],
}
hw_addr_str = "11:22:33:44:55:66"
hw_addr = EthAddr(hw_addr_str)
ip_str = "127.0.0.1"
ip = IPAddr(ip_str)
ifname = "eth0"
interface = HostInterface(hw_addr, ip, name=ifname)
hname = "h1"
hid = 1
# Act
host = Host.from_json(json_dict)
# Assert
self.assertEquals(host.name, hname)
self.assertEquals(host.hid, hid)
self.assertEquals(len(host.interfaces), 1)
self.assertEquals(host.interfaces[0], interface)
class NamespaceHostTest(unittest.TestCase):
# TODO (AH): test send and receive
def test_init(self):
# Arrange
name = "test-host"
hid = 123
hw_addr_str = "11:22:33:44:55:66"
ip = "192.168.56.1"
interfaces = [HostInterface(hw_addr_str, ip)]
# Mocking external dependencies
import sts.util.network_namespace as ns
ns.launch_namespace = mock.Mock(return_value=(None, hw_addr_str, None))
ns.bind_raw_socket = mock.Mock(return_value=None)
# Act
host = NamespaceHost(interfaces, lambda x: mock.Mock(), name=name, hid=hid)
# Assert
self.assertEquals(host.interfaces, interfaces)
def test_to_json(self):
# Arrange
io_master = mock.Mock()
hw_addr_str = "0e:32:a4:91:e7:30"
ip_str = "192.168.56.2"
hw_addr = EthAddr(hw_addr_str)
ip = IPAddr(ip_str)
ifname = "test-host"
interface = HostInterface(hw_addr, ip, name=ifname)
hname = "h1"
hid = 1
cmd = '/bin/bash sleep'
# Mocking external dependencies
import sts.util.network_namespace as ns
ns.launch_namespace = mock.Mock(return_value=(None, hw_addr_str, None))
ns.bind_raw_socket = mock.Mock(return_value=None)
host = NamespaceHost(interface, io_master.create_worker_for_socket,
name=hname, hid=hid, cmd=cmd)
# Act
json_dict = host.to_json()
# Assert
self.assertEquals(json_dict['name'], hname)
self.assertEquals(json_dict['hid'], hid)
self.assertEquals(json_dict['cmd'], cmd)
self.assertEquals(len(json_dict['interfaces']), 1)
self.assertEquals(json_dict['interfaces'][0], interface.to_json())
def test_from_json(self):
# Arrange
json_dict = {'__type__': 'sts.entities.hosts.NamespaceHost',
'cmd': '/bin/bash sleep',
'name': 'h1',
'hid': 1,
'interfaces': [
{'__type__': 'sts.entities.hosts.HostInterface',
'hw_addr': '0e:32:a4:91:e7:30',
'ips': ['192.168.56.2'],
'name': 'test-host'}]}
io_master = mock.Mock()
hw_addr_str = "0e:32:a4:91:e7:30"
ip_str = "192.168.56.2"
hw_addr = EthAddr(hw_addr_str)
ip = IPAddr(ip_str)
ifname = "test-host"
interface = HostInterface(hw_addr, ip, name=ifname)
hname = "h1"
hid = 1
cmd = '/bin/bash sleep'
# Mocking external dependencies
import sts.util.network_namespace as ns
ns.launch_namespace = mock.Mock(return_value=(None, hw_addr_str, None))
ns.bind_raw_socket = mock.Mock(return_value=None)
# Act
host = NamespaceHost.from_json(json_dict, io_master.create_worker_for_socket)
# Assert
self.assertEquals(host.name, hname)
self.assertEquals(host.hid, hid)
self.assertEquals(host.cmd, cmd)
self.assertEquals(len(host.interfaces), 1)
self.assertEquals(host.interfaces[0].to_json(), interface.to_json())
|
|
from pyglet.gl import *
from pyglet.window import mouse
from pyglet import media, clock
from wydget import element, event, util, data, layouts, anim
from wydget.widgets.frame import Frame
from wydget.widgets.label import Image, Label
from wydget.widgets.button import Button
class Movie(Frame):
name = 'movie'
def __init__(self, parent, file=None, source=None, playing=False,
x=0, y=0, z=0, width=None, height=None, scale=True, **kw):
self.parent = parent
self.scale = scale
if file is not None:
source = self.source = media.load(file, streaming=True)
else:
assert source is not None, 'one of file or source is required'
self.player = media.Player()
self.player.eos_action = self.player.EOS_PAUSE
self.player.on_eos = self.on_eos
# poke at the video format
if not source.video_format:
raise ValueError("Movie file doesn't contain video")
video_format = source.video_format
if width is None:
width = video_format.width
if video_format.sample_aspect > 1:
width *= video_format.sample_aspect
if height is None:
height = video_format.height
if video_format.sample_aspect < 1:
height /= video_format.sample_aspect
super().__init__(parent, x, y, z, width, height, **kw)
# control frame top-level
c = self.control = Frame(self, bgcolor=(1, 1, 1, .5),
is_visible=False, width='100%', height=64)
# controls underlay
f = Frame(c, is_transparent=True, width='100%', height='100%')
f.layout = layouts.Horizontal(f, valign='center', halign='center',
padding=10)
c.play = Image(f, data.load_gui_image('media-play.png'),
classes=('-play-button',), is_visible=not playing)
c.pause = Image(f, data.load_gui_image('media-pause.png'),
bgcolor=None, classes=('-pause-button',),
is_visible=playing)
fi = Frame(f, is_transparent=True)
c.range = Image(fi, data.load_gui_image('media-range.png'))
im = data.load_gui_image('media-position.png')
c.position = Image(fi, im, x=0, y=-2, classes=('-position',))
c.time = Label(f, '00:00', font_size=20)
c.anim = None
# make sure we get at least one frame to display
self.player.queue(source)
clock.schedule(self.update)
self.playing = False
if playing:
self.play()
def update(self, dt):
self.player.dispatch_events()
if self.control is None:
# the player update may have resulted in this element being
# culled
return
if not self.control.isVisible():
return
t = self.player.time
# time display
        s = int(t)
        m, s = divmod(s, 60)
        h, m = divmod(m, 60)
if h:
text = '%d:%02d:%02d' % (h, m, s)
else:
text = '%02d:%02d' % (m, s)
if text != self.control.time.text:
self.control.time.text = text
# slider position
p = (t / self.player.source.duration)
self.control.position.x = int(p * self.control.range.width)
def pause(self):
if not self.playing:
return
clock.unschedule(self.update)
self.player.pause()
self.control.play.setVisible(True)
self.control.pause.setVisible(False)
self.playing = False
def play(self):
if self.playing:
return
clock.schedule(self.update)
self.player.play()
self.control.play.setVisible(False)
self.control.pause.setVisible(True)
self.playing = True
def render(self, rect):
t = self.player.texture
if not t:
return
x = float(self.width) / t.width
y = float(self.height) / t.height
s = min(x, y)
w = int(t.width * s)
h = int(t.height * s)
x = rect.x
y = rect.y
if w < self.width:
x += self.width // 2 - w // 2
if h < self.height:
y += self.height // 2 - h // 2
t.blit(x, y, width=w, height=h)
def on_eos(self):
self.player.seek(0)
self.pause()
self.control.position.x = 0
self.control.time.text = '00:00'
self.getGUI().dispatch_event(self, 'on_eos')
def delete(self):
self.pause()
if self.control.anim is not None:
self.control.anim.cancel()
self.control = None
super().delete()
@event.default('movie')
def on_element_enter(widget, *args):
widget.control.setVisible(True)
widget.control.anim = anim.Delayed(widget.control.setVisible, False,
delay=5)
return event.EVENT_HANDLED
@event.default('movie')
def on_mouse_motion(widget, *args):
if widget.control.anim is not None:
widget.control.anim.cancel()
widget.control.setVisible(True)
widget.control.anim = anim.Delayed(widget.control.setVisible, False,
delay=5)
return event.EVENT_HANDLED
@event.default('movie')
def on_element_leave(widget, *args):
widget.control.setVisible(False)
if widget.control.anim is not None:
widget.control.anim.cancel()
return event.EVENT_HANDLED
@event.default('movie .-play-button')
def on_click(widget, x, y, buttons, modifiers, click_count):
if not buttons & mouse.LEFT:
return event.EVENT_UNHANDLED
widget.getParent('movie').play()
return event.EVENT_HANDLED
@event.default('movie .-pause-button')
def on_click(widget, x, y, buttons, modifiers, click_count):
if not buttons & mouse.LEFT:
return event.EVENT_UNHANDLED
widget.getParent('movie').pause()
return event.EVENT_HANDLED
@event.default('movie .-position')
def on_mouse_press(widget, x, y, buttons, modifiers):
if not buttons & mouse.LEFT:
return event.EVENT_UNHANDLED
widget.getParent('movie').pause()
return event.EVENT_HANDLED
@event.default('movie .-position')
def on_mouse_release(widget, x, y, buttons, modifiers):
if not buttons & mouse.LEFT:
return event.EVENT_UNHANDLED
widget.getParent('movie').play()
return event.EVENT_HANDLED
@event.default('movie .-position')
def on_drag(widget, x, y, dx, dy, buttons, modifiers):
if not buttons & mouse.LEFT:
return event.EVENT_UNHANDLED
movie = widget.getParent('movie')
rw = movie.control.range.width
widget.x = max(0, min(rw, widget.x + dx))
p = float(widget.x) / rw
movie.player.seek(p * movie.player.source.duration)
return event.EVENT_HANDLED
|
|
"""Tests formatting as writer-agnostic ExcelCells
ExcelFormatter is tested implicitly in pandas/tests/io/test_excel.py
"""
import pytest
from pandas.io.formats.excel import CSSToExcelConverter
@pytest.mark.parametrize('css,expected', [
# FONT
# - name
('font-family: foo,bar', {'font': {'name': 'foo'}}),
('font-family: "foo bar",baz', {'font': {'name': 'foo bar'}}),
('font-family: foo,\nbar', {'font': {'name': 'foo'}}),
('font-family: foo, bar, baz', {'font': {'name': 'foo'}}),
('font-family: bar, foo', {'font': {'name': 'bar'}}),
('font-family: \'foo bar\', baz', {'font': {'name': 'foo bar'}}),
('font-family: \'foo \\\'bar\', baz', {'font': {'name': 'foo \'bar'}}),
('font-family: "foo \\"bar", baz', {'font': {'name': 'foo "bar'}}),
('font-family: "foo ,bar", baz', {'font': {'name': 'foo ,bar'}}),
# - family
('font-family: serif', {'font': {'name': 'serif', 'family': 1}}),
('font-family: Serif', {'font': {'name': 'serif', 'family': 1}}),
('font-family: roman, serif', {'font': {'name': 'roman', 'family': 1}}),
('font-family: roman, sans-serif', {'font': {'name': 'roman',
'family': 2}}),
('font-family: roman, sans serif', {'font': {'name': 'roman'}}),
('font-family: roman, sansserif', {'font': {'name': 'roman'}}),
('font-family: roman, cursive', {'font': {'name': 'roman', 'family': 4}}),
('font-family: roman, fantasy', {'font': {'name': 'roman', 'family': 5}}),
# - size
('font-size: 1em', {'font': {'size': 12}}),
# - bold
('font-weight: 100', {'font': {'bold': False}}),
('font-weight: 200', {'font': {'bold': False}}),
('font-weight: 300', {'font': {'bold': False}}),
('font-weight: 400', {'font': {'bold': False}}),
('font-weight: normal', {'font': {'bold': False}}),
('font-weight: lighter', {'font': {'bold': False}}),
('font-weight: bold', {'font': {'bold': True}}),
('font-weight: bolder', {'font': {'bold': True}}),
('font-weight: 700', {'font': {'bold': True}}),
('font-weight: 800', {'font': {'bold': True}}),
('font-weight: 900', {'font': {'bold': True}}),
# - italic
# - underline
('text-decoration: underline',
{'font': {'underline': 'single'}}),
('text-decoration: overline',
{}),
('text-decoration: none',
{}),
# - strike
('text-decoration: line-through',
{'font': {'strike': True}}),
('text-decoration: underline line-through',
{'font': {'strike': True, 'underline': 'single'}}),
('text-decoration: underline; text-decoration: line-through',
{'font': {'strike': True}}),
# - color
('color: red', {'font': {'color': 'FF0000'}}),
('color: #ff0000', {'font': {'color': 'FF0000'}}),
('color: #f0a', {'font': {'color': 'FF00AA'}}),
# - shadow
('text-shadow: none', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px #CCC', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px #999', {'font': {'shadow': False}}),
('text-shadow: 0px -0em 0px', {'font': {'shadow': False}}),
('text-shadow: 2px -0em 0px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -2em 0px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -0em 2px #CCC', {'font': {'shadow': True}}),
('text-shadow: 0px -0em 2px', {'font': {'shadow': True}}),
('text-shadow: 0px -2em', {'font': {'shadow': True}}),
# FILL
# - color, fillType
('background-color: red', {'fill': {'fgColor': 'FF0000',
'patternType': 'solid'}}),
('background-color: #ff0000', {'fill': {'fgColor': 'FF0000',
'patternType': 'solid'}}),
('background-color: #f0a', {'fill': {'fgColor': 'FF00AA',
'patternType': 'solid'}}),
# BORDER
# - style
('border-style: solid',
{'border': {'top': {'style': 'medium'},
'bottom': {'style': 'medium'},
'left': {'style': 'medium'},
'right': {'style': 'medium'}}}),
('border-style: solid; border-width: thin',
{'border': {'top': {'style': 'thin'},
'bottom': {'style': 'thin'},
'left': {'style': 'thin'},
'right': {'style': 'thin'}}}),
('border-top-style: solid; border-top-width: thin',
{'border': {'top': {'style': 'thin'}}}),
('border-top-style: solid; border-top-width: 1pt',
{'border': {'top': {'style': 'thin'}}}),
('border-top-style: solid',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: medium',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: 2pt',
{'border': {'top': {'style': 'medium'}}}),
('border-top-style: solid; border-top-width: thick',
{'border': {'top': {'style': 'thick'}}}),
('border-top-style: solid; border-top-width: 4pt',
{'border': {'top': {'style': 'thick'}}}),
('border-top-style: dotted',
{'border': {'top': {'style': 'mediumDashDotDot'}}}),
('border-top-style: dotted; border-top-width: thin',
{'border': {'top': {'style': 'dotted'}}}),
('border-top-style: dashed',
{'border': {'top': {'style': 'mediumDashed'}}}),
('border-top-style: dashed; border-top-width: thin',
{'border': {'top': {'style': 'dashed'}}}),
('border-top-style: double',
{'border': {'top': {'style': 'double'}}}),
# - color
('border-style: solid; border-color: #0000ff',
{'border': {'top': {'style': 'medium', 'color': '0000FF'},
'right': {'style': 'medium', 'color': '0000FF'},
'bottom': {'style': 'medium', 'color': '0000FF'},
'left': {'style': 'medium', 'color': '0000FF'}}}),
('border-top-style: double; border-top-color: blue',
{'border': {'top': {'style': 'double', 'color': '0000FF'}}}),
('border-top-style: solid; border-top-color: #06c',
{'border': {'top': {'style': 'medium', 'color': '0066CC'}}}),
# ALIGNMENT
# - horizontal
('text-align: center',
{'alignment': {'horizontal': 'center'}}),
('text-align: left',
{'alignment': {'horizontal': 'left'}}),
('text-align: right',
{'alignment': {'horizontal': 'right'}}),
('text-align: justify',
{'alignment': {'horizontal': 'justify'}}),
# - vertical
('vertical-align: top',
{'alignment': {'vertical': 'top'}}),
('vertical-align: text-top',
{'alignment': {'vertical': 'top'}}),
('vertical-align: middle',
{'alignment': {'vertical': 'center'}}),
('vertical-align: bottom',
{'alignment': {'vertical': 'bottom'}}),
('vertical-align: text-bottom',
{'alignment': {'vertical': 'bottom'}}),
# - wrap_text
('white-space: nowrap',
{'alignment': {'wrap_text': False}}),
('white-space: pre',
{'alignment': {'wrap_text': False}}),
('white-space: pre-line',
{'alignment': {'wrap_text': False}}),
('white-space: normal',
{'alignment': {'wrap_text': True}}),
])
def test_css_to_excel(css, expected):
convert = CSSToExcelConverter()
assert expected == convert(css)
def test_css_to_excel_multiple():
convert = CSSToExcelConverter()
actual = convert('''
font-weight: bold;
text-decoration: underline;
color: red;
border-width: thin;
text-align: center;
vertical-align: top;
unused: something;
''')
assert {"font": {"bold": True, "underline": "single", "color": "FF0000"},
"border": {"top": {"style": "thin"},
"right": {"style": "thin"},
"bottom": {"style": "thin"},
"left": {"style": "thin"}},
"alignment": {"horizontal": "center",
"vertical": "top"}} == actual
@pytest.mark.parametrize('css,inherited,expected', [
('font-weight: bold', '',
{'font': {'bold': True}}),
('', 'font-weight: bold',
{'font': {'bold': True}}),
('font-weight: bold', 'font-style: italic',
{'font': {'bold': True, 'italic': True}}),
('font-style: normal', 'font-style: italic',
{'font': {'italic': False}}),
('font-style: inherit', '', {}),
('font-style: normal; font-style: inherit', 'font-style: italic',
{'font': {'italic': True}}),
])
def test_css_to_excel_inherited(css, inherited, expected):
convert = CSSToExcelConverter(inherited)
assert expected == convert(css)
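# Hedged editorial example (not part of the original test module): direct use of the
# converter outside pytest, mirroring what the parametrized cases assert above.
def _example_css_to_excel():
    convert = CSSToExcelConverter()
    # Expected: {'font': {'bold': True, 'color': 'FF00AA'}}
    return convert('font-weight: bold; color: #f0a')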
|
|
# Copyright 2017 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Jordan-Wigner transform on fermionic operators."""
from __future__ import absolute_import
import itertools
from fermilib.ops import FermionOperator, InteractionOperator
from projectq.ops import QubitOperator
def jordan_wigner(operator):
""" Apply the Jordan-Wigner transform to a FermionOperator or
InteractionOperator to convert to a QubitOperator.
Returns:
transformed_operator: An instance of the QubitOperator class.
Warning:
The runtime of this method is exponential in the maximum locality
of the original FermionOperator.
"""
if isinstance(operator, InteractionOperator):
return jordan_wigner_interaction_op(operator)
if not isinstance(operator, FermionOperator):
raise TypeError("operator must be a FermionOperator or "
"InteractionOperator.")
transformed_operator = QubitOperator()
for term in operator.terms:
# Initialize identity matrix.
transformed_term = QubitOperator((), operator.terms[term])
# Loop through operators, transform and multiply.
for ladder_operator in term:
z_factors = tuple((index, 'Z') for
index in range(ladder_operator[0]))
pauli_x_component = QubitOperator(
z_factors + ((ladder_operator[0], 'X'),), 0.5)
if ladder_operator[1]:
pauli_y_component = QubitOperator(
z_factors + ((ladder_operator[0], 'Y'),), -0.5j)
else:
pauli_y_component = QubitOperator(
z_factors + ((ladder_operator[0], 'Y'),), 0.5j)
transformed_term *= pauli_x_component + pauli_y_component
transformed_operator += transformed_term
return transformed_operator
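# Hedged editorial example (not part of the original module): a single raising operator
# a^dagger_2 maps to (X_2 - i Y_2) / 2 with a Z string on the lower modes, i.e.
# 0.5 Z0 Z1 X2 - 0.5j Z0 Z1 Y2, matching the ladder-operator loop above.
def _example_jordan_wigner_raising_operator():
    raising = FermionOperator('2^')
    return jordan_wigner(raising)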
def jordan_wigner_interaction_op(iop, n_qubits=None):
"""Output InteractionOperator as QubitOperator class under JW transform.
One could accomplish this very easily by first mapping to fermions and
then mapping to qubits. We skip the middle step for the sake of speed.
Returns:
qubit_operator: An instance of the QubitOperator class.
"""
from fermilib.utils import count_qubits
if n_qubits is None:
n_qubits = count_qubits(iop)
if n_qubits < count_qubits(iop):
raise ValueError('Invalid number of qubits specified.')
# Initialize qubit operator as constant.
qubit_operator = QubitOperator((), iop.constant)
# Loop through all indices.
for p in range(n_qubits):
for q in range(n_qubits):
# Handle one-body terms.
coefficient = complex(iop[p, q])
if coefficient and p >= q:
qubit_operator += coefficient * jordan_wigner_one_body(p, q)
# Keep looping for the two-body terms.
for r in range(n_qubits):
for s in range(n_qubits):
coefficient = complex(iop[p, q, r, s])
# Skip zero terms.
if (not coefficient) or (p == q) or (r == s):
continue
# Identify and skip one of the complex conjugates.
if [p, q, r, s] != [s, r, q, p]:
if len(set([p, q, r, s])) == 4:
if min(r, s) < min(p, q):
continue
elif p != r and q < p:
continue
# Handle the two-body terms.
transformed_term = jordan_wigner_two_body(p, q, r, s)
transformed_term *= coefficient
qubit_operator += transformed_term
return qubit_operator
def jordan_wigner_one_body(p, q):
"""Map the term a^\dagger_p a_q + a^\dagger_q a_p to QubitOperator.
Note that the diagonal terms are divided by a factor of 2
because they are equal to their own Hermitian conjugate.
"""
# Handle off-diagonal terms.
qubit_operator = QubitOperator()
if p != q:
a, b = sorted([p, q])
parity_string = tuple((z, 'Z') for z in range(a + 1, b))
for operator in ['X', 'Y']:
operators = ((a, operator),) + parity_string + ((b, operator),)
qubit_operator += QubitOperator(operators, .5)
# Handle diagonal terms.
else:
qubit_operator += QubitOperator((), .5)
qubit_operator += QubitOperator(((p, 'Z'),), -.5)
return qubit_operator
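# Hedged editorial example (not part of the original module): the diagonal case p == q is
# the number operator a^dagger_p a_p, which the branch above maps to (I - Z_p) / 2.
def _example_number_operator_mapping():
    # Returns 0.5 * I - 0.5 * Z_3 as a QubitOperator.
    return jordan_wigner_one_body(3, 3)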
def jordan_wigner_two_body(p, q, r, s):
"""Map the term a^\dagger_p a^\dagger_q a_r a_s + h.c. to QubitOperator.
Note that the diagonal terms are divided by a factor of two
because they are equal to their own Hermitian conjugate.
"""
# Initialize qubit operator.
qubit_operator = QubitOperator()
# Return zero terms.
if (p == q) or (r == s):
return qubit_operator
# Handle case of four unique indices.
elif len(set([p, q, r, s])) == 4:
# Loop through different operators which act on each tensor factor.
for operator_p, operator_q, operator_r in itertools.product(
['X', 'Y'], repeat=3):
if [operator_p, operator_q, operator_r].count('X') % 2:
operator_s = 'X'
else:
operator_s = 'Y'
# Sort operators.
[(a, operator_a), (b, operator_b),
(c, operator_c), (d, operator_d)] = sorted(
[(p, operator_p), (q, operator_q),
(r, operator_r), (s, operator_s)],
key=lambda pair: pair[0])
            # Compute operator strings.
operators = ((a, operator_a),)
operators += tuple((z, 'Z') for z in range(a + 1, b))
operators += ((b, operator_b),)
operators += ((c, operator_c),)
operators += tuple((z, 'Z') for z in range(c + 1, d))
operators += ((d, operator_d),)
# Get coefficients.
coefficient = .125
parity_condition = bool(operator_p != operator_q or
operator_p == operator_r)
if (p > q) ^ (r > s):
if not parity_condition:
coefficient *= -1.
elif parity_condition:
coefficient *= -1.
# Add term.
qubit_operator += QubitOperator(operators, coefficient)
# Handle case of three unique indices.
elif len(set([p, q, r, s])) == 3:
# Identify equal tensor factors.
if p == r:
a, b = sorted([q, s])
c = p
elif p == s:
a, b = sorted([q, r])
c = p
elif q == r:
a, b = sorted([p, s])
c = q
elif q == s:
a, b = sorted([p, r])
c = q
# Get operators.
parity_string = tuple((z, 'Z') for z in range(a + 1, b))
pauli_z = QubitOperator(((c, 'Z'),))
for operator in ['X', 'Y']:
operators = ((a, operator),) + parity_string + ((b, operator),)
# Get coefficient.
if (p == s) or (q == r):
coefficient = .25
else:
coefficient = -.25
# Add term.
hopping_term = QubitOperator(operators, coefficient)
qubit_operator -= pauli_z * hopping_term
qubit_operator += hopping_term
# Handle case of two unique indices.
elif len(set([p, q, r, s])) == 2:
# Get coefficient.
if p == s:
coefficient = -.25
else:
coefficient = .25
# Add terms.
qubit_operator -= QubitOperator((), coefficient)
qubit_operator += QubitOperator(((p, 'Z'),), coefficient)
qubit_operator += QubitOperator(((q, 'Z'),), coefficient)
qubit_operator -= QubitOperator(((min(q, p), 'Z'), (max(q, p), 'Z')),
coefficient)
return qubit_operator
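# Illustrative sketch (hypothetical helper, not part of the original module):
# the two-unique-index branch covers number-number interactions.  For
# a^\dagger_0 a^\dagger_1 a_1 a_0 + h.c. (equal to 2 n_0 n_1, halved per the
# docstring) the code above produces 0.25 * (I - Z0 - Z1 + Z0 Z1), which is
# the qubit expansion of n_0 n_1.
def _example_jordan_wigner_two_body():
    return jordan_wigner_two_body(0, 1, 1, 0)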
|
|
# -*- coding: utf-8 -*-
"""
CIRpy
Python interface for the Chemical Identifier Resolver (CIR) by the CADD Group at the NCI/NIH.
https://github.com/mcs07/CIRpy
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import functools
import inspect
import logging
import os
try:
from urllib.error import HTTPError
from urllib.parse import quote, urlencode
from urllib.request import urlopen
except ImportError:
from urllib import urlencode
from urllib2 import quote, urlopen, HTTPError
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
import xml.etree.ElementTree as etree
__author__ = 'Matt Swain'
__email__ = '[email protected]'
__version__ = '1.0.2'
__license__ = 'MIT'
log = logging.getLogger('cirpy')
log.addHandler(logging.NullHandler())
API_BASE = 'https://cactus.nci.nih.gov/chemical/structure'
FILE_FORMATS = {
'alc', 'cdxml', 'cerius', 'charmm', 'cif', 'cml', 'ctx', 'gjf', 'gromacs', 'hyperchem', 'jme', 'maestro', 'mol',
'mol2', 'mrv', 'pdb', 'sdf3000', 'sln', 'xyz'
}
def construct_api_url(input, representation, resolvers=None, get3d=False, tautomers=False, xml=True, **kwargs):
"""Return the URL for the desired API endpoint.
:param string input: Chemical identifier to resolve
:param string representation: Desired output representation
:param list(str) resolvers: (Optional) Ordered list of resolvers to use
:param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
:param bool tautomers: (Optional) Whether to return all tautomers
:param bool xml: (Optional) Whether to return full XML response
:returns: CIR API URL
:rtype: str
"""
# File formats require representation=file and the format in the querystring
if representation in FILE_FORMATS:
kwargs['format'] = representation
representation = 'file'
# Prepend input with 'tautomers:' to return all tautomers
if tautomers:
input = 'tautomers:%s' % input
url = '%s/%s/%s' % (API_BASE, quote(input), representation)
if xml:
url += '/xml'
if resolvers:
kwargs['resolver'] = ','.join(resolvers)
if get3d:
kwargs['get3d'] = True
if kwargs:
url += '?%s' % urlencode(kwargs)
return url
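# Illustrative sketch (hypothetical helper, not part of the original module):
# build a resolver URL without issuing any network request.  The exact
# query-string ordering depends on urlencode, so treat the result as
# indicative only.
def _example_construct_api_url():
    return construct_api_url('c1ccccc1', 'smiles', resolvers=['smiles', 'name'], xml=False)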
def request(input, representation, resolvers=None, get3d=False, tautomers=False, **kwargs):
"""Make a request to CIR and return the XML response.
:param string input: Chemical identifier to resolve
:param string representation: Desired output representation
:param list(string) resolvers: (Optional) Ordered list of resolvers to use
:param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
:param bool tautomers: (Optional) Whether to return all tautomers
:returns: XML response from CIR
:rtype: Element
:raises HTTPError: if CIR returns an error code
:raises ParseError: if CIR response is uninterpretable
"""
url = construct_api_url(input, representation, resolvers, get3d, tautomers, **kwargs)
log.debug('Making request: %s', url)
response = urlopen(url)
return etree.parse(response).getroot()
class Result(object):
"""A single result returned by CIR."""
def __init__(self, input, notation, input_format, resolver, representation, value):
"""
:param string input: Originally supplied input identifier that produced this result
:param string notation: Identifier matched by the resolver or tautomer ID
:param string input_format: Format of the input as interpreted by the resolver
:param string resolver: Resolver used to produce this result
:param string representation: Requested output representation
:param value: Actual result value
:type value: string or list(string)
"""
self.input = input
self.representation = representation
self.resolver = resolver
self.input_format = input_format
self.notation = notation
self.value = value
def __repr__(self):
return 'Result(input=%r, representation=%r, resolver=%r, input_format=%r, notation=%r, value=%r)' \
% (self.input, self.representation, self.resolver, self.input_format, self.notation, self.value)
def __str__(self):
return self.value
def __eq__(self, other):
return isinstance(other, type(self)) and self.__dict__ == other.__dict__
def __getitem__(self, prop):
"""Allow dict-style access to attributes to ease transition from when results were dicts."""
if prop in self.__dict__:
return getattr(self, prop)
raise KeyError(prop)
def __setitem__(self, prop, val):
"""Allow dict-style setting of attributes to ease transition from when results were dicts."""
setattr(self, prop, val)
def __contains__(self, prop):
"""Allow dict-style checking of attributes to ease transition from when results were dicts."""
return prop in self.__dict__
def to_dict(self):
"""Return a dictionary containing Result data."""
return self.__dict__
def query(input, representation, resolvers=None, get3d=False, tautomers=False, **kwargs):
"""Get all results for resolving input to the specified output representation.
:param string input: Chemical identifier to resolve
:param string representation: Desired output representation
:param list(string) resolvers: (Optional) Ordered list of resolvers to use
:param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
:param bool tautomers: (Optional) Whether to return all tautomers
:returns: List of resolved results
:rtype: list(Result)
:raises HTTPError: if CIR returns an error code
:raises ParseError: if CIR response is uninterpretable
"""
tree = request(input, representation, resolvers, get3d, tautomers, **kwargs)
results = []
for data in tree.findall('.//data'):
value = [item.text for item in data.findall('item')]
result = Result(
input=tree.attrib['string'],
representation=tree.attrib['representation'],
resolver=data.attrib['resolver'],
input_format=data.attrib['string_class'],
notation=data.attrib['notation'],
value=value[0] if len(value) == 1 else value
)
results.append(result)
log.debug('Received %s query results', len(results))
return results
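# Illustrative sketch (hypothetical helper, not part of the original module):
# a typical call is query('Aspirin', 'smiles'), which performs an HTTP request
# to CIR and returns one Result per resolver that matched the input.  Network
# access is required, so this helper is not executed at import time.
def _example_query():
    results = query('Aspirin', 'smiles', resolvers=['name'])
    return [(r.resolver, r.value) for r in results]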
def resolve(input, representation, resolvers=None, get3d=False, **kwargs):
"""Resolve input to the specified output representation.
:param string input: Chemical identifier to resolve
:param string representation: Desired output representation
:param list(string) resolvers: (Optional) Ordered list of resolvers to use
:param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
:returns: Output representation or None
:rtype: string or None
:raises HTTPError: if CIR returns an error code
:raises ParseError: if CIR response is uninterpretable
"""
# Take first result from XML query
    results = query(input, representation, resolvers, get3d=get3d, **kwargs)
result = results[0].value if results else None
return result
def resolve_image(input, resolvers=None, fmt='png', width=300, height=300, frame=False, crop=None, bgcolor=None,
atomcolor=None, hcolor=None, bondcolor=None, framecolor=None, symbolfontsize=11, linewidth=2,
hsymbol='special', csymbol='special', stereolabels=False, stereowedges=True, header=None, footer=None,
**kwargs):
"""Resolve input to a 2D image depiction.
:param string input: Chemical identifier to resolve
:param list(string) resolvers: (Optional) Ordered list of resolvers to use
:param string fmt: (Optional) gif or png image format (default png)
:param int width: (Optional) Image width in pixels (default 300)
:param int height: (Optional) Image height in pixels (default 300)
:param bool frame: (Optional) Whether to show border frame (default False)
:param int crop: (Optional) Crop image with specified padding
:param int symbolfontsize: (Optional) Atom label font size (default 11)
:param int linewidth: (Optional) Bond line width (default 2)
:param string bgcolor: (Optional) Background color
:param string atomcolor: (Optional) Atom label color
:param string hcolor: (Optional) Hydrogen atom label color
:param string bondcolor: (Optional) Bond color
:param string framecolor: (Optional) Border frame color
:param bool hsymbol: (Optional) Hydrogens: all, special or none (default special)
:param bool csymbol: (Optional) Carbons: all, special or none (default special)
:param bool stereolabels: (Optional) Whether to show stereochemistry labels (default False)
:param bool stereowedges: (Optional) Whether to show wedge/dash bonds (default True)
:param string header: (Optional) Header text above structure
:param string footer: (Optional) Footer text below structure
"""
# Aggregate all arguments into kwargs
args, _, _, values = inspect.getargvalues(inspect.currentframe())
for arg in args:
if values[arg] is not None:
kwargs[arg] = values[arg]
# Turn off anti-aliasing for transparent background
if kwargs.get('bgcolor') == 'transparent':
kwargs['antialiasing'] = False
# Renamed parameters
if 'stereolabels' in kwargs:
kwargs['showstereo'] = kwargs.pop('stereolabels')
if 'fmt' in kwargs:
kwargs['format'] = kwargs.pop('fmt')
# Toggle stereo wedges
if 'stereowedges' in kwargs:
status = kwargs.pop('stereowedges')
kwargs.update({'wedges': status, 'dashes': status})
# Constant values
kwargs.update({'representation': 'image', 'xml': False})
url = construct_api_url(**kwargs)
log.debug('Making image request: %s', url)
response = urlopen(url)
return response.read()
# TODO: Support twirl as fmt parameter?
# TODO: ipython html repr twirl, ipython png repr image
def download(input, filename, representation, overwrite=False, resolvers=None, get3d=False, **kwargs):
"""Convenience function to save a CIR response as a file.
This is just a simple wrapper around the resolve function.
:param string input: Chemical identifier to resolve
:param string filename: File path to save to
:param string representation: Desired output representation
:param bool overwrite: (Optional) Whether to allow overwriting of an existing file
:param list(string) resolvers: (Optional) Ordered list of resolvers to use
:param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
:raises HTTPError: if CIR returns an error code
:raises ParseError: if CIR response is uninterpretable
:raises IOError: if overwrite is False and file already exists
"""
result = resolve(input, representation, resolvers, get3d, **kwargs)
# Just log and return if nothing resolved
if not result:
log.debug('No file to download.')
return
# Only overwrite an existing file if explicitly instructed to.
if not overwrite and os.path.isfile(filename):
raise IOError("%s already exists. Use 'overwrite=True' to overwrite it." % filename)
# Ensure file ends with a newline
if not result.endswith('\n'):
result += '\n'
with open(filename, 'w') as f:
f.write(result)
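# Illustrative sketch (hypothetical helper, not part of the original module):
# fetch an SDF file for a name-resolved structure.  The identifier and the
# filename are placeholders; network access is required.
def _example_download():
    download('Aspirin', 'aspirin.sdf', 'sdf', overwrite=True)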
def memoized_property(fget):
"""Decorator to create memoized properties."""
attr_name = '_{0}'.format(fget.__name__)
@functools.wraps(fget)
def fget_memoized(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fget(self))
return getattr(self, attr_name)
return property(fget_memoized)
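# Illustrative sketch (hypothetical class, not part of the original module):
# the decorator computes the wrapped getter once, stores the value on
# '_<name>', and serves the cached attribute on every later access, so
# accessing .value twice on one instance increments 'calls' only once.
class _ExampleCounter(object):
    calls = 0
    @memoized_property
    def value(self):
        _ExampleCounter.calls += 1
        return 42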
class Molecule(object):
"""Class to hold and cache the structure information for a given CIR input."""
def __init__(self, input, resolvers=None, get3d=False, **kwargs):
"""Initialize with a resolver input."""
self.input = input
self.resolvers = resolvers
self.get3d = get3d
self.kwargs = kwargs
log.debug('Instantiated Molecule: %s' % self)
def __repr__(self):
return 'Molecule(input=%r, resolvers=%r, get3d=%r, kwargs=%r)' \
% (self.input, self.resolvers, self.get3d, self.kwargs)
@memoized_property
def stdinchi(self):
"""Standard InChI."""
return resolve(self.input, 'stdinchi', self.resolvers, **self.kwargs)
@memoized_property
def stdinchikey(self):
"""Standard InChIKey."""
return resolve(self.input, 'stdinchikey', self.resolvers, **self.kwargs)
@memoized_property
def inchi(self):
"""Non-standard InChI. (Uses options DONOTADDH W0 FIXEDH RECMET NEWPS SPXYZ SAsXYZ Fb Fnud)."""
return resolve(self.input, 'inchi', self.resolvers, **self.kwargs)
@memoized_property
def smiles(self):
"""SMILES string."""
return resolve(self.input, 'smiles', self.resolvers, **self.kwargs)
@memoized_property
def ficts(self):
"""FICTS NCI/CADD hashed structure identifier."""
return resolve(self.input, 'ficts', self.resolvers, **self.kwargs)
@memoized_property
def ficus(self):
"""FICuS NCI/CADD hashed structure identifier."""
return resolve(self.input, 'ficus', self.resolvers, **self.kwargs)
@memoized_property
def uuuuu(self):
"""uuuuu NCI/CADD hashed structure identifier."""
return resolve(self.input, 'uuuuu', self.resolvers, **self.kwargs)
@memoized_property
def hashisy(self):
"""CACTVS HASHISY identifier."""
return resolve(self.input, 'hashisy', self.resolvers, **self.kwargs)
@memoized_property
def sdf(self):
"""SDF file."""
return resolve(self.input, 'sdf', self.resolvers, **self.kwargs)
@memoized_property
def names(self):
"""List of chemical names."""
return resolve(self.input, 'names', self.resolvers, **self.kwargs)
@memoized_property
def iupac_name(self):
"""IUPAC approved name."""
return resolve(self.input, 'iupac_name', self.resolvers, **self.kwargs)
@memoized_property
def cas(self):
"""CAS registry numbers."""
return resolve(self.input, 'cas', self.resolvers, **self.kwargs)
@memoized_property
def mw(self):
"""Molecular weight."""
return resolve(self.input, 'mw', self.resolvers, **self.kwargs)
@memoized_property
def formula(self):
"""Molecular formula"""
return resolve(self.input, 'formula', self.resolvers, **self.kwargs)
@memoized_property
def h_bond_donor_count(self):
"""Hydrogen bond donor count."""
return resolve(self.input, 'h_bond_donor_count', self.resolvers, **self.kwargs)
@memoized_property
def h_bond_acceptor_count(self):
"""Hydrogen bond acceptor count."""
return resolve(self.input, 'h_bond_acceptor_count', self.resolvers, **self.kwargs)
@memoized_property
def h_bond_center_count(self):
"""Hydrogen bond center count."""
return resolve(self.input, 'h_bond_center_count', self.resolvers, **self.kwargs)
@memoized_property
def rule_of_5_violation_count(self):
"""Rule of 5 violation count."""
return resolve(self.input, 'rule_of_5_violation_count', self.resolvers, **self.kwargs)
@memoized_property
def rotor_count(self):
"""Rotor count."""
return resolve(self.input, 'rotor_count', self.resolvers, **self.kwargs)
@memoized_property
def effective_rotor_count(self):
"""Effective rotor count."""
return resolve(self.input, 'effective_rotor_count', self.resolvers, **self.kwargs)
@memoized_property
def ring_count(self):
"""Ring count."""
return resolve(self.input, 'ring_count', self.resolvers, **self.kwargs)
@memoized_property
def ringsys_count(self):
"""Ring system count."""
return resolve(self.input, 'ringsys_count', self.resolvers, **self.kwargs)
@memoized_property
def image(self):
"""2D image depiction."""
return resolve_image(self.input, self.resolvers, **self.kwargs)
@property
def image_url(self):
"""URL of a GIF image."""
        return construct_api_url(self.input, 'image', self.resolvers, get3d=self.get3d, xml=False, **self.kwargs)
@property
def twirl_url(self):
"""Url of a TwirlyMol 3D viewer."""
return construct_api_url(self.input, 'twirl', self.resolvers, False, self.get3d, False, **self.kwargs)
def download(self, filename, representation, overwrite=False):
"""Download the resolved structure as a file.
:param string filename: File path to save to
:param string representation: Desired output representation
:param bool overwrite: (Optional) Whether to allow overwriting of an existing file
"""
download(self.input, filename, representation, overwrite, self.resolvers, self.get3d, **self.kwargs)
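# Illustrative sketch (hypothetical helper, not part of the original module):
# Molecule properties are resolved lazily and cached, so repeated access does
# not repeat the HTTP request.  The SMILES below is a placeholder input and
# network access is required.
def _example_molecule():
    mol = Molecule('N[C@@H](C)C(=O)O')
    return mol.iupac_name, mol.stdinchikey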
|
|
# coding: utf-8
from __future__ import absolute_import, print_function
import json
import re
import six
_url_to_api_object = {}
class FromUrl(object):
def __init__(self, url, _requests):
self.url = url
self._requests = _requests or __import__('requests')
def __call__(self, **kwargs):
try:
            for regex, klass in six.iteritems(_url_to_api_object):
                if regex.match(self.url):
                    return klass(self, **kwargs)
            raise NotImplementedError
        except NotImplementedError as e:
            print(e)
            print(regex.pattern, klass)
def __repr__(self):
return "<%s url=%r>" % (type(self).__name__, self.url)
class ApiBase(object):
def __init__(self, from_url, **kwargs):
self.url = from_url.url
self._requests = from_url._requests
self.params = kwargs.copy()
class RestObject(ApiBase):
def get(self):
return self._requests.get(self.url, params=self.params['data'])
def post(self):
return self._requests.post(self.url, params=self.params['data'])
# ================================================================================================
# Api
# ================================================================================================
class Api(ApiBase):
@property
def test(self):
return FromUrl('https://slack.com/api/api.test', self._requests)(data=self.params).get()
_url_to_api_object[re.compile(r'^https://slack.com/api$')] = Api
class ApiTest(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/api.test$')] = ApiTest
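# Illustrative sketch (hypothetical helper, not part of the original module):
# FromUrl matches the URL against the _url_to_api_object registry and
# instantiates the corresponding class, so api.test can be exercised directly.
# Passing None lets FromUrl import 'requests' itself; the token is a
# placeholder and network access is required.
def _example_api_test():
    response = FromUrl('https://slack.com/api/api.test', None)(data={'token': 'xoxp-placeholder'}).get()
    return response.json()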
# ================================================================================================
# Auth
# ================================================================================================
class Auth(ApiBase):
@property
def test(self):
return FromUrl('https://slack.com/api/auth.test', self._requests)(data=self.params).get()
_url_to_api_object[re.compile(r'^https://slack.com/api/auth$')] = Auth
class AuthTest(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/auth.test$')] = AuthTest
# ================================================================================================
# Channels
# ================================================================================================
class Channels(ApiBase):
def all(self):
channels = []
for line in self.list.iter_lines():
if line: # JSON string.
channels = json.loads(line).get('channels')
return channels
def get_channel_id(self, channel_name):
for channel in self.all():
if channel['name'] == channel_name:
return channel['id']
return ''
def archive(self, channel_name):
""" https://api.slack.com/methods/channels.archive
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({'channel': channel_id})
return FromUrl('https://slack.com/api/channels.archive', self._requests)(data=self.params).post()
def create(self, name):
""" https://api.slack.com/methods/channels.create
"""
self.params.update({'name': name})
return FromUrl('https://slack.com/api/channels.create', self._requests)(data=self.params).post()
def history(self, channel_name, **kwargs):
""" https://api.slack.com/methods/channels.history
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({'channel': channel_id})
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/channels.history', self._requests)(data=self.params).get()
def info(self, channel_name):
""" https://api.slack.com/methods/channels.info
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({'channel': channel_id})
return FromUrl('https://slack.com/api/channels.info', self._requests)(data=self.params).get()
def invite(self, channel_name, user):
""" https://api.slack.com/methods/channels.invite
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({
'channel': channel_id,
'user': user,
})
return FromUrl('https://slack.com/api/channels.invite', self._requests)(data=self.params).post()
def join(self, channel_name):
""" https://api.slack.com/methods/channels.join
"""
self.params.update({
'name': channel_name,
})
return FromUrl('https://slack.com/api/channels.join', self._requests)(data=self.params).post()
def kick(self, channel_name, user):
""" https://api.slack.com/methods/channels.kick
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({
'channel': channel_id,
'user': user,
})
return FromUrl('https://slack.com/api/channels.kick', self._requests)(data=self.params).post()
def leave(self, channel_name):
""" https://api.slack.com/methods/channels.leave
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({
'channel': channel_id,
})
return FromUrl('https://slack.com/api/channels.leave', self._requests)(data=self.params).post()
@property
def list(self):
""" https://api.slack.com/methods/channels.list
"""
return FromUrl('https://slack.com/api/channels.list', self._requests)(data=self.params).get()
def mark(self, channel_name, ts):
""" https://api.slack.com/methods/channels.mark
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({
'channel': channel_id,
'ts': ts,
})
return FromUrl('https://slack.com/api/channels.mark', self._requests)(data=self.params).post()
def rename(self, channel_name, new_name):
""" https://api.slack.com/methods/channels.rename
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({
'channel': channel_id,
'name': new_name,
})
return FromUrl('https://slack.com/api/channels.rename', self._requests)(data=self.params).post()
def set_purpose(self, channel_name, purpose):
""" https://api.slack.com/methods/channels.setPurpose
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({
'channel': channel_id,
'purpose': purpose,
})
return FromUrl('https://slack.com/api/channels.setPurpose', self._requests)(data=self.params).post()
def set_topic(self, channel_name, topic):
""" https://api.slack.com/methods/channels.setTopic
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({
'channel': channel_id,
'topic': topic,
})
return FromUrl('https://slack.com/api/channels.setTopic', self._requests)(data=self.params).post()
def unarchive(self, channel_name):
""" https://api.slack.com/methods/channels.unarchive
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({
'channel': channel_id,
})
return FromUrl('https://slack.com/api/channels.unarchive', self._requests)(data=self.params).post()
def timeline(self, channel_name, reverse=False, **kwargs):
timeline = self.__timeline(channel_name, reverse, **kwargs)
return timeline
    def __timeline(self, channel_name, is_reverse, **kwargs):
        from ..events import Message
        messages = []
        if kwargs:
            self.params.update(kwargs)
        lines = self.history(channel_name).json()['messages']
lines = sorted(lines, key=lambda x: x['ts'], reverse=is_reverse)
for line in lines:
messages.append(Message(line))
return messages
_url_to_api_object[re.compile(r'^https://slack.com/api/channels$')] = Channels
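# Illustrative sketch (hypothetical helper, not part of the original module):
# the Channels wrapper is reached through FromUrl, which falls back to
# importing 'requests' when no client is supplied.  The wrapper methods above
# translate channel names into IDs before each call.  The token and channel
# name are placeholders; a valid Slack token and network access are required.
def _example_channels_history():
    channels = FromUrl('https://slack.com/api/channels', None)(token='xoxp-placeholder')
    return channels.history('general', count=10).json()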
class ChannelsArchive(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/channels.archive$')] = ChannelsArchive
class ChannelsCreate(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/channels.create$')] = ChannelsCreate
class ChannelsHistory(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/channels.history$')] = ChannelsHistory
class ChannelsInfo(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/channels.info$')] = ChannelsInfo
class ChannelsInvite(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/channels.invite$')] = ChannelsInvite
class ChannelsJoin(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/channels.join$')] = ChannelsJoin
class ChannelsKick(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/channels.kick$')] = ChannelsKick
class ChannelsLeave(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/channels.leave$')] = ChannelsLeave
class ChannelsList(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/channels.list$')] = ChannelsList
class ChannelsMark(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/channels.mark$')] = ChannelsMark
class ChannelsRename(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/channels.rename$')] = ChannelsRename
class ChannelsSetPurpose(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/channels.setPurpose$')] = ChannelsSetPurpose
class ChannelsSetTopic(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/channels.setTopic$')] = ChannelsSetTopic
class ChannelsUnarchive(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/channels.unarchive$')] = ChannelsUnarchive
# ================================================================================================
# Chat
# ================================================================================================
class Chat(ApiBase):
def delete(self, channel, ts):
""" https://api.slack.com/methods/chat.delete
"""
self.params.update({
'channel': channel,
'ts': ts,
})
return FromUrl('https://slack.com/api/chat.delete', self._requests)(data=self.params).post()
def post_message(self, channel, text, **kwargs):
""" https://api.slack.com/methods/chat.postMessage
"""
if not channel.startswith('#'):
channel = '#' + channel
self.params.update({
'channel': channel,
'text': text,
})
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/chat.postMessage', self._requests)(data=self.params).post()
def update(self, channel, text, ts):
""" https://api.slack.com/methods/chat.update
"""
self.params.update({
'channel': channel,
'text': text,
'ts': ts,
})
return FromUrl('https://slack.com/api/chat.update', self._requests)(data=self.params).post()
_url_to_api_object[re.compile(r'^https://slack.com/api/chat$')] = Chat
class ChatDelete(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/chat.delete$')] = ChatDelete
class ChatPostMessage(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/chat.postMessage$')] = ChatPostMessage
class ChatUpdate(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/chat.update$')] = ChatUpdate
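# Illustrative sketch (hypothetical helper, not part of the original module):
# post a message through the Chat wrapper above.  Token, channel and text are
# placeholders; a valid token and network access are required.  post_message
# prepends '#' when the channel name lacks it.
def _example_post_message():
    chat = FromUrl('https://slack.com/api/chat', None)(token='xoxp-placeholder')
    return chat.post_message('general', 'Hello from the API wrapper').json()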
# ================================================================================================
# emoji
# ================================================================================================
class Emoji(ApiBase):
@property
def list(self):
return FromUrl('https://slack.com/api/emoji.list', self._requests)(data=self.params).get()
_url_to_api_object[re.compile(r'^https://slack.com/api/emoji$')] = Emoji
class EmojiList(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/emoji.list$')] = EmojiList
# ================================================================================================
# file
# ================================================================================================
class Files(ApiBase):
def info(self, file, **kwargs):
""" https://slack.com/api/files.info
"""
self.params.update({
'file': file,
})
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/files.info', self._requests)(data=self.params).get()
def list(self, **kwargs):
""" https://api.slack.com/methods/files.list
"""
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/files.list', self._requests)(data=self.params).get()
def upload(self, **kwargs):
""" https://api.slack.com/methods/files.upload
"""
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/files.upload', self._requests)(data=self.params).post()
def delete(self, **kwargs):
""" https://api.slack.com/methods/files.delete
"""
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/files.delete', self._requests)(data=self.params).post()
_url_to_api_object[re.compile(r'^https://slack.com/api/files$')] = Files
class FilesInfo(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/files.info$')] = FilesInfo
class FilesList(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/files.list$')] = FilesList
class FilesUpload(RestObject):
def post(self):
""" @override
"""
        # Open the file in binary mode and close it once the upload completes.
        with open(self.params['data']['file'], 'rb') as f:
            return self._requests.post(self.url, params=self.params['data'], files={'file': f})
_url_to_api_object[re.compile(r'^https://slack.com/api/files.upload$')] = FilesUpload
class FilesDelete(RestObject):
def post(self):
""" @override
"""
        # files.delete only takes a file ID, so no multipart upload is needed.
        return self._requests.post(self.url, params=self.params['data'])
_url_to_api_object[re.compile(r'^https://slack.com/api/files.delete$')] = FilesDelete
# ================================================================================================
# groups
# ================================================================================================
class Groups(ApiBase):
def archive(self, group_name):
""" https://api.slack.com/methods/groups.archive
"""
group_id = self.get_group_id(group_name)
self.params.update({
'channel': group_id,
})
return FromUrl('https://slack.com/api/groups.archive', self._requests)(data=self.params).post()
def close(self, group_name):
""" https://api.slack.com/methods/groups.close
"""
group_id = self.get_group_id(group_name)
self.params.update({
'channel': group_id,
})
return FromUrl('https://slack.com/api/groups.close', self._requests)(data=self.params).post()
def create(self, group_name):
""" https://api.slack.com/methods/groups.create
"""
self.params.update({'name': group_name})
return FromUrl('https://slack.com/api/groups.create', self._requests)(data=self.params).post()
def list(self, **kwargs):
""" https://api.slack.com/methods/groups.list
"""
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/groups.list', self._requests)(data=self.params).get()
def create_child(self, group_name):
""" https://api.slack.com/methods/groups.createChild
"""
group_id = self.get_group_id(group_name)
self.params.update({
'channel': group_id,
})
return FromUrl('https://slack.com/api/groups.createChild', self._requests)(data=self.params).post()
def history(self, group_name, **kwargs):
""" https://api.slack.com/methods/groups.history
"""
group_id = self.get_group_id(group_name)
self.params.update({
'channel': group_id,
})
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/groups.history', self._requests)(data=self.params).get()
def invite(self, group_name, user):
""" https://api.slack.com/methods/groups.invite
"""
group_id = self.get_group_id(group_name)
self.params.update({
'channel': group_id,
'user': user,
})
        return FromUrl('https://slack.com/api/groups.invite', self._requests)(data=self.params).post()
def kick(self, group_name, user):
""" https://api.slack.com/methods/groups.kick
"""
group_id = self.get_group_id(group_name)
self.params.update({
'channel': group_id,
'user': user,
})
return FromUrl('https://slack.com/api/groups.kick', self._requests)(data=self.params).post()
def leave(self, group_name):
""" https://api.slack.com/methods/groups.leave
"""
group_id = self.get_group_id(group_name)
self.params.update({
'channel': group_id,
})
return FromUrl('https://slack.com/api/groups.leave', self._requests)(data=self.params).post()
def mark(self, group_name, ts):
""" https://api.slack.com/methods/groups.mark
"""
group_id = self.get_group_id(group_name)
self.params.update({
'channel': group_id,
'ts': ts,
})
return FromUrl('https://slack.com/api/groups.mark', self._requests)(data=self.params).post()
def open(self, group_name):
""" https://api.slack.com/methods/groups.open
"""
group_id = self.get_group_id(group_name)
self.params.update({
'channel': group_id,
})
return FromUrl('https://slack.com/api/groups.open', self._requests)(data=self.params).post()
def rename(self, group_name, new_name):
""" https://api.slack.com/methods/groups.rename
"""
group_id = self.get_group_id(group_name)
self.params.update({
'channel': group_id,
'name': new_name,
})
return FromUrl('https://slack.com/api/groups.rename', self._requests)(data=self.params).post()
def set_purpose(self, group_name, purpose):
""" https://api.slack.com/methods/groups.setPurpose
"""
group_id = self.get_group_id(group_name)
self.params.update({
'channel': group_id,
'purpose': purpose,
})
return FromUrl('https://slack.com/api/groups.setPurpose', self._requests)(data=self.params).post()
def set_topic(self, group_name, topic):
""" https://api.slack.com/methods/groups.setTopic
"""
group_id = self.get_group_id(group_name)
self.params.update({
'channel': group_id,
'topic': topic,
})
return FromUrl('https://slack.com/api/groups.setTopic', self._requests)(data=self.params).post()
def unarchive(self, group_name):
""" https://api.slack.com/methods/groups.unarchive
"""
group_id = self.get_group_id(group_name)
self.params.update({
'channel': group_id,
})
return FromUrl('https://slack.com/api/groups.unarchive', self._requests)(data=self.params).post()
def all(self):
groups = []
for line in self.list().iter_lines():
if line: # JSON string.
groups = json.loads(line).get('groups')
return groups
def get_group_id(self, group_name):
for group in self.all():
if group['name'] == group_name:
return group['id']
return ''
_url_to_api_object[re.compile(r'^https://slack.com/api/groups$')] = Groups
class GroupsArchive(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.archive$')] = GroupsArchive
class GroupsList(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.list$')] = GroupsList
class GroupsClose(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.close$')] = GroupsClose
class GroupsCreate(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.create$')] = GroupsCreate
class GroupsCreateChild(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.createChild$')] = GroupsCreateChild
class GroupsHistory(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.history$')] = GroupsHistory
class GroupsInvite(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.invite$')] = GroupsInvite
class GroupsKick(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.kick$')] = GroupsKick
class GroupsLeave(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.leave$')] = GroupsLeave
class GroupsMark(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.mark$')] = GroupsMark
class GroupsOpen(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.open$')] = GroupsOpen
class GroupsRename(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.rename$')] = GroupsRename
class GroupsSetPurpose(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.setPurpose$')] = GroupsSetPurpose
class GroupsSetTopic(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.setTopic$')] = GroupsSetTopic
class GroupsUnarchive(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/groups.unarchive$')] = GroupsUnarchive
# ================================================================================================
# im
# ================================================================================================
class Im(ApiBase):
def list(self):
""" https://api.slack.com/methods/im.list
"""
return FromUrl('https://slack.com/api/im.list', self._requests)(data=self.params).get()
def close(self, channel):
""" https://api.slack.com/methods/im.close
"""
self.params.update({
'channel': channel,
})
return FromUrl('https://slack.com/api/im.close', self._requests)(data=self.params).post()
def history(self, channel, **kwargs):
""" https://api.slack.com/methods/im.history
"""
self.params.update({
'channel': channel,
})
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/im.history', self._requests)(data=self.params).get()
def mark(self, channel, ts):
""" https://api.slack.com/methods/im.mark
"""
self.params.update({
'channel': channel,
'ts': ts,
})
return FromUrl('https://slack.com/api/im.mark', self._requests)(data=self.params).post()
def open(self, user):
""" https://api.slack.com/methods/im.history
"""
self.params.update({
'user': user,
})
return FromUrl('https://slack.com/api/im.open', self._requests)(data=self.params).post()
_url_to_api_object[re.compile(r'^https://slack.com/api/im$')] = Im
class ImClose(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/im.close$')] = ImClose
class ImHistory(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/im.history$')] = ImHistory
class ImList(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/im.list$')] = ImList
class ImMark(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/im.mark$')] = ImMark
class ImOpen(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/im.open$')] = ImOpen
# ================================================================================================
# oauth
# ================================================================================================
class OAuth(ApiBase):
def access(self, client_id, client_secret, code, **kwargs):
""" https://api.slack.com/methods/oauth.access
"""
self.params.update({
'client_id': client_id,
'client_secret': client_secret,
'code': code,
})
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/oauth.access', self._requests)(data=self.params).post()
_url_to_api_object[re.compile(r'^https://slack.com/api/oauth$')] = OAuth
class OAuthAccess(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/oauth.access$')] = OAuthAccess
# ================================================================================================
# rtm
# ================================================================================================
class Rtm(ApiBase):
@property
def start(self):
""" https://api.slack.com/methods/rtm.start
"""
return FromUrl('https://slack.com/api/rtm.start', self._requests)(data=self.params).get()
_url_to_api_object[re.compile(r'^https://slack.com/api/rtm$')] = Rtm
class RtmStart(RestObject):
""" https://api.slack.com/rtm
"""
def lasting(self, interval=1):
# TODO: Return json per interval.
import time
while True:
print(self.get().json())
time.sleep(interval)
_url_to_api_object[re.compile(r'^https://slack.com/api/rtm.start$')] = RtmStart
# ================================================================================================
# search
# ================================================================================================
class SearchBase(ApiBase):
def search_from_url(self, query, **kwargs):
if not self.url:
raise AttributeError
self.params.update({
'query': query,
})
if kwargs:
self.params.update(kwargs)
return FromUrl(self.url, self._requests)(data=self.params).get()
class Search(SearchBase):
def all(self, query, **kwargs):
""" https://api.slack.com/methods/search.all
"""
self.url = 'https://slack.com/api/search.all'
return super(Search, self).search_from_url(query, **kwargs)
def files(self, query, **kwargs):
""" https://api.slack.com/methods/search.files
"""
self.url = 'https://slack.com/api/search.files'
return super(Search, self).search_from_url(query, **kwargs)
def messages(self, query, **kwargs):
""" https://api.slack.com/methods/search.messages
"""
self.url = 'https://slack.com/api/search.messages'
return super(Search, self).search_from_url(query, **kwargs)
_url_to_api_object[re.compile(r'^https://slack.com/api/search$')] = Search
class SearchAll(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/search.all$')] = SearchAll
class SearchFiles(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/search.files$')] = SearchFiles
class SearchMessages(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/search.messages$')] = SearchMessages
# ================================================================================================
# stars
# ================================================================================================
class Stars(ApiBase):
def list(self, **kwargs):
""" https://api.slack.com/methods/stars.list
"""
if kwargs:
self.params.update(kwargs)
return FromUrl('https://slack.com/api/stars.list', self._requests)(data=self.params).get()
_url_to_api_object[re.compile(r'^https://slack.com/api/stars$')] = Stars
class StarsList(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/stars.list$')] = StarsList
# ================================================================================================
# users
# ================================================================================================
class Users(ApiBase):
def get_presence(self, user_name):
""" https://api.slack.com/methods/users.getPresence
"""
user_id = self.get_id_by_name(user_name)
self.params.update({
'user': user_id,
})
return FromUrl('https://slack.com/api/users.getPresence', self._requests)(data=self.params).get()
def set_presence(self, presence):
""" https://api.slack.com/methods/users.setPresence
"""
if presence not in ['auto', 'away']:
presence = 'auto'
self.params.update({
'presence': presence,
})
return FromUrl('https://slack.com/api/users.setPresence', self._requests)(data=self.params).post()
def info(self, user):
""" https://api.slack.com/methods/users.info
"""
self.params.update({
'user': user,
})
return FromUrl('https://slack.com/api/users.info', self._requests)(data=self.params).get()
@property
def list(self):
""" https://api.slack.com/methods/users.list
"""
return FromUrl('https://slack.com/api/users.list', self._requests)(data=self.params).get()
def set_active(self, user):
""" https://api.slack.com/methods/users.setActive
"""
self.params.update({
'user': user,
})
return FromUrl('https://slack.com/api/users.setActive', self._requests)(data=self.params).post()
def get_info_by_name(self, user_name):
user_id = self.get_id_by_name(user_name)
return self.info(user_id)
def get_name_by_id(self, user_id):
members = self.list.json()['members']
for member in members:
if member.get('id') == user_id:
return member['name']
return ''
def get_id_by_name(self, user_name):
if not user_name:
return ''
members = self.list.json()['members']
for member in members:
if member.get('name') == user_name:
return member['id']
return ''
_url_to_api_object[re.compile(r'^https://slack.com/api/users$')] = Users
class UsersGetPresence(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/users.getPresence$')] = UsersGetPresence
class UsersSetPresence(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/users.setPresence$')] = UsersSetPresence
class UsersInfo(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/users.info$')] = UsersInfo
class UsersList(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/users.list$')] = UsersList
class UsersSetActive(RestObject):
pass
_url_to_api_object[re.compile(r'^https://slack.com/api/users.setActive$')] = UsersSetActive
|
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A clone of the Music Player Daemon (MPD) that plays music from a
Beets library. Attempts to implement a compatible protocol to allow
use of the wide range of MPD clients.
"""
from __future__ import division, absolute_import, print_function
import re
from string import Template
import traceback
import random
import time
import beets
from beets.plugins import BeetsPlugin
import beets.ui
from beets import logging
from beets import vfs
from beets.util import bluelet
from beets.library import Item
from beets import dbcore
from beets.mediafile import MediaFile
import six
PROTOCOL_VERSION = '0.13.0'
BUFSIZE = 1024
HELLO = 'OK MPD %s' % PROTOCOL_VERSION
CLIST_BEGIN = 'command_list_begin'
CLIST_VERBOSE_BEGIN = 'command_list_ok_begin'
CLIST_END = 'command_list_end'
RESP_OK = 'OK'
RESP_CLIST_VERBOSE = 'list_OK'
RESP_ERR = 'ACK'
NEWLINE = u"\n"
ERROR_NOT_LIST = 1
ERROR_ARG = 2
ERROR_PASSWORD = 3
ERROR_PERMISSION = 4
ERROR_UNKNOWN = 5
ERROR_NO_EXIST = 50
ERROR_PLAYLIST_MAX = 51
ERROR_SYSTEM = 52
ERROR_PLAYLIST_LOAD = 53
ERROR_UPDATE_ALREADY = 54
ERROR_PLAYER_SYNC = 55
ERROR_EXIST = 56
VOLUME_MIN = 0
VOLUME_MAX = 100
SAFE_COMMANDS = (
# Commands that are available when unauthenticated.
u'close', u'commands', u'notcommands', u'password', u'ping',
)
ITEM_KEYS_WRITABLE = set(MediaFile.fields()).intersection(Item._fields.keys())
# Loggers.
log = logging.getLogger('beets.bpd')
global_log = logging.getLogger('beets')
# Gstreamer import error.
class NoGstreamerError(Exception):
pass
# Error-handling, exceptions, parameter parsing.
class BPDError(Exception):
"""An error that should be exposed to the client to the BPD
server.
"""
def __init__(self, code, message, cmd_name='', index=0):
self.code = code
self.message = message
self.cmd_name = cmd_name
self.index = index
template = Template(u'$resp [$code@$index] {$cmd_name} $message')
def response(self):
"""Returns a string to be used as the response code for the
erring command.
"""
return self.template.substitute({
'resp': RESP_ERR,
'code': self.code,
'index': self.index,
'cmd_name': self.cmd_name,
'message': self.message,
})
def make_bpd_error(s_code, s_message):
"""Create a BPDError subclass for a static code and message.
"""
class NewBPDError(BPDError):
code = s_code
message = s_message
cmd_name = ''
index = 0
def __init__(self):
pass
return NewBPDError
ArgumentTypeError = make_bpd_error(ERROR_ARG, u'invalid type for argument')
ArgumentIndexError = make_bpd_error(ERROR_ARG, u'argument out of range')
ArgumentNotFoundError = make_bpd_error(ERROR_NO_EXIST, u'argument not found')
def cast_arg(t, val):
"""Attempts to call t on val, raising a ArgumentTypeError
on ValueError.
If 't' is the special string 'intbool', attempts to cast first
to an int and then to a bool (i.e., 1=True, 0=False).
"""
if t == 'intbool':
return cast_arg(bool, cast_arg(int, val))
else:
try:
return t(val)
except ValueError:
raise ArgumentTypeError()
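# Illustrative sketch (hypothetical helper, not part of the original module):
# 'intbool' casts the textual MPD argument through int before bool, so '1'
# becomes True and '0' becomes False, while a non-numeric string raises
# ArgumentTypeError.
def _example_cast_arg():
    return cast_arg('intbool', '1'), cast_arg('intbool', '0'), cast_arg(int, '42')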
class BPDClose(Exception):
"""Raised by a command invocation to indicate that the connection
should be closed.
"""
# Generic server infrastructure, implementing the basic protocol.
class BaseServer(object):
"""A MPD-compatible music player server.
The functions with the `cmd_` prefix are invoked in response to
client commands. For instance, if the client says `status`,
`cmd_status` will be invoked. The arguments to the client's commands
are used as function arguments following the connection issuing the
command. The functions may send data on the connection. They may
also raise BPDError exceptions to report errors.
This is a generic superclass and doesn't support many commands.
"""
def __init__(self, host, port, password):
"""Create a new server bound to address `host` and listening
on port `port`. If `password` is given, it is required to do
anything significant on the server.
"""
self.host, self.port, self.password = host, port, password
# Default server values.
self.random = False
self.repeat = False
self.volume = VOLUME_MAX
self.crossfade = 0
self.playlist = []
self.playlist_version = 0
self.current_index = -1
self.paused = False
self.error = None
# Object for random numbers generation
self.random_obj = random.Random()
def run(self):
"""Block and start listening for connections from clients. An
interrupt (^C) closes the server.
"""
self.startup_time = time.time()
bluelet.run(bluelet.server(self.host, self.port,
Connection.handler(self)))
def _item_info(self, item):
"""An abstract method that should response lines containing a
single song's metadata.
"""
raise NotImplementedError
def _item_id(self, item):
"""An abstract method returning the integer id for an item.
"""
raise NotImplementedError
def _id_to_index(self, track_id):
"""Searches the playlist for a song with the given id and
returns its index in the playlist.
"""
track_id = cast_arg(int, track_id)
for index, track in enumerate(self.playlist):
if self._item_id(track) == track_id:
return index
# Loop finished with no track found.
raise ArgumentNotFoundError()
def _random_idx(self):
"""Returns a random index different from the current one.
If there are no songs in the playlist it returns -1.
If there is only one song in the playlist it returns 0.
"""
if len(self.playlist) < 2:
return len(self.playlist) - 1
new_index = self.random_obj.randint(0, len(self.playlist) - 1)
while new_index == self.current_index:
new_index = self.random_obj.randint(0, len(self.playlist) - 1)
return new_index
def _succ_idx(self):
"""Returns the index for the next song to play.
It also considers random and repeat flags.
No boundaries are checked.
"""
if self.repeat:
return self.current_index
if self.random:
return self._random_idx()
return self.current_index + 1
def _prev_idx(self):
"""Returns the index for the previous song to play.
It also considers random and repeat flags.
No boundaries are checked.
"""
if self.repeat:
return self.current_index
if self.random:
return self._random_idx()
return self.current_index - 1
def cmd_ping(self, conn):
"""Succeeds."""
pass
def cmd_kill(self, conn):
"""Exits the server process."""
exit(0)
def cmd_close(self, conn):
"""Closes the connection."""
raise BPDClose()
def cmd_password(self, conn, password):
"""Attempts password authentication."""
if password == self.password:
conn.authenticated = True
else:
conn.authenticated = False
raise BPDError(ERROR_PASSWORD, u'incorrect password')
def cmd_commands(self, conn):
"""Lists the commands available to the user."""
if self.password and not conn.authenticated:
# Not authenticated. Show limited list of commands.
for cmd in SAFE_COMMANDS:
yield u'command: ' + cmd
else:
# Authenticated. Show all commands.
for func in dir(self):
if func.startswith('cmd_'):
yield u'command: ' + func[4:]
def cmd_notcommands(self, conn):
"""Lists all unavailable commands."""
if self.password and not conn.authenticated:
# Not authenticated. Show privileged commands.
for func in dir(self):
if func.startswith('cmd_'):
cmd = func[4:]
if cmd not in SAFE_COMMANDS:
yield u'command: ' + cmd
else:
# Authenticated. No commands are unavailable.
pass
def cmd_status(self, conn):
"""Returns some status information for use with an
implementation of cmd_status.
Gives a list of response-lines for: volume, repeat, random,
playlist, playlistlength, and xfade.
"""
yield (
u'volume: ' + six.text_type(self.volume),
u'repeat: ' + six.text_type(int(self.repeat)),
u'random: ' + six.text_type(int(self.random)),
u'playlist: ' + six.text_type(self.playlist_version),
u'playlistlength: ' + six.text_type(len(self.playlist)),
u'xfade: ' + six.text_type(self.crossfade),
)
if self.current_index == -1:
state = u'stop'
elif self.paused:
state = u'pause'
else:
state = u'play'
yield u'state: ' + state
if self.current_index != -1: # i.e., paused or playing
current_id = self._item_id(self.playlist[self.current_index])
yield u'song: ' + six.text_type(self.current_index)
yield u'songid: ' + six.text_type(current_id)
if self.error:
yield u'error: ' + self.error
def cmd_clearerror(self, conn):
"""Removes the persistent error state of the server. This
error is set when a problem arises not in response to a
command (for instance, when playing a file).
"""
self.error = None
def cmd_random(self, conn, state):
"""Set or unset random (shuffle) mode."""
self.random = cast_arg('intbool', state)
def cmd_repeat(self, conn, state):
"""Set or unset repeat mode."""
self.repeat = cast_arg('intbool', state)
def cmd_setvol(self, conn, vol):
"""Set the player's volume level (0-100)."""
vol = cast_arg(int, vol)
if vol < VOLUME_MIN or vol > VOLUME_MAX:
raise BPDError(ERROR_ARG, u'volume out of range')
self.volume = vol
def cmd_crossfade(self, conn, crossfade):
"""Set the number of seconds of crossfading."""
crossfade = cast_arg(int, crossfade)
if crossfade < 0:
raise BPDError(ERROR_ARG, u'crossfade time must be nonnegative')
def cmd_clear(self, conn):
"""Clear the playlist."""
self.playlist = []
self.playlist_version += 1
self.cmd_stop(conn)
def cmd_delete(self, conn, index):
"""Remove the song at index from the playlist."""
index = cast_arg(int, index)
try:
del(self.playlist[index])
except IndexError:
raise ArgumentIndexError()
self.playlist_version += 1
if self.current_index == index: # Deleted playing song.
self.cmd_stop(conn)
elif index < self.current_index: # Deleted before playing.
# Shift playing index down.
self.current_index -= 1
def cmd_deleteid(self, conn, track_id):
self.cmd_delete(conn, self._id_to_index(track_id))
def cmd_move(self, conn, idx_from, idx_to):
"""Move a track in the playlist."""
idx_from = cast_arg(int, idx_from)
idx_to = cast_arg(int, idx_to)
try:
track = self.playlist.pop(idx_from)
self.playlist.insert(idx_to, track)
except IndexError:
raise ArgumentIndexError()
# Update currently-playing song.
if idx_from == self.current_index:
self.current_index = idx_to
elif idx_from < self.current_index <= idx_to:
self.current_index -= 1
elif idx_from > self.current_index >= idx_to:
self.current_index += 1
self.playlist_version += 1
def cmd_moveid(self, conn, idx_from, idx_to):
idx_from = self._id_to_index(idx_from)
return self.cmd_move(conn, idx_from, idx_to)
def cmd_swap(self, conn, i, j):
"""Swaps two tracks in the playlist."""
i = cast_arg(int, i)
j = cast_arg(int, j)
try:
track_i = self.playlist[i]
track_j = self.playlist[j]
except IndexError:
raise ArgumentIndexError()
self.playlist[j] = track_i
self.playlist[i] = track_j
# Update currently-playing song.
if self.current_index == i:
self.current_index = j
elif self.current_index == j:
self.current_index = i
self.playlist_version += 1
def cmd_swapid(self, conn, i_id, j_id):
i = self._id_to_index(i_id)
j = self._id_to_index(j_id)
return self.cmd_swap(conn, i, j)
def cmd_urlhandlers(self, conn):
"""Indicates supported URL schemes. None by default."""
pass
def cmd_playlistinfo(self, conn, index=-1):
"""Gives metadata information about the entire playlist or a
single track, given by its index.
"""
index = cast_arg(int, index)
if index == -1:
for track in self.playlist:
yield self._item_info(track)
else:
try:
track = self.playlist[index]
except IndexError:
raise ArgumentIndexError()
yield self._item_info(track)
def cmd_playlistid(self, conn, track_id=-1):
return self.cmd_playlistinfo(conn, self._id_to_index(track_id))
def cmd_plchanges(self, conn, version):
"""Sends playlist changes since the given version.
This is a "fake" implementation that ignores the version and
just returns the entire playlist (rather like version=0). This
seems to satisfy many clients.
"""
return self.cmd_playlistinfo(conn)
def cmd_plchangesposid(self, conn, version):
"""Like plchanges, but only sends position and id.
Also a dummy implementation.
"""
for idx, track in enumerate(self.playlist):
yield u'cpos: ' + six.text_type(idx)
yield u'Id: ' + six.text_type(track.id)
def cmd_currentsong(self, conn):
"""Sends information about the currently-playing song.
"""
if self.current_index != -1: # -1 means stopped.
track = self.playlist[self.current_index]
yield self._item_info(track)
def cmd_next(self, conn):
"""Advance to the next song in the playlist."""
self.current_index = self._succ_idx()
if self.current_index >= len(self.playlist):
# Fallen off the end. Just move to stopped state.
return self.cmd_stop(conn)
else:
return self.cmd_play(conn)
def cmd_previous(self, conn):
"""Step back to the last song."""
self.current_index = self._prev_idx()
if self.current_index < 0:
return self.cmd_stop(conn)
else:
return self.cmd_play(conn)
def cmd_pause(self, conn, state=None):
"""Set the pause state playback."""
if state is None:
self.paused = not self.paused # Toggle.
else:
self.paused = cast_arg('intbool', state)
def cmd_play(self, conn, index=-1):
"""Begin playback, possibly at a specified playlist index."""
index = cast_arg(int, index)
if index < -1 or index > len(self.playlist):
raise ArgumentIndexError()
if index == -1: # No index specified: start where we are.
if not self.playlist: # Empty playlist: stop immediately.
return self.cmd_stop(conn)
if self.current_index == -1: # No current song.
self.current_index = 0 # Start at the beginning.
# If we have a current song, just stay there.
else: # Start with the specified index.
self.current_index = index
self.paused = False
def cmd_playid(self, conn, track_id=0):
track_id = cast_arg(int, track_id)
if track_id == -1:
index = -1
else:
index = self._id_to_index(track_id)
return self.cmd_play(conn, index)
def cmd_stop(self, conn):
"""Stop playback."""
self.current_index = -1
self.paused = False
def cmd_seek(self, conn, index, pos):
"""Seek to a specified point in a specified song."""
index = cast_arg(int, index)
if index < 0 or index >= len(self.playlist):
raise ArgumentIndexError()
self.current_index = index
def cmd_seekid(self, conn, track_id, pos):
index = self._id_to_index(track_id)
return self.cmd_seek(conn, index, pos)
def cmd_profile(self, conn):
"""Memory profiling for debugging."""
from guppy import hpy
heap = hpy().heap()
print(heap)
class Connection(object):
"""A connection between a client and the server. Handles input and
output from and to the client.
"""
def __init__(self, server, sock):
"""Create a new connection for the accepted socket `client`.
"""
self.server = server
self.sock = sock
self.authenticated = False
def send(self, lines):
"""Send lines, which which is either a single string or an
iterable consisting of strings, to the client. A newline is
added after every string. Returns a Bluelet event that sends
the data.
"""
if isinstance(lines, six.string_types):
lines = [lines]
out = NEWLINE.join(lines) + NEWLINE
log.debug('{}', out[:-1]) # Don't log trailing newline.
if isinstance(out, six.text_type):
out = out.encode('utf8')
return self.sock.sendall(out)
def do_command(self, command):
"""A coroutine that runs the given command and sends an
appropriate response."""
try:
yield bluelet.call(command.run(self))
except BPDError as e:
# Send the error.
yield self.send(e.response())
else:
# Send success code.
yield self.send(RESP_OK)
def run(self):
"""Send a greeting to the client and begin processing commands
as they arrive.
"""
yield self.send(HELLO)
clist = None # Initially, no command list is being constructed.
while True:
line = yield self.sock.readline()
if not line:
break
line = line.strip()
if not line:
break
log.debug('{}', line)
if clist is not None:
# Command list already opened.
if line == CLIST_END:
yield bluelet.call(self.do_command(clist))
clist = None # Clear the command list.
else:
clist.append(Command(line))
elif line == CLIST_BEGIN or line == CLIST_VERBOSE_BEGIN:
# Begin a command list.
clist = CommandList([], line == CLIST_VERBOSE_BEGIN)
else:
# Ordinary command.
try:
yield bluelet.call(self.do_command(Command(line)))
except BPDClose:
# Command indicates that the conn should close.
self.sock.close()
return
@classmethod
def handler(cls, server):
def _handle(sock):
"""Creates a new `Connection` and runs it.
"""
return cls(server, sock).run()
return _handle
class Command(object):
"""A command issued by the client for processing by the server.
"""
command_re = re.compile(br'^([^ \t]+)[ \t]*')
arg_re = re.compile(br'"((?:\\"|[^"])+)"|([^ \t"]+)')
def __init__(self, s):
"""Creates a new `Command` from the given string, `s`, parsing
the string for command name and arguments.
"""
command_match = self.command_re.match(s)
self.name = command_match.group(1)
self.args = []
arg_matches = self.arg_re.findall(s[command_match.end():])
for match in arg_matches:
if match[0]:
# Quoted argument.
arg = match[0]
arg = arg.replace(b'\\"', b'"').replace(b'\\\\', b'\\')
else:
# Unquoted argument.
arg = match[1]
arg = arg.decode('utf8')
self.args.append(arg)
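    # Illustrative example (not part of the original module; the command line
    # is made up): given the raw line
    #
    #     find artist "The \"Best\" Band"
    #
    # command_re captures the name 'find' and arg_re yields the arguments
    # ['artist', 'The "Best" Band']; quoted arguments keep embedded spaces,
    # and the escape sequences \" and \\ are unescaped before decoding.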
def run(self, conn):
"""A coroutine that executes the command on the given
connection.
"""
# Attempt to get correct command function.
func_name = 'cmd_' + self.name
if not hasattr(conn.server, func_name):
raise BPDError(ERROR_UNKNOWN, u'unknown command', self.name)
func = getattr(conn.server, func_name)
# Ensure we have permission for this command.
if conn.server.password and \
not conn.authenticated and \
self.name not in SAFE_COMMANDS:
raise BPDError(ERROR_PERMISSION, u'insufficient privileges')
try:
args = [conn] + self.args
results = func(*args)
if results:
for data in results:
yield conn.send(data)
except BPDError as e:
# An exposed error. Set the command name and then let
# the Connection handle it.
e.cmd_name = self.name
raise e
except BPDClose:
# An indication that the connection should close. Send
# it on the Connection.
raise
except Exception as e:
# An "unintentional" error. Hide it from the client.
            log.error('{}', traceback.format_exc())
raise BPDError(ERROR_SYSTEM, u'server error', self.name)
class CommandList(list):
"""A list of commands issued by the client for processing by the
server. May be verbose, in which case the response is delimited, or
not. Should be a list of `Command` objects.
"""
def __init__(self, sequence=None, verbose=False):
"""Create a new `CommandList` from the given sequence of
`Command`s. If `verbose`, this is a verbose command list.
"""
if sequence:
for item in sequence:
self.append(item)
self.verbose = verbose
def run(self, conn):
"""Coroutine executing all the commands in this list.
"""
for i, command in enumerate(self):
try:
yield bluelet.call(command.run(conn))
except BPDError as e:
# If the command failed, stop executing.
e.index = i # Give the error the correct index.
raise e
            # Otherwise, possibly send the output delimiter if we're in a
# verbose ("OK") command list.
if self.verbose:
yield conn.send(RESP_CLIST_VERBOSE)
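# Illustrative sketch (not part of the original module; the inner commands are
# made up): a verbose command list as it arrives on the wire, framed by the MPD
# protocol's command-list keywords, before Connection.run() above collects the
# inner lines into a CommandList of Command objects:
#
#     command_list_ok_begin
#     status
#     currentsong
#     command_list_end
#
# Because the "ok" (verbose) form was used, CommandList.run() emits the verbose
# delimiter after each successfully executed inner command.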
# A subclass of the basic, protocol-handling server that actually plays
# music.
class Server(BaseServer):
"""An MPD-compatible server using GStreamer to play audio and beets
to store its library.
"""
def __init__(self, library, host, port, password):
try:
from beetsplug.bpd import gstplayer
except ImportError as e:
            # This is a little hacky, but it's the best approach I know of for now.
if e.args[0].endswith(' gst'):
raise NoGstreamerError()
else:
raise
super(Server, self).__init__(host, port, password)
self.lib = library
self.player = gstplayer.GstPlayer(self.play_finished)
self.cmd_update(None)
def run(self):
self.player.run()
super(Server, self).run()
def play_finished(self):
"""A callback invoked every time our player finishes a
track.
"""
self.cmd_next(None)
# Metadata helper functions.
def _item_info(self, item):
info_lines = [
u'file: ' + item.destination(fragment=True),
u'Time: ' + six.text_type(int(item.length)),
u'Title: ' + item.title,
u'Artist: ' + item.artist,
u'Album: ' + item.album,
u'Genre: ' + item.genre,
]
track = six.text_type(item.track)
if item.tracktotal:
track += u'/' + six.text_type(item.tracktotal)
info_lines.append(u'Track: ' + track)
info_lines.append(u'Date: ' + six.text_type(item.year))
try:
pos = self._id_to_index(item.id)
info_lines.append(u'Pos: ' + six.text_type(pos))
except ArgumentNotFoundError:
# Don't include position if not in playlist.
pass
info_lines.append(u'Id: ' + six.text_type(item.id))
return info_lines
def _item_id(self, item):
return item.id
# Database updating.
def cmd_update(self, conn, path=u'/'):
"""Updates the catalog to reflect the current database state.
"""
# Path is ignored. Also, the real MPD does this asynchronously;
# this is done inline.
print(u'Building directory tree...')
self.tree = vfs.libtree(self.lib)
print(u'... done.')
self.updated_time = time.time()
# Path (directory tree) browsing.
def _resolve_path(self, path):
"""Returns a VFS node or an item ID located at the path given.
If the path does not exist, raises a
"""
components = path.split(u'/')
node = self.tree
for component in components:
if not component:
continue
if isinstance(node, int):
# We're trying to descend into a file node.
raise ArgumentNotFoundError()
if component in node.files:
node = node.files[component]
elif component in node.dirs:
node = node.dirs[component]
else:
raise ArgumentNotFoundError()
return node
def _path_join(self, p1, p2):
"""Smashes together two BPD paths."""
out = p1 + u'/' + p2
return out.replace(u'//', u'/').replace(u'//', u'/')
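    # Illustrative example (not part of the original module; the paths are made
    # up): _path_join(u'/rock', u'albums/1999') returns u'/rock/albums/1999',
    # and _path_join(u'/', u'/rock') collapses the doubled slashes to u'/rock';
    # the second .replace() call catches runs of slashes that the first pass
    # leaves behind.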
def cmd_lsinfo(self, conn, path=u"/"):
"""Sends info on all the items in the path."""
node = self._resolve_path(path)
if isinstance(node, int):
# Trying to list a track.
raise BPDError(ERROR_ARG, u'this is not a directory')
else:
for name, itemid in iter(sorted(node.files.items())):
item = self.lib.get_item(itemid)
yield self._item_info(item)
for name, _ in iter(sorted(node.dirs.items())):
dirpath = self._path_join(path, name)
if dirpath.startswith(u"/"):
# Strip leading slash (libmpc rejects this).
dirpath = dirpath[1:]
yield u'directory: %s' % dirpath
def _listall(self, basepath, node, info=False):
"""Helper function for recursive listing. If info, show
tracks' complete info; otherwise, just show items' paths.
"""
if isinstance(node, int):
# List a single file.
if info:
item = self.lib.get_item(node)
yield self._item_info(item)
else:
yield u'file: ' + basepath
else:
# List a directory. Recurse into both directories and files.
for name, itemid in sorted(node.files.items()):
newpath = self._path_join(basepath, name)
# "yield from"
for v in self._listall(newpath, itemid, info):
yield v
for name, subdir in sorted(node.dirs.items()):
newpath = self._path_join(basepath, name)
yield u'directory: ' + newpath
for v in self._listall(newpath, subdir, info):
yield v
def cmd_listall(self, conn, path=u"/"):
"""Send the paths all items in the directory, recursively."""
return self._listall(path, self._resolve_path(path), False)
def cmd_listallinfo(self, conn, path=u"/"):
"""Send info on all the items in the directory, recursively."""
return self._listall(path, self._resolve_path(path), True)
# Playlist manipulation.
def _all_items(self, node):
"""Generator yielding all items under a VFS node.
"""
if isinstance(node, int):
# Could be more efficient if we built up all the IDs and
# then issued a single SELECT.
yield self.lib.get_item(node)
else:
# Recurse into a directory.
for name, itemid in sorted(node.files.items()):
# "yield from"
for v in self._all_items(itemid):
yield v
for name, subdir in sorted(node.dirs.items()):
for v in self._all_items(subdir):
yield v
def _add(self, path, send_id=False):
"""Adds a track or directory to the playlist, specified by the
path. If `send_id`, write each item's id to the client.
"""
for item in self._all_items(self._resolve_path(path)):
self.playlist.append(item)
if send_id:
yield u'Id: ' + six.text_type(item.id)
self.playlist_version += 1
def cmd_add(self, conn, path):
"""Adds a track or directory to the playlist, specified by a
path.
"""
return self._add(path, False)
def cmd_addid(self, conn, path):
"""Same as `cmd_add` but sends an id back to the client."""
return self._add(path, True)
# Server info.
def cmd_status(self, conn):
for line in super(Server, self).cmd_status(conn):
yield line
if self.current_index > -1:
item = self.playlist[self.current_index]
yield u'bitrate: ' + six.text_type(item.bitrate / 1000)
# Missing 'audio'.
(pos, total) = self.player.time()
yield u'time: ' + six.text_type(pos) + u':' + six.text_type(total)
# Also missing 'updating_db'.
def cmd_stats(self, conn):
"""Sends some statistics about the library."""
with self.lib.transaction() as tx:
statement = 'SELECT COUNT(DISTINCT artist), ' \
'COUNT(DISTINCT album), ' \
'COUNT(id), ' \
'SUM(length) ' \
'FROM items'
artists, albums, songs, totaltime = tx.query(statement)[0]
yield (
u'artists: ' + six.text_type(artists),
u'albums: ' + six.text_type(albums),
u'songs: ' + six.text_type(songs),
u'uptime: ' + six.text_type(int(time.time() - self.startup_time)),
u'playtime: ' + u'0', # Missing.
u'db_playtime: ' + six.text_type(int(totaltime)),
u'db_update: ' + six.text_type(int(self.updated_time)),
)
# Searching.
tagtype_map = {
u'Artist': u'artist',
u'Album': u'album',
u'Title': u'title',
u'Track': u'track',
u'AlbumArtist': u'albumartist',
u'AlbumArtistSort': u'albumartist_sort',
# Name?
u'Genre': u'genre',
u'Date': u'year',
u'Composer': u'composer',
# Performer?
u'Disc': u'disc',
u'filename': u'path', # Suspect.
}
def cmd_tagtypes(self, conn):
"""Returns a list of the metadata (tag) fields available for
searching.
"""
for tag in self.tagtype_map:
yield u'tagtype: ' + tag
def _tagtype_lookup(self, tag):
"""Uses `tagtype_map` to look up the beets column name for an
MPD tagtype (or throw an appropriate exception). Returns both
the canonical name of the MPD tagtype and the beets column
name.
"""
for test_tag, key in self.tagtype_map.items():
# Match case-insensitively.
if test_tag.lower() == tag.lower():
return test_tag, key
raise BPDError(ERROR_UNKNOWN, u'no such tagtype')
def _metadata_query(self, query_type, any_query_type, kv):
"""Helper function returns a query object that will find items
according to the library query type provided and the key-value
pairs specified. The any_query_type is used for queries of
type "any"; if None, then an error is thrown.
"""
if kv: # At least one key-value pair.
queries = []
# Iterate pairwise over the arguments.
it = iter(kv)
for tag, value in zip(it, it):
if tag.lower() == u'any':
if any_query_type:
queries.append(any_query_type(value,
ITEM_KEYS_WRITABLE,
query_type))
else:
raise BPDError(ERROR_UNKNOWN, u'no such tagtype')
else:
_, key = self._tagtype_lookup(tag)
queries.append(query_type(key, value))
return dbcore.query.AndQuery(queries)
else: # No key-value pairs.
return dbcore.query.TrueQuery()
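    # Illustrative example (not part of the original module; the request is
    # made up): for the client request `search artist foo any bar`, kv is
    # ('artist', u'foo', 'any', u'bar'). The pairwise loop above builds
    # query_type('artist', u'foo') plus an any-field query for u'bar', and
    # combines them with AndQuery; an empty kv falls through to TrueQuery,
    # which matches everything.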
def cmd_search(self, conn, *kv):
"""Perform a substring match for items."""
query = self._metadata_query(dbcore.query.SubstringQuery,
dbcore.query.AnyFieldQuery,
kv)
for item in self.lib.items(query):
yield self._item_info(item)
def cmd_find(self, conn, *kv):
"""Perform an exact match for items."""
query = self._metadata_query(dbcore.query.MatchQuery,
None,
kv)
for item in self.lib.items(query):
yield self._item_info(item)
def cmd_list(self, conn, show_tag, *kv):
"""List distinct metadata values for show_tag, possibly
filtered by matching match_tag to match_term.
"""
show_tag_canon, show_key = self._tagtype_lookup(show_tag)
query = self._metadata_query(dbcore.query.MatchQuery, None, kv)
clause, subvals = query.clause()
statement = 'SELECT DISTINCT ' + show_key + \
' FROM items WHERE ' + clause + \
' ORDER BY ' + show_key
with self.lib.transaction() as tx:
rows = tx.query(statement, subvals)
for row in rows:
yield show_tag_canon + u': ' + six.text_type(row[0])
def cmd_count(self, conn, tag, value):
"""Returns the number and total time of songs matching the
tag/value query.
"""
_, key = self._tagtype_lookup(tag)
songs = 0
playtime = 0.0
for item in self.lib.items(dbcore.query.MatchQuery(key, value)):
songs += 1
playtime += item.length
yield u'songs: ' + six.text_type(songs)
yield u'playtime: ' + six.text_type(int(playtime))
# "Outputs." Just a dummy implementation because we don't control
# any outputs.
def cmd_outputs(self, conn):
"""List the available outputs."""
yield (
u'outputid: 0',
u'outputname: gstreamer',
u'outputenabled: 1',
)
def cmd_enableoutput(self, conn, output_id):
output_id = cast_arg(int, output_id)
if output_id != 0:
raise ArgumentIndexError()
def cmd_disableoutput(self, conn, output_id):
output_id = cast_arg(int, output_id)
if output_id == 0:
raise BPDError(ERROR_ARG, u'cannot disable this output')
else:
raise ArgumentIndexError()
# Playback control. The functions below hook into the
# half-implementations provided by the base class. Together, they're
# enough to implement all normal playback functionality.
def cmd_play(self, conn, index=-1):
new_index = index != -1 and index != self.current_index
was_paused = self.paused
super(Server, self).cmd_play(conn, index)
if self.current_index > -1: # Not stopped.
if was_paused and not new_index:
# Just unpause.
self.player.play()
else:
self.player.play_file(self.playlist[self.current_index].path)
def cmd_pause(self, conn, state=None):
super(Server, self).cmd_pause(conn, state)
if self.paused:
self.player.pause()
elif self.player.playing:
self.player.play()
def cmd_stop(self, conn):
super(Server, self).cmd_stop(conn)
self.player.stop()
def cmd_seek(self, conn, index, pos):
"""Seeks to the specified position in the specified song."""
index = cast_arg(int, index)
pos = cast_arg(int, pos)
super(Server, self).cmd_seek(conn, index, pos)
self.player.seek(pos)
# Volume control.
def cmd_setvol(self, conn, vol):
vol = cast_arg(int, vol)
super(Server, self).cmd_setvol(conn, vol)
self.player.volume = float(vol) / 100
# Beets plugin hooks.
class BPDPlugin(BeetsPlugin):
"""Provides the "beet bpd" command for running a music player
server.
"""
def __init__(self):
super(BPDPlugin, self).__init__()
self.config.add({
'host': u'',
'port': 6600,
'password': u'',
'volume': VOLUME_MAX,
})
self.config['password'].redact = True
def start_bpd(self, lib, host, port, password, volume, debug):
"""Starts a BPD server."""
if debug: # FIXME this should be managed by BeetsPlugin
self._log.setLevel(logging.DEBUG)
else:
self._log.setLevel(logging.WARNING)
try:
server = Server(lib, host, port, password)
server.cmd_setvol(None, volume)
server.run()
except NoGstreamerError:
global_log.error(u'Gstreamer Python bindings not found.')
            global_log.error(u'Install "gstreamer1.0" and "python-gi" '
                             u'or similar package to use BPD.')
def commands(self):
cmd = beets.ui.Subcommand(
'bpd', help=u'run an MPD-compatible music player server'
)
cmd.parser.add_option(
'-d', '--debug', action='store_true',
help=u'dump all MPD traffic to stdout'
)
def func(lib, opts, args):
host = self.config['host'].as_str()
host = args.pop(0) if args else host
port = args.pop(0) if args else self.config['port'].get(int)
if args:
raise beets.ui.UserError(u'too many arguments')
password = self.config['password'].as_str()
volume = self.config['volume'].get(int)
debug = opts.debug or False
self.start_bpd(lib, host, int(port), password, volume, debug)
cmd.func = func
return [cmd]
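# Illustrative usage (not part of the plugin source; host and port values are
# examples): with the plugin enabled, `beet bpd` starts the server on the
# configured host and port, positional arguments override the configuration,
# and -d dumps MPD traffic for debugging:
#
#     beet bpd 0.0.0.0 6600
#     beet bpd -d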
|
|
"""
.. module:: resource_config_mock_test
:platform: Linux, Windows
:synopsis: Unit tests for the config module.
:license: BSD, see LICENSE for more details.
.. moduleauthor:: Ryan Gard <[email protected]>
"""
__version__ = 0.1
#===================================================================================================
# Imports
#===================================================================================================
from unittest import TestCase, skipIf
from config import ResourceConfig, TestPlanConfig, ConfigError
from core import BasicInstaller, MSIInstaller
from mock import patch
#===================================================================================================
# Globals
#===================================================================================================
SKIP_EVERYTHING = False
#===================================================================================================
# Classes
#===================================================================================================
class TestCaseStub(object):
def __init__(self, name):
self.name = name
def add_test_prep(self,
resource_id,
sut,
checkpoint,
tools,
builds,
agent_commands,
post_wait,
timeout,
restart,
restart_wait):
pass
def add_test_step(self,
description,
resource_id,
test_directory,
interpreter,
test_exec,
test_params,
timeout,
post_wait,
restart,
restart_wait):
pass
def add_build(self, sut, build, timeout):
pass
def add_tool(self, sut, tool, timeout):
pass
def add_resoure_refresh(self, resource_id):
pass
def execute(self):
pass
class SystemUnderTestStub(object):
def __init__(self,
alias,
machine=None,
bespoke_root='/opt/bespoke',
credentials=None,
machine_type=None,
network_address=None,
os=None,
os_label=None,
arch_type=None,
role=None,
check_points=None,
tools=None):
self._alias = alias
self._machine = machine
self._bespoke_root = bespoke_root
self._credentials = credentials
self._machine_type = machine_type
self._network_address = network_address
self._os = os
self._os_label = os_label
self._arch_type = arch_type
self._role = role
self._check_points = check_points
self._available_tools = tools
@property
def alias(self):
return self._alias
@property
def bespoke_root(self):
return self._bespoke_root
class ToolStub(object):
"""Note: valid install types = basic_install, msi_install, no_install"""
def __init__(self,
name,
install_type,
os_type=None,
os_arch=None,
version=None,
source_type=None,
source_copy_once=None,
source_properties=None,
install_properties=None):
self._name = name
self._os_type = os_type
self._os_arch = os_arch
self._version = version
self._source_type = source_type
self._source_copy_once = source_copy_once
self._install_type = install_type
self._source_properties = source_properties
self._install_properties = install_properties
@property
def name(self):
return self._name
@property
def install_type(self):
return self._install_type
#===================================================================================================
# Mock-in Stubs
#===================================================================================================
def _add_virtual_machine_stub(self, content):
"""A stub method that stubs the internal 'content' dictionary for the 'Machine' key."""
content['Machine'] = None
def _add_virtual_template_stub(self, content):
""""A stub method that stubs the internal 'content' dictionary for the 'Machine' key."""
content['Machine'] = None
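# Illustrative sketch (not part of the original tests; the helper name is made
# up and it is never called): a standalone equivalent of the class decorators
# used further down. `patch.object` with `new=` swaps the real method for the
# stub so ResourceConfig never builds real 'Machine' objects.
def _example_manual_patch():
    with patch.object(ResourceConfig, '_add_virtual_machine',
                      new=_add_virtual_machine_stub):
        # Any ResourceConfig parsed inside this block uses the stub above.
        pass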
#===================================================================================================
# Tests
#===================================================================================================
@patch.object(ResourceConfig, '_add_virtual_machine', new=_add_virtual_machine_stub)
@patch.object(ResourceConfig, '_add_virtual_template', new=_add_virtual_template_stub)
class ResourceConfigTests(TestCase):
"""Tests for the ResourceConfig class in the config module."""
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test1_happy_path_static(self):
"""Happy path test to verify static SystemUnderTest machines are handled correct.
* No tools
* No checkpoints
"""
test_config = ResourceConfig(r'configs/resource/happy_path_static.xml',
r'../src/bespoke/xsd/resource_config.xsd')
actual_vm_1 = test_config['BVT-2k8-R2-64']
#VM1 Verification
self.assertEqual(actual_vm_1.alias, 'BVT-2k8-R2-64')
self.assertEqual(actual_vm_1.network_address, 'bvt-2k8-r2-64.fancylads.local')
self.assertEqual(actual_vm_1.bespoke_root, r'C:\Bespoke\TestManager')
self.assertEqual(actual_vm_1.machine_type, 'static')
self.assertDictEqual(actual_vm_1.credentials, {r'FancyLads\BobTester':'password'})
self.assertEqual(actual_vm_1.os, 'Windows')
self.assertEqual(actual_vm_1.os_label, 'Windows 2008 R2')
self.assertEqual(actual_vm_1.arch_type, 'x64')
self.assertEqual(actual_vm_1.role, 'Server')
self.assertEqual(len(actual_vm_1.check_points), 0)
self.assertEqual(len(actual_vm_1.tools), 0)
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test2_reference_to_sut(self):
"""Verify that when using the dictionary "__getitem__" method for static VM machines that
we get a reference of the SystemUnderTest rather than a copy of the original in
the "_content" dictionary.
"""
test_config = ResourceConfig(r'configs/resource/happy_path_static.xml',
r'../src/bespoke/xsd/resource_config.xsd')
actual_template_1 = test_config['BVT-2k8-R2-64']
self.assertIs(actual_template_1, test_config._content['BVT-2k8-R2-64'])
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test3_tools(self):
"""Happy path test to verify static SystemUnderTest machines are handled correct.
* Tools
* No checkpoints
"""
test_config = ResourceConfig(r'configs/resource/tools_static.xml',
r'../src/bespoke/xsd/resource_config.xsd')
#Expected tools
exp_tools = ['BillyMcToolin', 'ToolMeFaceHole', 'ThoughtBadger']
actual_vm_1 = test_config['BVT-2k8-R2-64']
#VM1 Verification
self.assertEqual(actual_vm_1.alias, 'BVT-2k8-R2-64')
self.assertEqual(actual_vm_1.network_address, 'bvt-2k8-r2-64.fancylads.local')
self.assertEqual(actual_vm_1.bespoke_root, r'C:\Bespoke\TestManager')
self.assertEqual(actual_vm_1.machine_type, 'static')
self.assertDictEqual(actual_vm_1.credentials, {r'FancyLads\BobTester':'password'})
self.assertEqual(actual_vm_1.os, 'Windows')
self.assertEqual(actual_vm_1.os_label, 'Windows 2008 R2')
self.assertEqual(actual_vm_1.arch_type, 'x64')
self.assertEqual(actual_vm_1.role, 'Server')
self.assertEqual(len(actual_vm_1.check_points), 0)
self.assertListEqual(actual_vm_1.tools, exp_tools)
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test4_tools_checkpoints(self):
"""Happy path test to verify static SystemUnderTest machines are handled correct.
* Tools
* Checkpoints
"""
test_config = ResourceConfig(r'configs/resource/checkpoints_static.xml',
r'../src/bespoke/xsd/resource_config.xsd')
#Expected tools
exp_tools = ['BillyMcToolin', 'ToolMeFaceHole', 'ThoughtBadger']
#Expected checkpoints.
exp_checkpoints = {'Test': [], 'TestLess': [], 'TestMore': []}
actual_vm_1 = test_config['BVT-2k8-R2-64']
#VM1 Verification
self.assertEqual(actual_vm_1.alias, 'BVT-2k8-R2-64')
self.assertEqual(actual_vm_1.network_address, 'bvt-2k8-r2-64.fancylads.local')
self.assertEqual(actual_vm_1.bespoke_root, r'C:\Bespoke\TestManager')
self.assertEqual(actual_vm_1.machine_type, 'static')
self.assertDictEqual(actual_vm_1.credentials, {r'FancyLads\BobTester':'password'})
self.assertEqual(actual_vm_1.os, 'Windows')
self.assertEqual(actual_vm_1.os_label, 'Windows 2008 R2')
self.assertEqual(actual_vm_1.arch_type, 'x64')
self.assertEqual(actual_vm_1.role, 'Server')
self.assertDictEqual(actual_vm_1.check_points, exp_checkpoints)
self.assertListEqual(actual_vm_1.tools, exp_tools)
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test5_tools_checkpoints_with_tools(self):
"""Happy path test to verify static SystemUnderTest machines are handled correct.
* Tools
* Checkpoints with tools.
"""
test_config = ResourceConfig(r'configs/resource/checkpoints_with_tools_static.xml',
r'../src/bespoke/xsd/resource_config.xsd')
#Expected tools
exp_tools = ['BillyMcToolin', 'ToolMeFaceHole', 'ThoughtBadger']
#Expected checkpoints.
exp_checkpoints = {'Test': ['CrazyPeople'],
'TestLess': ['ExtremeKnitting', 'PowerfulManThighs', 'GiantWomanFeet'],
'TestMore': ['DumbThings', 'DumberThings']}
actual_vm_1 = test_config['BVT-2k8-R2-64']
#VM1 Verification
self.assertEqual(actual_vm_1.alias, 'BVT-2k8-R2-64')
self.assertEqual(actual_vm_1.network_address, 'bvt-2k8-r2-64.fancylads.local')
self.assertEqual(actual_vm_1.bespoke_root, r'C:\Bespoke\TestManager')
self.assertEqual(actual_vm_1.machine_type, 'static')
self.assertDictEqual(actual_vm_1.credentials, {r'FancyLads\BobTester':'password'})
self.assertEqual(actual_vm_1.os, 'Windows')
self.assertEqual(actual_vm_1.os_label, 'Windows 2008 R2')
self.assertEqual(actual_vm_1.arch_type, 'x64')
self.assertEqual(actual_vm_1.role, 'Server')
self.assertDictEqual(actual_vm_1.check_points, exp_checkpoints)
self.assertListEqual(actual_vm_1.tools, exp_tools)
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test6_duplicate_alias_static(self):
"""Verify that we catch duplicate resource alias found in static VM machines."""
#Attempt to checkout again.
with self.assertRaises(ConfigError) as cm:
ResourceConfig(r'configs/resource/duplicate_alias_static.xml',
r'../src/bespoke/xsd/resource_config.xsd')
excep = cm.exception
self.assertEqual(excep.msg, "The resource alias 'BVT-2k8-R2-64' used more than once!")
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test7_duplicate_alias_template(self):
"""Verify that we catch duplicate resource alias found in template VM machines."""
#Attempt to checkout again.
with self.assertRaises(ConfigError) as cm:
ResourceConfig(r'configs/resource/duplicate_alias_template.xml',
r'../src/bespoke/xsd/resource_config.xsd')
excep = cm.exception
self.assertEqual(excep.msg, "The resource alias 'BVT-2k3-R2-32' used more than once!")
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test8_duplicate_alias_mixed(self):
"""Verify that we catch duplicate resource alias found in either template or static
VM machines.
"""
#Attempt to checkout again.
with self.assertRaises(ConfigError) as cm:
ResourceConfig(r'configs/resource/duplicate_alias_mixed.xml',
r'../src/bespoke/xsd/resource_config.xsd')
excep = cm.exception
self.assertEqual(excep.msg, "The resource alias 'BVT-2k8-R2-64' used more than once!")
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test9_happy_path_template(self):
"""Happy path test to verify templated SystemUnderTest machines are handled correct.
* No tools
"""
test_config = ResourceConfig(r'configs/resource/happy_path_template.xml',
r'../src/bespoke/xsd/resource_config.xsd')
actual_template_1 = test_config['BVT-2k3-R2-32']
#VM1 Verification
self.assertEqual(actual_template_1.alias, 'BVT-2k3-R2-32')
self.assertEqual(actual_template_1.bespoke_root, r'C:\Bespoke\TestManager')
self.assertEqual(actual_template_1.machine_type, 'template')
self.assertDictEqual(actual_template_1.credentials, {r'FancyLads\BobTester':'password'})
self.assertEqual(actual_template_1.os, 'Windows')
self.assertEqual(actual_template_1.os_label, 'Windows 2003 R2')
self.assertEqual(actual_template_1.arch_type, 'x86')
self.assertEqual(actual_template_1.role, 'Server')
self.assertEqual(len(actual_template_1.tools), 0)
#This information is not set by default for templates.
self.assertEqual(actual_template_1.check_points, None)
self.assertEqual(actual_template_1.network_address, None)
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test10_copy_of_sut(self):
"""Verify that when using the dictionary "__getitem__" method for templated VM machines that
we get a copy of the SystemUnderTest rather than a reference to the original in
the "_content" dictionary.
"""
test_config = ResourceConfig(r'configs/resource/happy_path_template.xml',
r'../src/bespoke/xsd/resource_config.xsd')
actual_template_1 = test_config['BVT-2k3-R2-32']
self.assertIsNot(actual_template_1, test_config._content['BVT-2k3-R2-32'])
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test11_tools(self):
"""Happy path test to verify templated SystemUnderTest machines are handled correct.
* Tools
"""
test_config = ResourceConfig(r'configs/resource/tools_template.xml',
r'../src/bespoke/xsd/resource_config.xsd')
#Expected tools
exp_tools = ['BillyMcToolin', 'ToolMeFaceHole', 'ThoughtBadger']
actual_template_1 = test_config['BVT-2k3-R2-32']
#VM1 Verification
self.assertEqual(actual_template_1.alias, 'BVT-2k3-R2-32')
self.assertEqual(actual_template_1.bespoke_root, r'C:\Bespoke\TestManager')
self.assertEqual(actual_template_1.machine_type, 'template')
self.assertDictEqual(actual_template_1.credentials, {r'FancyLads\BobTester':'password'})
self.assertEqual(actual_template_1.os, 'Windows')
self.assertEqual(actual_template_1.os_label, 'Windows 2003 R2')
self.assertEqual(actual_template_1.arch_type, 'x86')
self.assertEqual(actual_template_1.role, 'Server')
self.assertListEqual(actual_template_1.tools, exp_tools)
#This information is not set by default for templates.
self.assertEqual(actual_template_1.check_points, None)
self.assertEqual(actual_template_1.network_address, None)
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test12_missing_ext_config(self):
"""Verify that missing extended configuration elements generate appropriate exceptions.
"""
#Attempt to checkout again.
with self.assertRaises(ConfigError) as cm:
ResourceConfig(r'configs/resource/missing_ext_config_template.xml',
r'../src/bespoke/xsd/resource_config.xsd')
excep = cm.exception
self.assertEqual(excep.msg, 'The extended config element "VagrantHypervisor" is required '
'for the Vagrant template "BVT-2k3-R2-32"!')
class TestPlanConfigTests(TestCase):
"""Tests for the TestPlanConfig class in the config module."""
def setUp(self):
self.builds = {'Happy_Build':ToolStub('Happy_Build', 'msi_install'),
'Unhappy_Build':ToolStub('Unhappy_Build', 'no_install'),
'Mildly_Happy_Build':ToolStub('Mildly_Happy_Build', 'basic_install')}
self.tools = {'Tool_1':ToolStub('Tool_1', 'msi_install'),
'Tool_2':ToolStub('Tool_2', 'basic_install'),
'Tool_3':ToolStub('Tool_3', 'msi_install')}
self.resources = {'Windows_VM':SystemUnderTestStub('Windows_VM'),
'CentOS_VM':SystemUnderTestStub('CentOS_VM'),
'Ubuntu_VM':SystemUnderTestStub('Ubuntu_VM')}
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test1_happy_path_resource_count(self):
"""Verify that the number of elements is correct in the test case.."""
test_config = TestPlanConfig(r'configs/test_plan/happy_path.xml',
r'../src/bespoke/xsd/test_plan.xsd',
self.builds,
self.tools,
self.resources)
#Items returns a tuple of key|value pair hence the 2 order array syntax.
self.assertEqual(len(test_config['Happy_Test_Case_1']._tests),
19,
'Incorrect number of elements found in TestCase!')
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test2_happy_path_test_prep(self):
"""Verify that the "TestPrep" elements exist in the right order with the correct
content."""
test_config = TestPlanConfig(r'configs/test_plan/happy_path.xml',
r'../src/bespoke/xsd/test_plan.xsd',
self.builds,
self.tools,
self.resources)
#Element 0
test_prep_0 = test_config['Happy_Test_Case_1']._tests[0]
self.assertEqual(test_prep_0.name, 'Test_System_1', 'Incorrect TestPrep name!')
self.assertEqual(test_prep_0.sut.alias, 'Windows_VM', 'Incorrect SUT name!')
self.assertEqual(test_prep_0._checkpoint, 'ReadyToAutoTest', 'Incorrect checkpoint!')
self.assertEqual(test_prep_0._post_wait, 5, 'Incorrect postwait!')
self.assertEqual(test_prep_0._timeout, 600, 'Incorrect timeout!')
#Element 5
test_prep_5 = test_config['Happy_Test_Case_1']._tests[5]
self.assertEqual(test_prep_5.name, 'Test_System_2', 'Incorrect TestPrep name!')
self.assertEqual(test_prep_5.sut.alias, 'CentOS_VM', 'Incorrect SUT name!')
self.assertEqual(test_prep_5._checkpoint, 'StartTesting', 'Incorrect checkpoint!')
self.assertEqual(test_prep_5._post_wait, 8, 'Incorrect postwait!')
self.assertEqual(test_prep_5._timeout, 599, 'Incorrect timeout!')
#Element 8
test_prep_8 = test_config['Happy_Test_Case_1']._tests[8]
self.assertEqual(test_prep_8.name, 'Test_System_3', 'Incorrect TestPrep name!')
self.assertEqual(test_prep_8.sut.alias, 'Ubuntu_VM', 'Incorrect SUT name!')
self.assertEqual(test_prep_8._checkpoint, 'TestNow', 'Incorrect checkpoint!')
self.assertEqual(test_prep_8._post_wait, 123124, 'Incorrect postwait!')
self.assertEqual(test_prep_8._timeout, 2, 'Incorrect timeout!')
#Element 12
test_prep_12 = test_config['Happy_Test_Case_1']._tests[12]
self.assertEqual(test_prep_12.name, 'Test_System_1', 'Incorrect TestPrep name!')
self.assertEqual(test_prep_12.sut.alias, 'Windows_VM', 'Incorrect SUT name!')
self.assertEqual(test_prep_12._checkpoint, 'ReadyToAutoTest', 'Incorrect checkpoint!')
self.assertEqual(test_prep_12._post_wait, 5, 'Incorrect postwait!')
self.assertEqual(test_prep_12._timeout, 600, 'Incorrect timeout!')
#Element 15
        test_prep_15 = test_config['Happy_Test_Case_1']._tests[15]
self.assertEqual(test_prep_15.name, 'Test_System_2', 'Incorrect TestPrep name!')
self.assertEqual(test_prep_15.sut.alias, 'CentOS_VM', 'Incorrect SUT name!')
self.assertEqual(test_prep_15._checkpoint, 'StartTesting', 'Incorrect checkpoint!')
self.assertEqual(test_prep_15._post_wait, 8, 'Incorrect postwait!')
self.assertEqual(test_prep_15._timeout, 599, 'Incorrect timeout!')
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test3_happy_path_test_installers(self):
"""Verify that the "_Installer" elements exist in the right order with the correct
content."""
test_config = TestPlanConfig(r'configs/test_plan/happy_path.xml',
r'../src/bespoke/xsd/test_plan.xsd',
self.builds,
self.tools,
self.resources)
#Element 2
test_installer_2 = test_config['Happy_Test_Case_1']._tests[2]
self.assertEqual(test_installer_2._tool.name, 'Tool_1', 'Incorrect tool name!')
self.assertIsInstance(test_installer_2, MSIInstaller, 'Incorrect installer type!')
#Element 3
test_installer_3 = test_config['Happy_Test_Case_1']._tests[3]
self.assertEqual(test_installer_3._tool.name, 'Tool_2', 'Incorrect tool name!')
self.assertIsInstance(test_installer_3, BasicInstaller, 'Incorrect installer type!')
#Element 4
test_installer_4 = test_config['Happy_Test_Case_1']._tests[4]
self.assertEqual(test_installer_4._tool.name, 'Happy_Build', 'Incorrect tool name!')
self.assertIsInstance(test_installer_4, MSIInstaller, 'Incorrect installer type!')
#Element 7
test_installer_7 = test_config['Happy_Test_Case_1']._tests[7]
self.assertEqual(test_installer_7._tool.name, 'Tool_2', 'Incorrect tool name!')
self.assertIsInstance(test_installer_7, BasicInstaller, 'Incorrect installer type!')
#Element 9
test_installer_9 = test_config['Happy_Test_Case_1']._tests[9]
self.assertEqual(test_installer_9._tool.name, 'Tool_3', 'Incorrect tool name!')
self.assertIsInstance(test_installer_9, MSIInstaller, 'Incorrect installer type!')
#Element 10
test_installer_10 = test_config['Happy_Test_Case_1']._tests[10]
self.assertEqual(test_installer_10._tool.name, 'Mildly_Happy_Build', 'Incorrect tool name!')
self.assertIsInstance(test_installer_10, BasicInstaller, 'Incorrect installer type!')
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test4_happy_path_test_power_control(self):
"""Verify that the "PowerControl" elements exist in the right order with the correct
content."""
test_config = TestPlanConfig(r'configs/test_plan/happy_path.xml',
r'../src/bespoke/xsd/test_plan.xsd',
self.builds,
self.tools,
self.resources)
#Element 1
test_power_control_1 = test_config['Happy_Test_Case_1']._tests[1]
self.assertEqual(test_power_control_1.name,
'Test_System_1_PowerControl',
'Incorrect power control name!')
self.assertTrue(test_power_control_1._wait, 'Incorrect wait status!')
#Element 6
test_power_control_6 = test_config['Happy_Test_Case_1']._tests[6]
self.assertEqual(test_power_control_6.name,
'Test_System_2_PowerControl',
'Incorrect power control name!')
self.assertFalse(test_power_control_6._wait, 'Incorrect wait status!')
#Element 13
test_power_control_13 = test_config['Happy_Test_Case_1']._tests[13]
self.assertEqual(test_power_control_13.name,
'Test_System_1_PowerControl',
'Incorrect power control name!')
self.assertTrue(test_power_control_13._wait, 'Incorrect wait status!')
#Element 16
test_power_control_16 = test_config['Happy_Test_Case_1']._tests[16]
self.assertEqual(test_power_control_16.name,
'Test_System_2_PowerControl',
'Incorrect power control name!')
self.assertFalse(test_power_control_16._wait, 'Incorrect wait status!')
#Element 18
test_power_control_18 = test_config['Happy_Test_Case_1']._tests[18]
self.assertEqual(test_power_control_18.name,
'Test Step 3_PowerControl',
'Incorrect power control name!')
self.assertTrue(test_power_control_18._wait, 'Incorrect wait status!')
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test5_happy_path_test_steps(self):
"""Verify that the "Step" elements exist in the right order with the correct
content."""
test_config = TestPlanConfig(r'configs/test_plan/happy_path.xml',
r'../src/bespoke/xsd/test_plan.xsd',
self.builds,
self.tools,
self.resources)
#Element 11
test_step_11 = test_config['Happy_Test_Case_1']._tests[11]
self.assertEqual(test_step_11.name, 'Test Step 1', 'Incorrect step name!')
self.assertEqual(test_step_11._sut.alias, 'Windows_VM', 'Incorrect SUT!')
self.assertEqual(test_step_11._interpreter, None, 'Incorrect interpreter!')
self.assertEqual(test_step_11._post_wait, 5, 'Incorrect postwait!')
self.assertEqual(test_step_11._test_directory,
'Fancy_Lads\\Tests',
'Incorrect test directory!')
self.assertEqual(test_step_11._test_exec, 'happy_tester.exe', 'Incorrect test exec!')
self.assertEqual(test_step_11.timeout, 600, 'Incorrect timeout')
self.assertDictEqual(test_step_11._test_params,
{'--cwd': '"\\"C:\\tests\\""', '--resultsPath': '"\\"C:\\temp\\""'},
'Incorrect params!')
#Element 14
test_step_14 = test_config['Happy_Test_Case_1']._tests[14]
self.assertEqual(test_step_14.name, 'Test Step 2', 'Incorrect step name!')
self.assertEqual(test_step_14._sut.alias, 'CentOS_VM', 'Incorrect SUT!')
self.assertEqual(test_step_14._interpreter, 'python', 'Incorrect interpreter!')
self.assertEqual(test_step_14._post_wait, 10, 'Incorrect postwait!')
self.assertEqual(test_step_14._test_directory,
'Fancy_Lads\\More_Tests',
'Incorrect test directory!')
self.assertEqual(test_step_14._test_exec, 'super_happy_tester.py', 'Incorrect test exec!')
self.assertEqual(test_step_14.timeout, 6000, 'Incorrect test timeout!')
self.assertDictEqual(test_step_14._test_params,
{'--cwd': '"\\"C:\\tests\\""', '--resultsPath': '"\\"C:\\happy\\""'},
'Incorrect params!')
#Element 17
test_step_17 = test_config['Happy_Test_Case_1']._tests[17]
self.assertEqual(test_step_17.name, 'Test Step 3', 'Incorrect step name!')
self.assertEqual(test_step_17._sut.alias, 'Ubuntu_VM', 'Incorrect SUT!')
self.assertEqual(test_step_17._interpreter, 'perl', 'Incorrect interpreter!')
self.assertEqual(test_step_17._post_wait, 7, 'Incorrect postwait!')
self.assertEqual(test_step_17._test_directory,
'Fancy_Lads\\Even_More_Tests',
'Incorrect test directory!')
self.assertEqual(test_step_17._test_exec, 'sad_tester.pl', 'Incorrect test exec!')
self.assertEqual(test_step_17.timeout, 333, 'Incorrect test timeout!')
self.assertDictEqual(test_step_17._test_params,
{'--cwd': '"\\"C:\\unhappy_tests\\""', '--resultsPath': '"\\"C:\\sad\\""'},
'Incorrect params!')
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test6_bad_build(self):
"""Verify that the user can't specify builds that don't exist."""
#Attempt to open a config with a bad build.
with self.assertRaises(ConfigError) as cm:
TestPlanConfig(r'configs/test_plan/bad_build.xml',
r'../src/bespoke/xsd/test_plan.xsd',
self.builds,
self.tools,
self.resources)
excep = cm.exception
self.assertEqual(excep.msg, 'The build "Bad_Build" specified in the "Bad_Build_Test_Case" '
'test case not defined in any BuildConfig!')
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test7_duplicate_builds(self):
"""Verify that the user can't specify the same build twice."""
#Attempt to open a config with the same build specified twice.
with self.assertRaises(ConfigError) as cm:
TestPlanConfig(r'configs/test_plan/duplicate_builds.xml',
r'../src/bespoke/xsd/test_plan.xsd',
self.builds,
self.tools,
self.resources)
excep = cm.exception
self.assertEqual(excep.msg, 'The build "Happy_Build" used more than once in the '
'"Duplicate_Build_Test_Case" test case!')
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test8_bad_tool(self):
"""Verify that the user can't specify tools that don't exist."""
#Attempt to open a config with the same build specified twice.
with self.assertRaises(ConfigError) as cm:
TestPlanConfig(r'configs/test_plan/bad_tool.xml',
r'../src/bespoke/xsd/test_plan.xsd',
self.builds,
self.tools,
self.resources)
excep = cm.exception
self.assertEqual(excep.msg, 'The tool "Bad_Tool" specified in the "Bad_Tool_Test_Case" '
'test case not defined in any ToolConfig!')
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test9_duplicate_tools(self):
"""Verify that the user can't specify the same tool twice."""
#Attempt to open a config with the same tool specified twice.
with self.assertRaises(ConfigError) as cm:
TestPlanConfig(r'configs/test_plan/duplicate_tools.xml',
r'../src/bespoke/xsd/test_plan.xsd',
self.builds,
self.tools,
self.resources)
excep = cm.exception
self.assertEqual(excep.msg, 'The tool "Tool_1" used more than once in the '
'"Duplicate_Tool_Test_Case" test case!')
@skipIf(SKIP_EVERYTHING, 'Skip if we are creating/modifying tests!')
def test10_invalid_test_exe(self):
"""Verify that invalid test executable is recognized and rejected."""
#Attempt to open a config with invalid executable in test step.
with self.assertRaises(ConfigError) as cm:
TestPlanConfig(r'configs/test_plan/invalid_executable.xml',
r'../src/bespoke/xsd/test_plan.xsd',
self.builds,
self.tools,
self.resources)
excep = cm.exception
self.assertEqual(excep.msg, "Element 'Executable': '' is not a valid value of the atomic "
"type 'nonEmptyString'. Line: 23 Column: 0")
|
|
"""Support for Ecovacs Ecovacs Vaccums."""
import logging
import sucks
from homeassistant.components.vacuum import (
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_STATUS,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
VacuumDevice,
)
from homeassistant.helpers.icon import icon_for_battery_level
from . import ECOVACS_DEVICES
_LOGGER = logging.getLogger(__name__)
SUPPORT_ECOVACS = (
SUPPORT_BATTERY
| SUPPORT_RETURN_HOME
| SUPPORT_CLEAN_SPOT
| SUPPORT_STOP
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
| SUPPORT_LOCATE
| SUPPORT_STATUS
| SUPPORT_SEND_COMMAND
| SUPPORT_FAN_SPEED
)
ATTR_ERROR = "error"
ATTR_COMPONENT_PREFIX = "component_"
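# Illustrative sketch (not part of the original integration; the helper name is
# made up and it is never called): the SUPPORT_* flags above form a bitmask, so
# a single capability can be checked with a bitwise AND.
def _example_supports_fan_speed():
    """Return True because SUPPORT_FAN_SPEED is included in SUPPORT_ECOVACS."""
    return bool(SUPPORT_ECOVACS & SUPPORT_FAN_SPEED)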
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Ecovacs vacuums."""
vacuums = []
for device in hass.data[ECOVACS_DEVICES]:
vacuums.append(EcovacsVacuum(device))
_LOGGER.debug("Adding Ecovacs Vacuums to Home Assistant: %s", vacuums)
add_entities(vacuums, True)
class EcovacsVacuum(VacuumDevice):
"""Ecovacs Vacuums such as Deebot."""
def __init__(self, device):
"""Initialize the Ecovacs Vacuum."""
self.device = device
self.device.connect_and_wait_until_ready()
if self.device.vacuum.get("nick", None) is not None:
self._name = "{}".format(self.device.vacuum["nick"])
else:
# In case there is no nickname defined, use the device id
self._name = "{}".format(self.device.vacuum["did"])
self._fan_speed = None
self._error = None
_LOGGER.debug("Vacuum initialized: %s", self.name)
async def async_added_to_hass(self) -> None:
"""Set up the event listeners now that hass is ready."""
self.device.statusEvents.subscribe(lambda _: self.schedule_update_ha_state())
self.device.batteryEvents.subscribe(lambda _: self.schedule_update_ha_state())
self.device.lifespanEvents.subscribe(lambda _: self.schedule_update_ha_state())
self.device.errorEvents.subscribe(self.on_error)
def on_error(self, error):
"""Handle an error event from the robot.
This will not change the entity's state. If the error caused the state
to change, that will come through as a separate on_status event
"""
if error == "no_error":
self._error = None
else:
self._error = error
self.hass.bus.fire(
"ecovacs_error", {"entity_id": self.entity_id, "error": error}
)
self.schedule_update_ha_state()
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state."""
return False
@property
def unique_id(self) -> str:
"""Return an unique ID."""
return self.device.vacuum.get("did", None)
@property
def is_on(self):
"""Return true if vacuum is currently cleaning."""
return self.device.is_cleaning
@property
def is_charging(self):
"""Return true if vacuum is currently charging."""
return self.device.is_charging
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_ECOVACS
@property
def status(self):
"""Return the status of the vacuum cleaner."""
return self.device.vacuum_status
def return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
self.device.run(sucks.Charge())
@property
def battery_icon(self):
"""Return the battery icon for the vacuum cleaner."""
return icon_for_battery_level(
battery_level=self.battery_level, charging=self.is_charging
)
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
if self.device.battery_status is not None:
return self.device.battery_status * 100
return super().battery_level
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
return self.device.fan_speed
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return [sucks.FAN_SPEED_NORMAL, sucks.FAN_SPEED_HIGH]
def turn_on(self, **kwargs):
"""Turn the vacuum on and start cleaning."""
self.device.run(sucks.Clean())
def turn_off(self, **kwargs):
"""Turn the vacuum off stopping the cleaning and returning home."""
self.return_to_base()
def stop(self, **kwargs):
"""Stop the vacuum cleaner."""
self.device.run(sucks.Stop())
def clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
self.device.run(sucks.Spot())
def locate(self, **kwargs):
"""Locate the vacuum cleaner."""
self.device.run(sucks.PlaySound())
def set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if self.is_on:
self.device.run(sucks.Clean(mode=self.device.clean_status, speed=fan_speed))
def send_command(self, command, params=None, **kwargs):
"""Send a command to a vacuum cleaner."""
self.device.run(sucks.VacBotCommand(command, params))
@property
def device_state_attributes(self):
"""Return the device-specific state attributes of this vacuum."""
data = {}
data[ATTR_ERROR] = self._error
for key, val in self.device.components.items():
attr_name = ATTR_COMPONENT_PREFIX + key
data[attr_name] = int(val * 100)
return data
|
|
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import hashlib
import itertools
import six
from nailgun.middleware import utils
from nailgun.db import db
from nailgun.db.sqlalchemy.models import ActionLog
from nailgun import consts
compiled_urls_actions_mapping = utils.compile_mapping_keys(
{
r'.*/clusters/(?P<cluster_id>\d+)/changes/?$': {
'action_name': 'deploy_changes',
'action_group': 'cluster_changes'
},
r'.*/clusters/(?P<cluster_id>\d+)/provision/?$': {
'action_name': 'provision',
'action_group': 'cluster_changes'
},
r'.*/clusters/(?P<cluster_id>\d+)/deploy/?$': {
'action_name': 'deploy',
'action_group': 'cluster_changes'
},
r'.*/clusters/(?P<cluster_id>\d+)/stop_deployment/?$': {
'action_name': 'stop_deployment',
'action_group': 'cluster_changes'
},
r'.*/clusters/(?P<cluster_id>\d+)/reset/?$': {
'action_name': 'reset',
'action_group': 'cluster_changes'
},
r'.*/clusters/(?P<cluster_id>\d+)/update/?$': {
'action_name': 'update',
'action_group': 'cluster_changes'
},
r'.*/clusters/?$': {
'action_name': 'cluster_collection',
'action_group': 'cluster_changes'
},
r'.*/clusters/(?P<cluster_id>\d+)/?$': {
'action_name': 'cluster_instance',
'action_group': 'cluster_changes'
},
(r'.*/clusters/(?P<cluster_id>\d+)'
r'/network_configuration/nova_network/?$'): {
'action_name': 'nova_network',
'action_group': 'network_configuration'
},
r'.*/clusters/(?P<cluster_id>\d+)/network_configuration/neutron/?$': {
'action_name': 'neutron',
'action_group': 'network_configuration'
},
(r'.*/clusters/(?P<cluster_id>\d+)/network_configuration/'
r'nova_network/verify/?$'): {
'action_name': 'nova_network',
'action_group': 'network_verification'
},
(r'.*/clusters/(?P<cluster_id>\d+)/network_configuration/'
r'neutron/verify/?$'): {
'action_name': 'neutron',
'action_group': 'network_verification'
},
r'.*/clusters/(?P<cluster_id>\d+)/attributes/?$': {
'action_name': 'attributes',
'action_group': 'cluster_attributes'
},
r'.*/clusters/(?P<cluster_id>\d+)/attributes/defaults/?$': {
'action_name': 'attributes_defaults',
'action_group': 'cluster_attributes'
},
r'.*/clusters/(?P<cluster_id>\d+)/orchestrator/deployment/?$': {
'action_name': 'deployment_info',
'action_group': 'orchestrator'
},
r'.*/clusters/(?P<cluster_id>\d+)/orchestrator/provisioning/?$': {
'action_name': 'provisioning_info',
'action_group': 'orchestrator'
},
r'.*/settings/?$': {
'action_name': 'master_node_settings',
'action_group': 'master_node_settings'
},
r'.*/releases/?$': {
'action_name': 'releases_collection',
'action_group': 'release_changes'
},
r'.*/releases/(?P<obj_id>\d+)/?$': {
'action_name': 'release_instance',
'action_group': 'release_changes'
},
r'.*/tasks/?$': {
'action_name': 'tasks_collection',
'action_group': 'tasks_changes'
},
r'.*/tasks/(?P<obj_id>\d+)/?$': {
'action_name': 'task_instance',
'action_group': 'tasks_changes'
},
}
)
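# Illustrative sketch (not part of the original module; the helper name and
# sample URL are made up, and plain `re` stands in for the compiled mapping
# keys): how a request path matches one of the regex keys above and how the
# named group yields the cluster id recorded in the ActionLog entry.
def _example_match_cluster_changes_url():
    import re
    pattern = re.compile(r'.*/clusters/(?P<cluster_id>\d+)/changes/?$')
    match = pattern.match('/api/clusters/42/changes')
    return match.group('cluster_id') if match else None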
class ConnectionMonitorMiddleware(object):
methods_to_analyze = ('POST', 'PUT', 'DELETE', 'PATCH')
def __init__(self, app):
self.app = app
self.status = None
def __call__(self, env, start_response):
if env['REQUEST_METHOD'] in self.methods_to_analyze:
url_matcher = self._get_url_matcher(url=env['PATH_INFO'])
if url_matcher:
request_body = utils.get_body_from_env(env)
def save_headers_start_response(status, headers, *args):
"""Hook for saving resp headers for further processing"""
self.status = status
return start_response(status, headers, *args)
# Prepare arguments for ActionLog instance creation
create_kwargs = {}
actor_id = self._get_actor_id(env)
create_kwargs['actor_id'] = actor_id
# save actor_id in env for further processing
env['fuel.action.actor_id'] = actor_id
create_kwargs['start_timestamp'] = datetime.datetime.utcnow()
response = self.app(env, save_headers_start_response)
create_kwargs['end_timestamp'] = datetime.datetime.utcnow()
                # Since the response is an iterator, we duplicate it to avoid
                # exhausting it during analysis: one copy is processed by the
                # stats collection logic and the other is propagated further
                # up the middleware stack.
response_to_analyse, response_to_propagate = \
itertools.tee(response)
create_kwargs['action_name'] = \
compiled_urls_actions_mapping[url_matcher]['action_name']
create_kwargs['action_group'] = \
compiled_urls_actions_mapping[url_matcher]['action_group']
create_kwargs['action_type'] = \
consts.ACTION_TYPES.http_request
create_kwargs['additional_info'] = \
self._get_additional_info(env,
request_body,
response_to_analyse)
# get cluster_id from url
cluster_id = utils.get_group_from_matcher(url_matcher,
env['PATH_INFO'],
'cluster_id')
if cluster_id:
cluster_id = int(cluster_id)
create_kwargs['cluster_id'] = cluster_id
db.add(ActionLog(**create_kwargs))
db.commit()
return response_to_propagate
return self.app(env, start_response)
def _get_url_matcher(self, url):
for url_matcher in six.iterkeys(compiled_urls_actions_mapping):
if url_matcher.match(url):
return url_matcher
return None
def _get_actor_id(self, env):
token_id = env.get('HTTP_X_AUTH_TOKEN')
if not token_id:
return None
return hashlib.sha256(token_id).hexdigest()
def _get_additional_info(self, env, request_body, response_to_analyse):
additional_info = {
'request_data': self._get_request_data(env, request_body),
'response_data': self._get_response_data(response_to_analyse)
}
return additional_info
def _get_request_data(self, env, request_body):
request_data = {
'http_method': env['REQUEST_METHOD'],
'url': env['PATH_INFO'],
'data': {},
}
return request_data
def _get_response_data(self, response_iterator):
"""Retrieves data from response iterator
:param response_iterator: iterator over response data
:returns: python dict with response data, status and
http message if any
"""
response_data = {
'status': self.status,
'data': {}
}
        # Check whether the request failed.
if not self.status.startswith('20'):
            # Useful data will always be stored in the first element of the
            # response.
response_data['data'] = {'message': six.next(response_iterator)}
return response_data
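# Illustrative sketch (not part of the original middleware; the helper name and
# response body are made up, and it is never called): why __call__ uses
# itertools.tee — the WSGI response body is an iterator, so one copy can be
# consumed for stats while the other is handed back to the server intact.
def _example_tee_response():
    response = iter([b'{"message": "error"}'])
    to_analyse, to_propagate = itertools.tee(response)
    first_chunk = six.next(to_analyse)   # consumed by the analysis path
    remaining = list(to_propagate)       # still complete for the server
    return first_chunk, remaining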
|
|
from abc import ABCMeta, abstractmethod
import json
import requests
import eventlet
from eventlet.green import zmq, time
import hexdump
import scapy.all
import six
from .. import LOG as _LOG
from ..signal.signal import ActionBase
from ..signal.event import PacketEvent
from ..signal.action import AcceptEventAction, NopAction
from pyearthquake.inspector.internal.ether_tcp_watcher import TCPWatcher
LOG = _LOG.getChild(__name__)
ENABLE_TCP_WATCHER = True
eventlet.monkey_patch() # for requests
@six.add_metaclass(ABCMeta)
class EtherInspectorBase(object):
pkt_recv_handler_table = {}
def __init__(self, zmq_addr, orchestrator_rest_url='http://localhost:10080/api/v2',
entity_id='_earthquake_ether_inspector'):
if ENABLE_TCP_WATCHER:
LOG.info('Using TCPWatcher')
self.tcp_watcher = TCPWatcher()
else:
self.tcp_watcher = None
        self.deferred_events = {} # key: string(event_uuid), value: {'event': PacketEvent, 'metadata': dict}
LOG.info('Hookswitch ZMQ Addr: %s', zmq_addr)
self.zmq_addr = zmq_addr
LOG.info('Orchestrator REST URL: %s', orchestrator_rest_url)
self.orchestrator_rest_url = orchestrator_rest_url
LOG.info('Inspector System Entity ID: %s', entity_id)
self.entity_id = entity_id
def start(self):
zmq_worker_handle = self.start_zmq_worker()
rest_worker_handle = eventlet.spawn(self._orchestrator_rest_worker)
zmq_worker_handle.wait()
rest_worker_handle.wait()
raise RuntimeError('should not reach here')
def start_zmq_worker(self):
self.zmq_ctx = zmq.Context()
self.zs = self.zmq_ctx.socket(zmq.PAIR)
self.zs.bind(self.zmq_addr)
worker_handle = eventlet.spawn(self._zmq_worker)
return worker_handle
def regist_layer_on_tcp(self, klazz, tcp_port):
scapy.all.bind_layers(scapy.all.TCP, klazz, dport=tcp_port)
scapy.all.bind_layers(scapy.all.TCP, klazz, sport=tcp_port)
def inspect(self, eth_bytes):
"""
scapy inspector
Do NOT call TWICE for the same packet, as the inspector can have side-effects
"""
pkt = scapy.all.Ether(eth_bytes)
return pkt
def _zmq_worker(self):
"""
ZeroMQ worker for the inspector
"""
while True:
metadata_str, eth_bytes = self.zs.recv_multipart()
metadata = json.loads(metadata_str)
try:
# LOG.info('Full-Hexdump (%d bytes)', len(eth_bytes))
# for line in hexdump.hexdump(eth_bytes, result='generator'):
# LOG.info(line)
if self.tcp_watcher:
self.tcp_watcher.on_recv(metadata, eth_bytes,
default_handler=self._on_recv_from_hookswitch,
retrans_handler=self._on_tcp_retrans)
else:
self._on_recv_from_hookswitch(metadata, eth_bytes)
except Exception as e:
LOG.error('Error in _zmq_worker()', exc_info=True)
try:
LOG.error('Full-Hexdump (%d bytes)', len(eth_bytes))
for line in hexdump.hexdump(eth_bytes, result='generator'):
LOG.error(line)
except:
LOG.error('Error while hexdumping', exc_info=True)
self._send_to_hookswitch(metadata)
def _send_to_hookswitch(self, metadata, op='accept'):
assert isinstance(metadata, dict)
assert op in ('accept', 'drop')
resp_metadata = metadata.copy()
resp_metadata['op'] = op
resp_metadata_str = json.dumps(resp_metadata)
self.zs.send_multipart((resp_metadata_str, ''))
def _on_recv_from_hookswitch(self, metadata, eth_bytes):
inspected_packet = self.inspect(eth_bytes)
event = self.map_packet_to_event(inspected_packet)
assert event is None or isinstance(event, PacketEvent)
if not event:
self._send_to_hookswitch(metadata, op='accept')
else:
self.on_packet_event(metadata, event)
def _on_tcp_retrans(self, metadata, eth_bytes):
self._send_to_hookswitch(metadata, op='drop')
@abstractmethod
def map_packet_to_event(self, pkt):
"""
return None if this packet is NOT interesting at all.
"""
pass
def on_packet_event(self, metadata, event, buffer_if_not_sent=False):
assert isinstance(event, PacketEvent)
event.entity = self.entity_id
sent = self.send_event_to_orchestrator(event)
if not sent:
if buffer_if_not_sent:
LOG.debug('Buffering an event: %s', event)
else:
                LOG.debug('Accepting an event (could not be sent to orchestrator): %s', event)
self._send_to_hookswitch(metadata)
return
if event.deferred:
self.defer_packet_event(metadata, event)
else:
# NOTE: non-deferred packet events are useful for logging
LOG.debug('Accepting an event (not deferred): %s', event)
self._send_to_hookswitch(metadata)
def defer_packet_event(self, metadata, event):
"""
Defer the packet until the orchestrator permits
"""
assert isinstance(event, PacketEvent)
assert event.deferred
LOG.debug('Defer event=%s, deferred+:%d->%d',
event, len(self.deferred_events), len(self.deferred_events)+1)
self.deferred_events[event.uuid] = {'event': event, 'metadata': metadata, 'time': time.time()}
def accept_deferred_event_uuid(self, event_uuid):
try:
event = self.deferred_events[event_uuid]['event']
assert isinstance(event, PacketEvent)
assert event.deferred
metadata = self.deferred_events[event_uuid]['metadata']
LOG.debug('Accept deferred event=%s, deferred-:%d->%d',
event, len(self.deferred_events), len(self.deferred_events)-1)
self._send_to_hookswitch(metadata)
del self.deferred_events[event_uuid]
except Exception as e:
LOG.error('cannot pass this event: %s', event_uuid, exc_info=True)
def send_event_to_orchestrator(self, event):
try:
event_jsdict = event.to_jsondict()
headers = {'content-type': 'application/json'}
post_url = self.orchestrator_rest_url + '/events/' + self.entity_id + '/' + event.uuid
LOG.debug('POST %s', post_url)
r = requests.post(post_url, data=json.dumps(event_jsdict), headers=headers)
return True
except Exception as e:
LOG.error('cannot send event: %s', event, exc_info=True)
## do not re-raise the exception to continue processing
return False
def on_recv_action_from_orchestrator(self, action):
LOG.debug('Received action: %s', action)
if isinstance(action, AcceptEventAction):
ev_uuid = action.option['event_uuid']
self.accept_deferred_event_uuid(ev_uuid)
elif isinstance(action, NopAction):
LOG.debug('nop action: %s', action)
else:
LOG.warn('Unsupported action: %s', action)
def _orchestrator_rest_worker(self):
error_count = 0
got = None
while True:
try:
get_url = self.orchestrator_rest_url + '/actions/' + self.entity_id
LOG.debug('GET %s', get_url)
got = requests.get(get_url)
got_jsdict = got.json()
action = ActionBase.dispatch_from_jsondict(got_jsdict)
LOG.debug('got %s', action.uuid)
delete_url = get_url + '/' + action.uuid
LOG.debug('DELETE %s', delete_url)
deleted = requests.delete(delete_url)
assert deleted.status_code == 200
self.on_recv_action_from_orchestrator(action)
error_count = 0
except Exception as e:
LOG.error('cannot HTTP GET', exc_info=True)
if got is not None:
LOG.error('Got: %s', got.text)
error_count += 1
eventlet.sleep(error_count * 1.0)
got = None
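# --- Illustrative sketch (not part of the original inspector) --------------
# A minimal concrete inspector, assuming a hypothetical protocol on TCP port
# 4242, to show the expected contract of map_packet_to_event(): return a
# PacketEvent for packets the orchestrator should arbitrate, or None to let
# the packet pass through immediately. PacketEvent.from_message() is an
# assumed factory here; the exact constructor may differ between
# pyearthquake versions.
class ExampleTcpInspector(EtherInspectorBase):
    TCP_PORT = 4242  # hypothetical port, for illustration only

    def map_packet_to_event(self, pkt):
        if not pkt.haslayer(scapy.all.TCP):
            return None
        tcp = pkt[scapy.all.TCP]
        if tcp.dport != self.TCP_PORT and tcp.sport != self.TCP_PORT:
            return None
        # assumed factory; adapt to the PacketEvent API actually in use
        return PacketEvent.from_message(src_entity=self.entity_id,
                                        dst_entity='unknown')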
|
|
# -*- coding: utf-8 -*-
import datetime
import httplib as http
import time
import functools
import furl
import itsdangerous
import jwe
import jwt
import mock
from django.utils import timezone
from framework.auth import cas, signing
from framework.auth.core import Auth
from framework.exceptions import HTTPError
from nose.tools import * # noqa
from osf_tests import factories
from tests.base import OsfTestCase, get_default_metaschema
from osf_tests.factories import (AuthUserFactory, ProjectFactory,
RegistrationFactory)
from website import settings
from addons.base import views
from addons.github.exceptions import ApiError
from addons.github.models import GithubFolder, GithubFile, GithubFileNode
from addons.github.tests.factories import GitHubAccountFactory
from osf.models import Session, MetaSchema
from osf.models import files as file_models
from osf.models.files import BaseFileNode, TrashedFileNode
from website.project import new_private_link
from website.project.views.node import _view_project as serialize_node
from website.util import api_url_for, rubeus
from dateutil.parser import parse as parse_date
from framework import sentry
class SetEnvironMiddleware(object):
def __init__(self, app, **kwargs):
self.app = app
self.kwargs = kwargs
def __call__(self, environ, start_response):
environ.update(self.kwargs)
return self.app(environ, start_response)
class TestAddonAuth(OsfTestCase):
def setUp(self):
super(TestAddonAuth, self).setUp()
self.user = AuthUserFactory()
self.auth_obj = Auth(user=self.user)
self.node = ProjectFactory(creator=self.user)
self.session = Session(data={'auth_user_id': self.user._id})
self.session.save()
self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id)
self.configure_addon()
self.JWE_KEY = jwe.kdf(settings.WATERBUTLER_JWE_SECRET.encode('utf-8'), settings.WATERBUTLER_JWE_SALT.encode('utf-8'))
def configure_addon(self):
self.user.add_addon('github')
self.user_addon = self.user.get_addon('github')
self.oauth_settings = GitHubAccountFactory(display_name='john')
self.oauth_settings.save()
self.user.external_accounts.add(self.oauth_settings)
self.user.save()
self.node.add_addon('github', self.auth_obj)
self.node_addon = self.node.get_addon('github')
self.node_addon.user = 'john'
self.node_addon.repo = 'youre-my-best-friend'
self.node_addon.user_settings = self.user_addon
self.node_addon.external_account = self.oauth_settings
self.node_addon.save()
self.user_addon.oauth_grants[self.node._id] = {self.oauth_settings._id: []}
self.user_addon.save()
def build_url(self, **kwargs):
options = {'payload': jwe.encrypt(jwt.encode({'data': dict(dict(
action='download',
nid=self.node._id,
provider=self.node_addon.config.short_name), **kwargs),
'exp': timezone.now() + datetime.timedelta(seconds=settings.WATERBUTLER_JWT_EXPIRATION),
}, settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM), self.JWE_KEY)}
return api_url_for('get_auth', **options)
def test_auth_download(self):
url = self.build_url()
res = self.app.get(url, auth=self.user.auth)
data = jwt.decode(jwe.decrypt(res.json['payload'].encode('utf-8'), self.JWE_KEY), settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM)['data']
assert_equal(data['auth'], views.make_auth(self.user))
assert_equal(data['credentials'], self.node_addon.serialize_waterbutler_credentials())
assert_equal(data['settings'], self.node_addon.serialize_waterbutler_settings())
expected_url = furl.furl(self.node.api_url_for('create_waterbutler_log', _absolute=True, _internal=True))
observed_url = furl.furl(data['callback_url'])
observed_url.port = expected_url.port
assert_equal(expected_url, observed_url)
def test_auth_missing_args(self):
url = self.build_url(cookie=None)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_auth_bad_cookie(self):
url = self.build_url(cookie=self.cookie)
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 200)
data = jwt.decode(jwe.decrypt(res.json['payload'].encode('utf-8'), self.JWE_KEY), settings.WATERBUTLER_JWT_SECRET, algorithm=settings.WATERBUTLER_JWT_ALGORITHM)['data']
assert_equal(data['auth'], views.make_auth(self.user))
assert_equal(data['credentials'], self.node_addon.serialize_waterbutler_credentials())
assert_equal(data['settings'], self.node_addon.serialize_waterbutler_settings())
expected_url = furl.furl(self.node.api_url_for('create_waterbutler_log', _absolute=True, _internal=True))
observed_url = furl.furl(data['callback_url'])
observed_url.port = expected_url.port
assert_equal(expected_url, observed_url)
def test_auth_cookie(self):
url = self.build_url(cookie=self.cookie[::-1])
res = self.app.get(url, expect_errors=True)
assert_equal(res.status_code, 401)
def test_auth_missing_addon(self):
url = self.build_url(provider='queenhub')
res = self.app.get(url, expect_errors=True, auth=self.user.auth)
assert_equal(res.status_code, 400)
@mock.patch('addons.base.views.cas.get_client')
def test_auth_bad_bearer_token(self, mock_cas_client):
mock_cas_client.return_value = mock.Mock(profile=mock.Mock(return_value=cas.CasResponse(authenticated=False)))
url = self.build_url()
res = self.app.get(url, headers={'Authorization': 'Bearer invalid_access_token'}, expect_errors=True)
assert_equal(res.status_code, 403)
class TestAddonLogs(OsfTestCase):
def setUp(self):
super(TestAddonLogs, self).setUp()
self.user = AuthUserFactory()
self.auth_obj = Auth(user=self.user)
self.node = ProjectFactory(creator=self.user)
self.session = Session(data={'auth_user_id': self.user._id})
self.session.save()
self.cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(self.session._id)
self.configure_addon()
def configure_addon(self):
self.user.add_addon('github')
self.user_addon = self.user.get_addon('github')
self.oauth_settings = GitHubAccountFactory(display_name='john')
self.oauth_settings.save()
self.user.external_accounts.add(self.oauth_settings)
self.user.save()
self.node.add_addon('github', self.auth_obj)
self.node_addon = self.node.get_addon('github')
self.node_addon.user = 'john'
self.node_addon.repo = 'youre-my-best-friend'
self.node_addon.user_settings = self.user_addon
self.node_addon.external_account = self.oauth_settings
self.node_addon.save()
self.user_addon.oauth_grants[self.node._id] = {self.oauth_settings._id: []}
self.user_addon.save()
def build_payload(self, metadata, **kwargs):
options = dict(
auth={'id': self.user._id},
action='create',
provider=self.node_addon.config.short_name,
metadata=metadata,
time=time.time() + 1000,
)
options.update(kwargs)
options = {
key: value
for key, value in options.iteritems()
if value is not None
}
message, signature = signing.default_signer.sign_payload(options)
return {
'payload': message,
'signature': signature,
}
@mock.patch('website.notifications.events.files.FileAdded.perform')
def test_add_log(self, mock_perform):
path = 'pizza'
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path})
nlogs = self.node.logs.count()
self.app.put_json(url, payload, headers={'Content-Type': 'application/json'})
self.node.reload()
assert_equal(self.node.logs.count(), nlogs + 1)
# # Mocking form_message and perform so that the payload need not be exact.
# assert_true(mock_form_message.called, "form_message not called")
assert_true(mock_perform.called, 'perform not called')
def test_add_log_missing_args(self):
path = 'pizza'
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path}, auth=None)
nlogs = self.node.logs.count()
res = self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
self.node.reload()
assert_equal(self.node.logs.count(), nlogs)
def test_add_log_no_user(self):
path = 'pizza'
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path}, auth={'id': None})
nlogs = self.node.logs.count()
res = self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
self.node.reload()
assert_equal(self.node.logs.count(), nlogs)
def test_add_log_no_addon(self):
path = 'pizza'
node = ProjectFactory(creator=self.user)
url = node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path})
nlogs = node.logs.count()
res = self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
        node.reload()
assert_equal(node.logs.count(), nlogs)
def test_add_log_bad_action(self):
path = 'pizza'
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(metadata={'path': path}, action='dance')
nlogs = self.node.logs.count()
res = self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'},
expect_errors=True,
)
assert_equal(res.status_code, 400)
self.node.reload()
assert_equal(self.node.logs.count(), nlogs)
def test_action_file_rename(self):
url = self.node.api_url_for('create_waterbutler_log')
payload = self.build_payload(
action='rename',
metadata={
'path': 'foo',
},
source={
'materialized': 'foo',
'provider': 'github',
'node': {'_id': self.node._id},
'name': 'new.txt',
'kind': 'file',
},
destination={
'path': 'foo',
'materialized': 'foo',
'provider': 'github',
'node': {'_id': self.node._id},
'name': 'old.txt',
'kind': 'file',
},
)
self.app.put_json(
url,
payload,
headers={'Content-Type': 'application/json'}
)
self.node.reload()
assert_equal(
self.node.logs.latest().action,
'github_addon_file_renamed',
)
class TestCheckAuth(OsfTestCase):
def setUp(self):
super(TestCheckAuth, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
def test_has_permission(self):
res = views.check_access(self.node, Auth(user=self.user), 'upload', None)
assert_true(res)
def test_not_has_permission_read_public(self):
self.node.is_public = True
self.node.save()
views.check_access(self.node, Auth(), 'download', None)
def test_not_has_permission_read_has_link(self):
link = new_private_link('red-special', self.user, [self.node], anonymous=False)
views.check_access(self.node, Auth(private_key=link.key), 'download', None)
def test_not_has_permission_logged_in(self):
user2 = AuthUserFactory()
with assert_raises(HTTPError) as exc_info:
views.check_access(self.node, Auth(user=user2), 'download', None)
assert_equal(exc_info.exception.code, 403)
def test_not_has_permission_not_logged_in(self):
with assert_raises(HTTPError) as exc_info:
views.check_access(self.node, Auth(), 'download', None)
assert_equal(exc_info.exception.code, 401)
def test_has_permission_on_parent_node_copyto_pass_if_registration(self):
component_admin = AuthUserFactory()
ProjectFactory(creator=component_admin, parent=self.node)
registration = RegistrationFactory(project=self.node)
component_registration = registration._nodes.first()
assert_false(component_registration.has_permission(self.user, 'write'))
res = views.check_access(component_registration, Auth(user=self.user), 'copyto', None)
assert_true(res)
def test_has_permission_on_parent_node_metadata_pass_if_registration(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, parent=self.node, is_public=False)
component_registration = RegistrationFactory(project=component, creator=component_admin)
assert_false(component_registration.has_permission(self.user, 'read'))
res = views.check_access(component_registration, Auth(user=self.user), 'metadata', None)
assert_true(res)
def test_has_permission_on_parent_node_copyto_fail_if_not_registration(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, parent=self.node)
assert_false(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError):
views.check_access(component, Auth(user=self.user), 'copyto', None)
def test_has_permission_on_parent_node_copyfrom(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
assert_false(component.has_permission(self.user, 'write'))
res = views.check_access(component, Auth(user=self.user), 'copyfrom', None)
assert_true(res)
class TestCheckPreregAuth(OsfTestCase):
def setUp(self):
super(TestCheckPreregAuth, self).setUp()
self.prereg_challenge_admin_user = AuthUserFactory()
self.prereg_challenge_admin_user.add_system_tag(settings.PREREG_ADMIN_TAG)
self.prereg_challenge_admin_user.save()
prereg_schema = MetaSchema.objects.get(name='Prereg Challenge', schema_version=2)
self.user = AuthUserFactory()
self.node = factories.ProjectFactory(creator=self.user)
self.parent = factories.ProjectFactory()
self.child = factories.NodeFactory(parent=self.parent)
self.draft_registration = factories.DraftRegistrationFactory(
initiator=self.user,
registration_schema=prereg_schema,
branched_from=self.parent
)
def test_has_permission_download_prereg_challenge_admin(self):
res = views.check_access(self.draft_registration.branched_from,
Auth(user=self.prereg_challenge_admin_user), 'download', None)
assert_true(res)
def test_has_permission_download_on_component_prereg_challenge_admin(self):
try:
res = views.check_access(self.draft_registration.branched_from._nodes.first(),
Auth(user=self.prereg_challenge_admin_user), 'download', None)
except Exception:
self.fail()
assert_true(res)
def test_has_permission_download_not_prereg_challenge_admin(self):
new_user = AuthUserFactory()
with assert_raises(HTTPError) as exc_info:
views.check_access(self.draft_registration.branched_from,
Auth(user=new_user), 'download', None)
assert_equal(exc_info.exception.code, http.FORBIDDEN)
def test_has_permission_download_prereg_challenge_admin_not_draft(self):
with assert_raises(HTTPError) as exc_info:
views.check_access(self.node,
Auth(user=self.prereg_challenge_admin_user), 'download', None)
assert_equal(exc_info.exception.code, http.FORBIDDEN)
def test_has_permission_write_prereg_challenge_admin(self):
with assert_raises(HTTPError) as exc_info:
views.check_access(self.draft_registration.branched_from,
Auth(user=self.prereg_challenge_admin_user), 'write', None)
assert_equal(exc_info.exception.code, http.FORBIDDEN)
class TestCheckOAuth(OsfTestCase):
def setUp(self):
super(TestCheckOAuth, self).setUp()
self.user = AuthUserFactory()
self.node = ProjectFactory(creator=self.user)
def test_has_permission_private_not_authenticated(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=False)
assert_false(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError) as exc_info:
views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_equal(exc_info.exception.code, 403)
def test_has_permission_private_no_scope_forbidden(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {}})
assert_false(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError) as exc_info:
views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_equal(exc_info.exception.code, 403)
def test_has_permission_public_irrelevant_scope_allowed(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=True, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {'osf.users.all_read'}})
assert_false(component.has_permission(self.user, 'write'))
res = views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_true(res)
def test_has_permission_private_irrelevant_scope_forbidden(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {'osf.users.all_read'}})
assert_false(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError) as exc_info:
views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_equal(exc_info.exception.code, 403)
def test_has_permission_decommissioned_scope_no_error(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {
'decommissioned.scope+write',
'osf.nodes.data_read',
}})
assert_false(component.has_permission(self.user, 'write'))
res = views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_true(res)
def test_has_permission_write_scope_read_action(self):
component_admin = AuthUserFactory()
component = ProjectFactory(creator=component_admin, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {'osf.nodes.data_write'}})
assert_false(component.has_permission(self.user, 'write'))
res = views.check_access(component, Auth(user=self.user), 'download', cas_resp)
assert_true(res)
def test_has_permission_read_scope_write_action_forbidden(self):
component = ProjectFactory(creator=self.user, is_public=False, parent=self.node)
cas_resp = cas.CasResponse(authenticated=True, status=None, user=self.user._id,
attributes={'accessTokenScope': {'osf.nodes.data_read'}})
assert_true(component.has_permission(self.user, 'write'))
with assert_raises(HTTPError) as exc_info:
views.check_access(component, Auth(user=self.user), 'upload', cas_resp)
assert_equal(exc_info.exception.code, 403)
def assert_urls_equal(url1, url2):
furl1 = furl.furl(url1)
furl2 = furl.furl(url2)
for attr in ['scheme', 'host', 'port']:
setattr(furl1, attr, None)
setattr(furl2, attr, None)
# Note: furl params are ordered and cause trouble
assert_equal(dict(furl1.args), dict(furl2.args))
furl1.args = {}
furl2.args = {}
assert_equal(furl1, furl2)
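# --- Illustrative sketch (not part of the original test suite) -------------
# assert_urls_equal() above deliberately ignores scheme, host, port and query
# parameter order; this helper (never invoked by the tests) shows the kind of
# pair it is meant to treat as equal.
def _assert_urls_equal_demo():
    assert_urls_equal(
        'http://localhost:80/v1/resources/?b=2&a=1',
        'https://example.com:5000/v1/resources/?a=1&b=2',
    )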
def mock_touch(self, bearer, version=None, revision=None, **kwargs):
if version:
if self.versions:
try:
return self.versions[int(version) - 1]
except (IndexError, ValueError):
return None
else:
return None
return file_models.FileVersion()
@mock.patch('addons.github.models.GithubFileNode.touch', mock_touch)
@mock.patch('addons.github.models.GitHubClient.repo', mock.Mock(side_effect=ApiError))
class TestAddonFileViews(OsfTestCase):
def setUp(self):
super(TestAddonFileViews, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.user.add_addon('github')
self.project.add_addon('github', auth=Auth(self.user))
self.user_addon = self.user.get_addon('github')
self.node_addon = self.project.get_addon('github')
self.oauth = GitHubAccountFactory()
self.oauth.save()
self.user.external_accounts.add(self.oauth)
self.user.save()
self.node_addon.user_settings = self.user_addon
self.node_addon.external_account = self.oauth
self.node_addon.repo = 'Truth'
self.node_addon.user = 'E'
self.node_addon.save()
self.user_addon.oauth_grants[self.project._id] = {self.oauth._id: []}
self.user_addon.save()
def set_sentry(status):
def wrapper(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
enabled, sentry.enabled = sentry.enabled, status
func(*args, **kwargs)
sentry.enabled = enabled
return wrapped
return wrapper
with_sentry = set_sentry(True)
def get_test_file(self):
version = file_models.FileVersion(identifier='1')
version.save()
ret = GithubFile(
name='Test',
node=self.project,
path='/test/Test',
materialized_path='/test/Test',
)
ret.save()
ret.versions.add(version)
return ret
def get_second_test_file(self):
version = file_models.FileVersion(identifier='1')
version.save()
ret = GithubFile(
name='Test2',
node=self.project,
path='/test/Test2',
materialized_path='/test/Test2',
)
ret.save()
ret.versions.add(version)
return ret
def get_mako_return(self):
ret = serialize_node(self.project, Auth(self.user), primary=True)
ret.update({
'error': '',
'provider': '',
'file_path': '',
'sharejs_uuid': '',
'private': '',
'urls': {
'files': '',
'render': '',
'sharejs': '',
'mfr': '',
'gravatar': '',
'external': '',
'archived_from': '',
},
'size': '',
'extra': '',
'file_name': '',
'materialized_path': '',
'file_id': '',
})
ret.update(rubeus.collect_addon_assets(self.project))
return ret
def test_redirects_to_guid(self):
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=file_node.path.strip('/'),
provider='github'
),
auth=self.user.auth
)
assert_equals(resp.status_code, 302)
assert_equals(resp.location, 'http://localhost:80/{}/'.format(guid._id))
def test_action_download_redirects_to_download(self):
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.get('/{}/?action=download'.format(guid._id), auth=self.user.auth)
assert_equals(resp.status_code, 302)
location = furl.furl(resp.location)
assert_urls_equal(location.url, file_node.generate_waterbutler_url(action='download', direct=None, version=''))
def test_action_download_redirects_to_download_with_version(self):
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.get('/{}/?action=download&revision=1'.format(guid._id), auth=self.user.auth)
assert_equals(resp.status_code, 302)
location = furl.furl(resp.location)
        # Note: version is added by us, but all other url params are added as well
assert_urls_equal(location.url, file_node.generate_waterbutler_url(action='download', direct=None, revision=1, version=''))
@mock.patch('addons.base.views.addon_view_file')
def test_action_view_calls_view_file(self, mock_view_file):
self.user.reload()
self.project.reload()
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
mock_view_file.return_value = self.get_mako_return()
self.app.get('/{}/?action=view'.format(guid._id), auth=self.user.auth)
args, kwargs = mock_view_file.call_args
assert_equals(kwargs, {})
assert_equals(args[0].user._id, self.user._id)
assert_equals(args[1], self.project)
assert_equals(args[2], file_node)
assert_true(isinstance(args[3], file_node.touch(None).__class__))
@mock.patch('addons.base.views.addon_view_file')
def test_no_action_calls_view_file(self, mock_view_file):
self.user.reload()
self.project.reload()
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
mock_view_file.return_value = self.get_mako_return()
self.app.get('/{}/'.format(guid._id), auth=self.user.auth)
args, kwargs = mock_view_file.call_args
assert_equals(kwargs, {})
assert_equals(args[0].user._id, self.user._id)
assert_equals(args[1], self.project)
assert_equals(args[2], file_node)
assert_true(isinstance(args[3], file_node.touch(None).__class__))
def test_download_create_guid(self):
file_node = self.get_test_file()
assert_is(file_node.get_guid(), None)
self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=file_node.path.strip('/'),
provider='github',
),
auth=self.user.auth
)
assert_true(file_node.get_guid())
def test_view_file_does_not_delete_file_when_requesting_invalid_version(self):
with mock.patch('addons.github.models.NodeSettings.is_private',
new_callable=mock.PropertyMock) as mock_is_private:
mock_is_private.return_value = False
file_node = self.get_test_file()
assert_is(file_node.get_guid(), None)
url = self.project.web_url_for(
'addon_view_or_download_file',
path=file_node.path.strip('/'),
provider='github',
)
            # The first view generates the GUID
self.app.get(url, auth=self.user.auth)
self.app.get(url + '?version=invalid', auth=self.user.auth, expect_errors=True)
assert_is_not_none(BaseFileNode.load(file_node._id))
assert_is_none(TrashedFileNode.load(file_node._id))
def test_unauthorized_addons_raise(self):
path = 'cloudfiles'
self.node_addon.user_settings = None
self.node_addon.save()
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=path,
provider='github',
action='download'
),
auth=self.user.auth,
expect_errors=True
)
assert_equals(resp.status_code, 401)
def test_nonstorage_addons_raise(self):
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path='sillywiki',
provider='wiki',
action='download'
),
auth=self.user.auth,
expect_errors=True
)
assert_equals(resp.status_code, 400)
    def test_head_returns_url_and_redirect(self):
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.head('/{}/'.format(guid._id), auth=self.user.auth)
location = furl.furl(resp.location)
assert_equals(resp.status_code, 302)
assert_urls_equal(location.url, file_node.generate_waterbutler_url(direct=None, version=''))
def test_head_returns_url_with_version_and_redirect(self):
file_node = self.get_test_file()
guid = file_node.get_guid(create=True)
resp = self.app.head('/{}/?revision=1&foo=bar'.format(guid._id), auth=self.user.auth)
location = furl.furl(resp.location)
        # Note: version is added by us, but all other url params are added as well
assert_equals(resp.status_code, 302)
assert_urls_equal(location.url, file_node.generate_waterbutler_url(direct=None, revision=1, version='', foo='bar'))
def test_nonexistent_addons_raise(self):
path = 'cloudfiles'
self.project.delete_addon('github', Auth(self.user))
self.project.save()
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=path,
provider='github',
action='download'
),
auth=self.user.auth,
expect_errors=True
)
assert_equals(resp.status_code, 400)
def test_unauth_addons_raise(self):
path = 'cloudfiles'
self.node_addon.user_settings = None
self.node_addon.save()
resp = self.app.get(
self.project.web_url_for(
'addon_view_or_download_file',
path=path,
provider='github',
action='download'
),
auth=self.user.auth,
expect_errors=True
)
assert_equals(resp.status_code, 401)
def test_delete_action_creates_trashed_file_node(self):
file_node = self.get_test_file()
payload = {
'provider': file_node.provider,
'metadata': {
'path': '/test/Test',
'materialized': '/test/Test'
}
}
views.addon_delete_file_node(self=None, node=self.project, user=self.user, event_type='file_removed', payload=payload)
assert_false(GithubFileNode.load(file_node._id))
assert_true(TrashedFileNode.load(file_node._id))
def test_delete_action_for_folder_deletes_subfolders_and_creates_trashed_file_nodes(self):
file_node = self.get_test_file()
subfolder = GithubFolder(
name='folder',
node=self.project,
path='/test/folder/',
materialized_path='/test/folder/',
)
subfolder.save()
payload = {
'provider': file_node.provider,
'metadata': {
'path': '/test/',
'materialized': '/test/'
}
}
views.addon_delete_file_node(self=None, node=self.project, user=self.user, event_type='file_removed', payload=payload)
assert_false(GithubFileNode.load(subfolder._id))
assert_true(TrashedFileNode.load(file_node._id))
@mock.patch('website.archiver.tasks.archive')
def test_archived_from_url(self, mock_archive):
file_node = self.get_test_file()
second_file_node = self.get_second_test_file()
file_node.copied_from = second_file_node
registered_node = self.project.register_node(
schema=get_default_metaschema(),
auth=Auth(self.user),
data=None,
)
archived_from_url = views.get_archived_from_url(registered_node, file_node)
view_url = self.project.web_url_for('addon_view_or_download_file', provider=file_node.provider, path=file_node.copied_from._id)
assert_true(archived_from_url)
assert_urls_equal(archived_from_url, view_url)
@mock.patch('website.archiver.tasks.archive')
def test_archived_from_url_without_copied_from(self, mock_archive):
file_node = self.get_test_file()
registered_node = self.project.register_node(
schema=get_default_metaschema(),
auth=Auth(self.user),
data=None,
)
archived_from_url = views.get_archived_from_url(registered_node, file_node)
assert_false(archived_from_url)
@mock.patch('website.archiver.tasks.archive')
def test_copied_from_id_trashed(self, mock_archive):
file_node = self.get_test_file()
second_file_node = self.get_second_test_file()
file_node.copied_from = second_file_node
self.project.register_node(
schema=get_default_metaschema(),
auth=Auth(self.user),
data=None,
)
trashed_node = second_file_node.delete()
assert_false(trashed_node.copied_from)
@mock.patch('website.archiver.tasks.archive')
def test_missing_modified_date_in_file_data(self, mock_archive):
file_node = self.get_test_file()
file_data = {
'name': 'Test File Update',
'materialized': file_node.materialized_path,
'modified': None
}
file_node.update(revision=None, data=file_data)
assert_equal(len(file_node.history), 1)
assert_equal(file_node.history[0], file_data)
@mock.patch('website.archiver.tasks.archive')
def test_missing_modified_date_in_file_history(self, mock_archive):
file_node = self.get_test_file()
file_node.history.append({'modified': None})
file_data = {
'name': 'Test File Update',
'materialized': file_node.materialized_path,
'modified': None
}
file_node.update(revision=None, data=file_data)
assert_equal(len(file_node.history), 2)
assert_equal(file_node.history[1], file_data)
@with_sentry
@mock.patch('framework.sentry.sentry.captureMessage')
def test_update_logs_to_sentry_when_called_with_disordered_metadata(self, mock_capture):
file_node = self.get_test_file()
file_node.history.append({'modified': parse_date(
'2017-08-22T13:54:32.100900',
ignoretz=True,
            default=timezone.now() # Just in case nothing can be parsed
)})
data = {
'name': 'a name',
'materialized': 'materialized',
'modified': '2016-08-22T13:54:32.100900'
}
file_node.update(revision=None, user=None, data=data)
mock_capture.assert_called_with(unicode('update() receives metatdata older than the newest entry in file history.'), extra={'session': {}})
class TestLegacyViews(OsfTestCase):
def setUp(self):
super(TestLegacyViews, self).setUp()
self.path = 'mercury.png'
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user)
self.node_addon = self.project.get_addon('osfstorage')
file_record = self.node_addon.get_root().append_file(self.path)
self.expected_path = file_record._id
self.node_addon.save()
file_record.save()
def test_view_file_redirect(self):
url = '/{0}/osffiles/{1}/'.format(self.project._id, self.path)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
action='view',
path=self.expected_path,
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_download_file_redirect(self):
url = '/{0}/osffiles/{1}/download/'.format(self.project._id, self.path)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_download_file_version_redirect(self):
url = '/{0}/osffiles/{1}/version/3/download/'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
version=3,
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_api_download_file_redirect(self):
url = '/api/v1/project/{0}/osffiles/{1}/'.format(self.project._id, self.path)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_api_download_file_version_redirect(self):
url = '/api/v1/project/{0}/osffiles/{1}/version/3/'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
version=3,
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_no_provider_name(self):
url = '/{0}/files/{1}'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
action='view',
path=self.expected_path,
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_action_as_param(self):
url = '/{}/osfstorage/files/{}/?action=download'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
path=self.expected_path,
action='download',
provider='osfstorage',
)
assert_urls_equal(res.location, expected_url)
def test_other_addon_redirect(self):
url = '/project/{0}/mycooladdon/files/{1}/'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
action='view',
path=self.path,
provider='mycooladdon',
)
assert_urls_equal(res.location, expected_url)
def test_other_addon_redirect_download(self):
url = '/project/{0}/mycooladdon/files/{1}/download/'.format(
self.project._id,
self.path,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 301)
expected_url = self.project.web_url_for(
'addon_view_or_download_file',
path=self.path,
action='download',
provider='mycooladdon',
)
assert_urls_equal(res.location, expected_url)
|
|
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import testing_config # Must be imported before the module under test.
from unittest import mock
import os
import flask
import werkzeug
import html5lib
from pages import intentpreview
from internals import models
test_app = flask.Flask(__name__)
class IntentEmailPreviewHandlerTest(testing_config.CustomTestCase):
def setUp(self):
self.feature_1 = models.Feature(
name='feature one', summary='sum', category=1, visibility=1,
standardization=1, web_dev_views=1, impl_status_chrome=1,
intent_stage=models.INTENT_IMPLEMENT)
self.feature_1.put()
self.request_path = '/admin/features/launch/%d/%d?intent' % (
models.INTENT_SHIP, self.feature_1.key.integer_id())
self.handler = intentpreview.IntentEmailPreviewHandler()
def tearDown(self):
self.feature_1.key.delete()
def test_get__anon(self):
"""Anon cannot view this preview features, gets redirected to login."""
testing_config.sign_out()
feature_id = self.feature_1.key.integer_id()
with test_app.test_request_context(self.request_path):
actual_response = self.handler.get_template_data(feature_id=feature_id)
self.assertEqual('302 FOUND', actual_response.status)
def test_get__no_existing(self):
"""Trying to view a feature that does not exist gives a 404."""
testing_config.sign_in('[email protected]', 123567890)
bad_feature_id = self.feature_1.key.integer_id() + 1
with test_app.test_request_context(self.request_path):
with self.assertRaises(werkzeug.exceptions.NotFound):
self.handler.get_template_data(feature_id=bad_feature_id)
def test_get__no_stage_specified(self):
"""Allowed user can preview intent email for a feature using an old URL."""
request_path = (
'/admin/features/launch/%d?intent' % self.feature_1.key.integer_id())
testing_config.sign_in('[email protected]', 123567890)
feature_id = self.feature_1.key.integer_id()
    with test_app.test_request_context(request_path):
actual_data = self.handler.get_template_data(feature_id=feature_id)
self.assertIn('feature', actual_data)
self.assertEqual('feature one', actual_data['feature']['name'])
def test_get__normal(self):
"""Allowed user can preview intent email for a feature."""
testing_config.sign_in('[email protected]', 123567890)
feature_id = self.feature_1.key.integer_id()
with test_app.test_request_context(self.request_path):
actual_data = self.handler.get_template_data(feature_id=feature_id)
self.assertIn('feature', actual_data)
self.assertEqual('feature one', actual_data['feature']['name'])
def test_get_page_data(self):
"""page_data has correct values."""
feature_id = self.feature_1.key.integer_id()
with test_app.test_request_context(self.request_path):
page_data = self.handler.get_page_data(
feature_id, self.feature_1, models.INTENT_IMPLEMENT)
self.assertEqual(
'http://localhost/feature/%d' % feature_id,
page_data['default_url'])
self.assertEqual(
['motivation'],
page_data['sections_to_show'])
self.assertEqual(
'Intent to Prototype',
page_data['subject_prefix'])
def test_compute_subject_prefix__incubate_new_feature(self):
"""We offer users the correct subject line for each intent stage."""
self.assertEqual(
'Intent stage "None"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_NONE))
self.assertEqual(
'Intent stage "Start incubating"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_INCUBATE))
self.assertEqual(
'Intent to Prototype',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_IMPLEMENT))
self.assertEqual(
'Ready for Trial',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_EXPERIMENT))
self.assertEqual(
'Intent stage "Evaluate readiness to ship"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_IMPLEMENT_SHIP))
self.assertEqual(
'Intent to Experiment',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_EXTEND_TRIAL))
self.assertEqual(
'Intent to Ship',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_SHIP))
self.assertEqual(
'Intent to Extend Deprecation Trial',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_REMOVED))
self.assertEqual(
'Intent stage "Shipped"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_SHIPPED))
self.assertEqual(
'Intent stage "Parked"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_PARKED))
def test_compute_subject_prefix__deprecate_feature(self):
"""We offer users the correct subject line for each intent stage."""
self.feature_1.feature_type = models.FEATURE_TYPE_DEPRECATION_ID
self.assertEqual(
'Intent stage "None"',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_NONE))
self.assertEqual(
'Intent to Deprecate and Remove',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_INCUBATE))
self.assertEqual(
'Request for Deprecation Trial',
self.handler.compute_subject_prefix(
self.feature_1, models.INTENT_EXTEND_TRIAL))
class IntentEmailPreviewTemplateTest(testing_config.CustomTestCase):
HANDLER_CLASS = intentpreview.IntentEmailPreviewHandler
def setUp(self):
super(IntentEmailPreviewTemplateTest, self).setUp()
self.feature_1 = models.Feature(
name='feature one', summary='sum', category=1, visibility=1,
standardization=1, web_dev_views=1, impl_status_chrome=1,
intent_stage=models.INTENT_IMPLEMENT)
self.feature_1.put()
self.request_path = '/admin/features/launch/%d/%d?intent' % (
models.INTENT_SHIP, self.feature_1.key.integer_id())
self.handler = self.HANDLER_CLASS()
self.feature_id = self.feature_1.key.integer_id()
with test_app.test_request_context(self.request_path):
self.template_data = self.handler.get_template_data(
self.feature_id)
page_data = self.handler.get_page_data(
self.feature_id, self.feature_1, models.INTENT_IMPLEMENT)
self.template_data.update(page_data)
self.template_data['nonce'] = 'fake nonce'
template_path = self.handler.get_template_path(self.template_data)
self.full_template_path = os.path.join(template_path)
def test_html_rendering(self):
"""We can render the template with valid html."""
testing_config.sign_in('[email protected]', 123567890)
with test_app.test_request_context(self.request_path):
actual_data = self.handler.get_template_data(feature_id=self.feature_id)
template_text = self.handler.render(
actual_data, self.full_template_path)
parser = html5lib.HTMLParser(strict=True)
document = parser.parse(template_text)
|
|
#!/usr/bin/env python2
"""Display a string of letters to the subject, then ask them whether a certain letter
is at a certain position in the sequence. On some trials, ask them to alphabetize the
letters in their head."""
# LetterOrderTask_d1.py
# Created 10/05/17 by DJ based on SampleExperiment_d1.py
# Updated 10/24/17 by DJ - fixed basename, random doubles
from psychopy import core, gui, data, event, sound, logging
# from psychopy import visual # visual causes a bug in the guis, so it's declared after all GUIs run.
from psychopy.tools.filetools import fromFile, toFile # saving and loading parameter files
import time as ts, numpy as np # for timing and array operations
import AppKit, os, glob # for monitor size detection, files
import BasicPromptTools # for loading/presenting prompts and questions
import random, string # for randomization of trials, letters
# ====================== #
# ===== PARAMETERS ===== #
# ====================== #
# Save the parameters declared below?
saveParams = True;
newParamsFilename = 'LetterOrderParams.pickle'
# Declare primary task parameters.
params = {
# Declare stimulus and response parameters
'nTrials': 10, # number of trials in this session
'nLetters': 5, # number of letters in the string
'stringDur': 2.5, # time string is on screen (sec)
'pauseDur': 1, # time between string and cue (sec)
'cueDur': 0.5, # time instructions (remember/alphabetize) are on screen (sec)
'minDelayDur': 12, # minimum duration of cue-resp delay (seconds)
'maxDelayDur': 18, # maximum duration of cue-resp delay (seconds)
'testDur': 1, # time when test stimulus is presented (in seconds)
'minISI': 5, # min time between when one stimulus disappears and the next appears (in seconds)
'maxISI': 10, # max time between when one stimulus disappears and the next appears (in seconds)
'tStartup': 10, # pause time before starting first stimulus
'tCoolDown': 10, # pause time after end of last stimulus before "the end" text
'triggerKey': 't', # key from scanner that says scan is starting
'respKeys': ['1','2'], # keys to be used for responses (mapped to y,n)
'cues':['REMEMBER','ALPHABETIZE'], # strings of instructions for each condition (remember, alphabetize)
'letters': string.ascii_uppercase, # letters that could be in the string (all uppercase letters)
'rememberProb': 0.5, # probability of a given trial being a 'remember' trial
'trueProb': 0.5, # probability of a given trial having a 'yes'/'true' response
'respAdvances': False, # should response make stimulus disappear?
# declare prompt and question files
'skipPrompts': False, # go right to the scanner-wait page
'promptDir': 'Prompts/', # directory containing prompts and questions files
'promptFile': 'LetterOrderPrompts.txt', # Name of text file containing prompts
# declare display parameters
'fullScreen': True, # run in full screen mode?
'screenToShow': 0, # display on primary screen (0) or secondary (1)?
'fixCrossSize': 50, # size of cross, in pixels
'fixCrossPos': [0,0], # (x,y) pos of fixation cross displayed before each stimulus (for gaze drift correction)
'screenColor':(128,128,128) # in rgb255 space: (r,g,b) all between 0 and 255
}
# save parameters
if saveParams:
dlgResult = gui.fileSaveDlg(prompt='Save Params...',initFilePath = os.getcwd() + '/Params', initFileName = newParamsFilename,
allowed="PICKLE files (.pickle)|.pickle|All files (.*)|")
newParamsFilename = dlgResult
if newParamsFilename is None: # keep going, but don't save
saveParams = False
else:
toFile(newParamsFilename, params) # save it!
# ========================== #
# ===== SET UP LOGGING ===== #
# ========================== #
scriptName = os.path.basename(__file__)
scriptName = os.path.splitext(scriptName)[0] # remove extension
try: # try to get a previous parameters file
expInfo = fromFile('%s-lastExpInfo.pickle'%scriptName)
expInfo['session'] +=1 # automatically increment session number
expInfo['paramsFile'] = [expInfo['paramsFile'],'Load...']
except: # if not there then use a default set
expInfo = {
'subject':'1',
'session': 1,
'skipPrompts':False,
'paramsFile':['DEFAULT','Load...']}
# overwrite params struct if you just saved a new parameter set
if saveParams:
expInfo['paramsFile'] = [newParamsFilename,'Load...']
# present a dialogue to change/select params
dlg = gui.DlgFromDict(expInfo, title=scriptName, order=['subject','session','skipPrompts','paramsFile'])
if not dlg.OK:
core.quit() # the user hit cancel, so exit
# find parameter file
if expInfo['paramsFile'] == 'Load...':
dlgResult = gui.fileOpenDlg(prompt='Select parameters file',tryFilePath=os.getcwd(),
allowed="PICKLE files (.pickle)|.pickle|All files (.*)|")
expInfo['paramsFile'] = dlgResult[0]
# load parameter file
if expInfo['paramsFile'] not in ['DEFAULT', None]: # otherwise, just use defaults.
# load params file
params = fromFile(expInfo['paramsFile'])
# transfer skipPrompts from expInfo (gui input) to params (logged parameters)
params['skipPrompts'] = expInfo['skipPrompts']
# print params to Output
print 'params = {'
for key in sorted(params.keys()):
print " '%s': %s"%(key,params[key]) # print each value as-is (no quotes)
print '}'
# save experimental info
toFile('%s-lastExpInfo.pickle'%scriptName, expInfo)#save params to file for next time
#make a log file to save parameter/event data
dateStr = ts.strftime("%b_%d_%H%M", ts.localtime()) # add the current time
filename = '%s-%s-%d-%s'%(scriptName,expInfo['subject'], expInfo['session'], dateStr) # log filename
logging.LogFile((filename+'.log'), level=logging.INFO)#, mode='w') # w=overwrite
logging.log(level=logging.INFO, msg='---START PARAMETERS---')
logging.log(level=logging.INFO, msg='filename: %s'%filename)
logging.log(level=logging.INFO, msg='subject: %s'%expInfo['subject'])
logging.log(level=logging.INFO, msg='session: %s'%expInfo['session'])
logging.log(level=logging.INFO, msg='date: %s'%dateStr)
# log everything in the params struct
for key in sorted(params.keys()): # in alphabetical order
logging.log(level=logging.INFO, msg='%s: %s'%(key,params[key])) # log each parameter
logging.log(level=logging.INFO, msg='---END PARAMETERS---')
# ========================== #
# ===== GET SCREEN RES ===== #
# ========================== #
# kluge for secondary monitor
if params['fullScreen']:
screens = AppKit.NSScreen.screens()
screenRes = (int(screens[params['screenToShow']].frame().size.width), int(screens[params['screenToShow']].frame().size.height))
# screenRes = [1920, 1200]
if params['screenToShow']>0:
params['fullScreen'] = False
else:
screenRes = [800,600]
print "screenRes = [%d,%d]"%screenRes
# ========================== #
# ===== SET UP STIMULI ===== #
# ========================== #
from psychopy import visual
# Initialize deadline for displaying next frame
tNextFlip = [0.0] # wrapped in a list so the subfunctions below can modify it in place (a bare float could not be reassigned from inside them)
#create clocks and window
globalClock = core.Clock()#to keep track of time
trialClock = core.Clock()#to keep track of time
win = visual.Window(screenRes, fullscr=params['fullScreen'], allowGUI=False, monitor='testMonitor', screen=params['screenToShow'], units='deg', name='win',color=params['screenColor'],colorSpace='rgb255')
# create fixation cross
fCS = params['fixCrossSize'] # size (for brevity)
fCP = params['fixCrossPos'] # position (for brevity)
fixation = visual.ShapeStim(win,lineColor='#000000',lineWidth=3.0,vertices=((fCP[0]-fCS/2,fCP[1]),(fCP[0]+fCS/2,fCP[1]),(fCP[0],fCP[1]),(fCP[0],fCP[1]+fCS/2),(fCP[0],fCP[1]-fCS/2)),units='pix',closeShape=False,name='fixCross');
# create text stimuli
message1 = visual.TextStim(win, pos=[0,+.5], wrapWidth=1.5, color='#000000', alignHoriz='center', name='topMsg', text="aaa",units='norm')
message2 = visual.TextStim(win, pos=[0,-.5], wrapWidth=1.5, color='#000000', alignHoriz='center', name='bottomMsg', text="bbb",units='norm')
# draw stimuli
mainText = visual.TextStim(win, pos=[0,-0], wrapWidth=3.5, color='#000000', alignHoriz='center', name='mainText', text="bbb",units='norm')
# read prompts from text files
[topPrompts,bottomPrompts] = BasicPromptTools.ParsePromptFile(params['promptDir']+params['promptFile'])
print('%d prompts loaded from %s'%(len(topPrompts),params['promptFile']))
# ============================ #
# ======= SUBFUNCTIONS ======= #
# ============================ #
# increment time of next window flip
def AddToFlipTime(tIncrement=1.0):
tNextFlip[0] += tIncrement
# flip window as soon as possible
def SetFlipTimeToNow():
tNextFlip[0] = globalClock.getTime()
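# --- Illustrative note (not part of the original script) -------------------
# Scheduling pattern used throughout RunTrial() below: draw the next stimulus,
# busy-wait until globalClock reaches the shared deadline tNextFlip[0], flip
# the window, then push the deadline forward by the stimulus duration, e.g.:
#   mainText.draw()
#   while globalClock.getTime() < tNextFlip[0]:
#       pass
#   win.flip()
#   AddToFlipTime(params['stringDur'])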
def RunTrial(iTrial):
# Flush the key buffer and mouse movements
event.clearEvents()
# Decide Trial Params
isRememberTrial = random.random()<params['rememberProb']
isTrueTrial = random.random()<params['trueProb']
delayDur = random.uniform(params['minDelayDur'], params['maxDelayDur'])
if iTrial<params['nTrials']:
ISI = random.uniform(params['minISI'], params['maxISI'])
else:
ISI = params['tCoolDown']
startLetters = random.sample(params['letters'], params['nLetters']) # get list of letters to present to subject
startString = ''.join(startLetters) # turn them into a string
testLoc = random.randint(0,params['nLetters']-1) # the letter to test the subject on
if isRememberTrial:
if isTrueTrial:
testLetter = startLetters[testLoc];
else:
testLetter = random.choice(startLetters[:testLoc] + startLetters[testLoc+1:])
else: # alphabetize!
sortedLetters = sorted(startLetters)
if isTrueTrial:
testLetter = sortedLetters[testLoc]
else:
testLetter = random.choice(sortedLetters[:testLoc] + sortedLetters[testLoc+1:])
testString = '%s=%d?'%(testLetter,testLoc+1) # add 1 for 1-based numbering
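    # Worked example (illustrative values): startString='DKQBT', cue=ALPHABETIZE,
    # testLoc=2 -> sortedLetters='BDKQT', so the true test stimulus is 'K=3?';
    # a false trial would instead pick another sorted letter, e.g. 'Q=3?'.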
# display info to experimenter
print('Trial %d: isRemember = %d, delayDur = %.1f, ISI = %.1f, startString = %s, testString = %s, isTrueTrial = %d'%(iTrial,isRememberTrial,delayDur,ISI,startString,testString, isTrueTrial))
logging.log(level=logging.EXP, msg='Trial %d: isRemember = %d, delayDur = %.1f, ISI = %.1f, startString = %s, testString = %s, isTrueTrial = %d'%(iTrial,isRememberTrial,delayDur,ISI,startString,testString, isTrueTrial))
# Draw letters
mainText.setText(startString)
mainText.draw()
win.logOnFlip(level=logging.EXP, msg='Display string (%s)'%startString)
# Wait until it's time to display
while (globalClock.getTime()<tNextFlip[0]):
pass
# log & flip window to display image
win.flip()
tStringStart = globalClock.getTime() # record time when window flipped
# set up next win flip time after this one
AddToFlipTime(params['stringDur']) # add to tNextFlip[0]
# Draw fixation (PAUSE)
fixation.draw()
win.logOnFlip(level=logging.EXP, msg='Display fixation (pause)')
# Wait until it's time to display
while (globalClock.getTime()<tNextFlip[0]):
pass
# log & flip window to display image
win.flip()
# set up next win flip time after this one
AddToFlipTime(params['pauseDur']) # add to tNextFlip[0]
# Draw cue
if isRememberTrial:
mainText.setText(params['cues'][0])
win.logOnFlip(level=logging.EXP, msg='Display cue (%s)'%params['cues'][0])
else:
mainText.setText(params['cues'][1])
win.logOnFlip(level=logging.EXP, msg='Display cue (%s)'%params['cues'][1])
mainText.draw()
# Wait until it's time to display
while (globalClock.getTime()<tNextFlip[0]):
pass
# log & flip window to display image
win.flip()
tStimStart = globalClock.getTime() # record time when window flipped
# set up next win flip time after this one
AddToFlipTime(params['cueDur']) # add to tNextFlip[0]
# Draw fixation (DELAY)
fixation.draw()
win.logOnFlip(level=logging.EXP, msg='Display fixation (delay)')
# Wait until it's time to display
while (globalClock.getTime()<tNextFlip[0]):
pass
# log & flip window to display image
win.flip()
# set up next win flip time after this one
AddToFlipTime(delayDur) # add to tNextFlip[0]
# Draw test string
mainText.setText(testString)
mainText.draw()
win.logOnFlip(level=logging.EXP, msg='Display test stim (%s)'%testString)
# Wait until it's time to display
while (globalClock.getTime()<tNextFlip[0]):
pass
# log & flip window to display image
win.flip()
# set up next win flip time after this one
AddToFlipTime(params['testDur']) # add to tNextFlip[0]
# Wait for relevant key press or until 'testDur' seconds have passed
respKey = None
while (globalClock.getTime()<tNextFlip[0]): # until it's time for the next frame
# get new keys
newKeys = event.getKeys(keyList=params['respKeys']+['q','escape'],timeStamped=globalClock)
# check each keypress for escape or response keys
if len(newKeys)>0:
for thisKey in newKeys:
if thisKey[0] in ['q','escape']: # escape keys
CoolDown() # exit gracefully
elif thisKey[0] in params['respKeys'] and respKey == None: # only take first keypress
respKey = thisKey # record keypress
# Display the fixation cross
if ISI>0:# if there should be a fixation cross
fixation.draw() # draw it
win.logOnFlip(level=logging.EXP, msg='Display fixation (ISI)')
win.flip()
AddToFlipTime(ISI)
return
# Handle end of a session
def CoolDown():
# display cool-down message
message1.setText("That's the end! ")
message2.setText("Press 'q' or 'escape' to end the session.")
win.logOnFlip(level=logging.EXP, msg='Display TheEnd')
message1.draw()
message2.draw()
win.flip()
thisKey = event.waitKeys(keyList=['q','escape'])
# exit
core.quit()
# =========================== #
# ======= RUN PROMPTS ======= #
# =========================== #
# display prompts
if not params['skipPrompts']:
BasicPromptTools.RunPrompts(topPrompts,bottomPrompts,win,message1,message2)
# wait for scanner
message1.setText("Waiting for scanner to start...")
message2.setText("(Press '%c' to override.)"%params['triggerKey'].upper())
message1.draw()
message2.draw()
win.logOnFlip(level=logging.EXP, msg='Display WaitingForScanner')
win.flip()
event.waitKeys(keyList=params['triggerKey'])
tStartSession = globalClock.getTime()
AddToFlipTime(tStartSession+params['tStartup'])
# wait before first stimulus
fixation.draw()
win.logOnFlip(level=logging.EXP, msg='Display fixation')
win.flip()
# =========================== #
# ===== MAIN EXPERIMENT ===== #
# =========================== #
# log experiment start and set up
logging.log(level=logging.EXP, msg='---START EXPERIMENT---')
# run trials
for iTrial in range(0,params['nTrials']):
# display text
RunTrial(iTrial)
if iTrial == params['nTrials']-1: # last trial: add cool-down time
AddToFlipTime(params['tCoolDown'])
# wait before 'the end' text
fixation.draw()
win.flip()
while (globalClock.getTime()<tNextFlip[0]):
pass
# Log end of experiment
logging.log(level=logging.EXP, msg='--- END EXPERIMENT ---')
# exit experiment
CoolDown()
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import time
from common import chrome_proxy_metrics
from common import network_metrics
from common.chrome_proxy_metrics import ChromeProxyMetricException
from telemetry.page import page_test
from telemetry.value import scalar
from metrics import Metric
class ChromeProxyMetric(network_metrics.NetworkMetric):
"""A Chrome proxy timeline metric."""
def __init__(self):
super(ChromeProxyMetric, self).__init__()
self.compute_data_saving = True
def SetEvents(self, events):
"""Used for unittest."""
self._events = events
def ResponseFromEvent(self, event):
return chrome_proxy_metrics.ChromeProxyResponse(event)
def AddResults(self, tab, results):
raise NotImplementedError
def AddResultsForDataSaving(self, tab, results):
resources_via_proxy = 0
resources_from_cache = 0
resources_direct = 0
super(ChromeProxyMetric, self).AddResults(tab, results)
for resp in self.IterResponses(tab):
if resp.response.served_from_cache:
resources_from_cache += 1
if resp.HasChromeProxyViaHeader():
resources_via_proxy += 1
else:
resources_direct += 1
if resources_from_cache + resources_via_proxy + resources_direct == 0:
raise ChromeProxyMetricException, (
'Expected at least one response, but zero responses were received.')
results.AddValue(scalar.ScalarValue(
results.current_page, 'resources_via_proxy', 'count',
resources_via_proxy))
results.AddValue(scalar.ScalarValue(
results.current_page, 'resources_from_cache', 'count',
resources_from_cache))
results.AddValue(scalar.ScalarValue(
results.current_page, 'resources_direct', 'count', resources_direct))
def AddResultsForHeaderValidation(self, tab, results):
via_count = 0
for resp in self.IterResponses(tab):
if resp.IsValidByViaHeader():
via_count += 1
else:
r = resp.response
raise ChromeProxyMetricException, (
'%s: Via header (%s) is not valid (refer=%s, status=%d)' % (
r.url, r.GetHeader('Via'), r.GetHeader('Referer'), r.status))
if via_count == 0:
raise ChromeProxyMetricException, (
'Expected at least one response through the proxy, but zero such '
'responses were received.')
results.AddValue(scalar.ScalarValue(
results.current_page, 'checked_via_header', 'count', via_count))
def AddResultsForLatency(self, tab, results):
# TODO(bustamante): This is a hack to workaround crbug.com/467174,
# once fixed just pull down window.performance.timing object and
# reference that everywhere.
load_event_start = tab.EvaluateJavaScript(
'window.performance.timing.loadEventStart')
navigation_start = tab.EvaluateJavaScript(
'window.performance.timing.navigationStart')
dom_content_loaded_event_start = tab.EvaluateJavaScript(
'window.performance.timing.domContentLoadedEventStart')
fetch_start = tab.EvaluateJavaScript(
'window.performance.timing.fetchStart')
request_start = tab.EvaluateJavaScript(
'window.performance.timing.requestStart')
domain_lookup_end = tab.EvaluateJavaScript(
'window.performance.timing.domainLookupEnd')
domain_lookup_start = tab.EvaluateJavaScript(
'window.performance.timing.domainLookupStart')
connect_end = tab.EvaluateJavaScript(
'window.performance.timing.connectEnd')
connect_start = tab.EvaluateJavaScript(
'window.performance.timing.connectStart')
response_end = tab.EvaluateJavaScript(
'window.performance.timing.responseEnd')
response_start = tab.EvaluateJavaScript(
'window.performance.timing.responseStart')
# NavigationStart relative markers in milliseconds.
load_start = (float(load_event_start) - navigation_start)
results.AddValue(scalar.ScalarValue(
results.current_page, 'load_start', 'ms', load_start))
dom_content_loaded_start = (
float(dom_content_loaded_event_start) - navigation_start)
results.AddValue(scalar.ScalarValue(
results.current_page, 'dom_content_loaded_start', 'ms',
dom_content_loaded_start))
fetch_start = (float(fetch_start) - navigation_start)
results.AddValue(scalar.ScalarValue(
results.current_page, 'fetch_start', 'ms', fetch_start,
important=False))
request_start = (float(request_start) - navigation_start)
results.AddValue(scalar.ScalarValue(
results.current_page, 'request_start', 'ms', request_start,
important=False))
response_start = (float(response_start) - navigation_start)
results.AddValue(scalar.ScalarValue(
results.current_page, 'response_start', 'ms', response_start,
important=False))
response_end = (float(response_end) - navigation_start)
results.AddValue(scalar.ScalarValue(
results.current_page, 'response_end', 'ms', response_end,
important=False))
# Phase measurements in milliseconds.
domain_lookup_duration = (float(domain_lookup_end) - domain_lookup_start)
results.AddValue(scalar.ScalarValue(
results.current_page, 'domain_lookup_duration', 'ms',
domain_lookup_duration, important=False))
connect_duration = (float(connect_end) - connect_start)
results.AddValue(scalar.ScalarValue(
results.current_page, 'connect_duration', 'ms', connect_duration,
important=False))
request_duration = (float(response_start) - request_start)
results.AddValue(scalar.ScalarValue(
results.current_page, 'request_duration', 'ms', request_duration,
important=False))
response_duration = (float(response_end) - response_start)
results.AddValue(scalar.ScalarValue(
results.current_page, 'response_duration', 'ms', response_duration,
important=False))
def AddResultsForExtraViaHeader(self, tab, results, extra_via_header):
extra_via_count = 0
for resp in self.IterResponses(tab):
if resp.HasChromeProxyViaHeader():
if resp.HasExtraViaHeader(extra_via_header):
extra_via_count += 1
else:
raise ChromeProxyMetricException, (
'%s: Should have via header %s.' % (resp.response.url,
extra_via_header))
results.AddValue(scalar.ScalarValue(
results.current_page, 'extra_via_header', 'count', extra_via_count))
def GetClientTypeFromRequests(self, tab):
"""Get the Chrome-Proxy client type value from requests made in this tab.
Returns:
The client type value from the first request made in this tab that
specifies a client type in the Chrome-Proxy request header. See
ChromeProxyResponse.GetChromeProxyClientType for more details about the
Chrome-Proxy client type. Returns None if none of the requests made in
this tab specify a client type.
"""
for resp in self.IterResponses(tab):
client_type = resp.GetChromeProxyClientType()
if client_type:
return client_type
return None
def AddResultsForClientType(self, tab, results, client_type,
bypass_for_client_type):
via_count = 0
bypass_count = 0
for resp in self.IterResponses(tab):
if resp.HasChromeProxyViaHeader():
via_count += 1
if client_type.lower() == bypass_for_client_type.lower():
raise ChromeProxyMetricException, (
'%s: Response for client of type "%s" has via header, but should '
'be bypassed.' % (resp.response.url, bypass_for_client_type))
elif resp.ShouldHaveChromeProxyViaHeader():
bypass_count += 1
if client_type.lower() != bypass_for_client_type.lower():
raise ChromeProxyMetricException, (
'%s: Response missing via header. Only "%s" clients should '
'bypass for this page, but this client is "%s".' % (
resp.response.url, bypass_for_client_type, client_type))
if via_count + bypass_count == 0:
raise ChromeProxyMetricException, (
'Expected at least one response that was eligible to be proxied, but '
'zero such responses were received.')
results.AddValue(scalar.ScalarValue(
results.current_page, 'via', 'count', via_count))
results.AddValue(scalar.ScalarValue(
results.current_page, 'bypass', 'count', bypass_count))
def AddResultsForLoFi(self, tab, results):
lo_fi_request_count = 0
lo_fi_response_count = 0
for resp in self.IterResponses(tab):
if 'favicon.ico' in resp.response.url:
continue
if resp.HasChromeProxyLoFiRequest():
lo_fi_request_count += 1
else:
raise ChromeProxyMetricException, (
'%s: LoFi not in request header.' % (resp.response.url))
if resp.HasChromeProxyLoFiResponse():
lo_fi_response_count += 1
else:
raise ChromeProxyMetricException, (
'%s: LoFi not in response header.' % (resp.response.url))
if resp.content_length > 100:
raise ChromeProxyMetricException, (
'Image %s is %d bytes. Expecting less than 100 bytes.' %
(resp.response.url, resp.content_length))
if lo_fi_request_count == 0:
raise ChromeProxyMetricException, (
'Expected at least one LoFi request, but zero such requests were '
'sent.')
if lo_fi_response_count == 0:
raise ChromeProxyMetricException, (
'Expected at least one LoFi response, but zero such responses were '
'received.')
results.AddValue(scalar.ScalarValue(
results.current_page, 'lo_fi_request', 'count', lo_fi_request_count))
results.AddValue(scalar.ScalarValue(
results.current_page, 'lo_fi_response', 'count', lo_fi_response_count))
super(ChromeProxyMetric, self).AddResults(tab, results)
def AddResultsForLoFiPreview(self, tab, results):
lo_fi_preview_request_count = 0
lo_fi_preview_exp_request_count = 0
lo_fi_preview_response_count = 0
for resp in self.IterResponses(tab):
if '/csi?' in resp.response.url:
continue
if 'favicon.ico' in resp.response.url:
continue
if resp.response.url.startswith('data:'):
continue
if resp.HasChromeProxyLoFiPreviewRequest():
lo_fi_preview_request_count += 1
if resp.HasChromeProxyLoFiPreviewExpRequest():
lo_fi_preview_exp_request_count += 1
if resp.HasChromeProxyLoFiPreviewResponse():
lo_fi_preview_response_count += 1
if resp.HasChromeProxyLoFiRequest():
raise ChromeProxyMetricException, (
'%s: Lo-Fi directive should not be in preview request header.' %
(resp.response.url))
if lo_fi_preview_request_count == 0:
raise ChromeProxyMetricException, (
'Expected at least one LoFi preview request, but zero such requests '
'were sent.')
if lo_fi_preview_exp_request_count == 0:
raise ChromeProxyMetricException, (
'Expected at least one LoFi preview exp=ignore_preview_blacklist '
'request, but zero such requests were sent.')
if lo_fi_preview_response_count == 0:
raise ChromeProxyMetricException, (
'Expected at least one LoFi preview response, but zero such '
'responses were received.')
results.AddValue(
scalar.ScalarValue(
results.current_page, 'lo_fi_preview_request',
'count', lo_fi_preview_request_count))
results.AddValue(
scalar.ScalarValue(
results.current_page, 'lo_fi_preview_exp_request',
'count', lo_fi_preview_exp_request_count))
results.AddValue(
scalar.ScalarValue(
results.current_page, 'lo_fi_preview_response',
'count', lo_fi_preview_response_count))
super(ChromeProxyMetric, self).AddResults(tab, results)
def AddResultsForPassThrough(self, tab, results):
compressed_count = 0
compressed_size = 0
pass_through_count = 0
pass_through_size = 0
for resp in self.IterResponses(tab):
if 'favicon.ico' in resp.response.url:
continue
if not resp.HasChromeProxyViaHeader():
r = resp.response
raise ChromeProxyMetricException, (
'%s: Should have Via header (%s) (refer=%s, status=%d)' % (
r.url, r.GetHeader('Via'), r.GetHeader('Referer'), r.status))
if resp.HasChromeProxyPassThroughRequest():
pass_through_count += 1
pass_through_size = resp.content_length
else:
compressed_count += 1
compressed_size = resp.content_length
if pass_through_count != 1:
raise ChromeProxyMetricException, (
'Expected exactly one Chrome-Proxy pass-through request, but %d '
'such requests were sent.' % (pass_through_count))
if compressed_count != 1:
raise ChromeProxyMetricException, (
'Expected exactly one compressed request, but %d such requests were '
'received.' % (compressed_count))
if compressed_size >= pass_through_size:
raise ChromeProxyMetricException, (
'Compressed image is %d bytes and pass-through image is %d. '
'Expecting compressed image size to be less than pass-through '
'image.' % (compressed_size, pass_through_size))
results.AddValue(scalar.ScalarValue(
results.current_page, 'compressed', 'count', compressed_count))
results.AddValue(scalar.ScalarValue(
results.current_page, 'compressed_size', 'bytes', compressed_size))
results.AddValue(scalar.ScalarValue(
results.current_page, 'pass_through', 'count', pass_through_count))
results.AddValue(scalar.ScalarValue(
results.current_page, 'pass_through_size', 'bytes', pass_through_size))
def AddResultsForHTTPSBypass(self, tab, results):
bypass_count = 0
for resp in self.IterResponses(tab):
# Only check HTTPS URLs.
if "https://" not in resp.response.url:
continue
# If a Chrome-Proxy Via header appears, fail the test.
if resp.HasChromeProxyViaHeader():
r = resp.response
raise ChromeProxyMetricException, (
'%s: Should not have Via header (%s) (refer=%s, status=%d)' % (
r.url, r.GetHeader('Via'), r.GetHeader('Referer'), r.status))
bypass_count += 1
if bypass_count == 0:
raise ChromeProxyMetricException, (
'Expected at least one HTTPS response, but zero such '
'responses were received.')
results.AddValue(scalar.ScalarValue(
results.current_page, 'bypass', 'count', bypass_count))
def AddResultsForHTML5Test(self, tab, results):
# Wait for the number of "points" of HTML5 compatibility to appear to verify
# the HTML5 elements have loaded successfully.
tab.WaitForJavaScriptExpression(
'document.getElementsByClassName("pointsPanel")', 15)
def AddResultsForYouTube(self, tab, results):
# Wait for the video to begin playing.
tab.WaitForJavaScriptExpression(
'window.playerState == YT.PlayerState.PLAYING', 30)
def AddResultsForBypass(self, tab, results, url_pattern=""):
bypass_count = 0
skipped_count = 0
for resp in self.IterResponses(tab):
# Only check the URLs that contain the specified pattern.
if url_pattern and url_pattern not in resp.response.url:
skipped_count += 1
continue
if resp.HasChromeProxyViaHeader():
r = resp.response
raise ChromeProxyMetricException, (
'%s: Should not have Via header (%s) (refer=%s, status=%d)' % (
r.url, r.GetHeader('Via'), r.GetHeader('Referer'), r.status))
bypass_count += 1
if bypass_count == 0:
raise ChromeProxyMetricException, (
'Expected at least one response to be bypassed, but zero such '
'responses were received.')
results.AddValue(scalar.ScalarValue(
results.current_page, 'bypass', 'count', bypass_count))
results.AddValue(scalar.ScalarValue(
results.current_page, 'skipped', 'count', skipped_count))
def AddResultsForCorsBypass(self, tab, results):
eligible_response_count = 0
bypass_count = 0
bypasses = {}
for resp in self.IterResponses(tab):
logging.warn('got a resource %s' % (resp.response.url))
for resp in self.IterResponses(tab):
if resp.ShouldHaveChromeProxyViaHeader():
eligible_response_count += 1
if not resp.HasChromeProxyViaHeader():
bypass_count += 1
elif resp.response.status == 502:
bypasses[resp.response.url] = 0
for resp in self.IterResponses(tab):
if resp.ShouldHaveChromeProxyViaHeader():
if not resp.HasChromeProxyViaHeader():
if resp.response.status == 200:
if resp.response.url in bypasses:
bypasses[resp.response.url] = bypasses[resp.response.url] + 1
for url in bypasses:
if bypasses[url] == 0:
raise ChromeProxyMetricException, (
'%s: Got a 502 without a subsequent 200' % (url))
elif bypasses[url] > 1:
raise ChromeProxyMetricException, (
'%s: Got a 502 and multiple 200s: %d' % (url, bypasses[url]))
if bypass_count == 0:
raise ChromeProxyMetricException, (
'At least one response should be bypassed. '
'(eligible_response_count=%d, bypass_count=%d)\n' % (
eligible_response_count, bypass_count))
results.AddValue(scalar.ScalarValue(
results.current_page, 'cors_bypass', 'count', bypass_count))
def AddResultsForBlockOnce(self, tab, results):
eligible_response_count = 0
via_proxy = 0
visited_urls = []
for resp in self.IterResponses(tab):
# Add debug information in case of failure
visited_urls.append(resp.response.url)
# Block-once test URLs (Data Reduction Proxy always returns
# block-once) should not have the Chrome-Compression-Proxy Via header.
if (IsTestUrlForBlockOnce(resp.response.url)):
eligible_response_count += 1
if resp.HasChromeProxyViaHeader():
raise ChromeProxyMetricException, (
'Response has a Chrome-Compression-Proxy Via header: ' +
resp.response.url)
elif resp.ShouldHaveChromeProxyViaHeader():
via_proxy += 1
if not resp.HasChromeProxyViaHeader():
# For all other URLs, confirm that via header is present if expected.
raise ChromeProxyMetricException, (
'Missing Chrome-Compression-Proxy Via header.' +
resp.response.url)
if via_proxy == 0:
raise ChromeProxyMetricException, (
'None of the requests went via data reduction proxy')
if (eligible_response_count != 2):
raise ChromeProxyMetricException, (
'Did not make expected number of requests to whitelisted block-once'
' test URLs. Expected: 2, Actual: %s, Visited URLs: %s' %
(eligible_response_count, visited_urls))
results.AddValue(scalar.ScalarValue(results.current_page,
'eligible_responses', 'count', 2))
results.AddValue(scalar.ScalarValue(results.current_page,
'via_proxy', 'count', via_proxy))
def AddResultsForSafebrowsingOn(self, tab, results):
results.AddValue(scalar.ScalarValue(
results.current_page, 'safebrowsing', 'timeout responses', 1))
def AddResultsForSafebrowsingOff(self, tab, results):
response_count = 0
for resp in self.IterResponses(tab):
# Data reduction proxy should return the real response for sites with
# malware.
response_count += 1
if not resp.HasChromeProxyViaHeader():
r = resp.response
raise ChromeProxyMetricException, (
'%s: Safebrowsing feature should be off for desktop and webview.\n'
'Response: status=(%d, %s)\nHeaders:\n %s' % (
r.url, r.status, r.status_text, r.headers))
if response_count == 0:
raise ChromeProxyMetricException, (
'Safebrowsing test failed: No valid responses received')
results.AddValue(scalar.ScalarValue(
results.current_page, 'safebrowsing', 'responses', response_count))
def AddResultsForHTTPFallback(self, tab, results):
via_fallback_count = 0
for resp in self.IterResponses(tab):
if resp.ShouldHaveChromeProxyViaHeader():
# All responses should have come through the HTTP fallback proxy, which
# means that they should have the via header, and if a remote port is
# defined, it should be port 80.
if (not resp.HasChromeProxyViaHeader() or
(resp.remote_port and resp.remote_port != 80)):
r = resp.response
raise ChromeProxyMetricException, (
'%s: Should have come through the fallback proxy.\n'
'Response: remote_port=%s status=(%d, %s)\nHeaders:\n %s' % (
r.url, str(resp.remote_port), r.status, r.status_text,
r.headers))
via_fallback_count += 1
if via_fallback_count == 0:
raise ChromeProxyMetricException, (
'Expected at least one response through the fallback proxy, but zero '
'such responses were received.')
results.AddValue(scalar.ScalarValue(
results.current_page, 'via_fallback', 'count', via_fallback_count))
def AddResultsForHTTPToDirectFallback(self, tab, results,
fallback_response_host):
via_fallback_count = 0
bypass_count = 0
responses = self.IterResponses(tab)
# The first response(s) coming from fallback_response_host should be
# through the HTTP fallback proxy.
resp = next(responses, None)
while resp and fallback_response_host in resp.response.url:
if fallback_response_host in resp.response.url:
if (not resp.HasChromeProxyViaHeader() or resp.remote_port != 80):
r = resp.response
raise ChromeProxyMetricException, (
'Response for %s should have come through the fallback proxy.\n'
'Response: remote_port=%s status=(%d, %s)\nHeaders:\n %s' % (
r.url, str(resp.remote_port), r.status, r.status_text,
r.headers))
else:
via_fallback_count += 1
resp = next(responses, None)
# All other responses should be bypassed.
while resp:
if resp.HasChromeProxyViaHeader():
r = resp.response
raise ChromeProxyMetricException, (
'Response for %s should not have via header.\n'
'Response: status=(%d, %s)\nHeaders:\n %s' % (
r.url, r.status, r.status_text, r.headers))
else:
bypass_count += 1
resp = next(responses, None)
# At least one response should go through the http proxy and be bypassed.
if via_fallback_count == 0 or bypass_count == 0:
raise ChromeProxyMetricException(
'There should be at least one response through the fallback proxy '
'(actual %s) and at least one bypassed response (actual %s)' %
(via_fallback_count, bypass_count))
results.AddValue(scalar.ScalarValue(
results.current_page, 'via_fallback', 'count', via_fallback_count))
results.AddValue(scalar.ScalarValue(
results.current_page, 'bypass', 'count', bypass_count))
def AddResultsForReenableAfterBypass(
self, tab, results, bypass_seconds_min, bypass_seconds_max):
"""Verify results for a re-enable after bypass test.
Args:
tab: the tab for the test.
results: the results object to add the results values to.
bypass_seconds_min: the minimum duration of the bypass.
bypass_seconds_max: the maximum duration of the bypass.
"""
bypass_count = 0
via_count = 0
for resp in self.IterResponses(tab):
if resp.HasChromeProxyViaHeader():
r = resp.response
raise ChromeProxyMetricException, (
'Response for %s should not have via header.\n'
'Response: status=(%d, %s)\nHeaders:\n %s' % (
r.url, r.status, r.status_text, r.headers))
else:
bypass_count += 1
# Wait until 30 seconds before the bypass should expire, and fetch a page.
# It should not have the via header because the proxy should still be
# bypassed.
time.sleep(bypass_seconds_min - 30)
tab.ClearCache(force=True)
before_metrics = ChromeProxyMetric()
before_metrics.Start(results.current_page, tab)
tab.Navigate('http://chromeproxy-test.appspot.com/default')
tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 10)
before_metrics.Stop(results.current_page, tab)
for resp in before_metrics.IterResponses(tab):
if resp.HasChromeProxyViaHeader():
r = resp.response
raise ChromeProxyMetricException, (
'Response for %s should not have via header; proxy should still '
'be bypassed.\nResponse: status=(%d, %s)\nHeaders:\n %s' % (
r.url, r.status, r.status_text, r.headers))
else:
bypass_count += 1
if bypass_count == 0:
raise ChromeProxyMetricException, (
'Expected at least one response to be bypassed before the bypass '
'expired, but zero such responses were received.')
# Wait until 30 seconds after the bypass should expire, and fetch a page. It
# should have the via header since the proxy should no longer be bypassed.
time.sleep((bypass_seconds_max + 30) - (bypass_seconds_min - 30))
tab.ClearCache(force=True)
after_metrics = ChromeProxyMetric()
after_metrics.Start(results.current_page, tab)
tab.Navigate('http://chromeproxy-test.appspot.com/default')
tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 10)
after_metrics.Stop(results.current_page, tab)
for resp in after_metrics.IterResponses(tab):
if not resp.HasChromeProxyViaHeader():
r = resp.response
raise ChromeProxyMetricException, (
'Response for %s should have via header; proxy should no longer '
'be bypassed.\nResponse: status=(%d, %s)\nHeaders:\n %s' % (
r.url, r.status, r.status_text, r.headers))
else:
via_count += 1
if via_count == 0:
raise ChromeProxyMetricException, (
'Expected at least one response through the proxy after the bypass '
'expired, but zero such responses were received.')
results.AddValue(scalar.ScalarValue(
results.current_page, 'bypass', 'count', bypass_count))
results.AddValue(scalar.ScalarValue(
results.current_page, 'via', 'count', via_count))
def AddResultsForReenableAfterSetBypass(
self, tab, results, bypass_seconds):
"""Verify results for a re-enable after bypass test.
Args:
tab: the tab for the test.
results: the results object to add the results values to.
bypass_seconds: the duration of the bypass
"""
bypass_count = 0
via_count = 0
# Verify the bypass url was bypassed.
for resp in self.IterResponses(tab):
if resp.HasChromeProxyViaHeader():
r = resp.response
raise ChromeProxyMetricException, (
'Response for %s should not have via header.\n'
'Response: status=(%d, %s)\nHeaders:\n %s' % (
r.url, r.status, r.status_text, r.headers))
else:
bypass_count += 1
# Navigate to a test page and verify it's being bypassed.
tab.ClearCache(force=True)
before_metrics = ChromeProxyMetric()
before_metrics.Start(results.current_page, tab)
tab.Navigate('http://chromeproxy-test.appspot.com/default')
tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 10)
before_metrics.Stop(results.current_page, tab)
for resp in before_metrics.IterResponses(tab):
if resp.HasChromeProxyViaHeader():
r = resp.response
raise ChromeProxyMetricException, (
'Response for %s should not have via header; proxy should still '
'be bypassed.\nResponse: status=(%d, %s)\nHeaders:\n %s' % (
r.url, r.status, r.status_text, r.headers))
else:
bypass_count += 1
if bypass_count == 0:
raise ChromeProxyMetricException, (
'Expected at least one response to be bypassed before the bypass '
'expired, but zero such responses were received.')
# Wait for the bypass to expire. With the overhead of the previous steps,
# the bypass duration will have been exceeded after this delay.
time.sleep(bypass_seconds)
# Navigate to the test page again and verify data saver is no longer
# bypassed.
tab.ClearCache(force=True)
after_metrics = ChromeProxyMetric()
after_metrics.Start(results.current_page, tab)
tab.Navigate('http://chromeproxy-test.appspot.com/default')
tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 10)
after_metrics.Stop(results.current_page, tab)
for resp in after_metrics.IterResponses(tab):
if not resp.HasChromeProxyViaHeader():
r = resp.response
raise ChromeProxyMetricException, (
'Response for %s should have via header; proxy should no longer '
'be bypassed.\nResponse: status=(%d, %s)\nHeaders:\n %s' % (
r.url, r.status, r.status_text, r.headers))
else:
via_count += 1
if via_count == 0:
raise ChromeProxyMetricException, (
'Expected at least one response through the proxy after the bypass '
'expired, but zero such responses were received.')
results.AddValue(scalar.ScalarValue(
results.current_page, 'bypass', 'count', bypass_count))
results.AddValue(scalar.ScalarValue(
results.current_page, 'via', 'count', via_count))
def AddResultsForClientConfig(self, tab, results):
resources_with_old_auth = 0
resources_with_new_auth = 0
super(ChromeProxyMetric, self).AddResults(tab, results)
for resp in self.IterResponses(tab):
if resp.GetChromeProxyRequestHeaderValue('s') != None:
resources_with_new_auth += 1
if resp.GetChromeProxyRequestHeaderValue('ps') != None:
resources_with_old_auth += 1
if resources_with_old_auth != 0:
raise ChromeProxyMetricException, (
'Expected zero responses with the old authentication scheme but '
'received %d.' % resources_with_old_auth)
if resources_with_new_auth == 0:
raise ChromeProxyMetricException, (
'Expected at least one response with the new authentication scheme, '
'but zero such responses were received.')
results.AddValue(scalar.ScalarValue(
results.current_page, 'new_auth', 'count', resources_with_new_auth))
results.AddValue(scalar.ScalarValue(
results.current_page, 'old_auth', 'count', resources_with_old_auth))
PROXIED = 'proxied'
DIRECT = 'direct'
class ChromeProxyVideoMetric(network_metrics.NetworkMetric):
"""Metrics for video pages.
Wraps the video metrics produced by videowrapper.js, such as the video
duration and size in pixels. Also checks a few basic HTTP response headers
such as Content-Type and Content-Length in the video responses.
"""
def __init__(self, tab):
super(ChromeProxyVideoMetric, self).__init__()
with open(os.path.join(os.path.dirname(__file__), 'videowrapper.js')) as f:
js = f.read()
tab.ExecuteJavaScript(js)
def Start(self, page, tab):
tab.ExecuteJavaScript('window.__chromeProxyCreateVideoWrappers()')
self.videoMetrics = None
super(ChromeProxyVideoMetric, self).Start(page, tab)
def Stop(self, page, tab):
tab.WaitForJavaScriptExpression('window.__chromeProxyVideoLoaded', 30)
m = tab.EvaluateJavaScript('window.__chromeProxyVideoMetrics')
# Now wait for the video to stop playing.
# Give it 2x the total duration to account for buffering.
waitTime = 2 * m['video_duration']
tab.WaitForJavaScriptExpression('window.__chromeProxyVideoEnded', waitTime)
# Load the final metrics.
m = tab.EvaluateJavaScript('window.__chromeProxyVideoMetrics')
self.videoMetrics = m
# Cast this to an integer as it is often approximate (for an unknown reason)
m['video_duration'] = int(m['video_duration'])
super(ChromeProxyVideoMetric, self).Stop(page, tab)
def ResponseFromEvent(self, event):
return chrome_proxy_metrics.ChromeProxyResponse(event)
def AddResults(self, tab, results):
raise NotImplementedError
def AddResultsForProxied(self, tab, results):
return self._AddResultsShared(PROXIED, tab, results)
def AddResultsForDirect(self, tab, results):
return self._AddResultsShared(DIRECT, tab, results)
def _AddResultsShared(self, kind, tab, results):
def err(s):
raise ChromeProxyMetricException, s
# Should have played the video.
if not self.videoMetrics['ready']:
err('%s: video not played' % kind)
# Should have an HTTP response for the video.
wantContentType = 'video/webm' if kind == PROXIED else 'video/mp4'
found = False
for r in self.IterResponses(tab):
resp = r.response
if kind == DIRECT and r.HasChromeProxyViaHeader():
err('%s: page has proxied Via header' % kind)
if resp.GetHeader('Content-Type') != wantContentType:
continue
if found:
err('%s: multiple video responses' % kind)
found = True
cl = resp.GetHeader('Content-Length')
xocl = resp.GetHeader('X-Original-Content-Length')
if cl != None:
self.videoMetrics['content_length_header'] = int(cl)
if xocl != None:
self.videoMetrics['x_original_content_length_header'] = int(xocl)
# Should have CL always.
if cl == None:
err('%s: missing ContentLength' % kind)
# Proxied: should have CL < XOCL
# Direct: should not have XOCL
if kind == PROXIED:
if xocl == None or int(cl) >= int(xocl):
err('%s: bigger response (%s > %s)' % (kind, str(cl), str(xocl)))
else:
if xocl != None:
err('%s: has XOriginalContentLength' % kind)
if not found:
err('%s: missing video response' % kind)
# Finally, add all the metrics to the results.
for (k, v) in self.videoMetrics.iteritems():
k = "%s_%s" % (k, kind)
results.AddValue(scalar.ScalarValue(results.current_page, k, "", v))
class ChromeProxyInstrumentedVideoMetric(Metric):
"""Metric for pages instrumented to evaluate video transcoding."""
def __init__(self):
super(ChromeProxyInstrumentedVideoMetric, self).__init__()
def Stop(self, page, tab):
waitTime = tab.EvaluateJavaScript('test.waitTime')
tab.WaitForJavaScriptExpression('test.metrics.complete', waitTime)
super(ChromeProxyInstrumentedVideoMetric, self).Stop(page, tab)
def AddResults(self, tab, results):
metrics = tab.EvaluateJavaScript('test.metrics')
for (k, v) in metrics.iteritems():
results.AddValue(scalar.ScalarValue(results.current_page, k, '', v))
try:
complete = metrics['complete']
failed = metrics['failed']
if not complete:
raise ChromeProxyMetricException, 'Test not complete'
if failed:
raise ChromeProxyMetricException, 'failed'
except KeyError:
raise ChromeProxyMetricException, 'No metrics found'
# Returns whether |url| is a block-once test URL. Data Reduction Proxy has been
# configured to always return block-once for these URLs.
def IsTestUrlForBlockOnce(url):
return (url == 'http://check.googlezip.net/blocksingle/' or
url == ('http://chromeproxy-test.appspot.com/default?respBody=T0s='
'&respHeader=eyJBY2Nlc3MtQ29udHJvbC1BbGxvdy1PcmlnaW4iOlsiKiJ'
'dfQ==&respStatus=200&flywheelAction=block-once'))
|
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from heatclient import client as heat_client
from oslo.config import cfg
from sahara import context
from sahara import exceptions as ex
from sahara.i18n import _
from sahara.openstack.common import log as logging
from sahara.utils import files as f
from sahara.utils import general as g
from sahara.utils.openstack import base
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
SSH_PORT = 22
def client():
ctx = context.current()
heat_url = base.url_for(ctx.service_catalog, 'orchestration')
return heat_client.Client('1', heat_url, token=ctx.token)
def get_stack(stack_name):
heat = client()
for stack in heat.stacks.list(filters={'stack_name': stack_name}):
return stack
raise ex.NotFoundException(_('Failed to find stack %(stack)s')
% {'stack': stack_name})
def wait_stack_completion(stack):
# NOTE: an empty status is also expected, because the stack status
# may not have been set in the heat database yet
while stack.status in ['IN_PROGRESS', '']:
context.sleep(1)
stack.get()
if stack.status != 'COMPLETE':
raise ex.HeatStackException(stack.stack_status)
def _get_inst_name(cluster_name, ng_name, index):
return g.generate_instance_name(cluster_name, ng_name, index + 1)
def _get_aa_group_name(cluster_name):
return g.generate_aa_group_name(cluster_name)
def _get_port_name(inst_name):
return '%s-port' % inst_name
def _get_floating_name(inst_name):
return '%s-floating' % inst_name
def _get_floating_assoc_name(inst_name):
return '%s-floating-assoc' % inst_name
def _get_volume_name(inst_name, volume_idx):
return '%s-volume-%i' % (inst_name, volume_idx)
def _get_volume_attach_name(inst_name, volume_idx):
return '%s-volume-attachment-%i' % (inst_name, volume_idx)
def _load_template(template_name, fields):
template_file = f.get_file_text('resources/%s' % template_name)
return template_file.rstrip() % fields
def _prepare_userdata(userdata):
"""Converts userdata as a text into format consumable by heat template."""
userdata = userdata.replace('"', '\\"')
lines = userdata.splitlines()
return '"' + '",\n"'.join(lines) + '"'
class ClusterTemplate(object):
def __init__(self, cluster):
self.cluster = cluster
self.node_groups_extra = {}
def add_node_group_extra(self, node_group_id, node_count,
gen_userdata_func):
self.node_groups_extra[node_group_id] = {
'node_count': node_count,
'gen_userdata_func': gen_userdata_func
}
# Consider using a single Jinja template for all this
def instantiate(self, update_existing, disable_rollback=True):
main_tmpl = _load_template('main.heat',
{'resources': self._serialize_resources()})
heat = client()
kwargs = {
'stack_name': self.cluster.name,
'timeout_mins': 180,
'disable_rollback': disable_rollback,
'parameters': {},
'template': json.loads(main_tmpl)}
if not update_existing:
heat.stacks.create(**kwargs)
else:
for stack in heat.stacks.list():
if stack.stack_name == self.cluster.name:
stack.update(**kwargs)
break
return ClusterStack(self, get_stack(self.cluster.name))
def _need_aa_server_group(self, node_group):
for node_process in node_group.node_processes:
if node_process in self.cluster.anti_affinity:
return True
return False
def _get_anti_affinity_scheduler_hints(self, node_group):
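# When anti-affinity applies, this returns a fragment of the form
# '"scheduler_hints" : {"group": {"Ref": "<aa-group-name>"}},' where <aa-group-name>
# is whatever _get_aa_group_name() produces for this cluster.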
if not self._need_aa_server_group(node_group):
return ''
return ('"scheduler_hints" : %s,' %
json.dumps({"group": {"Ref": _get_aa_group_name(
self.cluster.name)}}))
def _serialize_resources(self):
resources = []
if self.cluster.anti_affinity:
resources.extend(self._serialize_aa_server_group())
for ng in self.cluster.node_groups:
if ng.auto_security_group:
resources.extend(self._serialize_auto_security_group(ng))
for idx in range(0, self.node_groups_extra[ng.id]['node_count']):
resources.extend(self._serialize_instance(ng, idx))
return ',\n'.join(resources)
def _serialize_auto_security_group(self, ng):
fields = {
'security_group_name': g.generate_auto_security_group_name(ng),
'security_group_description':
"Auto security group created by Sahara for Node Group "
"'%s' of cluster '%s'." % (ng.name, ng.cluster.name),
'rules': self._serialize_auto_security_group_rules(ng)}
yield _load_template('security_group.heat', fields)
def _serialize_auto_security_group_rules(self, ng):
rules = []
for port in ng.open_ports:
rules.append({"remote_ip_prefix": "0.0.0.0/0", "protocol": "tcp",
"port_range_min": port, "port_range_max": port})
rules.append({"remote_ip_prefix": "0.0.0.0/0", "protocol": "tcp",
"port_range_min": SSH_PORT, "port_range_max": SSH_PORT})
return json.dumps(rules)
def _serialize_instance(self, ng, idx):
inst_name = _get_inst_name(self.cluster.name, ng.name, idx)
nets = ''
security_groups = ''
if CONF.use_neutron:
port_name = _get_port_name(inst_name)
yield self._serialize_port(port_name,
self.cluster.neutron_management_network,
self._get_security_groups(ng))
nets = '"networks" : [{ "port" : { "Ref" : "%s" }}],' % port_name
if ng.floating_ip_pool:
yield self._serialize_neutron_floating(inst_name, port_name,
ng.floating_ip_pool)
else:
if ng.floating_ip_pool:
yield self._serialize_nova_floating(inst_name,
ng.floating_ip_pool)
if ng.security_groups:
security_groups = (
'"security_groups": %s,' % json.dumps(
self._get_security_groups(ng)))
# If the cluster has a user key pair, include it in the template.
key_name = ''
if self.cluster.user_keypair_id:
key_name = '"key_name" : "%s",' % self.cluster.user_keypair_id
gen_userdata_func = self.node_groups_extra[ng.id]['gen_userdata_func']
userdata = gen_userdata_func(ng, inst_name)
availability_zone = ''
if ng.availability_zone:
# Use json.dumps to escape ng.availability_zone
# (in case it contains quotes)
availability_zone = ('"availability_zone" : %s,' %
json.dumps(ng.availability_zone))
fields = {'instance_name': inst_name,
'flavor_id': ng.flavor_id,
'image_id': ng.get_image_id(),
'image_username': ng.image_username,
'network_interfaces': nets,
'key_name': key_name,
'userdata': _prepare_userdata(userdata),
'scheduler_hints':
self._get_anti_affinity_scheduler_hints(ng),
'security_groups': security_groups,
'availability_zone': availability_zone}
yield _load_template('instance.heat', fields)
for idx in range(0, ng.volumes_per_node):
yield self._serialize_volume(inst_name, idx, ng.volumes_size,
ng.volumes_availability_zone,
ng.volume_type)
def _serialize_port(self, port_name, fixed_net_id, security_groups):
fields = {'port_name': port_name,
'fixed_net_id': fixed_net_id,
'security_groups': ('"security_groups": %s,' % json.dumps(
security_groups) if security_groups else '')}
return _load_template('neutron-port.heat', fields)
def _serialize_neutron_floating(self, inst_name, port_name,
floating_net_id):
fields = {'floating_ip_name': _get_floating_name(inst_name),
'floating_net_id': floating_net_id,
'port_name': port_name}
return _load_template('neutron-floating.heat', fields)
def _serialize_nova_floating(self, inst_name, floating_pool_name):
fields = {
'floating_ip_name': _get_floating_name(inst_name),
'floating_ip_assoc_name': _get_floating_assoc_name(inst_name),
'instance_name': inst_name,
'pool': floating_pool_name
}
return _load_template('nova-floating.heat', fields)
def _serialize_volume_type(self, volume_type):
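# e.g. None -> '"volume_type" : null', 'fast' -> '"volume_type" : "fast"'
# (the 'fast' value is illustrative only).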
property = '"volume_type" : %s'
if volume_type is None:
return property % 'null'
else:
return property % ('"%s"' % volume_type)
def _serialize_volume(self, inst_name, volume_idx, volumes_size,
volumes_availability_zone, volume_type):
fields = {'volume_name': _get_volume_name(inst_name, volume_idx),
'volumes_size': volumes_size,
'volume_attach_name': _get_volume_attach_name(inst_name,
volume_idx),
'availability_zone': '',
'instance_name': inst_name,
'volume_type': self._serialize_volume_type(volume_type)}
if volumes_availability_zone:
# Use json.dumps to escape volumes_availability_zone
# (in case it contains quotes)
fields['availability_zone'] = (
'"availability_zone": %s,' %
json.dumps(volumes_availability_zone))
return _load_template('volume.heat', fields)
def _get_security_groups(self, node_group):
if not node_group.auto_security_group:
return node_group.security_groups
return (list(node_group.security_groups or []) +
[{"Ref": g.generate_auto_security_group_name(node_group)}])
def _serialize_aa_server_group(self):
fields = {'server_group_name': _get_aa_group_name(self.cluster.name)}
yield _load_template('aa_server_group.heat', fields)
class ClusterStack(object):
def __init__(self, tmpl, heat_stack):
self.tmpl = tmpl
self.heat_stack = heat_stack
def get_node_group_instances(self, node_group):
insts = []
count = self.tmpl.node_groups_extra[node_group.id]['node_count']
heat = client()
for i in range(0, count):
name = _get_inst_name(self.tmpl.cluster.name, node_group.name, i)
res = heat.resources.get(self.heat_stack.id, name)
insts.append((name, res.physical_resource_id))
return insts
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import re
import subprocess
import time
import fixtures
from heatclient import exc as heat_exceptions
from keystoneauth1 import exceptions as kc_exceptions
from oslo_log import log as logging
from oslo_utils import timeutils
import six
from six.moves import urllib
import testscenarios
import testtools
from heat_integrationtests.common import clients
from heat_integrationtests.common import config
from heat_integrationtests.common import exceptions
LOG = logging.getLogger(__name__)
_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
def call_until_true(duration, sleep_for, func, *args, **kwargs):
"""Call the function until it returns True or the duration elapsed.
Call the given function until it returns True (and return True) or
until the specified duration (in seconds) elapses (and return
False).
:param func: A zero argument callable that returns True on success.
:param duration: The number of seconds for which to attempt a
successful call of the function.
:param sleep_for: The number of seconds to sleep after an unsuccessful
invocation of the function.
"""
now = time.time()
timeout = now + duration
while now < timeout:
if func(*args, **kwargs):
return True
LOG.debug("Sleeping for %d seconds", sleep_for)
time.sleep(sleep_for)
now = time.time()
return False
def rand_name(name=''):
randbits = six.text_type(random.randint(1, 0x7fffffff))
if name:
return name + '-' + randbits
else:
return randbits
def requires_convergence(test_method):
'''Decorator for convergence-only tests.
The decorated test will be skipped when convergence is disabled.
'''
convergence_enabled = config.init_conf(
).heat_plugin.convergence_engine_enabled
skipper = testtools.skipUnless(convergence_enabled,
"Convergence-only tests are disabled")
return skipper(test_method)
class HeatIntegrationTest(testscenarios.WithScenarios,
testtools.TestCase):
def setUp(self):
super(HeatIntegrationTest, self).setUp()
self.conf = config.init_conf().heat_plugin
self.assertIsNotNone(self.conf.auth_url,
'No auth_url configured')
self.assertIsNotNone(self.conf.username,
'No username configured')
self.assertIsNotNone(self.conf.password,
'No password configured')
self.setup_clients(self.conf)
self.useFixture(fixtures.FakeLogger(format=_LOG_FORMAT))
self.updated_time = {}
if self.conf.disable_ssl_certificate_validation:
self.verify_cert = False
else:
self.verify_cert = self.conf.ca_file or True
def setup_clients(self, conf, admin_credentials=False):
self.manager = clients.ClientManager(conf, admin_credentials)
self.identity_client = self.manager.identity_client
self.orchestration_client = self.manager.orchestration_client
self.compute_client = self.manager.compute_client
self.network_client = self.manager.network_client
self.volume_client = self.manager.volume_client
self.object_client = self.manager.object_client
self.client = self.orchestration_client
def setup_clients_for_admin(self):
self.setup_clients(self.conf, True)
def check_connectivity(self, check_ip):
def try_connect(ip):
try:
urllib.request.urlopen('http://%s/' % ip)
return True
except IOError:
return False
timeout = self.conf.connectivity_timeout
elapsed_time = 0
while not try_connect(check_ip):
time.sleep(10)
elapsed_time += 10
if elapsed_time > timeout:
raise exceptions.TimeoutException()
def _log_console_output(self, servers=None):
if not servers:
servers = self.compute_client.servers.list()
for server in servers:
LOG.info('Console output for %s', server.id)
LOG.info(server.get_console_output())
def _load_template(self, base_file, file_name, sub_dir=None):
sub_dir = sub_dir or ''
filepath = os.path.join(os.path.dirname(os.path.realpath(base_file)),
sub_dir, file_name)
with open(filepath) as f:
return f.read()
def create_keypair(self, client=None, name=None):
if client is None:
client = self.compute_client
if name is None:
name = rand_name('heat-keypair')
keypair = client.keypairs.create(name)
self.assertEqual(keypair.name, name)
def delete_keypair():
keypair.delete()
self.addCleanup(delete_keypair)
return keypair
def assign_keypair(self):
if self.conf.keypair_name:
self.keypair = None
self.keypair_name = self.conf.keypair_name
else:
self.keypair = self.create_keypair()
self.keypair_name = self.keypair.id
@classmethod
def _stack_rand_name(cls):
return rand_name(cls.__name__)
def _get_network(self, net_name=None):
if net_name is None:
net_name = self.conf.fixed_network_name
networks = self.network_client.list_networks()
for net in networks['networks']:
if net['name'] == net_name:
return net
def is_service_available(self, service_type):
try:
self.identity_client.get_endpoint_url(
service_type, self.conf.region)
except kc_exceptions.EndpointNotFound:
return False
else:
return True
@staticmethod
def _stack_output(stack, output_key, validate_errors=True):
"""Return a stack output value for a given key."""
value = None
for o in stack.outputs:
if validate_errors and 'output_error' in o:
# scan for errors in the stack output.
raise ValueError(
'Unexpected output errors in %s : %s' % (
output_key, o['output_error']))
if o['output_key'] == output_key:
value = o['output_value']
return value
def _ping_ip_address(self, ip_address, should_succeed=True):
cmd = ['ping', '-c1', '-w1', ip_address]
def ping():
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.wait()
return (proc.returncode == 0) == should_succeed
return call_until_true(
self.conf.build_timeout, 1, ping)
def _wait_for_all_resource_status(self, stack_identifier,
status, failure_pattern='^.*_FAILED$',
success_on_not_found=False):
for res in self.client.resources.list(stack_identifier):
self._wait_for_resource_status(
stack_identifier, res.resource_name,
status, failure_pattern=failure_pattern,
success_on_not_found=success_on_not_found)
def _wait_for_resource_status(self, stack_identifier, resource_name,
status, failure_pattern='^.*_FAILED$',
success_on_not_found=False):
"""Waits for a Resource to reach a given status."""
fail_regexp = re.compile(failure_pattern)
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
try:
res = self.client.resources.get(
stack_identifier, resource_name)
except heat_exceptions.HTTPNotFound:
if success_on_not_found:
return
# ignore this, as the resource may not have
# been created yet
else:
if res.resource_status == status:
return
wait_for_action = status.split('_')[0]
resource_action = res.resource_status.split('_')[0]
if (resource_action == wait_for_action and
fail_regexp.search(res.resource_status)):
raise exceptions.StackResourceBuildErrorException(
resource_name=res.resource_name,
stack_identifier=stack_identifier,
resource_status=res.resource_status,
resource_status_reason=res.resource_status_reason)
time.sleep(build_interval)
message = ('Resource %s failed to reach %s status within '
'the required time (%s s).' %
(resource_name, status, build_timeout))
raise exceptions.TimeoutException(message)
def verify_resource_status(self, stack_identifier, resource_name,
status='CREATE_COMPLETE'):
try:
res = self.client.resources.get(stack_identifier, resource_name)
except heat_exceptions.HTTPNotFound:
return False
return res.resource_status == status
def _verify_status(self, stack, stack_identifier, status,
fail_regexp, is_action_cancelled=False):
if stack.stack_status == status:
# Handle UPDATE_COMPLETE/FAILED case: Make sure we don't
# wait for a stale UPDATE_COMPLETE/FAILED status.
if status in ('UPDATE_FAILED', 'UPDATE_COMPLETE'):
if is_action_cancelled:
return True
if self.updated_time.get(
stack_identifier) != stack.updated_time:
self.updated_time[stack_identifier] = stack.updated_time
return True
elif status == 'DELETE_COMPLETE' and stack.deletion_time is None:
# Wait for deletion_time to be filled, so that we have more
# confidence the operation is finished.
return False
else:
return True
wait_for_action = status.split('_')[0]
if (stack.action == wait_for_action and
fail_regexp.search(stack.stack_status)):
# Handle UPDATE_COMPLETE/UPDATE_FAILED case.
if status in ('UPDATE_FAILED', 'UPDATE_COMPLETE'):
if self.updated_time.get(
stack_identifier) != stack.updated_time:
self.updated_time[stack_identifier] = stack.updated_time
raise exceptions.StackBuildErrorException(
stack_identifier=stack_identifier,
stack_status=stack.stack_status,
stack_status_reason=stack.stack_status_reason)
else:
raise exceptions.StackBuildErrorException(
stack_identifier=stack_identifier,
stack_status=stack.stack_status,
stack_status_reason=stack.stack_status_reason)
def _wait_for_stack_status(self, stack_identifier, status,
failure_pattern=None,
success_on_not_found=False,
signal_required=False,
resources_to_signal=None,
is_action_cancelled=False):
"""Waits for a Stack to reach a given status.
Note this compares the full $action_$status, e.g
CREATE_COMPLETE, not just COMPLETE which is exposed
via the status property of Stack in heatclient
"""
if failure_pattern:
fail_regexp = re.compile(failure_pattern)
elif 'FAILED' in status:
# If we're looking for e.g CREATE_FAILED, COMPLETE is unexpected.
fail_regexp = re.compile('^.*_COMPLETE$')
else:
fail_regexp = re.compile('^.*_FAILED$')
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
try:
stack = self.client.stacks.get(stack_identifier,
resolve_outputs=False)
except heat_exceptions.HTTPNotFound:
if success_on_not_found:
return
elif not any(s in status for s in ['CREATE', 'ADOPT']):
# raise an exception when the stack is not found, unless we're waiting
# for CREATE or ADOPT (the only two cases in which the stack may
# legitimately not have been created yet)
raise
# ignore this, as the resource may not have
# been created yet
else:
if self._verify_status(stack, stack_identifier, status,
fail_regexp, is_action_cancelled):
return
if signal_required:
self.signal_resources(resources_to_signal)
time.sleep(build_interval)
message = ('Stack %s failed to reach %s status within '
'the required time (%s s).' %
(stack_identifier, status, build_timeout))
raise exceptions.TimeoutException(message)
def _stack_delete(self, stack_identifier):
try:
self._handle_in_progress(self.client.stacks.delete,
stack_identifier)
except heat_exceptions.HTTPNotFound:
pass
self._wait_for_stack_status(
stack_identifier, 'DELETE_COMPLETE',
success_on_not_found=True)
def _handle_in_progress(self, fn, *args, **kwargs):
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
try:
fn(*args, **kwargs)
except heat_exceptions.HTTPConflict as ex:
# FIXME(sirushtim): Wait a little for the stack lock to be
# released and hopefully, the stack should be usable again.
if ex.error['error']['type'] != 'ActionInProgress':
raise ex
time.sleep(build_interval)
else:
break
def update_stack(self, stack_identifier, template=None, environment=None,
files=None, parameters=None, tags=None,
expected_status='UPDATE_COMPLETE',
disable_rollback=True,
existing=False):
env = environment or {}
env_files = files or {}
parameters = parameters or {}
self.updated_time[stack_identifier] = self.client.stacks.get(
stack_identifier, resolve_outputs=False).updated_time
self._handle_in_progress(
self.client.stacks.update,
stack_id=stack_identifier,
template=template,
files=env_files,
disable_rollback=disable_rollback,
parameters=parameters,
environment=env,
tags=tags,
existing=existing)
kwargs = {'stack_identifier': stack_identifier,
'status': expected_status}
if expected_status in ['ROLLBACK_COMPLETE']:
# To trigger a rollback you would intentionally fail the stack,
# hence check for rollback failures
kwargs['failure_pattern'] = '^ROLLBACK_FAILED$'
self._wait_for_stack_status(**kwargs)
def cancel_update_stack(self, stack_identifier, rollback=True,
expected_status='ROLLBACK_COMPLETE'):
stack_name = stack_identifier.split('/')[0]
self.updated_time[stack_identifier] = self.client.stacks.get(
stack_identifier, resolve_outputs=False).updated_time
if rollback:
self.client.actions.cancel_update(stack_name)
else:
self.client.actions.cancel_without_rollback(stack_name)
kwargs = {'stack_identifier': stack_identifier,
'status': expected_status}
if expected_status == 'UPDATE_FAILED':
kwargs['is_action_cancelled'] = True
if expected_status in ['ROLLBACK_COMPLETE']:
            # Rollback is triggered by intentionally failing the stack,
            # so look for ROLLBACK_FAILED as the failure pattern.
kwargs['failure_pattern'] = '^ROLLBACK_FAILED$'
self._wait_for_stack_status(**kwargs)
def preview_update_stack(self, stack_identifier, template,
environment=None, files=None, parameters=None,
tags=None, disable_rollback=True,
show_nested=False):
env = environment or {}
env_files = files or {}
parameters = parameters or {}
return self.client.stacks.preview_update(
stack_id=stack_identifier,
template=template,
files=env_files,
disable_rollback=disable_rollback,
parameters=parameters,
environment=env,
tags=tags,
show_nested=show_nested
)
def assert_resource_is_a_stack(self, stack_identifier, res_name,
wait=False):
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
time.sleep(build_interval)
try:
nested_identifier = self._get_nested_identifier(
stack_identifier, res_name)
except Exception:
# We may have to wait, if the create is in-progress
if wait:
time.sleep(build_interval)
else:
raise
else:
return nested_identifier
def _get_nested_identifier(self, stack_identifier, res_name):
rsrc = self.client.resources.get(stack_identifier, res_name)
nested_link = [l for l in rsrc.links if l['rel'] == 'nested']
nested_href = nested_link[0]['href']
nested_id = nested_href.split('/')[-1]
nested_identifier = '/'.join(nested_href.split('/')[-2:])
self.assertEqual(rsrc.physical_resource_id, nested_id)
nested_stack = self.client.stacks.get(nested_id, resolve_outputs=False)
nested_identifier2 = '%s/%s' % (nested_stack.stack_name,
nested_stack.id)
self.assertEqual(nested_identifier, nested_identifier2)
parent_id = stack_identifier.split("/")[-1]
self.assertEqual(parent_id, nested_stack.parent)
return nested_identifier
def group_nested_identifier(self, stack_identifier,
group_name):
# Get the nested stack identifier from a group resource
rsrc = self.client.resources.get(stack_identifier, group_name)
physical_resource_id = rsrc.physical_resource_id
nested_stack = self.client.stacks.get(physical_resource_id,
resolve_outputs=False)
nested_identifier = '%s/%s' % (nested_stack.stack_name,
nested_stack.id)
parent_id = stack_identifier.split("/")[-1]
self.assertEqual(parent_id, nested_stack.parent)
return nested_identifier
def list_group_resources(self, stack_identifier,
group_name, minimal=True):
nested_identifier = self.group_nested_identifier(stack_identifier,
group_name)
if minimal:
return self.list_resources(nested_identifier)
return self.client.resources.list(nested_identifier)
def list_resources(self, stack_identifier, filter_func=None):
resources = self.client.resources.list(stack_identifier)
return dict((r.resource_name, r.resource_type) for r in resources
if (filter_func(r) if callable(filter_func) else True))
def get_resource_stack_id(self, r):
stack_link = [l for l in r.links if l.get('rel') == 'stack'][0]
return stack_link['href'].split("/")[-1]
def get_physical_resource_id(self, stack_identifier, resource_name):
try:
resource = self.client.resources.get(
stack_identifier, resource_name)
return resource.physical_resource_id
except Exception:
            raise Exception('Resource (%s) not found in stack (%s)!' %
                            (resource_name, stack_identifier))
def get_stack_output(self, stack_identifier, output_key,
validate_errors=True):
stack = self.client.stacks.get(stack_identifier)
return self._stack_output(stack, output_key, validate_errors)
def check_input_values(self, group_resources, key, value):
# Check inputs for deployment and derived config
for r in group_resources:
d = self.client.software_deployments.get(
r.physical_resource_id)
self.assertEqual({key: value}, d.input_values)
c = self.client.software_configs.get(
d.config_id)
foo_input_c = [i for i in c.inputs if i.get('name') == key][0]
self.assertEqual(value, foo_input_c.get('value'))
def signal_resources(self, resources):
# Signal all IN_PROGRESS resources
for r in resources:
if 'IN_PROGRESS' in r.resource_status:
stack_id = self.get_resource_stack_id(r)
self.client.resources.signal(stack_id, r.resource_name)
def stack_create(self, stack_name=None, template=None, files=None,
parameters=None, environment=None, tags=None,
expected_status='CREATE_COMPLETE',
disable_rollback=True, enable_cleanup=True,
environment_files=None, timeout=None):
name = stack_name or self._stack_rand_name()
templ = template or self.template
templ_files = files or {}
params = parameters or {}
env = environment or {}
timeout_mins = timeout or self.conf.build_timeout
self.client.stacks.create(
stack_name=name,
template=templ,
files=templ_files,
disable_rollback=disable_rollback,
parameters=params,
environment=env,
tags=tags,
environment_files=environment_files,
timeout_mins=timeout_mins
)
if enable_cleanup:
self.addCleanup(self._stack_delete, name)
stack = self.client.stacks.get(name, resolve_outputs=False)
stack_identifier = '%s/%s' % (name, stack.id)
kwargs = {'stack_identifier': stack_identifier,
'status': expected_status}
if expected_status:
if expected_status in ['ROLLBACK_COMPLETE']:
                # Rollback is triggered by intentionally failing the stack,
                # so look for ROLLBACK_FAILED as the failure pattern.
kwargs['failure_pattern'] = '^ROLLBACK_FAILED$'
self._wait_for_stack_status(**kwargs)
return stack_identifier
def stack_adopt(self, stack_name=None, files=None,
parameters=None, environment=None, adopt_data=None,
wait_for_status='ADOPT_COMPLETE'):
if (self.conf.skip_test_stack_action_list and
'ADOPT' in self.conf.skip_test_stack_action_list):
self.skipTest('Testing Stack adopt disabled in conf, skipping')
name = stack_name or self._stack_rand_name()
templ_files = files or {}
params = parameters or {}
env = environment or {}
self.client.stacks.create(
stack_name=name,
files=templ_files,
disable_rollback=True,
parameters=params,
environment=env,
adopt_stack_data=adopt_data,
)
self.addCleanup(self._stack_delete, name)
stack = self.client.stacks.get(name, resolve_outputs=False)
stack_identifier = '%s/%s' % (name, stack.id)
self._wait_for_stack_status(stack_identifier, wait_for_status)
return stack_identifier
def stack_abandon(self, stack_id):
if (self.conf.skip_test_stack_action_list and
'ABANDON' in self.conf.skip_test_stack_action_list):
self.addCleanup(self._stack_delete, stack_id)
self.skipTest('Testing Stack abandon disabled in conf, skipping')
info = self.client.stacks.abandon(stack_id=stack_id)
return info
def stack_snapshot(self, stack_id,
wait_for_status='SNAPSHOT_COMPLETE'):
snapshot = self.client.stacks.snapshot(stack_id=stack_id)
self._wait_for_stack_status(stack_id, wait_for_status)
return snapshot['id']
def stack_restore(self, stack_id, snapshot_id,
wait_for_status='RESTORE_COMPLETE'):
self.client.stacks.restore(stack_id, snapshot_id)
self._wait_for_stack_status(stack_id, wait_for_status)
def stack_suspend(self, stack_identifier):
if (self.conf.skip_test_stack_action_list and
'SUSPEND' in self.conf.skip_test_stack_action_list):
self.addCleanup(self._stack_delete, stack_identifier)
self.skipTest('Testing Stack suspend disabled in conf, skipping')
self._handle_in_progress(self.client.actions.suspend, stack_identifier)
# improve debugging by first checking the resource's state.
self._wait_for_all_resource_status(stack_identifier,
'SUSPEND_COMPLETE')
self._wait_for_stack_status(stack_identifier, 'SUSPEND_COMPLETE')
def stack_resume(self, stack_identifier):
if (self.conf.skip_test_stack_action_list and
'RESUME' in self.conf.skip_test_stack_action_list):
self.addCleanup(self._stack_delete, stack_identifier)
self.skipTest('Testing Stack resume disabled in conf, skipping')
self._handle_in_progress(self.client.actions.resume, stack_identifier)
# improve debugging by first checking the resource's state.
self._wait_for_all_resource_status(stack_identifier,
'RESUME_COMPLETE')
self._wait_for_stack_status(stack_identifier, 'RESUME_COMPLETE')
def wait_for_event_with_reason(self, stack_identifier, reason,
rsrc_name=None, num_expected=1):
build_timeout = self.conf.build_timeout
build_interval = self.conf.build_interval
start = timeutils.utcnow()
while timeutils.delta_seconds(start,
timeutils.utcnow()) < build_timeout:
try:
rsrc_events = self.client.events.list(stack_identifier,
resource_name=rsrc_name)
except heat_exceptions.HTTPNotFound:
LOG.debug("No events yet found for %s", rsrc_name)
else:
matched = [e for e in rsrc_events
if e.resource_status_reason == reason]
if len(matched) == num_expected:
return matched
time.sleep(build_interval)
def check_autoscale_complete(self, stack_id, expected_num, parent_stack,
group_name):
res_list = self.client.resources.list(stack_id)
all_res_complete = all(res.resource_status in ('UPDATE_COMPLETE',
'CREATE_COMPLETE')
for res in res_list)
all_res = len(res_list) == expected_num
if all_res and all_res_complete:
metadata = self.client.resources.metadata(parent_stack, group_name)
return not metadata.get('scaling_in_progress')
return False
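    # Illustrative sketch only (the subclass, template, and resource names
    # below are hypothetical, not part of this module): a test case built on
    # this base class typically drives the helpers above like this.
    #
    #     class ExampleStackTest(ThisTestBase):
    #         def test_create_and_update(self):
    #             stack_identifier = self.stack_create(template=SOME_TEMPLATE)
    #             self.update_stack(stack_identifier,
    #                               template=SOME_UPDATED_TEMPLATE)
    #             self.assertEqual({'random': 'OS::Heat::RandomString'},
    #                              self.list_resources(stack_identifier))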
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import OrderedDict
import pickle
import pytest
import hypothesis as h
import hypothesis.strategies as st
import numpy as np
import pyarrow as pa
import pyarrow.types as types
import pyarrow.tests.strategies as past
def get_many_types():
    # Returning them from a function is required because the pa.dictionary
    # type holds a pyarrow array and test_array.py::test_total_bytes_allocated
    # checks that the default memory pool has zero allocated bytes.
return (
pa.null(),
pa.bool_(),
pa.int32(),
pa.time32('s'),
pa.time64('us'),
pa.date32(),
pa.timestamp('us'),
pa.timestamp('us', tz='UTC'),
pa.timestamp('us', tz='Europe/Paris'),
pa.duration('s'),
pa.float16(),
pa.float32(),
pa.float64(),
pa.decimal128(19, 4),
pa.string(),
pa.binary(),
pa.binary(10),
pa.large_string(),
pa.large_binary(),
pa.list_(pa.int32()),
pa.large_list(pa.uint16()),
pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())]),
pa.struct([pa.field('a', pa.int32(), nullable=False),
pa.field('b', pa.int8(), nullable=False),
pa.field('c', pa.string())]),
pa.union([pa.field('a', pa.binary(10)),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE),
pa.union([pa.field('a', pa.binary(10)),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),
pa.union([pa.field('a', pa.binary(10), nullable=False),
pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),
pa.dictionary(pa.int32(), pa.string())
)
def test_is_boolean():
assert types.is_boolean(pa.bool_())
assert not types.is_boolean(pa.int8())
def test_is_integer():
signed_ints = [pa.int8(), pa.int16(), pa.int32(), pa.int64()]
unsigned_ints = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()]
for t in signed_ints + unsigned_ints:
assert types.is_integer(t)
for t in signed_ints:
assert types.is_signed_integer(t)
assert not types.is_unsigned_integer(t)
for t in unsigned_ints:
assert types.is_unsigned_integer(t)
assert not types.is_signed_integer(t)
assert not types.is_integer(pa.float32())
assert not types.is_signed_integer(pa.float32())
def test_is_floating():
for t in [pa.float16(), pa.float32(), pa.float64()]:
assert types.is_floating(t)
assert not types.is_floating(pa.int32())
def test_is_null():
assert types.is_null(pa.null())
assert not types.is_null(pa.list_(pa.int32()))
def test_is_decimal():
assert types.is_decimal(pa.decimal128(19, 4))
assert not types.is_decimal(pa.int32())
def test_is_list():
a = pa.list_(pa.int32())
b = pa.large_list(pa.int32())
assert types.is_list(a)
assert not types.is_large_list(a)
assert types.is_large_list(b)
assert not types.is_list(b)
assert not types.is_list(pa.int32())
def test_is_dictionary():
assert types.is_dictionary(pa.dictionary(pa.int32(), pa.string()))
assert not types.is_dictionary(pa.int32())
def test_is_nested_or_struct():
struct_ex = pa.struct([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())])
assert types.is_struct(struct_ex)
assert not types.is_struct(pa.list_(pa.int32()))
assert types.is_nested(struct_ex)
assert types.is_nested(pa.list_(pa.int32()))
assert types.is_nested(pa.large_list(pa.int32()))
assert not types.is_nested(pa.int32())
def test_is_union():
for mode in [pa.lib.UnionMode_SPARSE, pa.lib.UnionMode_DENSE]:
assert types.is_union(pa.union([pa.field('a', pa.int32()),
pa.field('b', pa.int8()),
pa.field('c', pa.string())],
mode=mode))
assert not types.is_union(pa.list_(pa.int32()))
# TODO(wesm): is_map, once implemented
def test_is_binary_string():
assert types.is_binary(pa.binary())
assert not types.is_binary(pa.string())
assert not types.is_binary(pa.large_binary())
assert not types.is_binary(pa.large_string())
assert types.is_string(pa.string())
assert types.is_unicode(pa.string())
assert not types.is_string(pa.binary())
assert not types.is_string(pa.large_string())
assert not types.is_string(pa.large_binary())
assert types.is_large_binary(pa.large_binary())
assert not types.is_large_binary(pa.large_string())
assert not types.is_large_binary(pa.binary())
assert not types.is_large_binary(pa.string())
assert types.is_large_string(pa.large_string())
assert not types.is_large_string(pa.large_binary())
assert not types.is_large_string(pa.string())
assert not types.is_large_string(pa.binary())
assert types.is_fixed_size_binary(pa.binary(5))
assert not types.is_fixed_size_binary(pa.binary())
def test_is_temporal_date_time_timestamp():
date_types = [pa.date32(), pa.date64()]
time_types = [pa.time32('s'), pa.time64('ns')]
timestamp_types = [pa.timestamp('ms')]
duration_types = [pa.duration('ms')]
for case in date_types + time_types + timestamp_types + duration_types:
assert types.is_temporal(case)
for case in date_types:
assert types.is_date(case)
assert not types.is_time(case)
assert not types.is_timestamp(case)
assert not types.is_duration(case)
for case in time_types:
assert types.is_time(case)
assert not types.is_date(case)
assert not types.is_timestamp(case)
assert not types.is_duration(case)
for case in timestamp_types:
assert types.is_timestamp(case)
assert not types.is_date(case)
assert not types.is_time(case)
assert not types.is_duration(case)
for case in duration_types:
assert types.is_duration(case)
assert not types.is_date(case)
assert not types.is_time(case)
assert not types.is_timestamp(case)
assert not types.is_temporal(pa.int32())
def test_is_primitive():
assert types.is_primitive(pa.int32())
assert not types.is_primitive(pa.list_(pa.int32()))
def test_timestamp():
for unit in ('s', 'ms', 'us', 'ns'):
for tz in (None, 'UTC', 'Europe/Paris'):
ty = pa.timestamp(unit, tz=tz)
assert ty.unit == unit
assert ty.tz == tz
for invalid_unit in ('m', 'arbit', 'rary'):
with pytest.raises(ValueError, match='Invalid TimeUnit string'):
pa.timestamp(invalid_unit)
def test_time32_units():
for valid_unit in ('s', 'ms'):
ty = pa.time32(valid_unit)
assert ty.unit == valid_unit
for invalid_unit in ('m', 'us', 'ns'):
error_msg = 'Invalid TimeUnit for time32: {}'.format(invalid_unit)
with pytest.raises(ValueError, match=error_msg):
pa.time32(invalid_unit)
def test_time64_units():
for valid_unit in ('us', 'ns'):
ty = pa.time64(valid_unit)
assert ty.unit == valid_unit
for invalid_unit in ('m', 's', 'ms'):
error_msg = 'Invalid TimeUnit for time64: {}'.format(invalid_unit)
with pytest.raises(ValueError, match=error_msg):
pa.time64(invalid_unit)
def test_duration():
for unit in ('s', 'ms', 'us', 'ns'):
ty = pa.duration(unit)
assert ty.unit == unit
for invalid_unit in ('m', 'arbit', 'rary'):
with pytest.raises(ValueError, match='Invalid TimeUnit string'):
pa.duration(invalid_unit)
def test_list_type():
ty = pa.list_(pa.int64())
assert isinstance(ty, pa.ListType)
assert ty.value_type == pa.int64()
with pytest.raises(TypeError):
pa.list_(None)
def test_large_list_type():
ty = pa.large_list(pa.utf8())
assert isinstance(ty, pa.LargeListType)
assert ty.value_type == pa.utf8()
with pytest.raises(TypeError):
pa.large_list(None)
def test_struct_type():
fields = [
# Duplicate field name on purpose
pa.field('a', pa.int64()),
pa.field('a', pa.int32()),
pa.field('b', pa.int32())
]
ty = pa.struct(fields)
assert len(ty) == ty.num_children == 3
assert list(ty) == fields
assert ty[0].name == 'a'
assert ty[2].type == pa.int32()
with pytest.raises(IndexError):
assert ty[3]
assert ty['b'] == ty[2]
# Duplicate
with pytest.warns(UserWarning):
with pytest.raises(KeyError):
ty['a']
# Not found
with pytest.raises(KeyError):
ty['c']
# Neither integer nor string
with pytest.raises(TypeError):
ty[None]
for a, b in zip(ty, fields):
        assert a == b
# Construct from list of tuples
ty = pa.struct([('a', pa.int64()),
('a', pa.int32()),
('b', pa.int32())])
assert list(ty) == fields
for a, b in zip(ty, fields):
        assert a == b
# Construct from mapping
fields = [pa.field('a', pa.int64()),
pa.field('b', pa.int32())]
ty = pa.struct(OrderedDict([('a', pa.int64()),
('b', pa.int32())]))
assert list(ty) == fields
for a, b in zip(ty, fields):
        assert a == b
# Invalid args
with pytest.raises(TypeError):
pa.struct([('a', None)])
def test_union_type():
def check_fields(ty, fields):
assert ty.num_children == len(fields)
assert [ty[i] for i in range(ty.num_children)] == fields
fields = [pa.field('x', pa.list_(pa.int32())),
pa.field('y', pa.binary())]
for mode in ('sparse', pa.lib.UnionMode_SPARSE):
ty = pa.union(fields, mode=mode)
assert ty.mode == 'sparse'
check_fields(ty, fields)
assert ty.type_codes == [0, 1]
for mode in ('dense', pa.lib.UnionMode_DENSE):
ty = pa.union(fields, mode=mode)
assert ty.mode == 'dense'
check_fields(ty, fields)
assert ty.type_codes == [0, 1]
for mode in ('unknown', 2):
with pytest.raises(ValueError, match='Invalid union mode'):
pa.union(fields, mode=mode)
def test_dictionary_type():
ty0 = pa.dictionary(pa.int32(), pa.string())
assert ty0.index_type == pa.int32()
assert ty0.value_type == pa.string()
assert ty0.ordered is False
ty1 = pa.dictionary(pa.int8(), pa.float64(), ordered=True)
assert ty1.index_type == pa.int8()
assert ty1.value_type == pa.float64()
assert ty1.ordered is True
# construct from non-arrow objects
ty2 = pa.dictionary('int8', 'string')
assert ty2.index_type == pa.int8()
assert ty2.value_type == pa.string()
assert ty2.ordered is False
# invalid index type raises
with pytest.raises(TypeError):
pa.dictionary(pa.string(), pa.int64())
with pytest.raises(TypeError):
pa.dictionary(pa.uint32(), pa.string())
def test_dictionary_ordered_equals():
# Python side checking of ARROW-6345
d1 = pa.dictionary('int32', 'binary', ordered=True)
d2 = pa.dictionary('int32', 'binary', ordered=False)
d3 = pa.dictionary('int8', 'binary', ordered=True)
d4 = pa.dictionary('int32', 'binary', ordered=True)
assert not d1.equals(d2)
assert not d1.equals(d3)
assert d1.equals(d4)
def test_types_hashable():
many_types = get_many_types()
in_dict = {}
for i, type_ in enumerate(many_types):
assert hash(type_) == hash(type_)
in_dict[type_] = i
assert len(in_dict) == len(many_types)
for i, type_ in enumerate(many_types):
assert in_dict[type_] == i
def test_types_picklable():
for ty in get_many_types():
data = pickle.dumps(ty)
assert pickle.loads(data) == ty
def test_fields_hashable():
in_dict = {}
fields = [pa.field('a', pa.int32()),
pa.field('a', pa.int64()),
pa.field('a', pa.int64(), nullable=False),
pa.field('b', pa.int32()),
pa.field('b', pa.int32(), nullable=False)]
for i, field in enumerate(fields):
in_dict[field] = i
assert len(in_dict) == len(fields)
for i, field in enumerate(fields):
assert in_dict[field] == i
@pytest.mark.parametrize('t,check_func', [
(pa.date32(), types.is_date32),
(pa.date64(), types.is_date64),
(pa.time32('s'), types.is_time32),
(pa.time64('ns'), types.is_time64),
(pa.int8(), types.is_int8),
(pa.int16(), types.is_int16),
(pa.int32(), types.is_int32),
(pa.int64(), types.is_int64),
(pa.uint8(), types.is_uint8),
(pa.uint16(), types.is_uint16),
(pa.uint32(), types.is_uint32),
(pa.uint64(), types.is_uint64),
(pa.float16(), types.is_float16),
(pa.float32(), types.is_float32),
(pa.float64(), types.is_float64)
])
def test_exact_primitive_types(t, check_func):
assert check_func(t)
def test_type_id():
# enum values are not exposed publicly
for ty in get_many_types():
assert isinstance(ty.id, int)
def test_bit_width():
for ty, expected in [(pa.bool_(), 1),
(pa.int8(), 8),
(pa.uint32(), 32),
(pa.float16(), 16),
(pa.decimal128(19, 4), 128),
(pa.binary(42), 42 * 8)]:
assert ty.bit_width == expected
for ty in [pa.binary(), pa.string(), pa.list_(pa.int16())]:
with pytest.raises(ValueError, match="fixed width"):
ty.bit_width
def test_fixed_size_binary_byte_width():
ty = pa.binary(5)
assert ty.byte_width == 5
def test_decimal_properties():
ty = pa.decimal128(19, 4)
assert ty.byte_width == 16
assert ty.precision == 19
assert ty.scale == 4
def test_decimal_overflow():
pa.decimal128(1, 0)
pa.decimal128(38, 0)
for i in (0, -1, 39):
with pytest.raises(ValueError):
            pa.decimal128(i, 0)
def test_type_equality_operators():
many_types = get_many_types()
non_pyarrow = ('foo', 16, {'s', 'e', 't'})
for index, ty in enumerate(many_types):
# could use two parametrization levels,
# but that'd bloat pytest's output
for i, other in enumerate(many_types + non_pyarrow):
if i == index:
assert ty == other
else:
assert ty != other
def test_field_basic():
t = pa.string()
f = pa.field('foo', t)
assert f.name == 'foo'
assert f.nullable
assert f.type is t
assert repr(f) == "pyarrow.Field<foo: string>"
f = pa.field('foo', t, False)
assert not f.nullable
with pytest.raises(TypeError):
pa.field('foo', None)
def test_field_equals():
meta1 = {b'foo': b'bar'}
meta2 = {b'bizz': b'bazz'}
f1 = pa.field('a', pa.int8(), nullable=True)
f2 = pa.field('a', pa.int8(), nullable=True)
f3 = pa.field('a', pa.int8(), nullable=False)
f4 = pa.field('a', pa.int16(), nullable=False)
f5 = pa.field('b', pa.int16(), nullable=False)
f6 = pa.field('a', pa.int8(), nullable=True, metadata=meta1)
f7 = pa.field('a', pa.int8(), nullable=True, metadata=meta1)
f8 = pa.field('a', pa.int8(), nullable=True, metadata=meta2)
assert f1.equals(f2)
assert f6.equals(f7)
assert not f1.equals(f3)
assert not f1.equals(f4)
assert not f3.equals(f4)
assert not f1.equals(f6)
assert not f4.equals(f5)
assert not f7.equals(f8)
def test_field_equality_operators():
f1 = pa.field('a', pa.int8(), nullable=True)
f2 = pa.field('a', pa.int8(), nullable=True)
f3 = pa.field('b', pa.int8(), nullable=True)
f4 = pa.field('b', pa.int8(), nullable=False)
assert f1 == f2
assert f1 != f3
assert f3 != f4
assert f1 != 'foo'
def test_field_metadata():
f1 = pa.field('a', pa.int8())
f2 = pa.field('a', pa.int8(), metadata={})
f3 = pa.field('a', pa.int8(), metadata={b'bizz': b'bazz'})
assert f1.metadata is None
assert f2.metadata == {}
assert f3.metadata[b'bizz'] == b'bazz'
def test_field_add_remove_metadata():
import collections
f0 = pa.field('foo', pa.int32())
assert f0.metadata is None
metadata = {b'foo': b'bar', b'pandas': b'badger'}
metadata2 = collections.OrderedDict([
(b'a', b'alpha'),
(b'b', b'beta')
])
f1 = f0.with_metadata(metadata)
assert f1.metadata == metadata
f2 = f0.with_metadata(metadata2)
assert f2.metadata == metadata2
with pytest.raises(TypeError):
f0.with_metadata([1, 2, 3])
f3 = f1.remove_metadata()
assert f3.metadata is None
# idempotent
f4 = f3.remove_metadata()
assert f4.metadata is None
f5 = pa.field('foo', pa.int32(), True, metadata)
f6 = f0.with_metadata(metadata)
assert f5.equals(f6)
def test_is_integer_value():
assert pa.types.is_integer_value(1)
assert pa.types.is_integer_value(np.int64(1))
assert not pa.types.is_integer_value('1')
def test_is_float_value():
assert not pa.types.is_float_value(1)
assert pa.types.is_float_value(1.)
assert pa.types.is_float_value(np.float64(1))
assert not pa.types.is_float_value('1.0')
def test_is_boolean_value():
assert not pa.types.is_boolean_value(1)
assert pa.types.is_boolean_value(True)
assert pa.types.is_boolean_value(False)
assert pa.types.is_boolean_value(np.bool_(True))
assert pa.types.is_boolean_value(np.bool_(False))
@h.given(
past.all_types |
past.all_fields |
past.all_schemas
)
@h.example(
pa.field(name='', type=pa.null(), metadata={'0': '', '': ''})
)
def test_pickling(field):
data = pickle.dumps(field)
assert pickle.loads(data) == field
@h.given(
st.lists(past.all_types) |
st.lists(past.all_fields) |
st.lists(past.all_schemas)
)
def test_hashing(items):
h.assume(
# well, this is still O(n^2), but makes the input unique
all(not a.equals(b) for i, a in enumerate(items) for b in items[:i])
)
container = {}
for i, item in enumerate(items):
assert hash(item) == hash(item)
container[item] = i
assert len(container) == len(items)
for i, item in enumerate(items):
assert container[item] == i
|
|
# -*- coding: utf-8 -*-
#
# SelfTest/Cipher/ARC4.py: Self-test for the Alleged-RC4 cipher
#
# Written in 2008 by Dwayne C. Litzenberger <[email protected]>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Cryptodome.Cipher.ARC4"""
__revision__ = "$Id$"
from Cryptodome.Util.py3compat import *
from Cryptodome.SelfTest.st_common import *
from binascii import unhexlify
from Cryptodome.Cipher import ARC4
# This is a list of (plaintext, ciphertext, key[, description]) tuples.
test_data = [
# Test vectors from Eric Rescorla's message with the subject
# "RC4 compatibility testing", sent to the cipherpunks mailing list on
# September 13, 1994.
# http://cypherpunks.venona.com/date/1994/09/msg00420.html
('0123456789abcdef', '75b7878099e0c596', '0123456789abcdef',
'Test vector 0'),
('0000000000000000', '7494c2e7104b0879', '0123456789abcdef',
'Test vector 1'),
('0000000000000000', 'de188941a3375d3a', '0000000000000000',
'Test vector 2'),
#('00000000000000000000', 'd6a141a7ec3c38dfbd61', 'ef012345',
# 'Test vector 3'),
('01' * 512,
'7595c3e6114a09780c4ad452338e1ffd9a1be9498f813d76533449b6778dcad8'
+ 'c78a8d2ba9ac66085d0e53d59c26c2d1c490c1ebbe0ce66d1b6b1b13b6b919b8'
+ '47c25a91447a95e75e4ef16779cde8bf0a95850e32af9689444fd377108f98fd'
+ 'cbd4e726567500990bcc7e0ca3c4aaa304a387d20f3b8fbbcd42a1bd311d7a43'
+ '03dda5ab078896ae80c18b0af66dff319616eb784e495ad2ce90d7f772a81747'
+ 'b65f62093b1e0db9e5ba532fafec47508323e671327df9444432cb7367cec82f'
+ '5d44c0d00b67d650a075cd4b70dedd77eb9b10231b6b5b741347396d62897421'
+ 'd43df9b42e446e358e9c11a9b2184ecbef0cd8e7a877ef968f1390ec9b3d35a5'
+ '585cb009290e2fcde7b5ec66d9084be44055a619d9dd7fc3166f9487f7cb2729'
+ '12426445998514c15d53a18c864ce3a2b7555793988126520eacf2e3066e230c'
+ '91bee4dd5304f5fd0405b35bd99c73135d3d9bc335ee049ef69b3867bf2d7bd1'
+ 'eaa595d8bfc0066ff8d31509eb0c6caa006c807a623ef84c3d33c195d23ee320'
+ 'c40de0558157c822d4b8c569d849aed59d4e0fd7f379586b4b7ff684ed6a189f'
+ '7486d49b9c4bad9ba24b96abf924372c8a8fffb10d55354900a77a3db5f205e1'
+ 'b99fcd8660863a159ad4abe40fa48934163ddde542a6585540fd683cbfd8c00f'
+ '12129a284deacc4cdefe58be7137541c047126c8d49e2755ab181ab7e940b0c0',
'0123456789abcdef',
"Test vector 4"),
]
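# A rough sketch (for illustration only) of how the generated stream tests
# consume each tuple above, using 'Test vector 0' as the example:
#
#     key = unhexlify(b('0123456789abcdef'))
#     pt = unhexlify(b('0123456789abcdef'))
#     assert ARC4.new(key).encrypt(pt) == unhexlify(b('75b7878099e0c596'))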
class RFC6229_Tests(unittest.TestCase):
# Test vectors from RFC 6229. Each test vector is a tuple with two items:
# the ARC4 key and a dictionary. The dictionary has keystream offsets as keys
# and the 16-byte keystream starting at the relevant offset as value.
rfc6229_data = [
# Page 3
(
'0102030405',
{
0: 'b2 39 63 05 f0 3d c0 27 cc c3 52 4a 0a 11 18 a8',
16: '69 82 94 4f 18 fc 82 d5 89 c4 03 a4 7a 0d 09 19',
240: '28 cb 11 32 c9 6c e2 86 42 1d ca ad b8 b6 9e ae',
256: '1c fc f6 2b 03 ed db 64 1d 77 df cf 7f 8d 8c 93',
496: '42 b7 d0 cd d9 18 a8 a3 3d d5 17 81 c8 1f 40 41',
512: '64 59 84 44 32 a7 da 92 3c fb 3e b4 98 06 61 f6',
752: 'ec 10 32 7b de 2b ee fd 18 f9 27 76 80 45 7e 22',
768: 'eb 62 63 8d 4f 0b a1 fe 9f ca 20 e0 5b f8 ff 2b',
1008:'45 12 90 48 e6 a0 ed 0b 56 b4 90 33 8f 07 8d a5',
1024:'30 ab bc c7 c2 0b 01 60 9f 23 ee 2d 5f 6b b7 df',
1520:'32 94 f7 44 d8 f9 79 05 07 e7 0f 62 e5 bb ce ea',
1536:'d8 72 9d b4 18 82 25 9b ee 4f 82 53 25 f5 a1 30',
2032:'1e b1 4a 0c 13 b3 bf 47 fa 2a 0b a9 3a d4 5b 8b',
2048:'cc 58 2f 8b a9 f2 65 e2 b1 be 91 12 e9 75 d2 d7',
3056:'f2 e3 0f 9b d1 02 ec bf 75 aa ad e9 bc 35 c4 3c',
3072:'ec 0e 11 c4 79 dc 32 9d c8 da 79 68 fe 96 56 81',
4080:'06 83 26 a2 11 84 16 d2 1f 9d 04 b2 cd 1c a0 50',
4096:'ff 25 b5 89 95 99 67 07 e5 1f bd f0 8b 34 d8 75'
}
),
# Page 4
(
'01020304050607',
{
0: '29 3f 02 d4 7f 37 c9 b6 33 f2 af 52 85 fe b4 6b',
16: 'e6 20 f1 39 0d 19 bd 84 e2 e0 fd 75 20 31 af c1',
240: '91 4f 02 53 1c 92 18 81 0d f6 0f 67 e3 38 15 4c',
256: 'd0 fd b5 83 07 3c e8 5a b8 39 17 74 0e c0 11 d5',
496: '75 f8 14 11 e8 71 cf fa 70 b9 0c 74 c5 92 e4 54',
512: '0b b8 72 02 93 8d ad 60 9e 87 a5 a1 b0 79 e5 e4',
752: 'c2 91 12 46 b6 12 e7 e7 b9 03 df ed a1 da d8 66',
768: '32 82 8f 91 50 2b 62 91 36 8d e8 08 1d e3 6f c2',
1008:'f3 b9 a7 e3 b2 97 bf 9a d8 04 51 2f 90 63 ef f1',
1024:'8e cb 67 a9 ba 1f 55 a5 a0 67 e2 b0 26 a3 67 6f',
1520:'d2 aa 90 2b d4 2d 0d 7c fd 34 0c d4 58 10 52 9f',
1536:'78 b2 72 c9 6e 42 ea b4 c6 0b d9 14 e3 9d 06 e3',
2032:'f4 33 2f d3 1a 07 93 96 ee 3c ee 3f 2a 4f f0 49',
2048:'05 45 97 81 d4 1f da 7f 30 c1 be 7e 12 46 c6 23',
3056:'ad fd 38 68 b8 e5 14 85 d5 e6 10 01 7e 3d d6 09',
3072:'ad 26 58 1c 0c 5b e4 5f 4c ea 01 db 2f 38 05 d5',
4080:'f3 17 2c ef fc 3b 3d 99 7c 85 cc d5 af 1a 95 0c',
4096:'e7 4b 0b 97 31 22 7f d3 7c 0e c0 8a 47 dd d8 b8'
}
),
(
'0102030405060708',
{
0: '97 ab 8a 1b f0 af b9 61 32 f2 f6 72 58 da 15 a8',
16: '82 63 ef db 45 c4 a1 86 84 ef 87 e6 b1 9e 5b 09',
240: '96 36 eb c9 84 19 26 f4 f7 d1 f3 62 bd df 6e 18',
256: 'd0 a9 90 ff 2c 05 fe f5 b9 03 73 c9 ff 4b 87 0a',
496: '73 23 9f 1d b7 f4 1d 80 b6 43 c0 c5 25 18 ec 63',
512: '16 3b 31 99 23 a6 bd b4 52 7c 62 61 26 70 3c 0f',
752: '49 d6 c8 af 0f 97 14 4a 87 df 21 d9 14 72 f9 66',
768: '44 17 3a 10 3b 66 16 c5 d5 ad 1c ee 40 c8 63 d0',
1008:'27 3c 9c 4b 27 f3 22 e4 e7 16 ef 53 a4 7d e7 a4',
1024:'c6 d0 e7 b2 26 25 9f a9 02 34 90 b2 61 67 ad 1d',
1520:'1f e8 98 67 13 f0 7c 3d 9a e1 c1 63 ff 8c f9 d3',
1536:'83 69 e1 a9 65 61 0b e8 87 fb d0 c7 91 62 aa fb',
2032:'0a 01 27 ab b4 44 84 b9 fb ef 5a bc ae 1b 57 9f',
2048:'c2 cd ad c6 40 2e 8e e8 66 e1 f3 7b db 47 e4 2c',
3056:'26 b5 1e a3 7d f8 e1 d6 f7 6f c3 b6 6a 74 29 b3',
3072:'bc 76 83 20 5d 4f 44 3d c1 f2 9d da 33 15 c8 7b',
4080:'d5 fa 5a 34 69 d2 9a aa f8 3d 23 58 9d b8 c8 5b',
4096:'3f b4 6e 2c 8f 0f 06 8e dc e8 cd cd 7d fc 58 62'
}
),
# Page 5
(
'0102030405060708090a',
{
0: 'ed e3 b0 46 43 e5 86 cc 90 7d c2 18 51 70 99 02',
16: '03 51 6b a7 8f 41 3b eb 22 3a a5 d4 d2 df 67 11',
240: '3c fd 6c b5 8e e0 fd de 64 01 76 ad 00 00 04 4d',
256: '48 53 2b 21 fb 60 79 c9 11 4c 0f fd 9c 04 a1 ad',
496: '3e 8c ea 98 01 71 09 97 90 84 b1 ef 92 f9 9d 86',
512: 'e2 0f b4 9b db 33 7e e4 8b 8d 8d c0 f4 af ef fe',
752: '5c 25 21 ea cd 79 66 f1 5e 05 65 44 be a0 d3 15',
768: 'e0 67 a7 03 19 31 a2 46 a6 c3 87 5d 2f 67 8a cb',
1008:'a6 4f 70 af 88 ae 56 b6 f8 75 81 c0 e2 3e 6b 08',
1024:'f4 49 03 1d e3 12 81 4e c6 f3 19 29 1f 4a 05 16',
1520:'bd ae 85 92 4b 3c b1 d0 a2 e3 3a 30 c6 d7 95 99',
1536:'8a 0f ed db ac 86 5a 09 bc d1 27 fb 56 2e d6 0a',
2032:'b5 5a 0a 5b 51 a1 2a 8b e3 48 99 c3 e0 47 51 1a',
2048:'d9 a0 9c ea 3c e7 5f e3 96 98 07 03 17 a7 13 39',
3056:'55 22 25 ed 11 77 f4 45 84 ac 8c fa 6c 4e b5 fc',
3072:'7e 82 cb ab fc 95 38 1b 08 09 98 44 21 29 c2 f8',
4080:'1f 13 5e d1 4c e6 0a 91 36 9d 23 22 be f2 5e 3c',
4096:'08 b6 be 45 12 4a 43 e2 eb 77 95 3f 84 dc 85 53'
}
),
(
'0102030405060708090a0b0c0d0e0f10',
{
0: '9a c7 cc 9a 60 9d 1e f7 b2 93 28 99 cd e4 1b 97',
16: '52 48 c4 95 90 14 12 6a 6e 8a 84 f1 1d 1a 9e 1c',
240: '06 59 02 e4 b6 20 f6 cc 36 c8 58 9f 66 43 2f 2b',
256: 'd3 9d 56 6b c6 bc e3 01 07 68 15 15 49 f3 87 3f',
496: 'b6 d1 e6 c4 a5 e4 77 1c ad 79 53 8d f2 95 fb 11',
512: 'c6 8c 1d 5c 55 9a 97 41 23 df 1d bc 52 a4 3b 89',
752: 'c5 ec f8 8d e8 97 fd 57 fe d3 01 70 1b 82 a2 59',
768: 'ec cb e1 3d e1 fc c9 1c 11 a0 b2 6c 0b c8 fa 4d',
1008:'e7 a7 25 74 f8 78 2a e2 6a ab cf 9e bc d6 60 65',
1024:'bd f0 32 4e 60 83 dc c6 d3 ce dd 3c a8 c5 3c 16',
1520:'b4 01 10 c4 19 0b 56 22 a9 61 16 b0 01 7e d2 97',
1536:'ff a0 b5 14 64 7e c0 4f 63 06 b8 92 ae 66 11 81',
2032:'d0 3d 1b c0 3c d3 3d 70 df f9 fa 5d 71 96 3e bd',
2048:'8a 44 12 64 11 ea a7 8b d5 1e 8d 87 a8 87 9b f5',
3056:'fa be b7 60 28 ad e2 d0 e4 87 22 e4 6c 46 15 a3',
3072:'c0 5d 88 ab d5 03 57 f9 35 a6 3c 59 ee 53 76 23',
4080:'ff 38 26 5c 16 42 c1 ab e8 d3 c2 fe 5e 57 2b f8',
4096:'a3 6a 4c 30 1a e8 ac 13 61 0c cb c1 22 56 ca cc'
}
),
# Page 6
(
'0102030405060708090a0b0c0d0e0f101112131415161718',
{
0: '05 95 e5 7f e5 f0 bb 3c 70 6e da c8 a4 b2 db 11',
16: 'df de 31 34 4a 1a f7 69 c7 4f 07 0a ee 9e 23 26',
240: 'b0 6b 9b 1e 19 5d 13 d8 f4 a7 99 5c 45 53 ac 05',
256: '6b d2 37 8e c3 41 c9 a4 2f 37 ba 79 f8 8a 32 ff',
496: 'e7 0b ce 1d f7 64 5a db 5d 2c 41 30 21 5c 35 22',
512: '9a 57 30 c7 fc b4 c9 af 51 ff da 89 c7 f1 ad 22',
752: '04 85 05 5f d4 f6 f0 d9 63 ef 5a b9 a5 47 69 82',
768: '59 1f c6 6b cd a1 0e 45 2b 03 d4 55 1f 6b 62 ac',
1008:'27 53 cc 83 98 8a fa 3e 16 88 a1 d3 b4 2c 9a 02',
1024:'93 61 0d 52 3d 1d 3f 00 62 b3 c2 a3 bb c7 c7 f0',
1520:'96 c2 48 61 0a ad ed fe af 89 78 c0 3d e8 20 5a',
1536:'0e 31 7b 3d 1c 73 b9 e9 a4 68 8f 29 6d 13 3a 19',
2032:'bd f0 e6 c3 cc a5 b5 b9 d5 33 b6 9c 56 ad a1 20',
2048:'88 a2 18 b6 e2 ec e1 e6 24 6d 44 c7 59 d1 9b 10',
3056:'68 66 39 7e 95 c1 40 53 4f 94 26 34 21 00 6e 40',
3072:'32 cb 0a 1e 95 42 c6 b3 b8 b3 98 ab c3 b0 f1 d5',
4080:'29 a0 b8 ae d5 4a 13 23 24 c6 2e 42 3f 54 b4 c8',
4096:'3c b0 f3 b5 02 0a 98 b8 2a f9 fe 15 44 84 a1 68'
}
),
(
'0102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f20',
{
0: 'ea a6 bd 25 88 0b f9 3d 3f 5d 1e 4c a2 61 1d 91',
16: 'cf a4 5c 9f 7e 71 4b 54 bd fa 80 02 7c b1 43 80',
240: '11 4a e3 44 de d7 1b 35 f2 e6 0f eb ad 72 7f d8',
256: '02 e1 e7 05 6b 0f 62 39 00 49 64 22 94 3e 97 b6',
496: '91 cb 93 c7 87 96 4e 10 d9 52 7d 99 9c 6f 93 6b',
512: '49 b1 8b 42 f8 e8 36 7c be b5 ef 10 4b a1 c7 cd',
752: '87 08 4b 3b a7 00 ba de 95 56 10 67 27 45 b3 74',
768: 'e7 a7 b9 e9 ec 54 0d 5f f4 3b db 12 79 2d 1b 35',
1008:'c7 99 b5 96 73 8f 6b 01 8c 76 c7 4b 17 59 bd 90',
1024:'7f ec 5b fd 9f 9b 89 ce 65 48 30 90 92 d7 e9 58',
1520:'40 f2 50 b2 6d 1f 09 6a 4a fd 4c 34 0a 58 88 15',
1536:'3e 34 13 5c 79 db 01 02 00 76 76 51 cf 26 30 73',
2032:'f6 56 ab cc f8 8d d8 27 02 7b 2c e9 17 d4 64 ec',
2048:'18 b6 25 03 bf bc 07 7f ba bb 98 f2 0d 98 ab 34',
3056:'8a ed 95 ee 5b 0d cb fb ef 4e b2 1d 3a 3f 52 f9',
3072:'62 5a 1a b0 0e e3 9a 53 27 34 6b dd b0 1a 9c 18',
4080:'a1 3a 7c 79 c7 e1 19 b5 ab 02 96 ab 28 c3 00 b9',
4096:'f3 e4 c0 a2 e0 2d 1d 01 f7 f0 a7 46 18 af 2b 48'
}
),
# Page 7
(
'833222772a',
{
0: '80 ad 97 bd c9 73 df 8a 2e 87 9e 92 a4 97 ef da',
16: '20 f0 60 c2 f2 e5 12 65 01 d3 d4 fe a1 0d 5f c0',
240: 'fa a1 48 e9 90 46 18 1f ec 6b 20 85 f3 b2 0e d9',
256: 'f0 da f5 ba b3 d5 96 83 98 57 84 6f 73 fb fe 5a',
496: '1c 7e 2f c4 63 92 32 fe 29 75 84 b2 96 99 6b c8',
512: '3d b9 b2 49 40 6c c8 ed ff ac 55 cc d3 22 ba 12',
752: 'e4 f9 f7 e0 06 61 54 bb d1 25 b7 45 56 9b c8 97',
768: '75 d5 ef 26 2b 44 c4 1a 9c f6 3a e1 45 68 e1 b9',
1008:'6d a4 53 db f8 1e 82 33 4a 3d 88 66 cb 50 a1 e3',
1024:'78 28 d0 74 11 9c ab 5c 22 b2 94 d7 a9 bf a0 bb',
1520:'ad b8 9c ea 9a 15 fb e6 17 29 5b d0 4b 8c a0 5c',
1536:'62 51 d8 7f d4 aa ae 9a 7e 4a d5 c2 17 d3 f3 00',
2032:'e7 11 9b d6 dd 9b 22 af e8 f8 95 85 43 28 81 e2',
2048:'78 5b 60 fd 7e c4 e9 fc b6 54 5f 35 0d 66 0f ab',
3056:'af ec c0 37 fd b7 b0 83 8e b3 d7 0b cd 26 83 82',
3072:'db c1 a7 b4 9d 57 35 8c c9 fa 6d 61 d7 3b 7c f0',
4080:'63 49 d1 26 a3 7a fc ba 89 79 4f 98 04 91 4f dc',
4096:'bf 42 c3 01 8c 2f 7c 66 bf de 52 49 75 76 81 15'
}
),
(
'1910833222772a',
{
0: 'bc 92 22 db d3 27 4d 8f c6 6d 14 cc bd a6 69 0b',
16: '7a e6 27 41 0c 9a 2b e6 93 df 5b b7 48 5a 63 e3',
240: '3f 09 31 aa 03 de fb 30 0f 06 01 03 82 6f 2a 64',
256: 'be aa 9e c8 d5 9b b6 81 29 f3 02 7c 96 36 11 81',
496: '74 e0 4d b4 6d 28 64 8d 7d ee 8a 00 64 b0 6c fe',
512: '9b 5e 81 c6 2f e0 23 c5 5b e4 2f 87 bb f9 32 b8',
752: 'ce 17 8f c1 82 6e fe cb c1 82 f5 79 99 a4 61 40',
768: '8b df 55 cd 55 06 1c 06 db a6 be 11 de 4a 57 8a',
1008:'62 6f 5f 4d ce 65 25 01 f3 08 7d 39 c9 2c c3 49',
1024:'42 da ac 6a 8f 9a b9 a7 fd 13 7c 60 37 82 56 82',
1520:'cc 03 fd b7 91 92 a2 07 31 2f 53 f5 d4 dc 33 d9',
1536:'f7 0f 14 12 2a 1c 98 a3 15 5d 28 b8 a0 a8 a4 1d',
2032:'2a 3a 30 7a b2 70 8a 9c 00 fe 0b 42 f9 c2 d6 a1',
2048:'86 26 17 62 7d 22 61 ea b0 b1 24 65 97 ca 0a e9',
3056:'55 f8 77 ce 4f 2e 1d db bf 8e 13 e2 cd e0 fd c8',
3072:'1b 15 56 cb 93 5f 17 33 37 70 5f bb 5d 50 1f c1',
4080:'ec d0 e9 66 02 be 7f 8d 50 92 81 6c cc f2 c2 e9',
4096:'02 78 81 fa b4 99 3a 1c 26 20 24 a9 4f ff 3f 61'
}
),
# Page 8
(
'641910833222772a',
{
0: 'bb f6 09 de 94 13 17 2d 07 66 0c b6 80 71 69 26',
16: '46 10 1a 6d ab 43 11 5d 6c 52 2b 4f e9 36 04 a9',
240: 'cb e1 ff f2 1c 96 f3 ee f6 1e 8f e0 54 2c bd f0',
256: '34 79 38 bf fa 40 09 c5 12 cf b4 03 4b 0d d1 a7',
496: '78 67 a7 86 d0 0a 71 47 90 4d 76 dd f1 e5 20 e3',
512: '8d 3e 9e 1c ae fc cc b3 fb f8 d1 8f 64 12 0b 32',
752: '94 23 37 f8 fd 76 f0 fa e8 c5 2d 79 54 81 06 72',
768: 'b8 54 8c 10 f5 16 67 f6 e6 0e 18 2f a1 9b 30 f7',
1008:'02 11 c7 c6 19 0c 9e fd 12 37 c3 4c 8f 2e 06 c4',
1024:'bd a6 4f 65 27 6d 2a ac b8 f9 02 12 20 3a 80 8e',
1520:'bd 38 20 f7 32 ff b5 3e c1 93 e7 9d 33 e2 7c 73',
1536:'d0 16 86 16 86 19 07 d4 82 e3 6c da c8 cf 57 49',
2032:'97 b0 f0 f2 24 b2 d2 31 71 14 80 8f b0 3a f7 a0',
2048:'e5 96 16 e4 69 78 79 39 a0 63 ce ea 9a f9 56 d1',
3056:'c4 7e 0d c1 66 09 19 c1 11 01 20 8f 9e 69 aa 1f',
3072:'5a e4 f1 28 96 b8 37 9a 2a ad 89 b5 b5 53 d6 b0',
4080:'6b 6b 09 8d 0c 29 3b c2 99 3d 80 bf 05 18 b6 d9',
4096:'81 70 cc 3c cd 92 a6 98 62 1b 93 9d d3 8f e7 b9'
}
),
(
'8b37641910833222772a',
{
0: 'ab 65 c2 6e dd b2 87 60 0d b2 fd a1 0d 1e 60 5c',
16: 'bb 75 90 10 c2 96 58 f2 c7 2d 93 a2 d1 6d 29 30',
240: 'b9 01 e8 03 6e d1 c3 83 cd 3c 4c 4d d0 a6 ab 05',
256: '3d 25 ce 49 22 92 4c 55 f0 64 94 33 53 d7 8a 6c',
496: '12 c1 aa 44 bb f8 7e 75 e6 11 f6 9b 2c 38 f4 9b',
512: '28 f2 b3 43 4b 65 c0 98 77 47 00 44 c6 ea 17 0d',
752: 'bd 9e f8 22 de 52 88 19 61 34 cf 8a f7 83 93 04',
768: '67 55 9c 23 f0 52 15 84 70 a2 96 f7 25 73 5a 32',
1008:'8b ab 26 fb c2 c1 2b 0f 13 e2 ab 18 5e ab f2 41',
1024:'31 18 5a 6d 69 6f 0c fa 9b 42 80 8b 38 e1 32 a2',
1520:'56 4d 3d ae 18 3c 52 34 c8 af 1e 51 06 1c 44 b5',
1536:'3c 07 78 a7 b5 f7 2d 3c 23 a3 13 5c 7d 67 b9 f4',
2032:'f3 43 69 89 0f cf 16 fb 51 7d ca ae 44 63 b2 dd',
2048:'02 f3 1c 81 e8 20 07 31 b8 99 b0 28 e7 91 bf a7',
3056:'72 da 64 62 83 22 8c 14 30 08 53 70 17 95 61 6f',
3072:'4e 0a 8c 6f 79 34 a7 88 e2 26 5e 81 d6 d0 c8 f4',
4080:'43 8d d5 ea fe a0 11 1b 6f 36 b4 b9 38 da 2a 68',
4096:'5f 6b fc 73 81 58 74 d9 71 00 f0 86 97 93 57 d8'
}
),
# Page 9
(
'ebb46227c6cc8b37641910833222772a',
{
0: '72 0c 94 b6 3e df 44 e1 31 d9 50 ca 21 1a 5a 30',
16: 'c3 66 fd ea cf 9c a8 04 36 be 7c 35 84 24 d2 0b',
240: 'b3 39 4a 40 aa bf 75 cb a4 22 82 ef 25 a0 05 9f',
256: '48 47 d8 1d a4 94 2d bc 24 9d ef c4 8c 92 2b 9f',
496: '08 12 8c 46 9f 27 53 42 ad da 20 2b 2b 58 da 95',
512: '97 0d ac ef 40 ad 98 72 3b ac 5d 69 55 b8 17 61',
752: '3c b8 99 93 b0 7b 0c ed 93 de 13 d2 a1 10 13 ac',
768: 'ef 2d 67 6f 15 45 c2 c1 3d c6 80 a0 2f 4a db fe',
1008:'b6 05 95 51 4f 24 bc 9f e5 22 a6 ca d7 39 36 44',
1024:'b5 15 a8 c5 01 17 54 f5 90 03 05 8b db 81 51 4e',
1520:'3c 70 04 7e 8c bc 03 8e 3b 98 20 db 60 1d a4 95',
1536:'11 75 da 6e e7 56 de 46 a5 3e 2b 07 56 60 b7 70',
2032:'00 a5 42 bb a0 21 11 cc 2c 65 b3 8e bd ba 58 7e',
2048:'58 65 fd bb 5b 48 06 41 04 e8 30 b3 80 f2 ae de',
3056:'34 b2 1a d2 ad 44 e9 99 db 2d 7f 08 63 f0 d9 b6',
3072:'84 a9 21 8f c3 6e 8a 5f 2c cf be ae 53 a2 7d 25',
4080:'a2 22 1a 11 b8 33 cc b4 98 a5 95 40 f0 54 5f 4a',
4096:'5b be b4 78 7d 59 e5 37 3f db ea 6c 6f 75 c2 9b'
}
),
(
'c109163908ebe51debb46227c6cc8b37641910833222772a',
{
0: '54 b6 4e 6b 5a 20 b5 e2 ec 84 59 3d c7 98 9d a7',
16: 'c1 35 ee e2 37 a8 54 65 ff 97 dc 03 92 4f 45 ce',
240: 'cf cc 92 2f b4 a1 4a b4 5d 61 75 aa bb f2 d2 01',
256: '83 7b 87 e2 a4 46 ad 0e f7 98 ac d0 2b 94 12 4f',
496: '17 a6 db d6 64 92 6a 06 36 b3 f4 c3 7a 4f 46 94',
512: '4a 5f 9f 26 ae ee d4 d4 a2 5f 63 2d 30 52 33 d9',
752: '80 a3 d0 1e f0 0c 8e 9a 42 09 c1 7f 4e eb 35 8c',
768: 'd1 5e 7d 5f fa aa bc 02 07 bf 20 0a 11 77 93 a2',
1008:'34 96 82 bf 58 8e aa 52 d0 aa 15 60 34 6a ea fa',
1024:'f5 85 4c db 76 c8 89 e3 ad 63 35 4e 5f 72 75 e3',
1520:'53 2c 7c ec cb 39 df 32 36 31 84 05 a4 b1 27 9c',
1536:'ba ef e6 d9 ce b6 51 84 22 60 e0 d1 e0 5e 3b 90',
2032:'e8 2d 8c 6d b5 4e 3c 63 3f 58 1c 95 2b a0 42 07',
2048:'4b 16 e5 0a bd 38 1b d7 09 00 a9 cd 9a 62 cb 23',
3056:'36 82 ee 33 bd 14 8b d9 f5 86 56 cd 8f 30 d9 fb',
3072:'1e 5a 0b 84 75 04 5d 9b 20 b2 62 86 24 ed fd 9e',
4080:'63 ed d6 84 fb 82 62 82 fe 52 8f 9c 0e 92 37 bc',
4096:'e4 dd 2e 98 d6 96 0f ae 0b 43 54 54 56 74 33 91'
}
),
# Page 10
(
'1ada31d5cf688221c109163908ebe51debb46227c6cc8b37641910833222772a',
{
0: 'dd 5b cb 00 18 e9 22 d4 94 75 9d 7c 39 5d 02 d3',
16: 'c8 44 6f 8f 77 ab f7 37 68 53 53 eb 89 a1 c9 eb',
240: 'af 3e 30 f9 c0 95 04 59 38 15 15 75 c3 fb 90 98',
256: 'f8 cb 62 74 db 99 b8 0b 1d 20 12 a9 8e d4 8f 0e',
496: '25 c3 00 5a 1c b8 5d e0 76 25 98 39 ab 71 98 ab',
512: '9d cb c1 83 e8 cb 99 4b 72 7b 75 be 31 80 76 9c',
752: 'a1 d3 07 8d fa 91 69 50 3e d9 d4 49 1d ee 4e b2',
768: '85 14 a5 49 58 58 09 6f 59 6e 4b cd 66 b1 06 65',
1008:'5f 40 d5 9e c1 b0 3b 33 73 8e fa 60 b2 25 5d 31',
1024:'34 77 c7 f7 64 a4 1b ac ef f9 0b f1 4f 92 b7 cc',
1520:'ac 4e 95 36 8d 99 b9 eb 78 b8 da 8f 81 ff a7 95',
1536:'8c 3c 13 f8 c2 38 8b b7 3f 38 57 6e 65 b7 c4 46',
2032:'13 c4 b9 c1 df b6 65 79 ed dd 8a 28 0b 9f 73 16',
2048:'dd d2 78 20 55 01 26 69 8e fa ad c6 4b 64 f6 6e',
3056:'f0 8f 2e 66 d2 8e d1 43 f3 a2 37 cf 9d e7 35 59',
3072:'9e a3 6c 52 55 31 b8 80 ba 12 43 34 f5 7b 0b 70',
4080:'d5 a3 9e 3d fc c5 02 80 ba c4 a6 b5 aa 0d ca 7d',
4096:'37 0b 1c 1f e6 55 91 6d 97 fd 0d 47 ca 1d 72 b8'
}
)
]
def test_keystream(self):
for tv in self.rfc6229_data:
key = unhexlify(b((tv[0])))
cipher = ARC4.new(key)
count = 0
for offset in range(0,4096+1,16):
ct = cipher.encrypt(b('\x00')*16)
expected = tv[1].get(offset)
if expected:
expected = unhexlify(b(expected.replace(" ",'')))
                    self.assertEqual(ct, expected)
count += 1
self.assertEqual(count, len(tv[1]))
class Drop_Tests(unittest.TestCase):
key = b('\xAA')*16
data = b('\x00')*5000
def setUp(self):
self.cipher = ARC4.new(self.key)
def test_drop256_encrypt(self):
cipher_drop = ARC4.new(self.key, 256)
ct_drop = cipher_drop.encrypt(self.data[:16])
ct = self.cipher.encrypt(self.data)[256:256+16]
        self.assertEqual(ct_drop, ct)
def test_drop256_decrypt(self):
cipher_drop = ARC4.new(self.key, 256)
pt_drop = cipher_drop.decrypt(self.data[:16])
pt = self.cipher.decrypt(self.data)[256:256+16]
        self.assertEqual(pt_drop, pt)
class KeyLength(unittest.TestCase):
def runTest(self):
self.assertRaises(ValueError, ARC4.new, bchr(0) * 4)
self.assertRaises(ValueError, ARC4.new, bchr(0) * 257)
def get_tests(config={}):
from .common import make_stream_tests
tests = make_stream_tests(ARC4, "ARC4", test_data)
tests += list_test_cases(RFC6229_Tests)
tests += list_test_cases(Drop_Tests)
tests.append(KeyLength())
return tests
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
|
import threading
from ctypes import POINTER, Structure, byref, c_char, c_char_p, c_int, c_size_t
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_sized_string, check_string,
)
from django.contrib.gis.geos.prototypes.geom import c_uchar_p, geos_char_p
from django.utils import six
from django.utils.encoding import force_bytes
# ### The WKB/WKT Reader/Writer structures and pointers ###
class WKTReader_st(Structure):
pass
class WKTWriter_st(Structure):
pass
class WKBReader_st(Structure):
pass
class WKBWriter_st(Structure):
pass
WKT_READ_PTR = POINTER(WKTReader_st)
WKT_WRITE_PTR = POINTER(WKTWriter_st)
WKB_READ_PTR = POINTER(WKBReader_st)
WKB_WRITE_PTR = POINTER(WKBWriter_st)
# WKTReader routines
wkt_reader_create = GEOSFuncFactory('GEOSWKTReader_create', restype=WKT_READ_PTR)
wkt_reader_destroy = GEOSFuncFactory('GEOSWKTReader_destroy', argtypes=[WKT_READ_PTR])
wkt_reader_read = GEOSFuncFactory(
'GEOSWKTReader_read', argtypes=[WKT_READ_PTR, c_char_p], restype=GEOM_PTR, errcheck=check_geom
)
# WKTWriter routines
wkt_writer_create = GEOSFuncFactory('GEOSWKTWriter_create', restype=WKT_WRITE_PTR)
wkt_writer_destroy = GEOSFuncFactory('GEOSWKTWriter_destroy', argtypes=[WKT_WRITE_PTR])
wkt_writer_write = GEOSFuncFactory(
'GEOSWKTWriter_write', argtypes=[WKT_WRITE_PTR, GEOM_PTR], restype=geos_char_p, errcheck=check_string
)
wkt_writer_get_outdim = GEOSFuncFactory(
'GEOSWKTWriter_getOutputDimension', argtypes=[WKT_WRITE_PTR], restype=c_int
)
wkt_writer_set_outdim = GEOSFuncFactory(
'GEOSWKTWriter_setOutputDimension', argtypes=[WKT_WRITE_PTR, c_int]
)
wkt_writer_set_trim = GEOSFuncFactory('GEOSWKTWriter_setTrim', argtypes=[WKT_WRITE_PTR, c_char])
wkt_writer_set_precision = GEOSFuncFactory('GEOSWKTWriter_setRoundingPrecision', argtypes=[WKT_WRITE_PTR, c_int])
# WKBReader routines
wkb_reader_create = GEOSFuncFactory('GEOSWKBReader_create', restype=WKB_READ_PTR)
wkb_reader_destroy = GEOSFuncFactory('GEOSWKBReader_destroy', argtypes=[WKB_READ_PTR])
class WKBReadFunc(GEOSFuncFactory):
# Although the function definitions take `const unsigned char *`
# as their parameter, we use c_char_p here so the function may
    # take Python strings directly as parameters. Inside Python there
    # is no difference between signed and unsigned characters, so
    # it is not a problem.
argtypes = [WKB_READ_PTR, c_char_p, c_size_t]
restype = GEOM_PTR
errcheck = staticmethod(check_geom)
wkb_reader_read = WKBReadFunc('GEOSWKBReader_read')
wkb_reader_read_hex = WKBReadFunc('GEOSWKBReader_readHEX')
# WKBWriter routines
wkb_writer_create = GEOSFuncFactory('GEOSWKBWriter_create', restype=WKB_WRITE_PTR)
wkb_writer_destroy = GEOSFuncFactory('GEOSWKBWriter_destroy', argtypes=[WKB_WRITE_PTR])
# WKB Writing prototypes.
class WKBWriteFunc(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, GEOM_PTR, POINTER(c_size_t)]
restype = c_uchar_p
errcheck = staticmethod(check_sized_string)
wkb_writer_write = WKBWriteFunc('GEOSWKBWriter_write')
wkb_writer_write_hex = WKBWriteFunc('GEOSWKBWriter_writeHEX')
# WKBWriter property getter/setter prototypes.
class WKBWriterGet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR]
restype = c_int
class WKBWriterSet(GEOSFuncFactory):
argtypes = [WKB_WRITE_PTR, c_int]
wkb_writer_get_byteorder = WKBWriterGet('GEOSWKBWriter_getByteOrder')
wkb_writer_set_byteorder = WKBWriterSet('GEOSWKBWriter_setByteOrder')
wkb_writer_get_outdim = WKBWriterGet('GEOSWKBWriter_getOutputDimension')
wkb_writer_set_outdim = WKBWriterSet('GEOSWKBWriter_setOutputDimension')
wkb_writer_get_include_srid = WKBWriterGet('GEOSWKBWriter_getIncludeSRID', restype=c_char)
wkb_writer_set_include_srid = WKBWriterSet('GEOSWKBWriter_setIncludeSRID', argtypes=[WKB_WRITE_PTR, c_char])
# ### Base I/O Class ###
class IOBase(GEOSBase):
"Base class for GEOS I/O objects."
def __init__(self):
# Getting the pointer with the constructor.
self.ptr = self._constructor()
# Loading the real destructor function at this point as doing it in
# __del__ is too late (import error).
self.destructor.func = self.destructor.get_func(
*self.destructor.args, **self.destructor.kwargs
)
# ### Base WKB/WKT Reading and Writing objects ###
# Non-public WKB/WKT reader classes for internal use because
# their `read` methods return _pointers_ instead of GEOSGeometry
# objects.
class _WKTReader(IOBase):
_constructor = wkt_reader_create
ptr_type = WKT_READ_PTR
destructor = wkt_reader_destroy
def read(self, wkt):
if not isinstance(wkt, (bytes, six.string_types)):
raise TypeError
return wkt_reader_read(self.ptr, force_bytes(wkt))
class _WKBReader(IOBase):
_constructor = wkb_reader_create
ptr_type = WKB_READ_PTR
destructor = wkb_reader_destroy
def read(self, wkb):
"Returns a _pointer_ to C GEOS Geometry object from the given WKB."
if isinstance(wkb, six.memoryview):
wkb_s = bytes(wkb)
return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
elif isinstance(wkb, (bytes, six.string_types)):
return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
else:
raise TypeError
# ### WKB/WKT Writer Classes ###
class WKTWriter(IOBase):
_constructor = wkt_writer_create
ptr_type = WKT_WRITE_PTR
destructor = wkt_writer_destroy
_trim = False
_precision = None
def __init__(self, dim=2, trim=False, precision=None):
super(WKTWriter, self).__init__()
if bool(trim) != self._trim:
self.trim = trim
if precision is not None:
self.precision = precision
self.outdim = dim
def write(self, geom):
"Returns the WKT representation of the given geometry."
return wkt_writer_write(self.ptr, geom.ptr)
@property
def outdim(self):
return wkt_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKT output dimension must be 2 or 3')
wkt_writer_set_outdim(self.ptr, new_dim)
@property
def trim(self):
return self._trim
@trim.setter
def trim(self, flag):
if bool(flag) != self._trim:
self._trim = bool(flag)
wkt_writer_set_trim(self.ptr, b'\x01' if flag else b'\x00')
@property
def precision(self):
return self._precision
@precision.setter
def precision(self, precision):
if (not isinstance(precision, int) or precision < 0) and precision is not None:
            raise AttributeError('WKT output rounding precision must be a non-negative integer or None.')
if precision != self._precision:
self._precision = precision
wkt_writer_set_precision(self.ptr, -1 if precision is None else precision)
class WKBWriter(IOBase):
_constructor = wkb_writer_create
ptr_type = WKB_WRITE_PTR
destructor = wkb_writer_destroy
def __init__(self, dim=2):
super(WKBWriter, self).__init__()
self.outdim = dim
def _handle_empty_point(self, geom):
from django.contrib.gis.geos import Point
if isinstance(geom, Point) and geom.empty:
if self.srid:
# PostGIS uses POINT(NaN NaN) for WKB representation of empty
# points. Use it for EWKB as it's a PostGIS specific format.
# https://trac.osgeo.org/postgis/ticket/3181
geom = Point(float('NaN'), float('NaN'), srid=geom.srid)
else:
raise ValueError('Empty point is not representable in WKB.')
return geom
def write(self, geom):
"Returns the WKB representation of the given geometry."
from django.contrib.gis.geos import Polygon
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write(self.ptr, geom.ptr, byref(c_size_t()))
if isinstance(geom, Polygon) and geom.empty:
# Fix GEOS output for empty polygon.
# See https://trac.osgeo.org/geos/ticket/680.
wkb = wkb[:-8] + b'\0' * 4
return six.memoryview(wkb)
def write_hex(self, geom):
"Returns the HEXEWKB representation of the given geometry."
from django.contrib.gis.geos.polygon import Polygon
geom = self._handle_empty_point(geom)
wkb = wkb_writer_write_hex(self.ptr, geom.ptr, byref(c_size_t()))
if isinstance(geom, Polygon) and geom.empty:
wkb = wkb[:-16] + b'0' * 8
return wkb
# ### WKBWriter Properties ###
# Property for getting/setting the byteorder.
def _get_byteorder(self):
return wkb_writer_get_byteorder(self.ptr)
def _set_byteorder(self, order):
if order not in (0, 1):
raise ValueError('Byte order parameter must be 0 (Big Endian) or 1 (Little Endian).')
wkb_writer_set_byteorder(self.ptr, order)
byteorder = property(_get_byteorder, _set_byteorder)
# Property for getting/setting the output dimension.
@property
def outdim(self):
return wkb_writer_get_outdim(self.ptr)
@outdim.setter
def outdim(self, new_dim):
if new_dim not in (2, 3):
raise ValueError('WKB output dimension must be 2 or 3')
wkb_writer_set_outdim(self.ptr, new_dim)
# Property for getting/setting the include srid flag.
@property
def srid(self):
return bool(ord(wkb_writer_get_include_srid(self.ptr)))
@srid.setter
def srid(self, include):
if include:
flag = b'\x01'
else:
flag = b'\x00'
wkb_writer_set_include_srid(self.ptr, flag)
# `ThreadLocalIO` object holds instances of the WKT and WKB reader/writer
# objects that are local to the thread. The `GEOSGeometry` internals
# access these instances by calling the module-level functions, defined
# below.
class ThreadLocalIO(threading.local):
wkt_r = None
wkt_w = None
wkb_r = None
wkb_w = None
ewkb_w = None
thread_context = ThreadLocalIO()
# These module-level routines return the I/O object that is local to the
# thread. If the I/O object does not exist yet it will be initialized.
def wkt_r():
if not thread_context.wkt_r:
thread_context.wkt_r = _WKTReader()
return thread_context.wkt_r
def wkt_w(dim=2, trim=False, precision=None):
if not thread_context.wkt_w:
thread_context.wkt_w = WKTWriter(dim=dim, trim=trim, precision=precision)
else:
thread_context.wkt_w.outdim = dim
thread_context.wkt_w.trim = trim
thread_context.wkt_w.precision = precision
return thread_context.wkt_w
def wkb_r():
if not thread_context.wkb_r:
thread_context.wkb_r = _WKBReader()
return thread_context.wkb_r
def wkb_w(dim=2):
if not thread_context.wkb_w:
thread_context.wkb_w = WKBWriter(dim=dim)
else:
thread_context.wkb_w.outdim = dim
return thread_context.wkb_w
def ewkb_w(dim=2):
if not thread_context.ewkb_w:
thread_context.ewkb_w = WKBWriter(dim=dim)
thread_context.ewkb_w.srid = True
else:
thread_context.ewkb_w.outdim = dim
return thread_context.ewkb_w
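# Example usage (a rough sketch; ``geom`` stands for any GEOSGeometry and the
# keyword values shown are illustrative, not required defaults):
#
#     wkt = wkt_w(dim=2, trim=True).write(geom)   # bytes, e.g. b'POINT (1 2)'
#     wkb = wkb_w(dim=2).write(geom)              # memoryview of the WKB
#     hexewkb = ewkb_w(dim=3).write_hex(geom)     # hex WKB with the SRID set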
|
|
import logging
import os
from botocore import model
from botocore.compat import OrderedDict
from botocore.validate import validate_parameters
from botocore.docs.bcdoc import docevents
import awscli
from awscli.argparser import ArgTableArgParser
from awscli.argprocess import unpack_argument, unpack_cli_arg
from awscli.arguments import CustomArgument, create_argument_model_from_schema
from awscli.clidocs import OperationDocumentEventHandler
from awscli.clidriver import CLICommand
from awscli.help import HelpCommand
from awscli.schema import SchemaTransformer
LOG = logging.getLogger(__name__)
_open = open
class _FromFile(object):
def __init__(self, *paths, **kwargs):
"""
``**kwargs`` can contain a ``root_module`` argument
that contains the root module where the file contents
should be searched. This is an optional argument, and if
no value is provided, will default to ``awscli``. This means
that by default we look for examples in the ``awscli`` module.
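
        For example, a command shipped outside of awscli might use
        (an illustrative sketch with hypothetical names)::

            DESCRIPTION = BasicCommand.FROM_FILE(
                'mycommand', '_description.rst', root_module=my_plugin)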
"""
self.filename = None
if paths:
self.filename = os.path.join(*paths)
if 'root_module' in kwargs:
self.root_module = kwargs['root_module']
else:
self.root_module = awscli
class BasicCommand(CLICommand):
"""Basic top level command with no subcommands.
If you want to create a new command, subclass this and
provide the values documented below.
"""
# This is the name of your command, so if you want to
# create an 'aws mycommand ...' command, the NAME would be
# 'mycommand'
NAME = 'commandname'
# This is the description that will be used for the 'help'
# command.
DESCRIPTION = 'describe the command'
# This is optional. If you are fine with the default synopsis
# (the way all the built-in operations are documented) then you
# can leave this empty.
SYNOPSIS = ''
# If you want to provide some handwritten examples, you can do
# so here. This is written in RST format. This is optional;
# you don't have to provide any examples, though doing so is highly encouraged!
EXAMPLES = ''
# If your command has arguments, you can specify them here. This is
# somewhat of an implementation detail, but this is a list of dicts
# where the dicts match the kwargs of the CustomArgument's __init__.
# For example, if I want to add '--argument-one' and
# '--argument-two' arguments, I'd say:
#
# ARG_TABLE = [
# {'name': 'argument-one', 'help_text': 'This argument does foo bar.',
# 'action': 'store', 'required': False, 'cli_type_name': 'string',},
# {'name': 'argument-two', 'help_text': 'This argument does some other thing.',
# 'action': 'store', 'choices': ['a', 'b', 'c']},
# ]
#
# A `schema` parameter option is available to accept a custom JSON
# structure as input. See the file `awscli/schema.py` for more info.
ARG_TABLE = []
# If you want the command to have subcommands, you can provide a list of
# dicts. We use a list here so that the user can control the order
# in which the subcommands appear.
# SUBCOMMANDS = [
# {'name': 'subcommand1', 'command_class': SubcommandClass},
# {'name': 'subcommand2', 'command_class': SubcommandClass2},
# ]
# The command_class must subclass from ``BasicCommand``.
SUBCOMMANDS = []
FROM_FILE = _FromFile
# You can set the DESCRIPTION, SYNOPSIS, and EXAMPLES to FROM_FILE
# and we'll automatically read in that data from the file.
# This is useful if you have a lot of content and would prefer to keep
# the docs out of the class definition. For example:
#
# DESCRIPTION = FROM_FILE
#
# will set the DESCRIPTION value to the contents of
# awscli/examples/<command name>/_description.rst
# The naming conventions for these attributes are:
#
# DESCRIPTION = awscli/examples/<command name>/_description.rst
# SYNOPSIS = awscli/examples/<command name>/_synopsis.rst
# EXAMPLES = awscli/examples/<command name>/_examples.rst
#
# You can also provide a relative path and we'll load the file
# from the specified location:
#
# DESCRIPTION = awscli/examples/<filename>
#
# For example:
#
# DESCRIPTION = FROM_FILE('command', 'subcommand', '_description.rst')
# DESCRIPTION = 'awscli/examples/command/subcommand/_description.rst'
#
# At this point, the only other thing you have to implement is a _run_main
# method (see the method for more information, and the illustrative
# subclass sketch at the end of this module).
def __init__(self, session):
self._session = session
self._arg_table = None
self._subcommand_table = None
self._lineage = [self]
def __call__(self, args, parsed_globals):
# args is the remaining unparsed args.
# We might be able to parse these args so we need to create
# an arg parser and parse them.
self._subcommand_table = self._build_subcommand_table()
self._arg_table = self._build_arg_table()
event = 'before-building-argument-table-parser.%s' % \
".".join(self.lineage_names)
self._session.emit(event, argument_table=self._arg_table, args=args,
session=self._session)
parser = ArgTableArgParser(self.arg_table, self.subcommand_table)
parsed_args, remaining = parser.parse_known_args(args)
# Unpack arguments
for key, value in vars(parsed_args).items():
cli_argument = None
# Convert the name to use dashes instead of underscores,
# as that is how the parameters are stored in the
# `arg_table`.
xformed = key.replace('_', '-')
if xformed in self.arg_table:
cli_argument = self.arg_table[xformed]
value = unpack_argument(
self._session,
'custom',
self.name,
cli_argument,
value
)
# If this parameter has a schema defined, then allow plugins
# a chance to process and override its value.
if self._should_allow_plugins_override(cli_argument, value):
override = self._session\
.emit_first_non_none_response(
'process-cli-arg.%s.%s' % ('custom', self.name),
cli_argument=cli_argument, value=value, operation=None)
if override is not None:
# A plugin supplied a conversion
value = override
else:
# Unpack the argument, which is a string, into the
# correct Python type (dict, list, etc)
value = unpack_cli_arg(cli_argument, value)
self._validate_value_against_schema(
cli_argument.argument_model, value)
setattr(parsed_args, key, value)
if hasattr(parsed_args, 'help'):
self._display_help(parsed_args, parsed_globals)
elif getattr(parsed_args, 'subcommand', None) is None:
# No subcommand was specified so call the main
# function for this top level command.
if remaining:
raise ValueError("Unknown options: %s" % ','.join(remaining))
return self._run_main(parsed_args, parsed_globals)
else:
return self.subcommand_table[parsed_args.subcommand](remaining,
parsed_globals)
def _validate_value_against_schema(self, model, value):
validate_parameters(value, model)
def _should_allow_plugins_override(self, param, value):
if (param and param.argument_model is not None and
value is not None):
return True
return False
def _run_main(self, parsed_args, parsed_globals):
# Subclasses should implement this method.
# parsed_globals are the parsed global args (things like region,
# profile, output, etc.)
# parsed_args are any arguments you've defined in your ARG_TABLE
# that are parsed. These will come through as whatever you've
# provided as the 'dest' key. Otherwise they default to the
# 'name' key. For example: ARG_TABLE[0] = {"name": "foo-arg", ...}
# can be accessed by ``parsed_args.foo_arg``.
raise NotImplementedError("_run_main")
def _build_subcommand_table(self):
subcommand_table = OrderedDict()
for subcommand in self.SUBCOMMANDS:
subcommand_name = subcommand['name']
subcommand_class = subcommand['command_class']
subcommand_table[subcommand_name] = subcommand_class(self._session)
self._session.emit('building-command-table.%s' % self.NAME,
command_table=subcommand_table,
session=self._session,
command_object=self)
self._add_lineage(subcommand_table)
return subcommand_table
def _display_help(self, parsed_args, parsed_globals):
help_command = self.create_help_command()
help_command(parsed_args, parsed_globals)
def create_help_command(self):
command_help_table = {}
if self.SUBCOMMANDS:
command_help_table = self.create_help_command_table()
return BasicHelp(self._session, self, command_table=command_help_table,
arg_table=self.arg_table)
def create_help_command_table(self):
"""
Create the command table in a form that can be handled by the
BasicDocHandler.
"""
commands = {}
for command in self.SUBCOMMANDS:
commands[command['name']] = command['command_class'](self._session)
self._add_lineage(commands)
return commands
def _build_arg_table(self):
arg_table = OrderedDict()
self._session.emit('building-arg-table.%s' % self.NAME,
arg_table=self.ARG_TABLE)
for arg_data in self.ARG_TABLE:
# If a custom schema was passed in, create the argument_model
# so that it can be validated and docs can be generated.
if 'schema' in arg_data:
argument_model = create_argument_model_from_schema(
arg_data.pop('schema'))
arg_data['argument_model'] = argument_model
custom_argument = CustomArgument(**arg_data)
arg_table[arg_data['name']] = custom_argument
return arg_table
def _add_lineage(self, command_table):
for command in command_table:
command_obj = command_table[command]
command_obj.lineage = self.lineage + [command_obj]
@property
def arg_table(self):
if self._arg_table is None:
self._arg_table = self._build_arg_table()
return self._arg_table
@property
def subcommand_table(self):
if self._subcommand_table is None:
self._subcommand_table = self._build_subcommand_table()
return self._subcommand_table
@classmethod
def add_command(cls, command_table, session, **kwargs):
command_table[cls.NAME] = cls(session)
@property
def name(self):
return self.NAME
@property
def lineage(self):
return self._lineage
@lineage.setter
def lineage(self, value):
self._lineage = value
class BasicHelp(HelpCommand):
def __init__(self, session, command_object, command_table, arg_table,
event_handler_class=None):
super(BasicHelp, self).__init__(session, command_object,
command_table, arg_table)
# This is defined in HelpCommand so we're matching the
# casing here.
if event_handler_class is None:
event_handler_class = BasicDocHandler
self.EventHandlerClass = event_handler_class
# These are public attributes that are mapped from the command
# object. These are used by the BasicDocHandler below.
self._description = command_object.DESCRIPTION
self._synopsis = command_object.SYNOPSIS
self._examples = command_object.EXAMPLES
@property
def name(self):
return self.obj.NAME
@property
def description(self):
return self._get_doc_contents('_description')
@property
def synopsis(self):
return self._get_doc_contents('_synopsis')
@property
def examples(self):
return self._get_doc_contents('_examples')
@property
def event_class(self):
return '.'.join(self.obj.lineage_names)
def _get_doc_contents(self, attr_name):
value = getattr(self, attr_name)
if isinstance(value, BasicCommand.FROM_FILE):
if value.filename is not None:
trailing_path = value.filename
else:
trailing_path = os.path.join(self.name, attr_name + '.rst')
root_module = value.root_module
doc_path = os.path.join(
os.path.abspath(os.path.dirname(root_module.__file__)),
'examples', trailing_path)
with _open(doc_path) as f:
return f.read()
else:
return value
def __call__(self, args, parsed_globals):
# Create an event handler for a Provider Document
instance = self.EventHandlerClass(self)
# Now generate all of the events for a Provider document.
# We pass ourselves along so that we can, in turn, get passed
# to all event handlers.
docevents.generate_events(self.session, self)
self.renderer.render(self.doc.getvalue())
instance.unregister()
class BasicDocHandler(OperationDocumentEventHandler):
def __init__(self, help_command):
super(BasicDocHandler, self).__init__(help_command)
self.doc = help_command.doc
def build_translation_map(self):
return {}
def doc_description(self, help_command, **kwargs):
self.doc.style.h2('Description')
self.doc.write(help_command.description)
self.doc.style.new_paragraph()
def doc_synopsis_start(self, help_command, **kwargs):
if not help_command.synopsis:
super(BasicDocHandler, self).doc_synopsis_start(
help_command=help_command, **kwargs)
else:
self.doc.style.h2('Synopsis')
self.doc.style.start_codeblock()
self.doc.writeln(help_command.synopsis)
def doc_synopsis_option(self, arg_name, help_command, **kwargs):
if not help_command.synopsis:
doc = help_command.doc
argument = help_command.arg_table[arg_name]
if argument.synopsis:
option_str = argument.synopsis
elif argument.group_name in self._arg_groups:
if argument.group_name in self._documented_arg_groups:
# This arg is already documented so we can move on.
return
option_str = ' | '.join(
[a.cli_name for a in
self._arg_groups[argument.group_name]])
self._documented_arg_groups.append(argument.group_name)
elif argument.cli_type_name == 'boolean':
option_str = '%s' % argument.cli_name
elif argument.nargs == '+':
option_str = "%s <value> [<value>...]" % argument.cli_name
else:
option_str = '%s <value>' % argument.cli_name
if not (argument.required or argument.positional_arg):
option_str = '[%s]' % option_str
doc.writeln('%s' % option_str)
else:
# A synopsis has been provided so we don't need to write
# anything here.
pass
def doc_synopsis_end(self, help_command, **kwargs):
if not help_command.synopsis:
super(BasicDocHandler, self).doc_synopsis_end(
help_command=help_command, **kwargs)
else:
self.doc.style.end_codeblock()
def doc_examples(self, help_command, **kwargs):
if help_command.examples:
self.doc.style.h2('Examples')
self.doc.write(help_command.examples)
def doc_subitems_start(self, help_command, **kwargs):
if help_command.command_table:
doc = help_command.doc
doc.style.h2('Available Commands')
doc.style.toctree()
def doc_subitem(self, command_name, help_command, **kwargs):
if help_command.command_table:
doc = help_command.doc
doc.style.tocitem(command_name)
def doc_subitems_end(self, help_command, **kwargs):
pass
def doc_output(self, help_command, event_name, **kwargs):
pass
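# Illustrative sketch (not part of the original module): a minimal subclass
# wired up the way the BasicCommand class-level comments describe. The command
# name, help text, and argument below are hypothetical.
class _ExampleHelloCommand(BasicCommand):
    NAME = 'hello'
    DESCRIPTION = 'Print a friendly greeting.'
    ARG_TABLE = [
        {'name': 'target', 'help_text': 'Who to greet.',
         'action': 'store', 'required': False, 'cli_type_name': 'string'},
    ]

    def _run_main(self, parsed_args, parsed_globals):
        # The ARG_TABLE name 'target' surfaces as the attribute
        # parsed_args.target (dashes become underscores).
        print('Hello, %s' % (parsed_args.target or 'world'))
        return 0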
|
|
import numpy as np
import itertools
# This file contains a dictionary that maps an integer n to the
# distribution of the Wilcoxon signed rank test statistic.
# The dictionary can be generated by the functions
# _generate_wilcoxon_exact_table and _generate_wilcoxon_exact_table_fast.
# The second function is about 20% faster.
def _generate_wilcoxon_exact_table(N):
"""
Generate counts of the Wilcoxon ranksum statistic r_plus (sum of
ranks of positive differences). For fixed n, simulate all possible states
{0, 1}**n and compute the sum of the ranks over the indices that are equal
to one (positive differences).
Return a dictionary that maps n=3,...N to the corresponding list of counts
"""
res_dict = {}
for n in range(1, N+1):
res = []
ranks = np.arange(n) + 1
M = n*(n + 1)/2
for x in itertools.product((0, 1), repeat=n):
# note that by symmetry, given a state x, we can directly compute
# the positive ranksum of the inverted state (i.e. ~x or 1 - x),
# therefore, it is enough to consider sequences starting with a one
if x[0] == 1:
rank_sum = np.sum(x * ranks)
res.append(rank_sum)
res.append(M - rank_sum)
_, cnt = np.unique(res, return_counts=True)
res_dict[n] = list(cnt)
return res_dict
def _generate_wilcoxon_exact_table_fast(N):
"""
Same functionality as _generate_wilcoxon_exact_table, but about 20% faster,
but harder to follow.
"""
res_dict = {}
for n in range(1, N+1):
ranks = np.arange(n) + 1
M = int(n*(n + 1)/2)
res = np.zeros(M + 1, dtype=int)
for x in itertools.product((0, 1), repeat=n):
if x[0] == 1:
rank_sum = int(np.sum(x * ranks))
res[rank_sum] += 1
# flip array to get counts of symmetric sequences starting with 0
res_dict[n] = list(res + np.flip(res))
return res_dict
COUNTS = {
1: [1, 1],
2: [1, 1, 1, 1],
3: [1, 1, 1, 2, 1, 1, 1],
4: [1, 1, 1, 2, 2, 2, 2, 2, 1, 1, 1],
5: [1, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 1],
6: [1, 1, 1, 2, 2, 3, 4, 4, 4, 5, 5, 5, 5, 4, 4, 4, 3, 2, 2, 1, 1, 1],
7: [1, 1, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 8, 8, 8, 7, 7, 6, 5, 5, 4,
3, 2, 2, 1, 1, 1],
8: [1, 1, 1, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 13, 13, 14, 13,
13, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 2, 1, 1, 1],
9: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 9, 10, 12, 13, 15, 17, 18, 19, 21, 21,
22, 23, 23, 23, 23, 22, 21, 21, 19, 18, 17, 15, 13, 12, 10, 9, 8, 6,
5, 4, 3, 2, 2, 1, 1, 1],
10: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 11, 13, 15, 17, 20, 22, 24, 27, 29,
31, 33, 35, 36, 38, 39, 39, 40, 40, 39, 39, 38, 36, 35, 33, 31, 29,
27, 24, 22, 20, 17, 15, 13, 11, 10, 8, 6, 5, 4, 3, 2, 2, 1, 1, 1],
11: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 14, 16, 19, 22, 25, 28, 32, 35,
39, 43, 46, 49, 53, 56, 59, 62, 64, 66, 68, 69, 69, 70, 69, 69, 68,
66, 64, 62, 59, 56, 53, 49, 46, 43, 39, 35, 32, 28, 25, 22, 19, 16,
14, 12, 10, 8, 6, 5, 4, 3, 2, 2, 1, 1, 1],
12: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 15, 17, 20, 24, 27, 31, 36, 40,
45, 51, 56, 61, 67, 72, 78, 84, 89, 94, 100, 104, 108, 113, 115, 118,
121, 122, 123, 124, 123, 122, 121, 118, 115, 113, 108, 104, 100, 94,
89, 84, 78, 72, 67, 61, 56, 51, 45, 40, 36, 31, 27, 24, 20, 17, 15,
12, 10, 8, 6, 5, 4, 3, 2, 2, 1, 1, 1],
13: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 15, 18, 21, 25, 29, 33, 39, 44,
50, 57, 64, 71, 79, 87, 95, 104, 113, 121, 131, 140, 148, 158, 166,
174, 182, 189, 195, 202, 207, 211, 215, 218, 219, 221, 221, 219, 218,
215, 211, 207, 202, 195, 189, 182, 174, 166, 158, 148, 140, 131, 121,
113, 104, 95, 87, 79, 71, 64, 57, 50, 44, 39, 33, 29, 25, 21, 18, 15,
12, 10, 8, 6, 5, 4, 3, 2, 2, 1, 1, 1],
14: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 15, 18, 22, 26, 30, 35, 41, 47,
54, 62, 70, 79, 89, 99, 110, 122, 134, 146, 160, 173, 187, 202, 216,
231, 246, 260, 274, 289, 302, 315, 328, 339, 350, 361, 369, 377, 384,
389, 393, 396, 397, 397, 396, 393, 389, 384, 377, 369, 361, 350, 339,
328, 315, 302, 289, 274, 260, 246, 231, 216, 202, 187, 173, 160, 146,
134, 122, 110, 99, 89, 79, 70, 62, 54, 47, 41, 35, 30, 26, 22, 18,
15, 12, 10, 8, 6, 5, 4, 3, 2, 2, 1, 1, 1],
15: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 15, 18, 22, 27, 31, 36, 43, 49,
57, 66, 75, 85, 97, 109, 122, 137, 152, 168, 186, 203, 222, 243, 263,
285, 308, 330, 353, 378, 401, 425, 450, 473, 496, 521, 542, 564, 586,
605, 624, 642, 657, 671, 685, 695, 704, 712, 716, 719, 722, 719, 716,
712, 704, 695, 685, 671, 657, 642, 624, 605, 586, 564, 542, 521, 496,
473, 450, 425, 401, 378, 353, 330, 308, 285, 263, 243, 222, 203, 186,
168, 152, 137, 122, 109, 97, 85, 75, 66, 57, 49, 43, 36, 31, 27, 22,
18, 15, 12, 10, 8, 6, 5, 4, 3, 2, 2, 1, 1, 1],
16: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 15, 18, 22, 27, 32, 37, 44, 51,
59, 69, 79, 90, 103, 117, 132, 149, 167, 186, 208, 230, 253, 279,
306, 334, 365, 396, 428, 463, 498, 534, 572, 610, 648, 689, 728, 767,
808, 848, 887, 927, 965, 1001, 1038, 1073, 1105, 1137, 1166, 1192,
1218, 1240, 1258, 1276, 1290, 1300, 1309, 1313, 1314, 1313, 1309,
1300, 1290, 1276, 1258, 1240, 1218, 1192, 1166, 1137, 1105, 1073,
1038, 1001, 965, 927, 887, 848, 808, 767, 728, 689, 648, 610, 572,
534, 498, 463, 428, 396, 365, 334, 306, 279, 253, 230, 208, 186, 167,
149, 132, 117, 103, 90, 79, 69, 59, 51, 44, 37, 32, 27, 22, 18, 15,
12, 10, 8, 6, 5, 4, 3, 2, 2, 1, 1, 1],
17: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 15, 18, 22, 27, 32, 38, 45, 52,
61, 71, 82, 94, 108, 123, 140, 159, 179, 201, 226, 252, 280, 311,
343, 378, 416, 455, 497, 542, 588, 637, 689, 742, 797, 856, 914, 975,
1038, 1101, 1166, 1233, 1299, 1366, 1434, 1501, 1568, 1635, 1700,
1764, 1828, 1888, 1947, 2004, 2057, 2108, 2157, 2200, 2241, 2278,
2310, 2338, 2363, 2381, 2395, 2406, 2410, 2410, 2406, 2395, 2381,
2363, 2338, 2310, 2278, 2241, 2200, 2157, 2108, 2057, 2004, 1947,
1888, 1828, 1764, 1700, 1635, 1568, 1501, 1434, 1366, 1299, 1233,
1166, 1101, 1038, 975, 914, 856, 797, 742, 689, 637, 588, 542, 497,
455, 416, 378, 343, 311, 280, 252, 226, 201, 179, 159, 140, 123, 108,
94, 82, 71, 61, 52, 45, 38, 32, 27, 22, 18, 15, 12, 10, 8, 6, 5, 4,
3, 2, 2, 1, 1, 1],
18: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 15, 18, 22, 27, 32, 38, 46, 53,
62, 73, 84, 97, 112, 128, 146, 167, 189, 213, 241, 270, 302, 338,
375, 416, 461, 507, 558, 613, 670, 731, 797, 865, 937, 1015, 1093,
1176, 1264, 1353, 1446, 1544, 1642, 1744, 1850, 1956, 2065, 2177,
2288, 2401, 2517, 2630, 2744, 2860, 2971, 3083, 3195, 3301, 3407,
3511, 3609, 3704, 3797, 3882, 3963, 4041, 4110, 4174, 4234, 4283,
4328, 4367, 4395, 4418, 4435, 4441, 4441, 4435, 4418, 4395, 4367,
4328, 4283, 4234, 4174, 4110, 4041, 3963, 3882, 3797, 3704, 3609,
3511, 3407, 3301, 3195, 3083, 2971, 2860, 2744, 2630, 2517, 2401,
2288, 2177, 2065, 1956, 1850, 1744, 1642, 1544, 1446, 1353, 1264,
1176, 1093, 1015, 937, 865, 797, 731, 670, 613, 558, 507, 461, 416,
375, 338, 302, 270, 241, 213, 189, 167, 146, 128, 112, 97, 84, 73,
62, 53, 46, 38, 32, 27, 22, 18, 15, 12, 10, 8, 6, 5, 4, 3, 2, 2, 1,
1, 1],
19: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 15, 18, 22, 27, 32, 38, 46, 54,
63, 74, 86, 99, 115, 132, 151, 173, 197, 223, 253, 285, 320, 360,
402, 448, 499, 553, 611, 675, 743, 815, 894, 977, 1065, 1161, 1260,
1365, 1477, 1594, 1716, 1846, 1980, 2119, 2266, 2417, 2572, 2735,
2901, 3071, 3248, 3427, 3609, 3797, 3986, 4176, 4371, 4565, 4760,
4957, 5153, 5346, 5541, 5732, 5919, 6106, 6287, 6462, 6635, 6800,
6958, 7111, 7255, 7389, 7518, 7636, 7742, 7842, 7929, 8004, 8071,
8125, 8165, 8197, 8215, 8220, 8215, 8197, 8165, 8125, 8071, 8004,
7929, 7842, 7742, 7636, 7518, 7389, 7255, 7111, 6958, 6800, 6635,
6462, 6287, 6106, 5919, 5732, 5541, 5346, 5153, 4957, 4760, 4565,
4371, 4176, 3986, 3797, 3609, 3427, 3248, 3071, 2901, 2735, 2572,
2417, 2266, 2119, 1980, 1846, 1716, 1594, 1477, 1365, 1260, 1161,
1065, 977, 894, 815, 743, 675, 611, 553, 499, 448, 402, 360, 320, 285,
253, 223, 197, 173, 151, 132, 115, 99, 86, 74, 63, 54, 46, 38, 32, 27,
22, 18, 15, 12, 10, 8, 6, 5, 4, 3, 2, 2, 1, 1, 1],
20: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 15, 18, 22, 27, 32, 38, 46, 54,
64, 75, 87, 101, 117, 135, 155, 178, 203, 231, 263, 297, 335, 378,
424, 475, 531, 591, 657, 729, 806, 889, 980, 1076, 1180, 1293, 1411,
1538, 1674, 1817, 1969, 2131, 2300, 2479, 2668, 2865, 3071, 3288,
3512, 3746, 3991, 4242, 4503, 4774, 5051, 5337, 5631, 5930, 6237,
6551, 6869, 7192, 7521, 7851, 8185, 8523, 8859, 9197, 9536, 9871,
10206, 10538, 10864, 11186, 11504, 11812, 12113, 12407, 12689, 12961,
13224, 13471, 13706, 13929, 14134, 14326, 14502, 14659, 14800, 14925,
15029, 15115, 15184, 15231, 15260, 15272, 15260, 15231, 15184, 15115,
15029, 14925, 14800, 14659, 14502, 14326, 14134, 13929, 13706, 13471,
13224, 12961, 12689, 12407, 12113, 11812, 11504, 11186, 10864, 10538,
10206, 9871, 9536, 9197, 8859, 8523, 8185, 7851, 7521, 7192, 6869,
6551, 6237, 5930, 5631, 5337, 5051, 4774, 4503, 4242, 3991, 3746,
3512, 3288, 3071, 2865, 2668, 2479, 2300, 2131, 1969, 1817, 1674,
1538, 1411, 1293, 1180, 1076, 980, 889, 806, 729, 657, 591, 531, 475,
424, 378, 335, 297, 263, 231, 203, 178, 155, 135, 117, 101, 87, 75,
64, 54, 46, 38, 32, 27, 22, 18, 15, 12, 10, 8, 6, 5, 4, 3, 2, 2, 1, 1,
1],
21: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 15, 18, 22, 27, 32, 38, 46, 54,
64, 76, 88, 102, 119, 137, 158, 182, 208, 237, 271, 307, 347, 393,
442, 497, 558, 623, 695, 775, 860, 953, 1055, 1163, 1281, 1410, 1546,
1693, 1852, 2020, 2200, 2394, 2597, 2814, 3046, 3289, 3546, 3819,
4103, 4403, 4720, 5048, 5392, 5754, 6127, 6517, 6924, 7341, 7775,
8225, 8686, 9161, 9652, 10151, 10664, 11191, 11724, 12268, 12824,
13383, 13952, 14529, 15106, 15689, 16278, 16863, 17450, 18038, 18619,
19198, 19775, 20340, 20898, 21450, 21985, 22511, 23025, 23518, 23997,
24461, 24900, 25321, 25722, 26095, 26446, 26776, 27072, 27344, 27591,
27804, 27990, 28149, 28271, 28365, 28431, 28460, 28460, 28431, 28365,
28271, 28149, 27990, 27804, 27591, 27344, 27072, 26776, 26446, 26095,
25722, 25321, 24900, 24461, 23997, 23518, 23025, 22511, 21985, 21450,
20898, 20340, 19775, 19198, 18619, 18038, 17450, 16863, 16278, 15689,
15106, 14529, 13952, 13383, 12824, 12268, 11724, 11191, 10664, 10151,
9652, 9161, 8686, 8225, 7775, 7341, 6924, 6517, 6127, 5754, 5392,
5048, 4720, 4403, 4103, 3819, 3546, 3289, 3046, 2814, 2597, 2394,
2200, 2020, 1852, 1693, 1546, 1410, 1281, 1163, 1055, 953, 860, 775,
695, 623, 558, 497, 442, 393, 347, 307, 271, 237, 208, 182, 158, 137,
119, 102, 88, 76, 64, 54, 46, 38, 32, 27, 22, 18, 15, 12, 10, 8, 6,
5, 4, 3, 2, 2, 1, 1, 1],
22: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 15, 18, 22, 27, 32, 38, 46, 54,
64, 76, 89, 103, 120, 139, 160, 185, 212, 242, 277, 315, 357, 405,
457, 515, 580, 650, 727, 813, 906, 1007, 1119, 1239, 1369, 1512, 1665,
1830, 2010, 2202, 2408, 2631, 2868, 3121, 3393, 3682, 3988, 4316,
4661, 5026, 5415, 5823, 6252, 6707, 7182, 7680, 8205, 8751, 9321,
9918, 10538, 11181, 11852, 12545, 13261, 14005, 14770, 15557, 16370,
17202, 18055, 18932, 19826, 20737, 21670, 22617, 23577, 24555, 25543,
26539, 27550, 28565, 29584, 30611, 31637, 32662, 33689, 34709, 35721,
36729, 37724, 38704, 39674, 40624, 41552, 42465, 43350, 44207, 45041,
45842, 46609, 47347, 48046, 48705, 49329, 49910, 50445, 50942, 51390,
51789, 52146, 52451, 52704, 52912, 53066, 53167, 53222, 53222, 53167,
53066, 52912, 52704, 52451, 52146, 51789, 51390, 50942, 50445, 49910,
49329, 48705, 48046, 47347, 46609, 45842, 45041, 44207, 43350, 42465,
41552, 40624, 39674, 38704, 37724, 36729, 35721, 34709, 33689, 32662,
31637, 30611, 29584, 28565, 27550, 26539, 25543, 24555, 23577, 22617,
21670, 20737, 19826, 18932, 18055, 17202, 16370, 15557, 14770, 14005,
13261, 12545, 11852, 11181, 10538, 9918, 9321, 8751, 8205, 7680, 7182,
6707, 6252, 5823, 5415, 5026, 4661, 4316, 3988, 3682, 3393, 3121,
2868, 2631, 2408, 2202, 2010, 1830, 1665, 1512, 1369, 1239, 1119,
1007, 906, 813, 727, 650, 580, 515, 457, 405, 357, 315, 277, 242, 212,
185, 160, 139, 120, 103, 89, 76, 64, 54, 46, 38, 32, 27, 22, 18, 15,
12, 10, 8, 6, 5, 4, 3, 2, 2, 1, 1, 1],
23: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 15, 18, 22, 27, 32, 38, 46, 54,
64, 76, 89, 104, 121, 140, 162, 187, 215, 246, 282, 321, 365, 415,
469, 530, 598, 672, 754, 845, 944, 1053, 1173, 1303, 1445, 1601,
1768, 1950, 2149, 2362, 2593, 2843, 3110, 3398, 3708, 4039, 4393,
4773, 5176, 5606, 6065, 6550, 7065, 7613, 8189, 8799, 9444, 10120,
10833, 11583, 12368, 13191, 14054, 14953, 15892, 16873, 17891, 18950,
20052, 21190, 22371, 23593, 24852, 26152, 27493, 28869, 30284, 31737,
33223, 34744, 36301, 37886, 39502, 41149, 42818, 44514, 46234, 47970,
49726, 51499, 53281, 55074, 56876, 58679, 60484, 62291, 64087, 65877,
67658, 69419, 71164, 72890, 74585, 76255, 77894, 79494, 81056, 82579,
84052, 85478, 86855, 88172, 89433, 90636, 91770, 92841, 93846, 94774,
95632, 96416, 97119, 97745, 98293, 98755, 99136, 99436, 99647, 99774,
99820, 99774, 99647, 99436, 99136, 98755, 98293, 97745, 97119, 96416,
95632, 94774, 93846, 92841, 91770, 90636, 89433, 88172, 86855, 85478,
84052, 82579, 81056, 79494, 77894, 76255, 74585, 72890, 71164, 69419,
67658, 65877, 64087, 62291, 60484, 58679, 56876, 55074, 53281, 51499,
49726, 47970, 46234, 44514, 42818, 41149, 39502, 37886, 36301, 34744,
33223, 31737, 30284, 28869, 27493, 26152, 24852, 23593, 22371, 21190,
20052, 18950, 17891, 16873, 15892, 14953, 14054, 13191, 12368, 11583,
10833, 10120, 9444, 8799, 8189, 7613, 7065, 6550, 6065, 5606, 5176,
4773, 4393, 4039, 3708, 3398, 3110, 2843, 2593, 2362, 2149, 1950,
1768, 1601, 1445, 1303, 1173, 1053, 944, 845, 754, 672, 598, 530, 469,
415, 365, 321, 282, 246, 215, 187, 162, 140, 121, 104, 89, 76, 64, 54,
46, 38, 32, 27, 22, 18, 15, 12, 10, 8, 6, 5, 4, 3, 2, 2, 1, 1, 1],
24: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 15, 18, 22, 27, 32, 38, 46, 54,
64, 76, 89, 104, 122, 141, 163, 189, 217, 249, 286, 326, 371, 423,
479, 542, 613, 690, 776, 872, 976, 1091, 1219, 1357, 1509, 1677, 1857,
2054, 2270, 2502, 2755, 3030, 3325, 3644, 3990, 4360, 4758, 5188,
5645, 6136, 6663, 7222, 7819, 8458, 9133, 9852, 10617, 11423, 12278,
13184, 14136, 15141, 16203, 17315, 18485, 19716, 21001, 22348, 23760,
25229, 26764, 28366, 30028, 31758, 33558, 35419, 37349, 39350, 41412,
43543, 45745, 48006, 50335, 52732, 55186, 57705, 60288, 62923, 65618,
68372, 71172, 74024, 76928, 79869, 82855, 85884, 88939, 92029, 95151,
98288, 101448, 104627, 107808, 110999, 114195, 117380, 120558, 123728,
126870, 129992, 133089, 136142, 139159, 142135, 145051, 147915,
150722, 153453, 156116, 158707, 161206, 163622, 165951, 168174,
170300, 172326, 174232, 176029, 177714, 179268, 180703, 182015,
183188, 184233, 185148, 185917, 186552, 187052, 187402, 187615,
187692, 187615, 187402, 187052, 186552, 185917, 185148, 184233,
183188, 182015, 180703, 179268, 177714, 176029, 174232, 172326,
170300, 168174, 165951, 163622, 161206, 158707, 156116, 153453,
150722, 147915, 145051, 142135, 139159, 136142, 133089, 129992,
126870, 123728, 120558, 117380, 114195, 110999, 107808, 104627,
101448, 98288, 95151, 92029, 88939, 85884, 82855, 79869, 76928,
74024, 71172, 68372, 65618, 62923, 60288, 57705, 55186, 52732, 50335,
48006, 45745, 43543, 41412, 39350, 37349, 35419, 33558, 31758, 30028,
28366, 26764, 25229, 23760, 22348, 21001, 19716, 18485, 17315, 16203,
15141, 14136, 13184, 12278, 11423, 10617, 9852, 9133, 8458, 7819,
7222, 6663, 6136, 5645, 5188, 4758, 4360, 3990, 3644, 3325, 3030,
2755, 2502, 2270, 2054, 1857, 1677, 1509, 1357, 1219, 1091, 976, 872,
776, 690, 613, 542, 479, 423, 371, 326, 286, 249, 217, 189, 163, 141,
122, 104, 89, 76, 64, 54, 46, 38, 32, 27, 22, 18, 15, 12, 10, 8, 6,
5, 4, 3, 2, 2, 1, 1, 1],
25: [1, 1, 1, 2, 2, 3, 4, 5, 6, 8, 10, 12, 15, 18, 22, 27, 32, 38, 46, 54,
64, 76, 89, 104, 122, 142, 164, 190, 219, 251, 289, 330, 376, 429,
487, 552, 625, 705, 794, 894, 1003, 1123, 1257, 1403, 1563, 1741,
1933, 2143, 2374, 2624, 2896, 3193, 3514, 3861, 4239, 4646, 5084,
5559, 6068, 6615, 7205, 7835, 8509, 9234, 10005, 10828, 11708, 12642,
13635, 14693, 15813, 16998, 18257, 19585, 20987, 22471, 24031, 25673,
27404, 29219, 31124, 33124, 35216, 37403, 39694, 42082, 44571, 47169,
49870, 52676, 55597, 58623, 61758, 65010, 68370, 71841, 75429, 79126,
82933, 86857, 90888, 95025, 99276, 103629, 108084, 112648, 117305,
122057, 126909, 131846, 136867, 141976, 147158, 152411, 157738,
163125, 168564, 174063, 179602, 185178, 190794, 196430, 202082,
207753, 213423, 219087, 224746, 230381, 235985, 241562, 247090,
252561, 257980, 263325, 268588, 273774, 278859, 283837, 288713,
293463, 298083, 302573, 306916, 311103, 315140, 319006, 322694,
326211, 329537, 332666, 335607, 338337, 340855, 343168, 345259,
347123, 348770, 350184, 351362, 352315, 353029, 353500, 353743,
353743, 353500, 353029, 352315, 351362, 350184, 348770, 347123,
345259, 343168, 340855, 338337, 335607, 332666, 329537, 326211,
322694, 319006, 315140, 311103, 306916, 302573, 298083, 293463,
288713, 283837, 278859, 273774, 268588, 263325, 257980, 252561,
247090, 241562, 235985, 230381, 224746, 219087, 213423, 207753,
202082, 196430, 190794, 185178, 179602, 174063, 168564, 163125,
157738, 152411, 147158, 141976, 136867, 131846, 126909, 122057,
117305, 112648, 108084, 103629, 99276, 95025, 90888, 86857, 82933,
79126, 75429, 71841, 68370, 65010, 61758, 58623, 55597, 52676, 49870,
47169, 44571, 42082, 39694, 37403, 35216, 33124, 31124, 29219, 27404,
25673, 24031, 22471, 20987, 19585, 18257, 16998, 15813, 14693, 13635,
12642, 11708, 10828, 10005, 9234, 8509, 7835, 7205, 6615, 6068, 5559,
5084, 4646, 4239, 3861, 3514, 3193, 2896, 2624, 2374, 2143, 1933,
1741, 1563, 1403, 1257, 1123, 1003, 894, 794, 705, 625, 552, 487,
429, 376, 330, 289, 251, 219, 190, 164, 142, 122, 104, 89, 76, 64,
54, 46, 38, 32, 27, 22, 18, 15, 12, 10, 8, 6, 5, 4, 3, 2, 2, 1, 1, 1]
}
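# Illustrative consistency check (not part of the original module): for small
# n, both generator functions above should reproduce the hard-coded COUNTS
# entries exactly.
def _check_counts(N=10):
    slow = _generate_wilcoxon_exact_table(N)
    fast = _generate_wilcoxon_exact_table_fast(N)
    for n in range(1, N + 1):
        assert list(slow[n]) == list(fast[n]) == COUNTS[n]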
|
|
import ntplib
import cPickle as pickle
from threading import Lock
from mi.core.driver_scheduler import DriverSchedulerConfigKey, TriggerType
from mi.core.exceptions import InstrumentProtocolException, InstrumentParameterException
from mi.core.instrument.data_particle import DataParticle, DataParticleKey
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.port_agent_client import PortAgentPacket
from mi.core.instrument.protocol_param_dict import ParameterDictVisibility, ParameterDictType
from mi.core.log import get_logger
from mi.instrument.antelope.orb.ooicore.packet_log import PacketLog, GapException
log = get_logger()
from mi.core.common import BaseEnum, Units
from mi.core.persistent_store import PersistentStoreDict
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver, DriverConfigKey
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.instrument.instrument_protocol import InstrumentProtocol
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
ORBOLDEST = -13
class ProtocolState(BaseEnum):
UNKNOWN = DriverProtocolState.UNKNOWN
COMMAND = DriverProtocolState.COMMAND
AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
STOPPING = 'DRIVER_STATE_STOPPING'
WRITE_ERROR = 'DRIVER_STATE_WRITE_ERROR'
class ProtocolEvent(BaseEnum):
ENTER = DriverEvent.ENTER
EXIT = DriverEvent.EXIT
DISCOVER = DriverEvent.DISCOVER
START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
GET = DriverEvent.GET
SET = DriverEvent.SET
FLUSH = 'PROTOCOL_EVENT_FLUSH'
CLEAR_WRITE_ERROR = 'PROTOCOL_EVENT_CLEAR_WRITE_ERROR'
class Capability(BaseEnum):
DISCOVER = DriverEvent.DISCOVER
START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
GET = DriverEvent.GET
SET = DriverEvent.SET
CLEAR_WRITE_ERROR = ProtocolEvent.CLEAR_WRITE_ERROR
class Parameter(BaseEnum):
REFDES = 'refdes'
SOURCE_REGEX = 'source_regex'
START_PKTID = 'start_pktid'
FLUSH_INTERVAL = 'flush_interval'
DB_ADDR = 'database_address'
DB_PORT = 'database_port'
FILE_LOCATION = 'file_location'
class ScheduledJob(BaseEnum):
FLUSH = 'flush'
class AntelopeDataParticles(BaseEnum):
METADATA = 'antelope_metadata'
class AntelopeMetadataParticleKey(BaseEnum):
NET = 'antelope_network'
STATION = 'antelope_station'
LOCATION = 'antelope_location'
CHANNEL = 'antelope_channel'
START = 'antelope_starttime'
END = 'antelope_endtime'
RATE = 'antelope_sampling_rate'
NSAMPS = 'antelope_num_samples'
FILENAME = 'filepath'
UUID = 'uuid'
class AntelopeMetadataParticle(DataParticle):
_data_particle_type = AntelopeDataParticles.METADATA
def __init__(self, raw_data, **kwargs):
super(AntelopeMetadataParticle, self).__init__(raw_data, **kwargs)
self.set_internal_timestamp(unix_time=raw_data.header.starttime)
def _build_parsed_values(self):
header = self.raw_data.header
pk = AntelopeMetadataParticleKey
return [
self._encode_value(pk.NET, header.net, str),
self._encode_value(pk.STATION, header.station, str),
self._encode_value(pk.LOCATION, header.location, str),
self._encode_value(pk.CHANNEL, header.channel, str),
self._encode_value(pk.START, ntplib.system_to_ntp_time(header.starttime), float),
self._encode_value(pk.END, ntplib.system_to_ntp_time(header.endtime), float),
self._encode_value(pk.RATE, header.rate, int),
self._encode_value(pk.NSAMPS, header.num_samples, int),
self._encode_value(pk.FILENAME, self.raw_data.relname, str),
self._encode_value(pk.UUID, self.raw_data.bin_uuid, str),
]
class InstrumentDriver(SingleConnectionInstrumentDriver):
"""
Generic antelope instrument driver
"""
def _build_protocol(self):
"""
Construct the driver protocol state machine.
"""
self._protocol = Protocol(self._driver_event)
# noinspection PyMethodMayBeStatic,PyUnusedLocal
class Protocol(InstrumentProtocol):
def __init__(self, driver_event):
super(Protocol, self).__init__(driver_event)
self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent,
ProtocolEvent.ENTER, ProtocolEvent.EXIT)
handlers = {
ProtocolState.UNKNOWN: (
(ProtocolEvent.ENTER, self._handler_unknown_enter),
(ProtocolEvent.EXIT, self._handler_unknown_exit),
(ProtocolEvent.DISCOVER, self._handler_unknown_discover),
),
ProtocolState.COMMAND: (
(ProtocolEvent.ENTER, self._handler_command_enter),
(ProtocolEvent.EXIT, self._handler_command_exit),
(ProtocolEvent.GET, self._handler_get),
(ProtocolEvent.SET, self._handler_set),
(ProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample),
),
ProtocolState.AUTOSAMPLE: (
(ProtocolEvent.ENTER, self._handler_autosample_enter),
(ProtocolEvent.EXIT, self._handler_autosample_exit),
(ProtocolEvent.GET, self._handler_get),
(ProtocolEvent.FLUSH, self._flush),
(ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample),
),
ProtocolState.STOPPING: (
(ProtocolEvent.ENTER, self._handler_stopping_enter),
(ProtocolEvent.EXIT, self._handler_stopping_exit),
(ProtocolEvent.FLUSH, self._flush),
),
ProtocolState.WRITE_ERROR: (
(ProtocolEvent.ENTER, self._handler_write_error_enter),
(ProtocolEvent.EXIT, self._handler_write_error_exit),
(ProtocolEvent.CLEAR_WRITE_ERROR, self._handler_clear_write_error),
)}
for state in handlers:
for event, handler in handlers[state]:
self._protocol_fsm.add_handler(state, event, handler)
# Build dictionaries for driver schema
self._build_param_dict()
self._build_command_dict()
self._build_driver_dict()
# Start the state machine in the UNKNOWN state.
self._protocol_fsm.start(ProtocolState.UNKNOWN)
self._logs = {}
self._filled_logs = []
self._pickle_cache = []
# Persistent store; cannot be initialized until the startup config has been
# applied, since we need the address for postgres.
self._persistent_store = None
# lock for flush actions to prevent writing or altering the data files
# during flush
self._lock = Lock()
self._pktid = 0
def _filter_capabilities(self, events):
"""
Filter a list of events to only include valid capabilities
@param events: list of events to be filtered
@return: list of filtered events
"""
return [x for x in events if Capability.has(x)]
def _build_command_dict(self):
"""
Populate the command dictionary with commands.
"""
self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="Start Autosample")
self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="Stop Autosample")
self._cmd_dict.add(Capability.GET, display_name="Get")
self._cmd_dict.add(Capability.SET, display_name="Set")
self._cmd_dict.add(Capability.DISCOVER, display_name="Discover")
self._cmd_dict.add(Capability.CLEAR_WRITE_ERROR, display_name="Clear Write Error")
def _build_param_dict(self):
self._param_dict.add(Parameter.REFDES,
'NA',
str,
str,
visibility=ParameterDictVisibility.IMMUTABLE,
startup_param=True,
display_name='Reference Designator',
description='Reference Designator for this driver',
type=ParameterDictType.STRING)
self._param_dict.add(Parameter.SOURCE_REGEX,
'NA',
str,
str,
visibility=ParameterDictVisibility.IMMUTABLE,
startup_param=True,
display_name='Source Filter Regex',
description='Filter sources to be processed from the ORB',
type=ParameterDictType.STRING,
value_description='Regular expression')
self._param_dict.add(Parameter.FLUSH_INTERVAL,
'NA',
str,
str,
visibility=ParameterDictVisibility.IMMUTABLE,
startup_param=True,
display_name='Flush Interval',
description='Interval after which all records are flushed to disk',
type=ParameterDictType.INT,
value_description='Interval, in seconds',
units=Units.SECOND)
self._param_dict.add(Parameter.DB_ADDR,
'NA',
str,
str,
visibility=ParameterDictVisibility.IMMUTABLE,
startup_param=True,
default_value='localhost',
display_name='Database Address',
description='Postgres database IP address or hostname',
type=ParameterDictType.STRING,
value_description='IP address or hostname')
self._param_dict.add(Parameter.DB_PORT,
'NA',
str,
str,
visibility=ParameterDictVisibility.IMMUTABLE,
startup_param=True,
default_value=5432,
display_name='Database Port',
description='Postgres database port number',
type=ParameterDictType.INT,
value_description='Integer port number (default 5432)')
self._param_dict.add(Parameter.FILE_LOCATION,
'NA',
str,
str,
visibility=ParameterDictVisibility.IMMUTABLE,
startup_param=True,
default_value="./antelope_data",
display_name='File Location',
description='Root file path of the packet data files',
type=ParameterDictType.STRING,
value_description='String representing the packet data root file path')
def _build_driver_dict(self):
"""
Populate the driver dictionary with options
"""
self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)
def _build_persistent_dict(self):
name = 'antelope'
refdes = self._param_dict.get(Parameter.REFDES)
host = self._param_dict.get(Parameter.DB_ADDR)
port = self._param_dict.get(Parameter.DB_PORT)
self._persistent_store = PersistentStoreDict(name, refdes, host=host, port=port)
if 'pktid' not in self._persistent_store:
self._persistent_store['pktid'] = ORBOLDEST
def _handler_set(self, *args, **kwargs):
pass
def _update_params(self, *args, **kwargs):
pass
def _set_params(self, *args, **kwargs):
"""
Set various parameters
@param args: arglist, should contain a dictionary of parameters/values to be set
"""
try:
params = args[0]
except IndexError:
raise InstrumentParameterException('Set command requires a parameter dict.')
self._verify_not_readonly(*args, **kwargs)
old_config = self._param_dict.get_config()
# all constraints met or no constraints exist, set the values
for key, value in params.iteritems():
self._param_dict.set_value(key, value)
new_config = self._param_dict.get_config()
if not old_config == new_config:
self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
# Set the base directory for the packet data file location.
PacketLog.base_dir = self._param_dict.get(Parameter.FILE_LOCATION)
def _flush(self):
log.info('flush')
particles = []
with self._lock:
log.info('got lock')
# On the last flush, close all the bins.
last_flush = self.get_current_state() == ProtocolState.STOPPING
if last_flush:
self._filled_logs.extend(self._logs.values())
self._logs = {}
for _log in self._logs.itervalues():
try:
_log.flush()
except InstrumentProtocolException as ex:
# Ensure the current logs are clear to prevent residual data from being flushed.
self._driver_event(DriverAsyncEvent.ERROR, ex)
self._logs = {}
self._filled_logs = []
return ProtocolState.WRITE_ERROR, (ProtocolState.WRITE_ERROR, None)
particles.append(AntelopeMetadataParticle(_log, preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP))
for _log in self._filled_logs:
try:
_log.flush()
except InstrumentProtocolException as ex:
# Ensure the current logs are clear to prevent residual data from being flushed.
self._driver_event(DriverAsyncEvent.ERROR, ex)
self._logs = {}
self._filled_logs = []
return ProtocolState.WRITE_ERROR, (ProtocolState.WRITE_ERROR, None)
particles.append(AntelopeMetadataParticle(_log, preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP))
_log.data = []
self._filled_logs = []
log.info('updating persistent store')
self._persistent_store['pktid'] = self._pktid
for particle in particles:
self._driver_event(DriverAsyncEvent.SAMPLE, particle.generate())
if last_flush:
self.stop_scheduled_job(ScheduledJob.FLUSH)
return ProtocolState.COMMAND, (ProtocolState.COMMAND, None)
return None, (None, None)
# noinspection PyProtectedMember
def _orbstart(self):
self._connection._command_port_agent('orbselect %s' % self._param_dict.get(Parameter.SOURCE_REGEX))
self._connection._command_port_agent('orbseek %s' % self._persistent_store['pktid'])
self._connection._command_port_agent('orbstart')
# noinspection PyProtectedMember
def _orbstop(self):
self._connection._command_port_agent('orbstop')
def stop_scheduled_job(self, schedule_job):
"""
Remove the scheduled job.
@param schedule_job: the scheduled job to remove.
"""
if self._scheduler is not None:
try:
self._remove_scheduler(schedule_job)
except KeyError:
log.warn("_remove_scheduler could not find %s", schedule_job)
def start_scheduled_job(self, param, schedule_job, protocol_event):
"""
Add a scheduled job
"""
self.stop_scheduled_job(schedule_job)
val = self._param_dict.get(param)
try:
seconds = int(val)
except ValueError:
raise InstrumentParameterException('Bad interval. Cannot parse %r as integer' % val)
if seconds > 0:
config = {
DriverConfigKey.SCHEDULER: {
schedule_job: {
DriverSchedulerConfigKey.TRIGGER: {
DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
DriverSchedulerConfigKey.SECONDS: seconds
}
}
}
}
self.set_init_params(config)
self._add_scheduler_event(schedule_job, protocol_event)
def got_data(self, port_agent_packet):
data_length = port_agent_packet.get_data_length()
data_type = port_agent_packet.get_header_type()
if data_type == PortAgentPacket.PICKLED_FROM_INSTRUMENT:
self._pickle_cache.append(port_agent_packet.get_data())
# 65519 is the maximum payload size (65535) minus the header size (16).
# Any packet of exactly this length will be followed by one or more packets
# carrying additional data, so keep accumulating packets until a shorter
# one arrives, then unpickle the complete payload (see the reassembly
# sketch at the end of this module).
if data_length != 65519:
data = pickle.loads(''.join(self._pickle_cache))
self._pickle_cache = []
self._bin_data(data)
else:
raise InstrumentProtocolException('Received unpickled data from port agent')
def got_raw(self, port_agent_packet):
pass
def _get_bin(self, packet):
rate_map = {
1: 86400, # 1 day
8: 86400, # 1 day
40: 86400, # 1 day
200: 86400, # 1 day
64000: 60 * 5, # 5 minutes
256000: 60, # 1 minute
}
start_time = packet['time']
rate = packet['samprate']
bin_size = rate_map.get(rate, 60)
bin_value = int(start_time/bin_size)
bin_start = bin_value * bin_size
bin_end = (bin_value + 1) * bin_size
return bin_start, bin_end
def _bin_data(self, packet):
key = '%s.%s.%s.%s' % (packet['net'], packet.get('location', ''),
packet.get('sta', ''), packet['chan'])
start, end = self._get_bin(packet)
with self._lock:
self._pktid = packet['pktid']
if key not in self._logs:
self._logs[key] = PacketLog.from_packet(packet, end, self._param_dict.get(Parameter.REFDES))
try:
while True:
packet = self._logs[key].add_packet(packet)
if packet is None:
break
# A residual packet was returned, so we need a new bin.
# The current log is complete; move it to the holding list until the next flush.
self._filled_logs.append(self._logs[key])
del self._logs[key]
# create the new log...
start, end = self._get_bin(packet)
self._logs[key] = PacketLog.from_packet(packet, end, self._param_dict.get(Parameter.REFDES))
except GapException:
# non-contiguous data detected, close this log and open a new one
self._filled_logs.append(self._logs[key])
del self._logs[key]
# create the new log
self._logs[key] = PacketLog.from_packet(packet, end, self._param_dict.get(Parameter.REFDES))
self._logs[key].add_packet(packet)
########################################################################
# UNKNOWN handlers.
########################################################################
def _handler_unknown_enter(self, *args, **kwargs):
"""
Enter unknown state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_unknown_exit(self, *args, **kwargs):
"""
Exit unknown state.
"""
def _handler_unknown_discover(self, *args, **kwargs):
"""
Discover current state; always COMMAND.
@return next_state, (next_state, result)
"""
next_state = ProtocolState.COMMAND
result = []
return next_state, (next_state, result)
########################################################################
# COMMAND handlers.
########################################################################
def _handler_command_enter(self, *args, **kwargs):
"""
Enter command state.
@throws InstrumentTimeoutException if the device cannot be woken.
@throws InstrumentProtocolException if the update commands are not recognized.
"""
self._init_params()
# We can't build the persistent dict until parameters are applied, so build it here
if self._persistent_store is None:
self._build_persistent_dict()
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_exit(self, *args, **kwargs):
"""
Exit command state.
"""
def _handler_command_start_autosample(self, *args, **kwargs):
"""
Switch into autosample mode.
@return next_state, (next_state, result) if successful.
"""
result = []
# Ensure the current logs are clear to prevent residual data from being flushed.
self._logs = {}
self._filled_logs = []
self._orbstart()
next_state = ProtocolState.AUTOSAMPLE
next_agent_state = ResourceAgentState.STREAMING
return next_state, (next_state, result)
######################################################
# AUTOSAMPLE handlers
######################################################
def _handler_autosample_enter(self, *args, **kwargs):
"""
Enter autosample state.
"""
self.start_scheduled_job(Parameter.FLUSH_INTERVAL, ScheduledJob.FLUSH, ProtocolEvent.FLUSH)
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_autosample_exit(self, *args, **kwargs):
"""
Exit autosample state.
"""
self._orbstop()
def _handler_autosample_stop_autosample(self, *args, **kwargs):
"""
Stop autosample and switch back to command mode.
@return next_state, (next_state, result) if successful.
"""
self._orbstop()
result = []
next_state = ProtocolState.STOPPING
next_agent_state = None
return next_state, (next_state, result)
######################################################
# STOPPING handlers
######################################################
def _handler_stopping_enter(self, *args, **kwargs):
"""
Enter stopping state.
"""
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_stopping_exit(self, *args, **kwargs):
"""
Exit stopping state.
Stop the scheduled flush job and schedule flush one more time and
indicate that it is the last flush before stopping auto sampling.
"""
pass
######################################################
# WRITE_ERROR handlers
######################################################
def _handler_write_error_enter(self, *args, **kwargs):
"""
Enter write error state.
"""
self.stop_scheduled_job(ScheduledJob.FLUSH)
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_write_error_exit(self, *args, **kwargs):
"""
Exit write error state.
"""
pass
def _handler_clear_write_error(self, *args, **kwargs):
"""
Clear the WRITE_ERROR state by transitioning to the COMMAND state.
@return next_state, (next_state, result)
"""
next_state = ProtocolState.COMMAND
result = []
return next_state, (next_state, result)
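# Illustrative sketch (not part of the original driver): the reassembly rule
# used in Protocol.got_data above. Port agent payloads are capped at
# 65535 - 16 = 65519 bytes, so a chunk of exactly that length means more data
# follows, and any shorter chunk terminates the pickled message.
def _example_reassemble(chunks):
    cache = []
    for chunk in chunks:
        cache.append(chunk)
        if len(chunk) != 65519:
            # A non-maximal chunk completes the message.
            yield pickle.loads(''.join(cache))
            cache = []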
|
|
# Copyright (c) 2016 Yingxin Cheng
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from workflow_parser.driver import init
from workflow_parser.driver import register_driver
sr, graph, rv = init("CephRbdobjectreq")
#### services ####
sr.f_register("ceph", "client")
client = sr.ceph.client
client.color = "#54c0e8"
#### functions ####
# function aio-operate send
e1 , n1 , f_write = graph.build_func(
1, "oreq_aiooperate_entry", "write")
e , _ = n1.build_endf("oreq_aiooperate_exit")
# function copyup send
e2 , n1 , f_copyup = graph.build_func(
1, "oreq_copyupsend_entry", "copyup")
e , _ = n1.build_endf("oreq_copyupsend_exit")
# function finish
e , n1 , f_finish = graph.build_func(
1, "oreq_finish_entry", "finish")
e , _ = n1.build_endf("oreq_finish_exit")
end1 = n1
# function postomap
e , n1 , f_postom = graph.build_func(
1, "oreq_postomap_entry", "postom")
## finish
e , n2 = n1.build( 2, f_finish)
e3 , n3 = n2.build( 3, "oreq_postomap_exit")
## postomap send
e4 , n4 = n1.build( 4, "oreq_postomap_send")
### success
_ , _ = n4.build( n3, e3)
### skip
e , n5 = n4.build( 5, "oreq_postomap_skip")
e , n6 = n5.build( 6, f_finish)
_ , _ = n6.build( n3, e3)
#### request objectreq ####
# receive objectreq
e11, n1 = graph.build_thread(client,
1, "oreq_send_entry", "objectreq")
## fail async
e5 , n2 = n1.build( 2, "oreq_send_fail")
e6 , n3 = n2.build( 3, "oreq_send_exit")
## write
e , n4 = n1.build( 4, f_write)
_ , _ = n4.build( n3, e6)
## copyup
e , n5 = n1.build( 5, f_copyup)
_ , _ = n5.build( n3, e6)
## send preomap
e7 , n6 = n1.build( 6, "oreq_preomap_send")
### success
_ , _ = n6.build( n3, e6)
### skip
_ , n7 = n6.build( 7, "oreq_preomap_skip")
_ , n8 = n7.build( 8, f_write)
_ , _ = n8.build( n3, e6)
# handle preomap
e8 , n10 = graph.build_thread(client,
10, "oreq_handlepreomap_entry")
_ , n11 = n10.build( 11, f_write)
e , n12 = n11.build( 12, "oreq_handlepreomap_exit")
# handle write
e9 , n15 = graph.build_thread(client,
15, "oreq_handlewrite_entry")
## finish: fail
e , n16 = n15.build( 16, f_finish)
e10, n17 = n16.build( 17, "oreq_handlewrite_exit")
## copyup
e , n18 = n15.build( 18, f_copyup)
_ , _ = n18.build(n17, e10)
## postomap
e , n19 = n15.build( 19, f_postom)
_ , _ = n19.build(n17, e10)
# handle copyup
e15, n25 = graph.build_thread(client,
25, "oreq_handlecopyup_entry")
## finish
e , n26 = n25.build( 26, f_finish)
e16, n27 = n26.build( 27, "oreq_handlecopyup_exit")
## write
e , n28 = n25.build( 28, f_write)
_ , _ = n28.build(n27, e16)
## postomap
e , n29 = n25.build( 29, f_postom)
_ , _ = n29.build(n27, e16)
# handle postomap
e20, n35 = graph.build_thread(client,
35, "oreq_handlepostomap_entry")
_ , n36 = n35.build( 36, f_finish)
e , n37 = n36.build( 37, "oreq_handlepostomap_exit")
# finish thread
e25, n40 = graph.build_thread(client,
40, "oreq_finish_entry")
_ , n41 = n40.build( 41, "oreq_finish_exit")
#### request objectreq states ####
n40.set_state("SUCCESS")
end1.set_state("FAIL_ASYNC")
e11.refresh_var("oreq")
#### relationship ####
# preomap
j1 = e7.join_one( e8, False, ["oreq"])
# write
j2 = e1.join_one( e9, False, ["oreq"])
# copyup
j3 = e2.join_one(e15, False, ["oreq"])
# postomap
j4 = e4.join_one(e20, False, ["oreq"])
# finish async
j5 = e5.join_one(e25, False, ["oreq"])
def filter_logfile(f_dir, f_name, var_dict):
if f_name.startswith("out"):
return False
else:
var_dict[rv.HOST] = f_name.rsplit(".", 1)[0]
return True
def filter_logline(line, var_dict):
if " rbdobjectreq:" not in line:
return False
# time, seconds
lines = line.split(" ", 1)
line = lines[1]
time = lines[0][1:-1]
var_dict[rv.TIME] = time
_time_s = time.split(":")
seconds = int(_time_s[0]) * 3600 + int(_time_s[1]) * 60 + float(_time_s[2])
var_dict[rv.SECONDS] = seconds
# component, target
lines = line.split(" ", 2)
line = lines[2]
_comp = lines[1].split(":")
comp = _comp[1]
if comp == "python":
comp = client
elif comp == "fio":
comp = client
elif comp == "qemu-system-x86":
comp = client
else:
raise RuntimeError("Unknown component: %s" % comp)
var_dict[rv.COMPONENT] = comp
target_alias = _comp[1] + ":" + _comp[2]
var_dict[rv.TARGET] = target_alias
# keyword
lines = line.split(" ", 1)
line = lines[1]
var_dict[rv.KEYWORD] = lines[0].split(":", 1)[1][:-1]
def _convert(dict_str):
try:
ret = {}
dict_str = dict_str.strip()
if dict_str:
items = dict_str.split(", ")
for item in items:
k, v = item.strip().split(" = ", 1)
k = k.strip()
ret[k] = eval(v.strip())
except Exception:
raise RuntimeError("Cannot evaluate %s" % dict_str)
return ret
# thread
lines = line.split(" }, { ")
# dict_ = _convert(lines[0].strip()[1:])
# var_dict[rv.THREAD] = str(dict_["cpu_id"])
dict1_ = _convert(lines[1])
var_dict.update(dict1_)
dict2_ = _convert(lines[2].strip()[:-1])
var_dict.update(dict2_)
var_dict[rv.THREAD] = str(var_dict["pthread_id"])
return True
register_driver(
__name__, sr, graph,
filter_logfile, filter_logline,
["ctraces"])
|
|
import collections.abc
from datetime import datetime
from math import ceil
from operator import attrgetter
from django.core.exceptions import FieldError
from django.db import connection, models
from django.db.models import Exists, Max, OuterRef
from django.db.models.functions import Substr
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import isolate_apps
from .models import (
Article, Author, Freebie, Game, IsNullWithNoneAsRHS, Player, Season, Tag,
)
class LookupTests(TestCase):
@classmethod
def setUpTestData(cls):
# Create a few Authors.
cls.au1 = Author.objects.create(name='Author 1', alias='a1')
cls.au2 = Author.objects.create(name='Author 2', alias='a2')
# Create a few Articles.
cls.a1 = Article.objects.create(
headline='Article 1',
pub_date=datetime(2005, 7, 26),
author=cls.au1,
slug='a1',
)
cls.a2 = Article.objects.create(
headline='Article 2',
pub_date=datetime(2005, 7, 27),
author=cls.au1,
slug='a2',
)
cls.a3 = Article.objects.create(
headline='Article 3',
pub_date=datetime(2005, 7, 27),
author=cls.au1,
slug='a3',
)
cls.a4 = Article.objects.create(
headline='Article 4',
pub_date=datetime(2005, 7, 28),
author=cls.au1,
slug='a4',
)
cls.a5 = Article.objects.create(
headline='Article 5',
pub_date=datetime(2005, 8, 1, 9, 0),
author=cls.au2,
slug='a5',
)
cls.a6 = Article.objects.create(
headline='Article 6',
pub_date=datetime(2005, 8, 1, 8, 0),
author=cls.au2,
slug='a6',
)
cls.a7 = Article.objects.create(
headline='Article 7',
pub_date=datetime(2005, 7, 27),
author=cls.au2,
slug='a7',
)
# Create a few Tags.
cls.t1 = Tag.objects.create(name='Tag 1')
cls.t1.articles.add(cls.a1, cls.a2, cls.a3)
cls.t2 = Tag.objects.create(name='Tag 2')
cls.t2.articles.add(cls.a3, cls.a4, cls.a5)
cls.t3 = Tag.objects.create(name='Tag 3')
cls.t3.articles.add(cls.a5, cls.a6, cls.a7)
def test_exists(self):
# We can use .exists() to check that there are some Articles.
self.assertTrue(Article.objects.exists())
for a in Article.objects.all():
a.delete()
# There should be none now!
self.assertFalse(Article.objects.exists())
def test_lookup_int_as_str(self):
# Integer value can be queried using string
self.assertSequenceEqual(
Article.objects.filter(id__iexact=str(self.a1.id)),
[self.a1],
)
@skipUnlessDBFeature('supports_date_lookup_using_string')
def test_lookup_date_as_str(self):
# A date lookup can be performed using a string search
self.assertSequenceEqual(
Article.objects.filter(pub_date__startswith='2005'),
[self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
def test_iterator(self):
# Each QuerySet gets iterator(), which is a generator that "lazily"
# returns results using database-level iteration.
self.assertIsInstance(Article.objects.iterator(), collections.abc.Iterator)
self.assertQuerysetEqual(
Article.objects.iterator(),
[
'Article 5',
'Article 6',
'Article 4',
'Article 2',
'Article 3',
'Article 7',
'Article 1',
],
transform=attrgetter('headline')
)
# iterator() can be used on any QuerySet.
self.assertQuerysetEqual(
Article.objects.filter(headline__endswith='4').iterator(),
['Article 4'],
transform=attrgetter('headline'))
def test_count(self):
# count() returns the number of objects matching search criteria.
self.assertEqual(Article.objects.count(), 7)
self.assertEqual(Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).count(), 3)
self.assertEqual(Article.objects.filter(headline__startswith='Blah blah').count(), 0)
# count() should respect sliced query sets.
articles = Article.objects.all()
self.assertEqual(articles.count(), 7)
self.assertEqual(articles[:4].count(), 4)
self.assertEqual(articles[1:100].count(), 6)
self.assertEqual(articles[10:100].count(), 0)
# Date and date/time lookups can also be done with strings.
self.assertEqual(Article.objects.filter(pub_date__exact='2005-07-27 00:00:00').count(), 3)
def test_in_bulk(self):
# in_bulk() takes a list of IDs and returns a dictionary mapping IDs to objects.
arts = Article.objects.in_bulk([self.a1.id, self.a2.id])
self.assertEqual(arts[self.a1.id], self.a1)
self.assertEqual(arts[self.a2.id], self.a2)
self.assertEqual(
Article.objects.in_bulk(),
{
self.a1.id: self.a1,
self.a2.id: self.a2,
self.a3.id: self.a3,
self.a4.id: self.a4,
self.a5.id: self.a5,
self.a6.id: self.a6,
self.a7.id: self.a7,
}
)
self.assertEqual(Article.objects.in_bulk([self.a3.id]), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk({self.a3.id}), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk(frozenset([self.a3.id])), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk((self.a3.id,)), {self.a3.id: self.a3})
self.assertEqual(Article.objects.in_bulk([1000]), {})
self.assertEqual(Article.objects.in_bulk([]), {})
self.assertEqual(Article.objects.in_bulk(iter([self.a1.id])), {self.a1.id: self.a1})
self.assertEqual(Article.objects.in_bulk(iter([])), {})
with self.assertRaises(TypeError):
Article.objects.in_bulk(headline__startswith='Blah')
def test_in_bulk_lots_of_ids(self):
test_range = 2000
max_query_params = connection.features.max_query_params
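        # Backends may cap the number of parameters per query, so in_bulk() on
        # 2000 ids can need ceil(2000 / max_query_params) queries; without a
        # cap a single query is enough.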
expected_num_queries = ceil(test_range / max_query_params) if max_query_params else 1
Author.objects.bulk_create([Author() for i in range(test_range - Author.objects.count())])
authors = {author.pk: author for author in Author.objects.all()}
with self.assertNumQueries(expected_num_queries):
self.assertEqual(Author.objects.in_bulk(authors), authors)
def test_in_bulk_with_field(self):
self.assertEqual(
Article.objects.in_bulk([self.a1.slug, self.a2.slug, self.a3.slug], field_name='slug'),
{
self.a1.slug: self.a1,
self.a2.slug: self.a2,
self.a3.slug: self.a3,
}
)
def test_in_bulk_meta_constraint(self):
season_2011 = Season.objects.create(year=2011)
season_2012 = Season.objects.create(year=2012)
Season.objects.create(year=2013)
self.assertEqual(
Season.objects.in_bulk(
[season_2011.year, season_2012.year],
field_name='year',
),
{season_2011.year: season_2011, season_2012.year: season_2012},
)
def test_in_bulk_non_unique_field(self):
msg = "in_bulk()'s field_name must be a unique field but 'author' isn't."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.in_bulk([self.au1], field_name='author')
@skipUnlessDBFeature('can_distinct_on_fields')
def test_in_bulk_distinct_field(self):
self.assertEqual(
Article.objects.order_by('headline').distinct('headline').in_bulk(
[self.a1.headline, self.a5.headline],
field_name='headline',
),
{self.a1.headline: self.a1, self.a5.headline: self.a5},
)
@skipUnlessDBFeature('can_distinct_on_fields')
def test_in_bulk_multiple_distinct_field(self):
msg = "in_bulk()'s field_name must be a unique field but 'pub_date' isn't."
with self.assertRaisesMessage(ValueError, msg):
Article.objects.order_by('headline', 'pub_date').distinct(
'headline', 'pub_date',
).in_bulk(field_name='pub_date')
@isolate_apps('lookup')
    def test_in_bulk_non_unique_meta_constraint(self):
class Model(models.Model):
ean = models.CharField(max_length=100)
brand = models.CharField(max_length=100)
name = models.CharField(max_length=80)
class Meta:
constraints = [
models.UniqueConstraint(
fields=['ean'],
name='partial_ean_unique',
condition=models.Q(is_active=True)
),
models.UniqueConstraint(
fields=['brand', 'name'],
name='together_brand_name_unique',
),
]
msg = "in_bulk()'s field_name must be a unique field but '%s' isn't."
for field_name in ['brand', 'ean']:
with self.subTest(field_name=field_name):
with self.assertRaisesMessage(ValueError, msg % field_name):
Model.objects.in_bulk(field_name=field_name)
def test_values(self):
# values() returns a list of dictionaries instead of object instances --
# and you can specify which fields you want to retrieve.
self.assertSequenceEqual(
Article.objects.values('headline'),
[
{'headline': 'Article 5'},
{'headline': 'Article 6'},
{'headline': 'Article 4'},
{'headline': 'Article 2'},
{'headline': 'Article 3'},
{'headline': 'Article 7'},
{'headline': 'Article 1'},
],
)
self.assertSequenceEqual(
Article.objects.filter(pub_date__exact=datetime(2005, 7, 27)).values('id'),
[{'id': self.a2.id}, {'id': self.a3.id}, {'id': self.a7.id}],
)
self.assertSequenceEqual(
Article.objects.values('id', 'headline'),
[
{'id': self.a5.id, 'headline': 'Article 5'},
{'id': self.a6.id, 'headline': 'Article 6'},
{'id': self.a4.id, 'headline': 'Article 4'},
{'id': self.a2.id, 'headline': 'Article 2'},
{'id': self.a3.id, 'headline': 'Article 3'},
{'id': self.a7.id, 'headline': 'Article 7'},
{'id': self.a1.id, 'headline': 'Article 1'},
],
)
# You can use values() with iterator() for memory savings,
# because iterator() uses database-level iteration.
self.assertSequenceEqual(
list(Article.objects.values('id', 'headline').iterator()),
[
{'headline': 'Article 5', 'id': self.a5.id},
{'headline': 'Article 6', 'id': self.a6.id},
{'headline': 'Article 4', 'id': self.a4.id},
{'headline': 'Article 2', 'id': self.a2.id},
{'headline': 'Article 3', 'id': self.a3.id},
{'headline': 'Article 7', 'id': self.a7.id},
{'headline': 'Article 1', 'id': self.a1.id},
],
)
# The values() method works with "extra" fields specified in extra(select).
self.assertSequenceEqual(
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_one'),
[
{'id': self.a5.id, 'id_plus_one': self.a5.id + 1},
{'id': self.a6.id, 'id_plus_one': self.a6.id + 1},
{'id': self.a4.id, 'id_plus_one': self.a4.id + 1},
{'id': self.a2.id, 'id_plus_one': self.a2.id + 1},
{'id': self.a3.id, 'id_plus_one': self.a3.id + 1},
{'id': self.a7.id, 'id_plus_one': self.a7.id + 1},
{'id': self.a1.id, 'id_plus_one': self.a1.id + 1},
],
)
data = {
'id_plus_one': 'id+1',
'id_plus_two': 'id+2',
'id_plus_three': 'id+3',
'id_plus_four': 'id+4',
'id_plus_five': 'id+5',
'id_plus_six': 'id+6',
'id_plus_seven': 'id+7',
'id_plus_eight': 'id+8',
}
self.assertSequenceEqual(
Article.objects.filter(id=self.a1.id).extra(select=data).values(*data),
[{
'id_plus_one': self.a1.id + 1,
'id_plus_two': self.a1.id + 2,
'id_plus_three': self.a1.id + 3,
'id_plus_four': self.a1.id + 4,
'id_plus_five': self.a1.id + 5,
'id_plus_six': self.a1.id + 6,
'id_plus_seven': self.a1.id + 7,
'id_plus_eight': self.a1.id + 8,
}],
)
# You can specify fields from forward and reverse relations, just like filter().
self.assertSequenceEqual(
Article.objects.values('headline', 'author__name'),
[
{'headline': self.a5.headline, 'author__name': self.au2.name},
{'headline': self.a6.headline, 'author__name': self.au2.name},
{'headline': self.a4.headline, 'author__name': self.au1.name},
{'headline': self.a2.headline, 'author__name': self.au1.name},
{'headline': self.a3.headline, 'author__name': self.au1.name},
{'headline': self.a7.headline, 'author__name': self.au2.name},
{'headline': self.a1.headline, 'author__name': self.au1.name},
],
)
self.assertSequenceEqual(
Author.objects.values('name', 'article__headline').order_by('name', 'article__headline'),
[
{'name': self.au1.name, 'article__headline': self.a1.headline},
{'name': self.au1.name, 'article__headline': self.a2.headline},
{'name': self.au1.name, 'article__headline': self.a3.headline},
{'name': self.au1.name, 'article__headline': self.a4.headline},
{'name': self.au2.name, 'article__headline': self.a5.headline},
{'name': self.au2.name, 'article__headline': self.a6.headline},
{'name': self.au2.name, 'article__headline': self.a7.headline},
],
)
self.assertSequenceEqual(
(
Author.objects
.values('name', 'article__headline', 'article__tag__name')
.order_by('name', 'article__headline', 'article__tag__name')
),
[
{'name': self.au1.name, 'article__headline': self.a1.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a2.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t1.name},
{'name': self.au1.name, 'article__headline': self.a3.headline, 'article__tag__name': self.t2.name},
{'name': self.au1.name, 'article__headline': self.a4.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t2.name},
{'name': self.au2.name, 'article__headline': self.a5.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a6.headline, 'article__tag__name': self.t3.name},
{'name': self.au2.name, 'article__headline': self.a7.headline, 'article__tag__name': self.t3.name},
],
)
        # However, a FieldError will be raised if you specify a nonexistent
        # field name in values() (a field that is neither in the model nor in
        # extra(select)).
msg = (
"Cannot resolve keyword 'id_plus_two' into field. Choices are: "
"author, author_id, headline, id, id_plus_one, pub_date, slug, tag"
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_two')
# If you don't specify field names to values(), all are returned.
self.assertSequenceEqual(
Article.objects.filter(id=self.a5.id).values(),
[{
'id': self.a5.id,
'author_id': self.au2.id,
'headline': 'Article 5',
'pub_date': datetime(2005, 8, 1, 9, 0),
'slug': 'a5',
}],
)
def test_values_list(self):
# values_list() is similar to values(), except that the results are
# returned as a list of tuples, rather than a list of dictionaries.
# Within each tuple, the order of the elements is the same as the order
# of fields in the values_list() call.
self.assertSequenceEqual(
Article.objects.values_list('headline'),
[
('Article 5',),
('Article 6',),
('Article 4',),
('Article 2',),
('Article 3',),
('Article 7',),
('Article 1',),
],
)
self.assertSequenceEqual(
Article.objects.values_list('id').order_by('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
)
self.assertSequenceEqual(
Article.objects.values_list('id', flat=True).order_by('id'),
[self.a1.id, self.a2.id, self.a3.id, self.a4.id, self.a5.id, self.a6.id, self.a7.id],
)
self.assertSequenceEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id'),
[(self.a1.id,), (self.a2.id,), (self.a3.id,), (self.a4.id,), (self.a5.id,), (self.a6.id,), (self.a7.id,)],
)
self.assertSequenceEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id_plus_one', 'id'),
[
(self.a1.id + 1, self.a1.id),
(self.a2.id + 1, self.a2.id),
(self.a3.id + 1, self.a3.id),
(self.a4.id + 1, self.a4.id),
(self.a5.id + 1, self.a5.id),
(self.a6.id + 1, self.a6.id),
(self.a7.id + 1, self.a7.id)
],
)
self.assertSequenceEqual(
Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id', 'id_plus_one'),
[
(self.a1.id, self.a1.id + 1),
(self.a2.id, self.a2.id + 1),
(self.a3.id, self.a3.id + 1),
(self.a4.id, self.a4.id + 1),
(self.a5.id, self.a5.id + 1),
(self.a6.id, self.a6.id + 1),
(self.a7.id, self.a7.id + 1)
],
)
args = ('name', 'article__headline', 'article__tag__name')
self.assertSequenceEqual(
Author.objects.values_list(*args).order_by(*args),
[
(self.au1.name, self.a1.headline, self.t1.name),
(self.au1.name, self.a2.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t1.name),
(self.au1.name, self.a3.headline, self.t2.name),
(self.au1.name, self.a4.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t2.name),
(self.au2.name, self.a5.headline, self.t3.name),
(self.au2.name, self.a6.headline, self.t3.name),
(self.au2.name, self.a7.headline, self.t3.name),
],
)
with self.assertRaises(TypeError):
Article.objects.values_list('id', 'headline', flat=True)
def test_get_next_previous_by(self):
# Every DateField and DateTimeField creates get_next_by_FOO() and
# get_previous_by_FOO() methods. In the case of identical date values,
# these methods will use the ID as a fallback check. This guarantees
# that no records are skipped or duplicated.
self.assertEqual(repr(self.a1.get_next_by_pub_date()), '<Article: Article 2>')
self.assertEqual(repr(self.a2.get_next_by_pub_date()), '<Article: Article 3>')
self.assertEqual(repr(self.a2.get_next_by_pub_date(headline__endswith='6')), '<Article: Article 6>')
self.assertEqual(repr(self.a3.get_next_by_pub_date()), '<Article: Article 7>')
self.assertEqual(repr(self.a4.get_next_by_pub_date()), '<Article: Article 6>')
with self.assertRaises(Article.DoesNotExist):
self.a5.get_next_by_pub_date()
self.assertEqual(repr(self.a6.get_next_by_pub_date()), '<Article: Article 5>')
self.assertEqual(repr(self.a7.get_next_by_pub_date()), '<Article: Article 4>')
self.assertEqual(repr(self.a7.get_previous_by_pub_date()), '<Article: Article 3>')
self.assertEqual(repr(self.a6.get_previous_by_pub_date()), '<Article: Article 4>')
self.assertEqual(repr(self.a5.get_previous_by_pub_date()), '<Article: Article 6>')
self.assertEqual(repr(self.a4.get_previous_by_pub_date()), '<Article: Article 7>')
self.assertEqual(repr(self.a3.get_previous_by_pub_date()), '<Article: Article 2>')
self.assertEqual(repr(self.a2.get_previous_by_pub_date()), '<Article: Article 1>')
def test_escaping(self):
# Underscores, percent signs and backslashes have special meaning in the
# underlying SQL code, but Django handles the quoting of them automatically.
a8 = Article.objects.create(headline='Article_ with underscore', pub_date=datetime(2005, 11, 20))
self.assertSequenceEqual(
Article.objects.filter(headline__startswith='Article'),
[a8, self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
self.assertSequenceEqual(
Article.objects.filter(headline__startswith='Article_'),
[a8],
)
a9 = Article.objects.create(headline='Article% with percent sign', pub_date=datetime(2005, 11, 21))
self.assertSequenceEqual(
Article.objects.filter(headline__startswith='Article'),
[a9, a8, self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
self.assertSequenceEqual(
Article.objects.filter(headline__startswith='Article%'),
[a9],
)
a10 = Article.objects.create(headline='Article with \\ backslash', pub_date=datetime(2005, 11, 22))
self.assertSequenceEqual(
Article.objects.filter(headline__contains='\\'),
[a10],
)
def test_exclude(self):
pub_date = datetime(2005, 11, 20)
a8 = Article.objects.create(headline='Article_ with underscore', pub_date=pub_date)
a9 = Article.objects.create(headline='Article% with percent sign', pub_date=pub_date)
a10 = Article.objects.create(headline='Article with \\ backslash', pub_date=pub_date)
# exclude() is the opposite of filter() when doing lookups:
self.assertSequenceEqual(
Article.objects.filter(headline__contains='Article').exclude(headline__contains='with'),
[self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
self.assertSequenceEqual(
Article.objects.exclude(headline__startswith="Article_"),
[a10, a9, self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
self.assertSequenceEqual(
Article.objects.exclude(headline="Article 7"),
[a10, a9, a8, self.a5, self.a6, self.a4, self.a2, self.a3, self.a1],
)
def test_none(self):
# none() returns a QuerySet that behaves like any other QuerySet object
self.assertQuerysetEqual(Article.objects.none(), [])
self.assertQuerysetEqual(Article.objects.none().filter(headline__startswith='Article'), [])
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='Article').none(), [])
self.assertEqual(Article.objects.none().count(), 0)
self.assertEqual(Article.objects.none().update(headline="This should not take effect"), 0)
self.assertQuerysetEqual(Article.objects.none().iterator(), [])
def test_in(self):
self.assertSequenceEqual(
Article.objects.exclude(id__in=[]),
[self.a5, self.a6, self.a4, self.a2, self.a3, self.a7, self.a1],
)
def test_in_empty_list(self):
self.assertSequenceEqual(Article.objects.filter(id__in=[]), [])
def test_in_different_database(self):
with self.assertRaisesMessage(
ValueError,
"Subqueries aren't allowed across different databases. Force the "
"inner query to be evaluated using `list(inner_query)`."
):
list(Article.objects.filter(id__in=Article.objects.using('other').all()))
def test_in_keeps_value_ordering(self):
query = Article.objects.filter(slug__in=['a%d' % i for i in range(1, 8)]).values('pk').query
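        # str(query) interpolates the parameters without quoting, so the slugs
        # appear bare; the point is that they keep the order of the input list.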
self.assertIn(' IN (a1, a2, a3, a4, a5, a6, a7) ', str(query))
def test_in_ignore_none(self):
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(
Article.objects.filter(id__in=[None, self.a1.id]),
[self.a1],
)
sql = ctx.captured_queries[0]['sql']
self.assertIn('IN (%s)' % self.a1.pk, sql)
def test_in_ignore_solo_none(self):
with self.assertNumQueries(0):
self.assertSequenceEqual(Article.objects.filter(id__in=[None]), [])
def test_in_ignore_none_with_unhashable_items(self):
class UnhashableInt(int):
__hash__ = None
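        # With __hash__ = None the items are unhashable, so filtering out None
        # values cannot rely on putting the given items into a set.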
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(
Article.objects.filter(id__in=[None, UnhashableInt(self.a1.id)]),
[self.a1],
)
sql = ctx.captured_queries[0]['sql']
self.assertIn('IN (%s)' % self.a1.pk, sql)
def test_error_messages(self):
# Programming errors are pointed out with nice error messages
with self.assertRaisesMessage(
FieldError,
"Cannot resolve keyword 'pub_date_year' into field. Choices are: "
"author, author_id, headline, id, pub_date, slug, tag"
):
Article.objects.filter(pub_date_year='2005').count()
def test_unsupported_lookups(self):
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'starts' for CharField or join on the field "
"not permitted, perhaps you meant startswith or istartswith?"
):
Article.objects.filter(headline__starts='Article')
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'is_null' for DateTimeField or join on the field "
"not permitted, perhaps you meant isnull?"
):
Article.objects.filter(pub_date__is_null=True)
with self.assertRaisesMessage(
FieldError,
"Unsupported lookup 'gobbledygook' for DateTimeField or join on the field "
"not permitted."
):
Article.objects.filter(pub_date__gobbledygook='blahblah')
def test_relation_nested_lookup_error(self):
# An invalid nested lookup on a related field raises a useful error.
msg = 'Related Field got invalid lookup: editor'
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(author__editor__name='James')
msg = 'Related Field got invalid lookup: foo'
with self.assertRaisesMessage(FieldError, msg):
Tag.objects.filter(articles__foo='bar')
def test_regex(self):
# Create some articles with a bit more interesting headlines for testing field lookups:
for a in Article.objects.all():
a.delete()
now = datetime.now()
Article.objects.bulk_create([
Article(pub_date=now, headline='f'),
Article(pub_date=now, headline='fo'),
Article(pub_date=now, headline='foo'),
Article(pub_date=now, headline='fooo'),
Article(pub_date=now, headline='hey-Foo'),
Article(pub_date=now, headline='bar'),
Article(pub_date=now, headline='AbBa'),
Article(pub_date=now, headline='baz'),
Article(pub_date=now, headline='baxZ'),
])
# zero-or-more
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fo*'),
Article.objects.filter(headline__in=['f', 'fo', 'foo', 'fooo']),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'fo*'),
Article.objects.filter(headline__in=['f', 'fo', 'foo', 'fooo', 'hey-Foo']),
)
# one-or-more
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fo+'),
Article.objects.filter(headline__in=['fo', 'foo', 'fooo']),
)
# wildcard
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'fooo?'),
Article.objects.filter(headline__in=['foo', 'fooo']),
)
# leading anchor
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'^b'),
Article.objects.filter(headline__in=['bar', 'baxZ', 'baz']),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'^a'),
Article.objects.filter(headline='AbBa'),
)
# trailing anchor
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'z$'),
Article.objects.filter(headline='baz'),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'z$'),
Article.objects.filter(headline__in=['baxZ', 'baz']),
)
# character sets
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'ba[rz]'),
Article.objects.filter(headline__in=['bar', 'baz']),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'ba.[RxZ]'),
Article.objects.filter(headline='baxZ'),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'ba[RxZ]'),
Article.objects.filter(headline__in=['bar', 'baxZ', 'baz']),
)
# and more articles:
Article.objects.bulk_create([
Article(pub_date=now, headline='foobar'),
Article(pub_date=now, headline='foobaz'),
Article(pub_date=now, headline='ooF'),
Article(pub_date=now, headline='foobarbaz'),
Article(pub_date=now, headline='zoocarfaz'),
Article(pub_date=now, headline='barfoobaz'),
Article(pub_date=now, headline='bazbaRFOO'),
])
# alternation
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'oo(f|b)'),
Article.objects.filter(headline__in=[
'barfoobaz',
'foobar',
'foobarbaz',
'foobaz',
]),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'oo(f|b)'),
Article.objects.filter(headline__in=[
'barfoobaz',
'foobar',
'foobarbaz',
'foobaz',
'ooF',
]),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'^foo(f|b)'),
Article.objects.filter(headline__in=['foobar', 'foobarbaz', 'foobaz']),
)
# greedy matching
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'b.*az'),
Article.objects.filter(headline__in=[
'barfoobaz',
'baz',
'bazbaRFOO',
'foobarbaz',
'foobaz',
]),
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iregex=r'b.*ar'),
Article.objects.filter(headline__in=[
'bar',
'barfoobaz',
'bazbaRFOO',
'foobar',
'foobarbaz',
]),
)
@skipUnlessDBFeature('supports_regex_backreferencing')
def test_regex_backreferencing(self):
# grouping and backreferences
now = datetime.now()
Article.objects.bulk_create([
Article(pub_date=now, headline='foobar'),
Article(pub_date=now, headline='foobaz'),
Article(pub_date=now, headline='ooF'),
Article(pub_date=now, headline='foobarbaz'),
Article(pub_date=now, headline='zoocarfaz'),
Article(pub_date=now, headline='barfoobaz'),
Article(pub_date=now, headline='bazbaRFOO'),
])
self.assertQuerysetEqual(
Article.objects.filter(headline__regex=r'b(.).*b\1').values_list('headline', flat=True),
['barfoobaz', 'bazbaRFOO', 'foobarbaz'],
)
def test_regex_null(self):
"""
A regex lookup does not fail on null/None values
"""
Season.objects.create(year=2012, gt=None)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^$'), [])
def test_regex_non_string(self):
"""
A regex lookup does not fail on non-string fields
"""
s = Season.objects.create(year=2013, gt=444)
self.assertQuerysetEqual(Season.objects.filter(gt__regex=r'^444$'), [s])
def test_regex_non_ascii(self):
"""
A regex lookup does not trip on non-ASCII characters.
"""
Player.objects.create(name='\u2660')
Player.objects.get(name__regex='\u2660')
def test_nonfield_lookups(self):
"""
A lookup query containing non-fields raises the proper exception.
"""
msg = "Unsupported lookup 'blahblah' for CharField or join on the field not permitted."
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(headline__blahblah=99)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(headline__blahblah__exact=99)
msg = (
"Cannot resolve keyword 'blahblah' into field. Choices are: "
"author, author_id, headline, id, pub_date, slug, tag"
)
with self.assertRaisesMessage(FieldError, msg):
Article.objects.filter(blahblah=99)
def test_lookup_collision(self):
"""
Genuine field names don't collide with built-in lookup types
('year', 'gt', 'range', 'in' etc.) (#11670).
"""
# 'gt' is used as a code number for the year, e.g. 111=>2009.
season_2009 = Season.objects.create(year=2009, gt=111)
season_2009.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2010 = Season.objects.create(year=2010, gt=222)
season_2010.games.create(home="Houston Astros", away="Chicago Cubs")
season_2010.games.create(home="Houston Astros", away="Milwaukee Brewers")
season_2010.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011 = Season.objects.create(year=2011, gt=333)
season_2011.games.create(home="Houston Astros", away="St. Louis Cardinals")
season_2011.games.create(home="Houston Astros", away="Milwaukee Brewers")
hunter_pence = Player.objects.create(name="Hunter Pence")
hunter_pence.games.set(Game.objects.filter(season__year__in=[2009, 2010]))
pudge = Player.objects.create(name="Ivan Rodriquez")
pudge.games.set(Game.objects.filter(season__year=2009))
pedro_feliz = Player.objects.create(name="Pedro Feliz")
pedro_feliz.games.set(Game.objects.filter(season__year__in=[2011]))
johnson = Player.objects.create(name="Johnson")
johnson.games.set(Game.objects.filter(season__year__in=[2011]))
# Games in 2010
self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
# Games in 2011
self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
# Games played in 2010 and 2011
self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
# Players who played in 2009
self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2)
# Players who played in 2010
self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1)
# Players who played in 2011
self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2)
def test_chain_date_time_lookups(self):
self.assertCountEqual(
Article.objects.filter(pub_date__month__gt=7),
[self.a5, self.a6],
)
self.assertCountEqual(
Article.objects.filter(pub_date__day__gte=27),
[self.a2, self.a3, self.a4, self.a7],
)
self.assertCountEqual(
Article.objects.filter(pub_date__hour__lt=8),
[self.a1, self.a2, self.a3, self.a4, self.a7],
)
self.assertCountEqual(
Article.objects.filter(pub_date__minute__lte=0),
[self.a1, self.a2, self.a3, self.a4, self.a5, self.a6, self.a7],
)
def test_exact_none_transform(self):
"""Transforms are used for __exact=None."""
Season.objects.create(year=1, nulled_text_field='not null')
self.assertFalse(Season.objects.filter(nulled_text_field__isnull=True))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled__isnull=True))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled__exact=None))
self.assertTrue(Season.objects.filter(nulled_text_field__nulled=None))
def test_exact_sliced_queryset_limit_one(self):
self.assertCountEqual(
Article.objects.filter(author=Author.objects.all()[:1]),
[self.a1, self.a2, self.a3, self.a4]
)
def test_exact_sliced_queryset_limit_one_offset(self):
self.assertCountEqual(
Article.objects.filter(author=Author.objects.all()[1:2]),
[self.a5, self.a6, self.a7]
)
def test_exact_sliced_queryset_not_limited_to_one(self):
msg = (
'The QuerySet value for an exact lookup must be limited to one '
'result using slicing.'
)
with self.assertRaisesMessage(ValueError, msg):
list(Article.objects.filter(author=Author.objects.all()[:2]))
with self.assertRaisesMessage(ValueError, msg):
list(Article.objects.filter(author=Author.objects.all()[1:]))
def test_custom_field_none_rhs(self):
"""
__exact=value is transformed to __isnull=True if Field.get_prep_value()
converts value to None.
"""
season = Season.objects.create(year=2012, nulled_text_field=None)
self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field__isnull=True))
self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field=''))
def test_pattern_lookups_with_substr(self):
a = Author.objects.create(name='John Smith', alias='Johx')
b = Author.objects.create(name='Rhonda Simpson', alias='sonx')
tests = (
('startswith', [a]),
('istartswith', [a]),
('contains', [a, b]),
('icontains', [a, b]),
('endswith', [b]),
('iendswith', [b]),
)
for lookup, result in tests:
with self.subTest(lookup=lookup):
authors = Author.objects.filter(**{'name__%s' % lookup: Substr('alias', 1, 3)})
self.assertCountEqual(authors, result)
def test_custom_lookup_none_rhs(self):
"""Lookup.can_use_none_as_rhs=True allows None as a lookup value."""
season = Season.objects.create(year=2012, nulled_text_field=None)
query = Season.objects.get_queryset().query
field = query.model._meta.get_field('nulled_text_field')
self.assertIsInstance(query.build_lookup(['isnull_none_rhs'], field, None), IsNullWithNoneAsRHS)
self.assertTrue(Season.objects.filter(pk=season.pk, nulled_text_field__isnull_none_rhs=True))
def test_exact_exists(self):
qs = Article.objects.filter(pk=OuterRef('pk'))
seasons = Season.objects.annotate(
pk_exists=Exists(qs),
).filter(
pk_exists=Exists(qs),
)
self.assertCountEqual(seasons, Season.objects.all())
def test_nested_outerref_lhs(self):
tag = Tag.objects.create(name=self.au1.alias)
tag.articles.add(self.a1)
qs = Tag.objects.annotate(
has_author_alias_match=Exists(
Article.objects.annotate(
author_exists=Exists(
Author.objects.filter(alias=OuterRef(OuterRef('name')))
),
).filter(author_exists=True)
),
)
self.assertEqual(qs.get(has_author_alias_match=True), tag)
def test_exact_query_rhs_with_selected_columns(self):
newest_author = Author.objects.create(name='Author 2')
authors_max_ids = Author.objects.filter(
name='Author 2',
).values(
'name',
).annotate(
max_id=Max('id'),
).values('max_id')
authors = Author.objects.filter(id=authors_max_ids[:1])
self.assertEqual(authors.get(), newest_author)
def test_isnull_non_boolean_value(self):
msg = 'The QuerySet value for an isnull lookup must be True or False.'
tests = [
Author.objects.filter(alias__isnull=1),
Article.objects.filter(author__isnull=1),
Season.objects.filter(games__isnull=1),
Freebie.objects.filter(stock__isnull=1),
]
for qs in tests:
with self.subTest(qs=qs):
with self.assertRaisesMessage(ValueError, msg):
qs.exists()
|
|
# Copyright 2015-2016 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''This module provides helper functions for Gnome/GLib related
functionality such as gobject-introspection and gresources.'''
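# Illustrative sketch of how this module might be called from a meson.build
# file (names and paths below are examples, not taken from this module):
#
#   gnome = import('gnome')
#   res = gnome.compile_resources('app-res', 'app.gresource.xml',
#                                 source_dir : 'data', c_name : 'app')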
from .. import build
import os
import subprocess
from ..mesonlib import MesonException
from .. import mlog
from .. import mesonlib
girwarning_printed = False
gresource_warning_printed = False
class GnomeModule:
def __print_gresources_warning(self):
global gresource_warning_printed
if not gresource_warning_printed:
mlog.log('Warning, glib compiled dependencies will not work reliably until this upstream issue is fixed:',
mlog.bold('https://bugzilla.gnome.org/show_bug.cgi?id=745754'))
gresource_warning_printed = True
return []
def compile_resources(self, state, args, kwargs):
self.__print_gresources_warning()
cmd = ['glib-compile-resources', '@INPUT@']
source_dirs = kwargs.pop('source_dir', [])
if not isinstance(source_dirs, list):
source_dirs = [source_dirs]
ifile = args[1]
if isinstance(ifile, mesonlib.File):
ifile = os.path.join(ifile.subdir, ifile.fname)
elif isinstance(ifile, str):
ifile = os.path.join(state.subdir, ifile)
else:
raise RuntimeError('Unreachable code.')
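        # Ask glib-compile-resources for the files referenced by the resource
        # XML at configure time so that editing any of them re-triggers the
        # generated targets.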
kwargs['depend_files'] = self.get_gresource_dependencies(state, ifile, source_dirs)
for source_dir in source_dirs:
sourcedir = os.path.join(state.build_to_src, state.subdir, source_dir)
cmd += ['--sourcedir', sourcedir]
if 'c_name' in kwargs:
cmd += ['--c-name', kwargs.pop('c_name')]
cmd += ['--generate', '--target', '@OUTPUT@']
kwargs['command'] = cmd
kwargs['input'] = args[1]
kwargs['output'] = args[0] + '.c'
target_c = build.CustomTarget(args[0] + '_c', state.subdir, kwargs)
kwargs['output'] = args[0] + '.h'
target_h = build.CustomTarget(args[0] + '_h', state.subdir, kwargs)
return [target_c, target_h]
def get_gresource_dependencies(self, state, input_file, source_dirs):
self.__print_gresources_warning()
cmd = ['glib-compile-resources',
input_file,
'--generate-dependencies']
for source_dir in source_dirs:
cmd += ['--sourcedir', os.path.join(state.subdir, source_dir)]
pc = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True,
cwd=state.environment.get_source_dir())
(stdout, _) = pc.communicate()
if pc.returncode != 0:
mlog.log(mlog.bold('Warning:'), 'glib-compile-resources has failed to get the dependencies for {}'.format(cmd[1]))
raise subprocess.CalledProcessError(pc.returncode, cmd)
return stdout.split('\n')[:-1]
def generate_gir(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('Gir takes one argument')
girtarget = args[0]
while hasattr(girtarget, 'held_object'):
girtarget = girtarget.held_object
if not isinstance(girtarget, (build.Executable, build.SharedLibrary)):
raise MesonException('Gir target must be an executable or shared library')
try:
pkgstr = subprocess.check_output(['pkg-config', '--cflags', 'gobject-introspection-1.0'])
except Exception:
global girwarning_printed
if not girwarning_printed:
mlog.log(mlog.bold('Warning:'), 'gobject-introspection dependency was not found, disabling gir generation.')
girwarning_printed = True
return []
pkgargs = pkgstr.decode().strip().split()
ns = kwargs.pop('namespace')
nsversion = kwargs.pop('nsversion')
libsources = kwargs.pop('sources')
girfile = '%s-%s.gir' % (ns, nsversion)
depends = [girtarget]
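        # Two custom targets are produced below: g-ir-scanner generates the
        # .gir file, and g-ir-compiler then compiles it into a .typelib.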
scan_command = ['g-ir-scanner', '@INPUT@']
scan_command += pkgargs
        scan_command += ['--no-libtool', '--namespace=' + ns, '--nsversion=' + nsversion, '--warn-all',
'--output', '@OUTPUT@']
extra_args = kwargs.pop('extra_args', [])
if not isinstance(extra_args, list):
extra_args = [extra_args]
scan_command += extra_args
for incdirs in girtarget.include_dirs:
for incdir in incdirs.get_incdirs():
scan_command += ['-I%s' % os.path.join(state.environment.get_source_dir(), incdir)]
if 'link_with' in kwargs:
link_with = kwargs.pop('link_with')
if not isinstance(link_with, list):
link_with = [link_with]
for link in link_with:
lib = link.held_object
scan_command += ['-l%s' % lib.name]
if isinstance(lib, build.SharedLibrary):
scan_command += ['-L%s' %
os.path.join(state.environment.get_build_dir(),
lib.subdir)]
depends.append(lib)
if 'includes' in kwargs:
includes = kwargs.pop('includes')
if isinstance(includes, str):
scan_command += ['--include=%s' % includes]
elif isinstance(includes, list):
scan_command += ['--include=%s' % inc for inc in includes]
else:
raise MesonException('Gir includes must be str or list')
if state.global_args.get('c'):
scan_command += ['--cflags-begin']
scan_command += state.global_args['c']
scan_command += ['--cflags-end']
if kwargs.get('symbol_prefix'):
sym_prefix = kwargs.pop('symbol_prefix')
if not isinstance(sym_prefix, str):
raise MesonException('Gir symbol prefix must be str')
scan_command += ['--symbol-prefix=%s' % sym_prefix]
if kwargs.get('identifier_prefix'):
identifier_prefix = kwargs.pop('identifier_prefix')
if not isinstance(identifier_prefix, str):
raise MesonException('Gir identifier prefix must be str')
scan_command += ['--identifier-prefix=%s' % identifier_prefix]
if kwargs.get('export_packages'):
pkgs = kwargs.pop('export_packages')
if isinstance(pkgs, str):
scan_command += ['--pkg-export=%s' % pkgs]
elif isinstance(pkgs, list):
scan_command += ['--pkg-export=%s' % pkg for pkg in pkgs]
else:
raise MesonException('Gir export packages must be str or list')
deps = None
if 'dependencies' in kwargs:
deps = kwargs.pop('dependencies')
            if not isinstance(deps, list):
deps = [deps]
for dep in deps:
                girdir = dep.held_object.get_variable("girdir")
if girdir:
scan_command += ["--add-include-path=%s" % girdir]
for lib in dep.held_object.libs:
if os.path.isabs(lib) and dep.held_object.is_libtool:
scan_command += ["-L%s" % os.path.dirname(lib)]
libname = os.path.basename(lib)
if libname.startswith("lib"):
libname = libname[3:]
libname = libname.split(".so")[0]
lib = "-l%s" % libname
scan_command += [lib]
inc_dirs = None
if kwargs.get('include_directories'):
inc_dirs = kwargs.pop('include_directories')
if not isinstance(inc_dirs, list):
inc_dirs = [inc_dirs]
for ind in inc_dirs:
if isinstance(ind.held_object, build.IncludeDirs):
scan_command += ['--add-include-path=%s' % inc for inc in ind.held_object.get_incdirs()]
else:
raise MesonException('Gir include dirs should be include_directories()')
if isinstance(girtarget, build.Executable):
scan_command += ['--program', girtarget]
elif isinstance(girtarget, build.SharedLibrary):
scan_command += ["-L@PRIVATE_OUTDIR_ABS_%s@" % girtarget.get_id()]
libname = girtarget.get_basename()
scan_command += ['--library', libname]
scankwargs = {'output' : girfile,
'input' : libsources,
'command' : scan_command,
'depends' : depends,
}
if kwargs.get('install'):
scankwargs['install'] = kwargs['install']
scankwargs['install_dir'] = os.path.join(state.environment.get_datadir(), 'gir-1.0')
scan_target = GirTarget(girfile, state.subdir, scankwargs)
typelib_output = '%s-%s.typelib' % (ns, nsversion)
typelib_cmd = ['g-ir-compiler', scan_target, '--output', '@OUTPUT@']
if inc_dirs:
for incd in inc_dirs:
typelib_cmd += ['--includedir=%s' % inc for inc in
incd.held_object.get_incdirs()]
if deps:
for dep in deps:
                girdir = dep.held_object.get_variable("girdir")
if girdir:
typelib_cmd += ["--includedir=%s" % girdir]
kwargs['output'] = typelib_output
kwargs['command'] = typelib_cmd
# Note that this can't be libdir, because e.g. on Debian it points to
# lib/x86_64-linux-gnu but the girepo dir is always under lib.
kwargs['install_dir'] = 'lib/girepository-1.0'
typelib_target = TypelibTarget(typelib_output, state.subdir, kwargs)
return [scan_target, typelib_target]
def compile_schemas(self, state, args, kwargs):
if len(args) != 0:
raise MesonException('Compile_schemas does not take positional arguments.')
srcdir = os.path.join(state.build_to_src, state.subdir)
outdir = state.subdir
cmd = ['glib-compile-schemas', '--targetdir', outdir, srcdir]
kwargs['command'] = cmd
kwargs['input'] = []
kwargs['output'] = 'gschemas.compiled'
if state.subdir == '':
targetname = 'gsettings-compile'
else:
targetname = 'gsettings-compile-' + state.subdir
target_g = build.CustomTarget(targetname, state.subdir, kwargs)
return target_g
def gtkdoc(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('Gtkdoc must have one positional argument.')
modulename = args[0]
if not isinstance(modulename, str):
raise MesonException('Gtkdoc arg must be string.')
        if 'src_dir' not in kwargs:
raise MesonException('Keyword argument src_dir missing.')
main_file = kwargs.get('main_sgml', '')
if not isinstance(main_file, str):
raise MesonException('Main sgml keyword argument must be a string.')
main_xml = kwargs.get('main_xml', '')
if not isinstance(main_xml, str):
raise MesonException('Main xml keyword argument must be a string.')
if main_xml != '':
if main_file != '':
raise MesonException('You can only specify main_xml or main_sgml, not both.')
main_file = main_xml
src_dir = kwargs['src_dir']
targetname = modulename + '-doc'
command = [state.environment.get_build_command(), '--internal', 'gtkdoc']
if hasattr(src_dir, 'held_object'):
            src_dir = src_dir.held_object
if not isinstance(src_dir, build.IncludeDirs):
raise MesonException('Invalid keyword argument for src_dir.')
incdirs = src_dir.get_incdirs()
if len(incdirs) != 1:
raise MesonException('Argument src_dir has more than one directory specified.')
header_dir = os.path.join(state.environment.get_source_dir(), src_dir.get_curdir(), incdirs[0])
else:
header_dir = os.path.normpath(os.path.join(state.subdir, src_dir))
args = ['--sourcedir=' + state.environment.get_source_dir(),
'--builddir=' + state.environment.get_build_dir(),
'--subdir=' + state.subdir,
'--headerdir=' + header_dir,
'--mainfile=' + main_file,
'--modulename=' + modulename]
args += self.unpack_args('--htmlargs=', 'html_args', kwargs)
args += self.unpack_args('--scanargs=', 'scan_args', kwargs)
args += self.unpack_args('--fixxrefargs=', 'fixxref_args', kwargs)
res = [build.RunTarget(targetname, command[0], command[1:] + args, state.subdir)]
if kwargs.get('install', True):
res.append(build.InstallScript(command + args))
return res
    def gtkdoc_html_dir(self, state, args, kwargs):
if len(args) != 1:
raise MesonException('Must have exactly one argument.')
modulename = args[0]
if not isinstance(modulename, str):
raise MesonException('Argument must be a string')
return os.path.join('share/gtkdoc/html', modulename)
def unpack_args(self, arg, kwarg_name, kwargs):
try:
new_args = kwargs[kwarg_name]
if not isinstance(new_args, list):
new_args = [new_args]
for i in new_args:
if not isinstance(i, str):
                    raise MesonException('%s values must be strings.' % kwarg_name)
        except KeyError:
            return []
if len(new_args) > 0:
return [arg + '@@'.join(new_args)]
return []
def gdbus_codegen(self, state, args, kwargs):
if len(args) != 2:
raise MesonException('Gdbus_codegen takes two arguments, name and xml file.')
namebase = args[0]
xml_file = args[1]
cmd = ['gdbus-codegen']
if 'interface_prefix' in kwargs:
cmd += ['--interface-prefix', kwargs.pop('interface_prefix')]
if 'namespace' in kwargs:
cmd += ['--c-namespace', kwargs.pop('namespace')]
cmd += ['--generate-c-code', '@OUTDIR@/' + namebase, '@INPUT@']
outputs = [namebase + '.c', namebase + '.h']
custom_kwargs = {'input' : xml_file,
'output' : outputs,
'command' : cmd
}
return build.CustomTarget(namebase + '-gdbus', state.subdir, custom_kwargs)
def initialize():
return GnomeModule()
class GirTarget(build.CustomTarget):
def __init__(self, name, subdir, kwargs):
super().__init__(name, subdir, kwargs)
class TypelibTarget(build.CustomTarget):
def __init__(self, name, subdir, kwargs):
super().__init__(name, subdir, kwargs)
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division
import ndef
import pytest
from ndef.record import Record
from io import BytesIO
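# (record type string, NDEF TNF value, TYPE field octets) triples; TNF 1 is
# NFC Forum well-known, 2 is media-type, 3 is absolute URI, 4 is NFC Forum
# external, 5 is unknown and 6 is unchanged.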
valid_record_types = [
('urn:nfc:wkt:XYZ', 1, b'XYZ'),
('urn:nfc:wkt:xyz', 1, b'xyz'),
('application/octet-stream', 2, b'application/octet-stream'),
('http://example.com/type.dtd', 3, b'http://example.com/type.dtd'),
('urn:nfc:ext:example.com:type', 4, b'example.com:type'),
('unknown', 5, b''),
('unchanged', 6, b''),
]
wrong_record_types = [
(int(), "record type string may be str or bytes, but not int"),
('invalid', "can not convert the record type string 'invalid'"),
('text/', "can not convert the record type string 'text/'"),
('urn:', "can not convert the record type string 'urn:'"),
('urn:nfc:', "can not convert the record type string 'urn:nfc:'"),
('urn:nfc:wkt', "can not convert the record type string 'urn:nfc:wkt'"),
('http:', "can not convert the record type string 'http:'"),
('http:/', "can not convert the record type string 'http:/'"),
('http:/a.b', "can not convert the record type string 'http:/a.b'"),
('urn:nfc:wkt:'+256*'a',
"an NDEF Record TYPE can not be more than 255 octet"),
]
class TestDecodeType:
@pytest.mark.parametrize("record_type, TNF, TYPE", valid_record_types)
def test_pass(self, record_type, TNF, TYPE):
assert Record._decode_type(TNF, TYPE) == record_type
assert type(Record._decode_type(TNF, TYPE)) == str
def test_fail(self):
errstr = "ndef.record.Record NDEF Record TNF values must be 0 to 6"
with pytest.raises((TypeError, ValueError)) as excinfo:
Record._decode_type(7, b'')
assert str(excinfo.value) == errstr
class TestEncodeType:
@pytest.mark.parametrize("record_type, TNF, TYPE", valid_record_types)
def test_pass(self, record_type, TNF, TYPE):
assert Record._encode_type(record_type) == (TNF, TYPE)
assert type(Record._encode_type(record_type)[1]) == bytes
@pytest.mark.parametrize("record_type, errstr", wrong_record_types)
def test_fail(self, record_type, errstr):
with pytest.raises((TypeError, ValueError)) as excinfo:
Record._encode_type(record_type)
assert str(excinfo.value) == "ndef.record.Record " + errstr
valid_init_args = [
((), '', '', b''),
((None,), '', '', b''),
((None, None), '', '', b''),
((None, None, None), '', '', b''),
((str(), None, None), '', '', b''),
((bytes(), None, None), '', '', b''),
((bytearray(), None, None), '', '', b''),
((None, str(), None), '', '', b''),
((None, bytes(), None), '', '', b''),
((None, bytearray(), None), '', '', b''),
((None, None, str()), '', '', b''),
((None, None, bytes()), '', '', b''),
((None, None, bytearray()), '', '', b''),
(('text/plain', None, None), 'text/plain', '', b''),
(('text/plain', 'id', None), 'text/plain', 'id', b''),
(('text/plain', 'id', 'text'), 'text/plain', 'id', b'text'),
(('text/plain', None, 'text'), 'text/plain', '', b'text'),
((None, 'id', 'text'), '', 'id', b'text'),
((None, 'id', None), '', 'id', b''),
(('a/'+253*'a', None, None), 'a/'+253*'a', '', b''),
((None, 255*'a', None), '', 255*'a', b''),
]
wrong_init_args = [
((int(),), " record type string may be str or bytes, but not int"),
(('ab',), " can not convert the record type string 'ab'"),
(('', int()), ".name may be str or None, but not int"),
(('', 256*'a'), ".name can not be more than 255 octets NDEF Record ID"),
(('', '', int()), ".data may be sequence or None, but not int"),
]
class TestInitArguments:
@pytest.mark.parametrize("args, _type, _name, _data", valid_init_args)
def test_pass(self, args, _type, _name, _data):
record = Record(*args)
assert record.type == _type
assert record.name == _name
assert record.data == _data
@pytest.mark.parametrize("args, errstr", wrong_init_args)
def test_fail(self, args, errstr):
with pytest.raises((TypeError, ValueError)) as excinfo:
Record(*args)
assert str(excinfo.value) == "ndef.record.Record" + errstr
class TestInitKeywords:
def test_pass(self):
record_1 = Record(type='text/plain', name='name', data='hello')
record_2 = Record(b'text/plain', b'name', b'hello')
assert record_1.type == record_2.type
assert record_1.name == record_2.name
assert record_1.data == record_2.data
def test_fail(self):
with pytest.raises(TypeError):
Record(undefined_keyword='abc')
class TestTypeAttribute:
def test_instance(self):
assert isinstance(Record().type, str)
def test_update(self):
with pytest.raises(AttributeError):
Record().type = ''
class TestNameAttribute:
def test_instance(self):
assert isinstance(Record().name, str)
def test_update(self):
record = Record()
assert record.name == ''
record.name = 255 * 'a'
assert record.name == 255 * 'a'
with pytest.raises(TypeError):
record.name = 1
with pytest.raises(ValueError):
record.name = 256 * 'a'
class TestDataAttribute:
def test_instance(self):
assert isinstance(Record().data, bytearray)
def test_update(self):
record = Record('unknown', '', 'abc')
assert record.data == b'abc'
record.data.extend(b'def')
assert record.data == b'abcdef'
with pytest.raises(AttributeError):
Record().data = bytearray(b'')
class TestStringFormat:
format_args_data = [
(('', '', ''), "'', '', bytearray(b'')"),
(('unknown', 'id', 'data'), "'unknown', 'id', bytearray(b'data')"),
]
format_str_data = [
(('', '', ''),
"TYPE '' ID '' PAYLOAD 0 byte"),
(('text/plain', '', ''),
"TYPE 'text/plain' ID '' PAYLOAD 0 byte"),
(('text/plain', 'id', ''),
"TYPE 'text/plain' ID 'id' PAYLOAD 0 byte"),
(('text/plain', 'id', '\x00\x01'),
"TYPE 'text/plain' ID 'id' PAYLOAD 2 byte '0001'"),
(('text/plain', '', '0123456789'),
"TYPE 'text/plain' ID '' PAYLOAD 10 byte '30313233343536373839'"),
(('text/plain', '', '012345678901'),
"TYPE 'text/plain' ID '' PAYLOAD 12 byte"
" '30313233343536373839' ... 2 more"),
]
@pytest.mark.parametrize("args, string", format_args_data)
def test_format_args(self, args, string):
assert "{:args}".format(Record(*args)) == string
@pytest.mark.parametrize("args, string", format_args_data)
def test_format_repr(self, args, string):
string = "ndef.record.Record({})".format(string)
assert "{!r}".format(Record(*args)) == string
@pytest.mark.parametrize("args, string", format_str_data)
def test_format_str(self, args, string):
assert "{!s}".format(Record(*args)) == "NDEF Record " + string
assert "{}".format(Record(*args)) == "NDEF Record " + string
class TestCompare:
compare_data = [
('', '', ''),
('a/b', '', ''),
('', 'abc', ''),
('', '', 'abc'),
]
@pytest.mark.parametrize("args", compare_data)
def test_equal(self, args):
assert Record(*args) == Record(*args)
@pytest.mark.parametrize("args1, args2",
zip(compare_data, compare_data[1:]))
def test_noteq(self, args1, args2):
assert Record(*args1) != Record(*args2)
class TestEncode:
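    # Expected encodings are hex octet strings: the first octet packs the
    # MB/ME/CF/SR/IL flags with the TNF, followed by TYPE_LENGTH,
    # PAYLOAD_LENGTH (1 or 4 octets depending on SR), an optional ID_LENGTH,
    # and finally the TYPE, ID and PAYLOAD fields.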
valid_encode_data = [
(('', '', b''), '100000'),
(('urn:nfc:wkt:X', '', b''), '110100 58'),
(('text/plain', '', b''), '120a00 746578742f706c61696e'),
(('http://a.b/c', '', b''), '130c00 687474703a2f2f612e622f63'),
        (('urn:nfc:ext:a.com:type', '', b''), '140a00 612e636f6d3a74797065'),
(('unknown', '', b''), '150000'),
(('unchanged', '', b''), '160000'),
(('urn:nfc:wkt:X', 'id', b''), '19010002 586964'),
(('urn:nfc:wkt:X', 'id', b'payload'), '19010702 5869647061796c6f6164'),
(('urn:nfc:wkt:X', 'id', 256*b'p'), '09010000010002 586964'+256*'70'),
]
@pytest.mark.parametrize("args, encoded", valid_encode_data)
def test_pass(self, args, encoded):
stream = BytesIO()
record = Record(*args)
octets = bytearray.fromhex(encoded)
assert record._encode(stream=stream) == len(octets)
assert stream.getvalue() == octets
def test_limit(self):
stream = BytesIO()
record = Record('unknown', '', 0x100000 * b'\0')
octets = bytearray.fromhex('050000100000') + 0x100000 * b'\0'
assert record._encode(stream=stream) == len(octets)
assert stream.getvalue() == octets
record = Record('unknown', '', 0x100001 * b'\0')
errstr = "payload of more than 1048576 octets can not be encoded"
with pytest.raises(ndef.EncodeError) as excinfo:
record._encode(stream=stream)
assert str(excinfo.value) == 'ndef.record.Record ' + errstr
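    # _encode_struct() extends the struct format syntax: '+' emits the next
    # argument as a length-prefixed byte string, '(B)'/'(H)' emit a
    # length-prefixed sequence of items, and a trailing '*' appends the last
    # argument's octets verbatim.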
valid_struct_data = [
("B", (1,), "01"),
("BB", (1, 2), "0102"),
("BB*", (1, 2, b'123'), "0102313233"),
("BB+", (1, b'123'), "0103313233"),
("BB+(B)", (1, (1, 2, 3)), "0103010203"),
("BB+(B)*", (1, (1, 2, 3), b'123'), "0103010203313233"),
(">H", (1,), "0001"),
(">HH", (1, 2), "00010002"),
(">HH*", (1, 2, b'123'), "00010002313233"),
(">HH+", (1, b'123'), "00010003313233"),
(">HH+(H)", (1, (1, 2, 3)), "00010003000100020003"),
(">HH+(H)*", (1, (1, 2, 3), b'123'), "00010003000100020003313233"),
]
@pytest.mark.parametrize("fmt, values, octets", valid_struct_data)
def test_struct(self, fmt, values, octets):
octets = bytearray.fromhex(octets)
assert Record._encode_struct(fmt, *values) == octets
def test_derived_record(self):
class MyRecord(Record):
_type = 'urn:nfc:wkt:x'
def __init__(self):
pass
def _encode_payload(self):
return b'\0'
stream = BytesIO()
octets = bytearray.fromhex('1101017800')
assert MyRecord()._encode(stream=stream) == len(octets)
assert stream.getvalue() == octets
class TestDecode:
valid_decode_data = TestEncode.valid_encode_data + [
(('', '', b''), '00 00 00 00 00 00'),
(('', '', b''), '00 00 00 00 00 00 00'),
]
wrong_decode_data = [
('07', "TNF field value must be between 0 and 6"),
('00', "buffer underflow at reading length fields"),
('0000', "buffer underflow at reading length fields"),
('000000', "buffer underflow at reading length fields"),
('00000000', "buffer underflow at reading length fields"),
('0000000000', "buffer underflow at reading length fields"),
('10010000', "TYPE_LENGTH must be 0 for TNF value 0"),
('110000', "TYPE_LENGTH must be > 0 for TNF value 1"),
('120000', "TYPE_LENGTH must be > 0 for TNF value 2"),
('130000', "TYPE_LENGTH must be > 0 for TNF value 3"),
('140000', "TYPE_LENGTH must be > 0 for TNF value 4"),
('15010000', "TYPE_LENGTH must be 0 for TNF value 5"),
('16010000', "TYPE_LENGTH must be 0 for TNF value 6"),
('1800000100', "ID_LENGTH must be 0 for TNF value 0"),
('10000100', "PAYLOAD_LENGTH must be 0 for TNF value 0"),
('000000000001', "PAYLOAD_LENGTH must be 0 for TNF value 0"),
('19010101', "buffer underflow at reading TYPE field"),
('1901010154', "buffer underflow at reading ID field"),
('190101015449', "buffer underflow at reading PAYLOAD field"),
]
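    # valid_flag_data entries are (encoded octets, MB, ME, CF); in the first
    # octet 0x80 is the Message Begin flag, 0x40 is Message End and 0x20 is
    # the Chunk Flag.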
valid_flag_data = [
('000000000000', 0, 0, 0),
('800000000000', 1, 0, 0),
('400000000000', 0, 1, 0),
('200000000000', 0, 0, 1),
('c00000000000', 1, 1, 0),
('a00000000000', 1, 0, 1),
('e00000000000', 1, 1, 1),
]
@pytest.mark.parametrize("args, encoded", valid_decode_data)
def test_pass(self, args, encoded):
stream = BytesIO(bytearray.fromhex(encoded))
record = Record._decode(stream, 'strict', {})[0]
assert record == Record(*args)
@pytest.mark.parametrize("encoded, errstr", wrong_decode_data)
def test_fail(self, encoded, errstr):
with pytest.raises(ndef.DecodeError) as excinfo:
stream = BytesIO(bytearray.fromhex(encoded))
Record._decode(stream, 'strict', {})
assert errstr in str(excinfo.value)
@pytest.mark.parametrize("encoded, _mb, _me, _cf", valid_flag_data)
def test_flags(self, encoded, _mb, _me, _cf):
stream = BytesIO(bytearray.fromhex(encoded))
record, mb, me, cf = Record._decode(stream, 'strict', {})
assert mb == _mb
assert me == _me
assert cf == _cf
def test_limit(self):
octets = bytearray.fromhex('')
record = Record._decode(BytesIO(octets), 'strict', {})[0]
assert record is None
octets = bytearray.fromhex('050000100000') + 0x100000 * b'\0'
record = Record._decode(BytesIO(octets), 'strict', {})[0]
assert len(record.data) == 0x100000
errstr = "payload of more than 1048576 octets can not be decoded"
octets = bytearray.fromhex('050000100001') + 0x100001 * b'\0'
with pytest.raises(ndef.DecodeError) as excinfo:
Record._decode(BytesIO(octets), 'strict', {})
assert str(excinfo.value) == 'ndef.record.Record ' + errstr
def test_decode_payload_is_not_implemented(self):
errstr = "must implement the _decode_payload() method"
with pytest.raises(NotImplementedError) as excinfo:
Record._decode_payload(b'', 'strict')
assert str(excinfo.value) == 'ndef.record.Record ' + errstr
def test_decode_known_type(self):
class MyRecord(Record):
_type = 'urn:nfc:wkt:x'
_decode_min_payload_length = 1
_decode_max_payload_length = 1
@classmethod
def _decode_payload(cls, octets, errors):
return MyRecord()
known_types = {MyRecord._type: MyRecord}
stream = BytesIO(bytearray.fromhex('1101017800'))
record = Record._decode(stream, 'strict', known_types)[0]
assert type(record) == MyRecord
errstr = 'payload length can not be less than 1'
stream = BytesIO(bytearray.fromhex('11010078'))
with pytest.raises(ndef.DecodeError) as excinfo:
Record._decode(stream, 'strict', known_types)
assert str(excinfo.value) == 'test_record.MyRecord ' + errstr
errstr = 'payload length can not be more than 1'
stream = BytesIO(bytearray.fromhex('110102780000'))
with pytest.raises(ndef.DecodeError) as excinfo:
Record._decode(stream, 'strict', known_types)
assert str(excinfo.value) == 'test_record.MyRecord ' + errstr
valid_struct_data = [
("B", "01", 0, 1),
("BB", "0102", 0, (1, 2)),
("BB*", "0102313233", 0, (1, 2, b'123')),
("BB+", "0102313233", 0, (1, b'12')),
("BB+", "000102313233", 1, (1, b'12')),
("BB+*", "0102313233", 0, (1, b'12', b'3')),
("BB+(B)", "01020102", 0, (1, (1, 2))),
("BB+(2s)", "010231323334", 0, (1, (b'12', b'34'))),
("BB+(B)*", "010201023132", 0, (1, (1, 2), b'12')),
(">H", "0001", 0, 1),
(">HH+", "00010002313233", 0, (1, b'12')),
(">HH+(H)", "0001000200010002", 0, (1, (1, 2))),
("BB+BB+", "010231320203313233", 0, (1, b'12', 2, b'123')),
]
@pytest.mark.parametrize("fmt, octets, offset, values", valid_struct_data)
def test_struct(self, fmt, octets, offset, values):
octets = bytearray.fromhex(octets)
assert Record._decode_struct(fmt, octets, offset) == values
class TestValueToAscii:
pass_values = [
'abc', u'abc', b'abc', bytearray(b'abc')
]
fail_values = [
(int(), "accepts str or bytes, but not int"),
('\x80', "conversion requires ascii text, but got '\\x80'"),
]
@pytest.mark.parametrize("value", pass_values)
def test_pass(self, value):
assert Record._value_to_ascii(value, 'value') == 'abc'
@pytest.mark.parametrize("value, errstr", fail_values)
def test_fail(self, value, errstr):
with pytest.raises((TypeError, ValueError)) as excinfo:
Record._value_to_ascii(value, 'value')
assert str(excinfo.value) == "ndef.record.Record value " + errstr
class TestValueToLatin:
pass_values = [
'\xe4bc', u'\xe4bc', b'\xe4bc', bytearray(b'\xe4bc')
]
fail_values = [
(int(), "accepts str or bytes, but not int"),
(u'\u0394', "conversion requires latin text, but got {u}'\u0394'"),
]
@pytest.mark.parametrize("value", pass_values)
def test_pass(self, value):
assert Record._value_to_latin(value, 'value') == '\xe4bc'
@pytest.mark.parametrize("value, errstr", fail_values)
def test_fail(self, value, errstr):
errstr = errstr.format(u=('', 'u')[ndef.record._PY2])
with pytest.raises((TypeError, ValueError)) as excinfo:
Record._value_to_latin(value, 'value')
assert str(excinfo.value) == "ndef.record.Record value " + errstr
class TestValueToUnicode:
pass_values = [
'abc', u'abc', b'abc', bytearray(b'abc')
]
fail_values = [
(int(), "accepts str or bytes, but not int"),
(b'\x80', "conversion requires ascii text, but got {b}'\\x80'"),
]
@pytest.mark.parametrize("value", pass_values)
def test_pass(self, value):
assert Record._value_to_unicode(value, 'value') == u'abc'
@pytest.mark.parametrize("value, errstr", fail_values)
def test_fail(self, value, errstr):
errstr = errstr.format(b=('b', '')[ndef.record._PY2])
with pytest.raises((TypeError, ValueError)) as excinfo:
Record._value_to_unicode(value, 'value')
assert str(excinfo.value) == "ndef.record.Record value " + errstr
|
|
# coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF metrics for Bandits algorithms."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Callable, Optional, Text
import gin
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.bandits.policies import constraints
from tf_agents.bandits.specs import utils as bandit_spec_utils
from tf_agents.metrics import tf_metric
from tf_agents.typing import types
from tf_agents.utils import common
@gin.configurable
class RegretMetric(tf_metric.TFStepMetric):
"""Computes the regret with respect to a baseline."""
def __init__(self,
baseline_reward_fn: Callable[[types.Tensor], types.Tensor],
name: Optional[Text] = 'RegretMetric',
dtype: float = tf.float32):
"""Computes the regret with respect to a baseline.
    The regret is computed as the difference between the baseline action reward
    and the current reward. The baseline action reward is obtained by calling
    the input `baseline_reward_fn` function, which, given a (batched)
    observation, computes the baseline action reward.
Args:
baseline_reward_fn: function that computes the reward used as a baseline
for computing the regret.
name: (str) name of the metric
dtype: dtype of the metric value.
"""
self._baseline_reward_fn = baseline_reward_fn
self.dtype = dtype
self.regret = common.create_variable(
initial_value=0, dtype=self.dtype, shape=(), name='regret')
super(RegretMetric, self).__init__(name=name)
def call(self, trajectory):
"""Update the regret value.
Args:
trajectory: A tf_agents.trajectory.Trajectory
Returns:
The arguments, for easy chaining.
"""
baseline_reward = self._baseline_reward_fn(trajectory.observation)
trajectory_reward = trajectory.reward
if isinstance(trajectory.reward, dict):
trajectory_reward = trajectory.reward[bandit_spec_utils.REWARD_SPEC_KEY]
trajectory_regret = baseline_reward - trajectory_reward
self.regret.assign(tf.reduce_mean(trajectory_regret))
return trajectory
def result(self):
return tf.identity(
self.regret, name=self.name)
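# Illustrative sketch only (not part of the library): wiring a RegretMetric to
# a hand-written baseline. The `optimal_reward` helper and the per-arm reward
# table are hypothetical; only `RegretMetric` itself is defined above.
#
#   rewards = tf.constant([[0.1, 0.5, 0.3]])      # batched per-arm rewards
#   def optimal_reward(unused_observation):
#     return tf.reduce_max(rewards, axis=-1)      # best achievable reward
#   regret_metric = RegretMetric(baseline_reward_fn=optimal_reward)
#   # Calling regret_metric(trajectory) stores mean(baseline - trajectory.reward).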
@gin.configurable
class SuboptimalArmsMetric(tf_metric.TFStepMetric):
"""Computes the number of suboptimal arms with respect to a baseline."""
def __init__(self,
baseline_action_fn: Callable[[types.Tensor], types.Tensor],
name: Optional[Text] = 'SuboptimalArmsMetric',
dtype: float = tf.float32):
"""Computes the number of suboptimal arms with respect to a baseline.
Args:
baseline_action_fn: function that computes the action used as a baseline
for computing the metric.
name: (str) name of the metric
dtype: dtype of the metric value.
"""
self._baseline_action_fn = baseline_action_fn
self.dtype = dtype
self.suboptimal_arms = common.create_variable(
initial_value=0, dtype=self.dtype, shape=(), name='suboptimal_arms')
super(SuboptimalArmsMetric, self).__init__(name=name)
def call(self, trajectory):
"""Update the metric value.
Args:
trajectory: A tf_agents.trajectory.Trajectory
Returns:
The arguments, for easy chaining.
"""
baseline_action = self._baseline_action_fn(trajectory.observation)
disagreement = tf.cast(
tf.not_equal(baseline_action, trajectory.action), tf.float32)
self.suboptimal_arms.assign(tf.reduce_mean(disagreement))
return trajectory
def result(self):
return tf.identity(
self.suboptimal_arms, name=self.name)
@gin.configurable
class ConstraintViolationsMetric(tf_metric.TFStepMetric):
"""Computes the violations of a certain constraint."""
def __init__(self,
constraint: constraints.BaseConstraint,
name: Optional[Text] = 'ConstraintViolationMetric',
dtype: float = tf.float32):
"""Computes the constraint violations given an input constraint.
Given a certain constraint, this metric computes how often the selected
actions in the trajectory violate the constraint.
Args:
constraint: an instance of `tf_agents.bandits.policies.BaseConstraint`.
name: (str) name of the metric
dtype: dtype of the metric value.
"""
self._constraint = constraint
self.dtype = dtype
self.constraint_violations = common.create_variable(
initial_value=0.0,
dtype=self.dtype,
shape=(),
name='constraint_violations')
super(ConstraintViolationsMetric, self).__init__(name=name)
def call(self, trajectory):
"""Update the constraint violations metric.
Args:
trajectory: A tf_agents.trajectory.Trajectory
Returns:
The arguments, for easy chaining.
"""
feasibility_prob_all_actions = self._constraint(trajectory.observation)
feasibility_prob_selected_actions = common.index_with_actions(
feasibility_prob_all_actions,
tf.cast(trajectory.action, dtype=tf.int32))
self.constraint_violations.assign(tf.reduce_mean(
1.0 - feasibility_prob_selected_actions))
return trajectory
def result(self):
return tf.identity(self.constraint_violations, name=self.name)
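# Worked example (made-up numbers, for illustration only): if the constraint
# returns per-action feasibility probabilities
#   [[0.9, 0.2, 0.7],
#    [0.4, 0.8, 0.1]]
# and trajectory.action is [1, 2], then index_with_actions selects [0.2, 0.1]
# and the metric stores mean(1.0 - [0.2, 0.1]) = 0.85.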
@gin.configurable
class DistanceFromGreedyMetric(tf_metric.TFStepMetric):
"""Difference between the estimated reward of the chosen and the best action.
This metric measures how 'safely' the agent explores: it calculates the
difference between what the agent thinks it would have gotten had it chosen
the best looking action, vs the action it actually took. This metric is not
equivalent to the regret, because the regret is calculated as a distance from
optimality, while here everything calculated is based on the policy's
'belief'.
"""
def __init__(self,
estimated_reward_fn: Callable[[types.Tensor], types.Tensor],
name: Optional[Text] = 'DistanceFromGreedyMetric',
dtype: float = tf.float32):
"""Init function for the metric.
Args:
estimated_reward_fn: A function that takes the observation as input and
computes the estimated rewards that the greedy policy uses.
name: (str) name of the metric
dtype: dtype of the metric value.
"""
self._estimated_reward_fn = estimated_reward_fn
self.dtype = dtype
self.safe_explore = common.create_variable(
initial_value=0, dtype=self.dtype, shape=(), name='safe_explore')
super(DistanceFromGreedyMetric, self).__init__(name=name)
def call(self, trajectory):
"""Update the metric value.
Args:
trajectory: A tf_agents.trajectory.Trajectory
Returns:
The arguments, for easy chaining.
"""
all_estimated_rewards = self._estimated_reward_fn(trajectory.observation)
max_estimated_rewards = tf.reduce_max(all_estimated_rewards, axis=-1)
estimated_action_rewards = tf.gather(
all_estimated_rewards, trajectory.action, batch_dims=1)
self.safe_explore.assign(
tf.reduce_mean(max_estimated_rewards - estimated_action_rewards))
return trajectory
def result(self):
return tf.identity(self.safe_explore, name=self.name)
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains and evaluates deep neural network classifiers.
Trains and evaluates deep neural network classification models using Keras.
Performs parameter tuning with grid search and randomized search.
"""
import numpy as np
from scipy import sparse
from tensorflow.compat.v1.keras import backend as K
from tensorflow.compat.v1.keras import regularizers
from tensorflow.compat.v1.keras.layers import Activation
from tensorflow.compat.v1.keras.layers import Dense
from tensorflow.compat.v1.keras.layers import Dropout
from tensorflow.compat.v1.keras.layers import ELU
from tensorflow.compat.v1.keras.layers import Input
from tensorflow.compat.v1.keras.layers import LeakyReLU
from tensorflow.compat.v1.keras.layers import PReLU
from tensorflow.compat.v1.keras.layers import ThresholdedReLU
from tensorflow.compat.v1.keras.models import Model
from tensorflow.compat.v1.keras.optimizers import Adam
from tensorflow.compat.v1.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.compat.v1.keras.wrappers.scikit_learn import KerasRegressor
from sparse_data.exp_framework.utils import generic_pipeline
def pseudo_partial(func, **kwargs):
"""Does the same thing as functool.partial but returns a function.
Useful if an API (e.g., Keras) uses getargspec which doesn't handle functions.
Arguments:
func: function
**kwargs: additional keyword arguments
Returns:
new_func: function
a function which behaves like func(**kwargs)
"""
def new_func():
return func(**kwargs)
return new_func
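# Example (illustrative only): pseudo_partial(dict, x=0, y=0) returns a plain
# zero-argument function, so getargspec-based introspection (as used by the
# Keras scikit-learn wrappers) sees a function rather than a partial object.
#
#   make_point = pseudo_partial(dict, x=0, y=0)
#   make_point()  # -> {'x': 0, 'y': 0}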
def keras_build_fn(num_feature,
num_output,
is_sparse,
embedding_dim=-1,
num_hidden_layer=2,
hidden_layer_dim=512,
activation='elu',
learning_rate=1e-3,
dropout=0.5,
l1=0.0,
l2=0.0,
loss='categorical_crossentropy'):
"""Initializes and compiles a Keras DNN model using the Adam optimizer.
Args:
num_feature: number of features
    num_output: number of outputs (targets, e.g., classes)
is_sparse: boolean whether input data is in sparse format
embedding_dim: int number of nodes in embedding layer; if value is <= 0 then
no embedding layer will be present in the model
num_hidden_layer: number of hidden layers
hidden_layer_dim: int number of nodes in the hidden layer(s)
activation: string
activation function for hidden layers; see https://keras.io/activations/
learning_rate: float learning rate for Adam
dropout: float proportion of nodes to dropout; values in [0, 1]
l1: float strength of L1 regularization on weights
l2: float strength of L2 regularization on weights
loss: string
loss function; see https://keras.io/losses/
Returns:
model: Keras.models.Model
compiled Keras model
"""
assert num_hidden_layer >= 1
inputs = Input(shape=(num_feature,), sparse=is_sparse)
activation_func_args = ()
if activation.lower() == 'prelu':
activation_func = PReLU
elif activation.lower() == 'leakyrelu':
activation_func = LeakyReLU
elif activation.lower() == 'elu':
activation_func = ELU
elif activation.lower() == 'thresholdedrelu':
activation_func = ThresholdedReLU
else:
activation_func = Activation
    activation_func_args = (activation,)  # trailing comma: one-element tuple so *args unpacks correctly
if l1 > 0 and l2 > 0:
reg_init = lambda: regularizers.l1_l2(l1, l2)
elif l1 > 0:
reg_init = lambda: regularizers.l1(l1)
elif l2 > 0:
reg_init = lambda: regularizers.l2(l2)
else:
reg_init = lambda: None
if embedding_dim > 0:
# embedding layer
e = Dense(embedding_dim)(inputs)
x = Dense(hidden_layer_dim, kernel_regularizer=reg_init())(e)
x = activation_func(*activation_func_args)(x)
x = Dropout(dropout)(x)
else:
x = Dense(hidden_layer_dim, kernel_regularizer=reg_init())(inputs)
x = activation_func(*activation_func_args)(x)
x = Dropout(dropout)(x)
# add additional hidden layers
for _ in range(num_hidden_layer - 1):
x = Dense(hidden_layer_dim, kernel_regularizer=reg_init())(x)
x = activation_func(*activation_func_args)(x)
x = Dropout(dropout)(x)
x = Dense(num_output)(x)
preds = Activation('softmax')(x)
model = Model(inputs=inputs, outputs=preds)
model.compile(optimizer=Adam(lr=learning_rate), loss=loss)
return model
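# Minimal usage sketch (assumes a working TensorFlow/Keras install; the feature
# and class counts are made up). keras_build_fn only builds and compiles the
# network; training is left to the caller or to the wrappers further below.
#
#   model = keras_build_fn(num_feature=100, num_output=3, is_sparse=False,
#                          num_hidden_layer=2, hidden_layer_dim=64,
#                          activation='relu', dropout=0.2)
#   # model.fit(x, y_one_hot, epochs=5)  # labels must be one-hot for this loss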
def pipeline(x_train,
y_train,
x_test,
y_test,
param_dict=None,
problem='classification'):
"""Trains and evaluates a DNN classifier.
Args:
x_train: np.array or scipy.sparse.*matrix array of features of training data
y_train: np.array 1-D array of class labels of training data
x_test: np.array or scipy.sparse.*matrix array of features of test data
y_test: np.array 1-D array of class labels of the test data
    param_dict: {string: ?} dictionary of parameters and their values
problem: string type of learning problem; values = 'classification',
'regression'
Returns:
model: Keras.models.Model
trained Keras model
metrics: {str: float}
dictionary of metric scores
"""
assert problem in ['classification', 'regression']
if param_dict is None:
param_dict = {'epochs': 10, 'batch_size': 256}
num_feature = x_train.shape[1]
is_sparse = sparse.issparse(x_train)
param_dict = param_dict.copy()
num_epoch = param_dict.pop('epochs')
batch_size = param_dict.pop('batch_size')
if problem == 'regression':
num_output = 1
loss = 'mean_squared_error'
model_init = KerasRegressor
else:
num_output = len(set(y_train))
loss = 'categorical_crossentropy'
model_init = FunctionalKerasClassifier
build_fn = pseudo_partial(
keras_build_fn,
num_feature=num_feature,
num_output=num_output,
is_sparse=is_sparse,
loss=loss,
**param_dict)
model = model_init(
build_fn=build_fn,
epochs=num_epoch,
batch_size=batch_size,
shuffle=True,
verbose=False)
return generic_pipeline(
model, x_train, y_train, x_test, y_test, problem=problem)
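# Usage sketch (hypothetical data and parameter values): keys left in
# param_dict after 'epochs' and 'batch_size' are popped are forwarded to
# keras_build_fn (e.g. 'hidden_layer_dim', 'dropout', 'learning_rate').
#
#   params = {'epochs': 5, 'batch_size': 128,
#             'num_hidden_layer': 2, 'hidden_layer_dim': 256, 'dropout': 0.3}
#   model, metrics = pipeline(x_train, y_train, x_test, y_test,
#                             param_dict=params, problem='classification')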
class FunctionalKerasClassifier(KerasClassifier):
"""Helper scikit-learn wrapper for a Keras model.
The default KerasClassifier's predict() method does not work for functional
Keras models (https://github.com/fchollet/keras/issues/2524); this breaks
using this wrapper with the scikit-learn framework (e.g., search methods).
"""
def predict_proba(self, x, **kwargs):
"""Predict classes from features.
Args:
x: np.array or scipy.sparse.*matrix array of features
**kwargs: additional keyword arguments
Returns:
y_pred: np.array
        2-D array of predicted class probabilities
"""
kwargs = self.filter_sk_params(Model.predict, kwargs)
probas = self.model.predict(x, **kwargs)
return probas
def predict(self, x, **kwargs):
"""Predict classes from features.
Args:
x: np.array or scipy.sparse.*matrix array of features
**kwargs: additional keyword arguments
Returns:
y_pred: np.array
1-D array of class predictions (not probabilities)
"""
kwargs = self.filter_sk_params(Model.predict, kwargs)
probas = self.model.predict(x, **kwargs)
return np.argmax(probas, axis=1)
def clear_keras_session():
"""Clears Keras session."""
K.clear_session()
|
|
"""distutils.command.build_ext
Implements the Distutils 'build_ext' command, for building extension
modules (currently limited to C extensions, should accommodate C++
extensions ASAP)."""
import contextlib
import os
import re
import sys
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils.util import get_platform
from distutils import log
from site import USER_BASE
# An extension name is just a dot-separated list of Python NAMEs (ie.
# the same as a fully-qualified module name).
extension_name_re = re.compile \
(r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
def show_compilers ():
from distutils.ccompiler import show_compilers
show_compilers()
def _get_c_extension_suffix():
import importlib
suffixes = importlib.machinery.EXTENSION_SUFFIXES
return suffixes[0] if suffixes else None
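# Illustrative values only (the suffix depends on interpreter and platform):
# CPython 3.9 on Linux typically reports '.cpython-39-x86_64-linux-gnu.so',
# while PyPy reports something like '.pypy39-pp73-x86_64-linux-gnu.so'.
# get_ext_filename() further below appends this suffix to the extension path.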
class build_ext(Command):
description = "build C/C++ extensions (compile/link to build directory)"
# XXX thoughts on how to deal with complex command-line options like
# these, i.e. how to make it so fancy_getopt can suck them off the
# command line and make it look like setup.py defined the appropriate
# lists of tuples of what-have-you.
# - each command needs a callback to process its command-line options
# - Command.__init__() needs access to its share of the whole
# command line (must ultimately come from
# Distribution.parse_command_line())
# - it then calls the current command class' option-parsing
# callback to deal with weird options like -D, which have to
# parse the option text and churn out some custom data
# structure
# - that data structure (in this case, a list of 2-tuples)
# will then be present in the command object by the time
# we get to finalize_options() (i.e. the constructor
# takes care of both command-line and client options
# in between initialize_options() and finalize_options())
sep_by = " (separated by '%s')" % os.pathsep
user_options = [
('build-lib=', 'b',
"directory for compiled extension modules"),
('build-temp=', 't',
"directory for temporary files (build by-products)"),
('plat-name=', 'p',
"platform name to cross-compile for, if supported "
"(default: %s)" % get_platform()),
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules"),
('include-dirs=', 'I',
"list of directories to search for header files" + sep_by),
('define=', 'D',
"C preprocessor macros to define"),
('undef=', 'U',
"C preprocessor macros to undefine"),
('libraries=', 'l',
"external C libraries to link with"),
('library-dirs=', 'L',
"directories to search for external C libraries" + sep_by),
('rpath=', 'R',
"directories to search for shared C libraries at runtime"),
('link-objects=', 'O',
"extra explicit link objects to include in the link"),
('debug', 'g',
"compile/link with debugging information"),
('force', 'f',
"forcibly build everything (ignore file timestamps)"),
('compiler=', 'c',
"specify the compiler type"),
('parallel=', 'j',
"number of parallel build jobs"),
('swig-cpp', None,
"make SWIG create C++ files (default is C)"),
('swig-opts=', None,
"list of SWIG command line options"),
('swig=', None,
"path to the SWIG executable"),
('user', None,
"add user include, library and rpath")
]
boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']
help_options = [
('help-compiler', None,
"list available compilers", show_compilers),
]
def initialize_options(self):
self.extensions = None
self.build_lib = None
self.plat_name = None
self.build_temp = None
self.inplace = 0
self.package = None
self.include_dirs = None
self.define = None
self.undef = None
self.libraries = None
self.library_dirs = None
self.rpath = None
self.link_objects = None
self.debug = None
self.force = None
self.compiler = None
self.swig = None
self.swig_cpp = None
self.swig_opts = None
self.user = None
self.parallel = None
def finalize_options(self):
from distutils import sysconfig
self.set_undefined_options('build',
('build_lib', 'build_lib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'),
('parallel', 'parallel'),
('plat_name', 'plat_name'),
)
if self.package is None:
self.package = self.distribution.ext_package
self.extensions = self.distribution.ext_modules
# Make sure Python's include directories (for Python.h, pyconfig.h,
# etc.) are in the include search path.
py_include = sysconfig.get_python_inc()
plat_py_include = sysconfig.get_python_inc(plat_specific=1)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
# If in a virtualenv, add its include directory
# Issue 16116
if sys.exec_prefix != sys.base_exec_prefix:
self.include_dirs.append(os.path.join(sys.exec_prefix, 'include'))
# Put the Python "system" include dir at the end, so that
# any local include dirs take precedence.
self.include_dirs.append(py_include)
if plat_py_include != py_include:
self.include_dirs.append(plat_py_include)
self.ensure_string_list('libraries')
self.ensure_string_list('link_objects')
# Life is easier if we're not forever checking for None, so
# simplify these options to empty lists if unset
if self.libraries is None:
self.libraries = []
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, str):
self.library_dirs = self.library_dirs.split(os.pathsep)
if self.rpath is None:
self.rpath = []
elif isinstance(self.rpath, str):
self.rpath = self.rpath.split(os.pathsep)
# for extensions under windows use different directories
# for Release and Debug builds.
# also Python's library directory must be appended to library_dirs
if os.name == 'nt':
# the 'libs' directory is for binary installs - we assume that
# must be the *native* platform. But we don't really support
# cross-compiling via a binary install anyway, so we let it go.
self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
if sys.base_exec_prefix != sys.prefix: # Issue 16116
self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs'))
if self.debug:
self.build_temp = os.path.join(self.build_temp, "Debug")
else:
self.build_temp = os.path.join(self.build_temp, "Release")
# Append the source distribution include and library directories,
# this allows distutils on windows to work in the source tree
if 0:
# pypy has no config_h_filename directory
from distutils.sysconfig import get_config_h_filename
self.include_dirs.append(os.path.dirname(get_config_h_filename()))
_sys_home = getattr(sys, '_home', None)
if _sys_home:
self.library_dirs.append(_sys_home)
# Use the .lib files for the correct architecture
if self.plat_name == 'win32':
suffix = 'win32'
else:
# win-amd64 or win-ia64
suffix = self.plat_name[4:]
new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
if suffix:
new_lib = os.path.join(new_lib, suffix)
# pypy has no PCBuild directory
# self.library_dirs.append(new_lib)
# for extensions under Cygwin and AtheOS Python's library directory must be
# appended to library_dirs
if sys.platform[:6] == 'cygwin' or sys.platform[:6] == 'atheos':
if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
# building third party extensions
self.library_dirs.append(os.path.join(sys.prefix, "lib",
"python" + get_python_version(),
"config"))
else:
# building python standard extensions
self.library_dirs.append('.')
# For building extensions with a shared Python library,
# Python's library directory must be appended to library_dirs
# See Issues: #1600860, #4366
if (sysconfig.get_config_var('Py_ENABLE_SHARED')):
if not sysconfig.python_build:
# building third party extensions
self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
else:
# building python standard extensions
self.library_dirs.append('.')
# The argument parsing will result in self.define being a string, but
# it has to be a list of 2-tuples. All the preprocessor symbols
# specified by the 'define' option will be set to '1'. Multiple
# symbols can be separated with commas.
if self.define:
defines = self.define.split(',')
self.define = [(symbol, '1') for symbol in defines]
# The option for macros to undefine is also a string from the
# option parsing, but has to be a list. Multiple symbols can also
# be separated with commas here.
if self.undef:
self.undef = self.undef.split(',')
if self.swig_opts is None:
self.swig_opts = []
else:
self.swig_opts = self.swig_opts.split(' ')
# Finally add the user include and library directories if requested
if self.user:
user_include = os.path.join(USER_BASE, "include")
user_lib = os.path.join(USER_BASE, "lib")
if os.path.isdir(user_include):
self.include_dirs.append(user_include)
if os.path.isdir(user_lib):
self.library_dirs.append(user_lib)
self.rpath.append(user_lib)
if isinstance(self.parallel, str):
try:
self.parallel = int(self.parallel)
except ValueError:
raise DistutilsOptionError("parallel should be an integer")
def run(self):
from distutils.ccompiler import new_compiler
# 'self.extensions', as supplied by setup.py, is a list of
# Extension instances. See the documentation for Extension (in
# distutils.extension) for details.
#
# For backwards compatibility with Distutils 0.8.2 and earlier, we
# also allow the 'extensions' list to be a list of tuples:
# (ext_name, build_info)
# where build_info is a dictionary containing everything that
# Extension instances do except the name, with a few things being
# differently named. We convert these 2-tuples to Extension
# instances as needed.
if not self.extensions:
return
# If we were asked to build any C/C++ libraries, make sure that the
# directory where we put them is in the library search path for
# linking extensions.
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.libraries.extend(build_clib.get_library_names() or [])
self.library_dirs.append(build_clib.build_clib)
# Setup the CCompiler object that we'll use to do all the
# compiling and linking
self.compiler = new_compiler(compiler=self.compiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force)
customize_compiler(self.compiler)
# If we are cross-compiling, init the compiler now (if we are not
# cross-compiling, init would not hurt, but people may rely on
# late initialization of compiler even if they shouldn't...)
if os.name == 'nt' and self.plat_name != get_platform():
self.compiler.initialize(self.plat_name)
# And make sure that any compile/link-related options (which might
# come from the command-line or from the setup script) are set in
# that CCompiler object -- that way, they automatically apply to
# all compiling and linking done here.
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name, value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
if self.libraries is not None:
self.compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
self.compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
self.compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
self.compiler.set_link_objects(self.link_objects)
# Now actually compile and link everything.
self.build_extensions()
def check_extensions_list(self, extensions):
"""Ensure that the list of extensions (presumably provided as a
command option 'extensions') is valid, i.e. it is a list of
Extension objects. We also support the old-style list of 2-tuples,
where the tuples are (ext_name, build_info), which are converted to
Extension instances here.
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if not isinstance(extensions, list):
raise DistutilsSetupError(
"'ext_modules' option must be a list of Extension instances")
for i, ext in enumerate(extensions):
if isinstance(ext, Extension):
continue # OK! (assume type-checking done
# by Extension constructor)
if not isinstance(ext, tuple) or len(ext) != 2:
raise DistutilsSetupError(
"each element of 'ext_modules' option must be an "
"Extension instance or 2-tuple")
ext_name, build_info = ext
log.warn("old-style (ext_name, build_info) tuple found in "
"ext_modules for extension '%s'"
"-- please convert to Extension instance", ext_name)
if not (isinstance(ext_name, str) and
extension_name_re.match(ext_name)):
raise DistutilsSetupError(
"first element of each tuple in 'ext_modules' "
"must be the extension name (a string)")
if not isinstance(build_info, dict):
raise DistutilsSetupError(
"second element of each tuple in 'ext_modules' "
"must be a dictionary (build info)")
# OK, the (ext_name, build_info) dict is type-safe: convert it
# to an Extension instance.
ext = Extension(ext_name, build_info['sources'])
# Easy stuff: one-to-one mapping from dict elements to
# instance attributes.
for key in ('include_dirs', 'library_dirs', 'libraries',
'extra_objects', 'extra_compile_args',
'extra_link_args'):
val = build_info.get(key)
if val is not None:
setattr(ext, key, val)
# Medium-easy stuff: same syntax/semantics, different names.
ext.runtime_library_dirs = build_info.get('rpath')
if 'def_file' in build_info:
log.warn("'def_file' element of build info dict "
"no longer supported")
# Non-trivial stuff: 'macros' split into 'define_macros'
# and 'undef_macros'.
macros = build_info.get('macros')
if macros:
ext.define_macros = []
ext.undef_macros = []
for macro in macros:
if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
raise DistutilsSetupError(
"'macros' element of build info dict "
"must be 1- or 2-tuple")
if len(macro) == 1:
ext.undef_macros.append(macro[0])
elif len(macro) == 2:
ext.define_macros.append(macro)
extensions[i] = ext
def get_source_files(self):
self.check_extensions_list(self.extensions)
filenames = []
# Wouldn't it be neat if we knew the names of header files too...
for ext in self.extensions:
filenames.extend(ext.sources)
return filenames
def get_outputs(self):
# Sanity check the 'extensions' list -- can't assume this is being
# done in the same run as a 'build_extensions()' call (in fact, we
# can probably assume that it *isn't*!).
self.check_extensions_list(self.extensions)
# And build the list of output (built) filenames. Note that this
# ignores the 'inplace' flag, and assumes everything goes in the
# "build" tree.
outputs = []
for ext in self.extensions:
outputs.append(self.get_ext_fullpath(ext.name))
return outputs
def build_extensions(self):
# First, sanity-check the 'extensions' list
self.check_extensions_list(self.extensions)
if self.parallel:
self._build_extensions_parallel()
else:
self._build_extensions_serial()
def _build_extensions_parallel(self):
workers = self.parallel
if self.parallel is True:
workers = os.cpu_count() # may return None
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
workers = None
if workers is None:
self._build_extensions_serial()
return
with ThreadPoolExecutor(max_workers=workers) as executor:
futures = [executor.submit(self.build_extension, ext)
for ext in self.extensions]
for ext, fut in zip(self.extensions, futures):
with self._filter_build_errors(ext):
fut.result()
def _build_extensions_serial(self):
for ext in self.extensions:
with self._filter_build_errors(ext):
self.build_extension(ext)
@contextlib.contextmanager
def _filter_build_errors(self, ext):
try:
yield
except (CCompilerError, DistutilsError, CompileError) as e:
if not ext.optional:
raise
self.warn('building extension "%s" failed: %s' %
(ext.name, e))
def build_extension(self, ext):
sources = ext.sources
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'ext_modules' option (extension '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % ext.name)
sources = list(sources)
ext_path = self.get_ext_fullpath(ext.name)
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_path, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
# First, scan the sources for SWIG definition files (.i), run
# SWIG on 'em to create .c files, and modify the sources list
# accordingly.
sources = self.swig_sources(sources, ext)
# Next, compile the source code to object files.
# XXX not honouring 'define_macros' or 'undef_macros' -- the
# CCompiler API needs to change to accommodate this, and I
# want to do one thing at a time!
# Two possible sources for extra compiler arguments:
# - 'extra_compile_args' in Extension object
# - CFLAGS environment variable (not particularly
# elegant, but people seem to expect it and I
# guess it's useful)
# The environment variable should take precedence, and
# any sensible compiler will give precedence to later
# command line args. Hence we combine them in order:
extra_args = ext.extra_compile_args or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
objects = self.compiler.compile(sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=ext.include_dirs,
debug=self.debug,
extra_postargs=extra_args,
depends=ext.depends)
        # XXX outdated variable, kept here in case third-party code
# needs it.
self._built_objects = objects[:]
# Now link the object files together into a "shared object" --
# of course, first we have to figure out all the other things
# that go into the mix.
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
# Detect target language, if not provided
language = ext.language or self.compiler.detect_language(sources)
self.compiler.link_shared_object(
objects, ext_path,
libraries=self.get_libraries(ext),
library_dirs=ext.library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
build_temp=self.build_temp,
target_lang=language)
def swig_sources(self, sources, extension):
"""Walk the list of source files in 'sources', looking for SWIG
interface (.i) files. Run SWIG on all that are found, and
return a modified 'sources' list with SWIG source files replaced
by the generated C (or C++) files.
"""
new_sources = []
swig_sources = []
swig_targets = {}
# XXX this drops generated C/C++ files into the source tree, which
# is fine for developers who want to distribute the generated
# source -- but there should be an option to put SWIG output in
# the temp dir.
if self.swig_cpp:
log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
if self.swig_cpp or ('-c++' in self.swig_opts) or \
('-c++' in extension.swig_opts):
target_ext = '.cpp'
else:
target_ext = '.c'
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == ".i": # SWIG interface file
new_sources.append(base + '_wrap' + target_ext)
swig_sources.append(source)
swig_targets[source] = new_sources[-1]
else:
new_sources.append(source)
if not swig_sources:
return new_sources
swig = self.swig or self.find_swig()
swig_cmd = [swig, "-python"]
swig_cmd.extend(self.swig_opts)
if self.swig_cpp:
swig_cmd.append("-c++")
# Do not override commandline arguments
if not self.swig_opts:
for o in extension.swig_opts:
swig_cmd.append(o)
for source in swig_sources:
target = swig_targets[source]
log.info("swigging %s to %s", source, target)
self.spawn(swig_cmd + ["-o", target, source])
return new_sources
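    # Example of the rewriting performed above (illustrative paths): given
    # sources = ['pkg/foo.c', 'pkg/ifc.i'], the returned list is
    # ['pkg/foo.c', 'pkg/ifc_wrap.c'] ('.cpp' when -c++ is in effect), and
    # SWIG is spawned roughly as: swig -python -o pkg/ifc_wrap.c pkg/ifc.i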
def find_swig(self):
"""Return the name of the SWIG executable. On Unix, this is
just "swig" -- it should be in the PATH. Tries a bit harder on
Windows.
"""
if os.name == "posix":
return "swig"
elif os.name == "nt":
# Look for SWIG in its standard installation directory on
# Windows (or so I presume!). If we find it there, great;
# if not, act like Unix and assume it's in the PATH.
for vers in ("1.3", "1.2", "1.1"):
fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
if os.path.isfile(fn):
return fn
else:
return "swig.exe"
else:
raise DistutilsPlatformError(
"I don't know how to find (much less run) SWIG "
"on platform '%s'" % os.name)
# -- Name generators -----------------------------------------------
# (extension names, filenames, whatever)
def get_ext_fullpath(self, ext_name):
"""Returns the path of the filename for a given extension.
The file is located in `build_lib` or directly in the package
(inplace option).
"""
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
# no further work needed
# returning :
# build_dir/package/path/filename
filename = os.path.join(*modpath[:-1]+[filename])
return os.path.join(self.build_lib, filename)
# the inplace option requires to find the package directory
# using the build_py command for that
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
# returning
# package_dir/filename
return os.path.join(package_dir, filename)
def get_ext_fullname(self, ext_name):
"""Returns the fullname of a given extension name.
Adds the `package.` prefix"""
if self.package is None:
return ext_name
else:
return self.package + '.' + ext_name
def get_ext_filename(self, ext_name):
r"""Convert the name of an extension (eg. "foo.bar") into the name
of the file from which it will be loaded (eg. "foo/bar.so", or
"foo\bar.pyd").
"""
from distutils.sysconfig import get_config_var
ext_path = ext_name.split('.')
# PyPy tweak: first try to get the C extension suffix from
# 'imp'. If it fails we fall back to the 'SO' config var, like
# the previous version of this code did. This should work for
# CPython too. The point is that on PyPy with cpyext, the
# config var 'SO' is just ".so" but we want to return
# ".pypy-VERSION.so" instead.
ext_suffix = _get_c_extension_suffix()
if ext_suffix is None:
ext_suffix = get_config_var('EXT_SUFFIX') # fall-back
return os.path.join(*ext_path) + ext_suffix
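    # For example (suffix values are platform-dependent, shown for
    # illustration): get_ext_filename('foo.bar') returns
    # 'foo/bar' + ext_suffix, e.g. 'foo/bar.cpython-39-x86_64-linux-gnu.so'
    # on CPython or 'foo/bar.pypy39-pp73-x86_64-linux-gnu.so' on PyPy.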
def get_export_symbols(self, ext):
"""Return the list of symbols that a shared extension has to
export. This either uses 'ext.export_symbols' or, if it's not
provided, "PyInit_" + module_name. Only relevant on Windows, where
the .pyd file (DLL) must export the module "PyInit_" function.
"""
initfunc_name = "PyInit_" + ext.name.split('.')[-1]
if initfunc_name not in ext.export_symbols:
ext.export_symbols.append(initfunc_name)
return ext.export_symbols
def get_libraries(self, ext):
"""Return the list of libraries to link against when building a
shared extension. On most platforms, this is just 'ext.libraries';
on Windows, we add the Python library (eg. python20.dll).
"""
# The python library is always needed on Windows. For MSVC, this
# is redundant, since the library is mentioned in a pragma in
# pyconfig.h that MSVC groks. The other Windows compilers all seem
# to need it mentioned explicitly, though, so that's what we do.
# Append '_d' to the python import library on debug builds.
if sys.platform == "win32":
from distutils._msvccompiler import MSVCCompiler
if not isinstance(self.compiler, MSVCCompiler):
template = "python%d%d"
if self.debug:
template = template + '_d'
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
else:
return ext.libraries
elif sys.platform[:6] == "cygwin":
template = "python%d.%d"
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
elif sys.platform[:6] == "atheos":
from distutils import sysconfig
template = "python%d.%d"
pythonlib = (template %
(sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
# Get SHLIBS from Makefile
extra = []
for lib in sysconfig.get_config_var('SHLIBS').split():
if lib.startswith('-l'):
extra.append(lib[2:])
else:
extra.append(lib)
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib, "m"] + extra
elif sys.platform == 'darwin':
# Don't use the default code below
return ext.libraries
elif sys.platform[:3] == 'aix':
# Don't use the default code below
return ext.libraries
else:
from distutils import sysconfig
if sysconfig.get_config_var('Py_ENABLE_SHARED'):
pythonlib = 'python{}.{}{}'.format(
sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff,
sysconfig.get_config_var('ABIFLAGS'))
return ext.libraries + [pythonlib]
else:
return ext.libraries
|
|
# -*- coding: utf-8 -*-
"""\
Adds environment directives:
.. environment:: Theorem
      :title: Grothendieck-Galois Theorem
Let ...
textcolor directive and a role (roles are not recursive, they can only contain
text, no other nodes; directives are recursive, though)
.. textcolor:: #00FF00
This text is green
:textcolor:`<#FF0000> this text is red`.
.. endpar::
Puts '\n\n' in LaTeX and <br> in html.
(There is no other way to end a paragraph between two environments)
This code is derived from sphinx_latex package:
https://github.com/coot/sphinx_latex/blob/master/sphinx_clatex/directives.py
"""
from docutils.parsers.rst import directives
from docutils.parsers.rst import Directive
from docutils import nodes
__all__ = [ 'newtheorem', 'EnvironmentDirective', 'AlignDirective', 'TextColorDirective', 'TheoremDirectiveFactory']
class CLaTeXException(Exception): pass
# EnvironmentDirective:
class environment(nodes.Element):
pass
class EnvironmentDirective(Directive):
required_arguments = 1
optional_arguments = 0
# final_argument_whitespace = True
# directive arguments are white space separated.
option_spec = {
'class': directives.class_option,
'name': directives.unchanged,
'title' : directives.unchanged,
'html_title' : directives.unchanged,
'latex_title' : directives.unchanged,
}
has_content = True
def run(self):
self.options['envname'] = self.arguments[0]
self.assert_has_content()
environment_node = environment(rawsource='\n'.join(self.content), **self.options)
# if ('title' in self.options):
# self.state.nested_parse(self.options['title'], self.content_offset, environment_node)
self.state.nested_parse(self.content, self.content_offset, environment_node)
self.add_name(environment_node)
return [environment_node]
def visit_environment_latex(self, node):
if 'latex_title' in node:
        # XXX: node['title'] should be parsed (for example there might be math inside)
self.body.append('\n\\begin{%s}[{%s}]' % (node['envname'], node['latex_title']))
elif 'title' in node:
        # XXX: node['title'] should be parsed (for example there might be math inside)
self.body.append('\n\\begin{%s}[{%s}]' % (node['envname'], node['title']))
else:
self.body.append('\n\\begin{%s}' % (node['envname']))
def depart_environment_latex(self, node):
self.body.append('\\end{%s}' % node['envname'])
def visit_environment_html(self, node):
"""\
This visit method produces the following html:
The 'theorem' below will be substituted with node['envname'] and title with
    node['title'] (environment node's option). Note that it differs slightly
from how LaTeX works.
<div class='environment theorem'>
<div class='environment_title theorem_title'>title</div>
<div class='environment_body theorem_body'>
...
</div>
</div>
XXX: title does not allow math roles"""
if 'label' in node:
ids = [ node['label'] ]
else:
ids = []
self.body.append(self.starttag(node, 'div', CLASS='environment %s' % node['envname'], IDS = ids))
self.body.append('<div class="environment_title %s_title">' % node['envname'])
# self.body.append(self.starttag(node, 'div', CLASS=('environment_title %s_title' % node['envname'])))
if 'html_title' in node:
self.body.append(node['html_title'])
if 'title' in node:
self.body.append(node['title'])
self.body.append('</div>')
self.body.append('<div class="environment_body %s_body">' % node['envname'])
# self.body.append(self.starttag(node, 'div', CLASS=('environment_body %s_body' % node['envname'])))
self.set_first_last(node)
def depart_environment_html(self, node):
self.body.append('</div>')
self.body.append('</div>')
#####################################################################################
#
# Definitions for align directive
#
#
######################################################################################
# AlignDirective:
class align(nodes.Element):
pass
class AlignDirective(Directive):
"""
.. align:: center
.. align:: left
.. align:: flushleft
.. align:: right
.. align:: flushright
"""
required_arguments = 1
optional_arguments = 0
has_content = True
def run(self):
if self.arguments[0] in ('left', 'flushleft'):
align_type = 'fresh-left'
elif self.arguments[0] in ('right', 'flushright'):
align_type = 'fresh-right'
else:
align_type = 'fresh-center'
self.options['align_type'] = align_type
self.options['classes'] = directives.class_option(align_type)
self.assert_has_content()
align_node = align(rawsource='\n'.join(self.content), **self.options)
self.state.nested_parse(self.content, self.content_offset, align_node)
for node in align_node:
node['classes'].extend(directives.class_option(align_type))
if ('center' not in node['classes'] and
'flushleft' not in node['classes'] and
'flushright' not in node['classes'] ):
node['classes'].extend(directives.class_option(align_type))
return [align_node]
def visit_align_latex(self, node):
self.body.append('\n\\begin{%s}' % node['align_type'])
def depart_align_latex(self, node):
self.body.append('\\end{%s}' % node['align_type'])
def visit_align_html(self, node):
# XXX: to be implemented.
pass
def depart_align_html(self, node):
# XXX: to be implemented.
pass
#####################################################################################
#
# Definitions for text in small caps role
#
#
######################################################################################
# TextSCDirective:
class TextSCDirective(Directive):
required_arguments = 0
optional_arguments = 0
has_content = True
def run(self):
self.assert_has_content()
node = textsc(rawsource='\n'.join(self.content), **self.options)
node.document = self.state.document
self.state.nested_parse(self.content, self.content_offset, node)
return [node]
class textsc(nodes.Element):
pass
def textsc_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""\
This role is interpreted in the following way:
:textsc:`text`
in latex:
\\textsc{text}
in html
<span class="small_caps">text</span>
"""
text = text.strip()
textsc_node = textsc()
text_node = nodes.Text(text)
text_node.parent = textsc_node
textsc_node.children.append(text_node)
return [textsc_node], []
def visit_textsc_html(self, node):
self.body.append('<span class="small_caps">')
def depart_textsc_html(self, node):
self.body.append('</span>')
def visit_textsc_latex(self, node):
self.body.append('\n\\textsc{')
def depart_textsc_latex(self, node):
self.body.append('}')
#####################################################################################
#
# Definitions for text in color role
#
#
######################################################################################
# TextColorDirective:
class TextColorDirective(Directive):
required_arguments = 1
optional_arguments = 0
has_content = True
def run(self):
self.assert_has_content()
textcolor_node = textcolor(rawsource='\n'.join(self.content), **self.options)
textcolor_node['color_spec'] = self.arguments[0]
self.state.nested_parse(self.content, self.content_offset, textcolor_node)
self.add_name(textcolor_node)
return [textcolor_node]
class textcolor(nodes.Element):
pass
def textcolor_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
"""\
This role is interpreted in the following way:
:textcolor:`<color_spec> text `
    where color_spec is given in HTML color notation, e.g. #FFFFFF, ...
in latex:
\\textcolor[HTML]{color_spec}{text}
(the leading # is removed from color_spec)
in html
<font color="color_spec">text</font>
"""
color_spec = text[1:text.index('>')]
text = (text[text.index('>')+1:]).strip()
textcolor_node = textcolor()
textcolor_node.children.append(nodes.Text(text))
textcolor_node['color_spec'] = color_spec
return [textcolor_node], []
def visit_textcolor_html(self, node):
self.body.append('<font color="%s">' % node['color_spec'])
def depart_textcolor_html(self, node):
self.body.append('</font>')
def visit_textcolor_latex(self, node):
color_spec = node['color_spec'][1:]
self.body.append('\n\\textcolor[HTML]{%s}{' % color_spec)
def depart_textcolor_latex(self, node):
self.body.append('}')
#####################################################################################
#
# Definitions for end paragraph directive
#
#
######################################################################################
# EndParDirective:
class endpar(nodes.Element):
pass
class EndParDirective(Directive):
required_arguments = 0
optional_arguments = 0
has_content = False
def run(self):
return [endpar()]
def visit_endpar_latex(self, node):
self.body.append('\n\n')
def depart_endpar_latex(self, node):
pass
def visit_endpar_html(self, node):
self.body.append('\n<br>\n')
def depart_endpar_html(self, node):
pass
#####################################################################################
#
# Definitions for theorem directive factory
#
#
######################################################################################
# TheoremDirectiveFactory:
def TheoremDirectiveFactory(thmname, thmcaption, thmnode, counter=None):
"""\
Function which returns a theorem class.
Takes four arguments:
thmname - name of the directive
thmcaption - caption name to use
thmnode - node to write to
counter - counter name, if None do not count
thmname='theorem', thmcaption='Theorem' will produce a directive:
.. theorem:: theorem_title
content
Note that caption is only used in html. With the above example you should
add:
\\newtheorem{theorem}{Theorem}
    to your LaTeX preamble. The directive will produce:
in LaTeX:
        \\begin{theorem}[{theorem_title}] % theorem_title will be put inside {}.
        content
        \\end{theorem}
in HTML:
<div class='environment theorem'>
<div class='environment_caption theorem_caption'>Theorem</div> <div class='environment_title theorem_title'>title</div>
<div class='environment_body theorem_body'>
content
</div>
</div>
"""
class TheoremDirective(Directive):
def __init__(self, *args, **kwargs):
self.counter = Counter(counter)
super(self.__class__, self).__init__(*args, **kwargs)
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = True
# directive arguments are white space separated.
option_spec = {
'class': directives.class_option,
'name': directives.unchanged,
}
has_content = True
def run(self):
if counter:
self.counter.stepcounter()
self.options['counter'] = self.counter.value
else:
self.options['counter'] = ''
self.options['thmname'] = thmname
self.options['thmcaption'] = thmcaption
if self.arguments:
self.options['thmtitle'] = self.arguments[0]
self.assert_has_content()
node = thmnode(rawsource='\n'.join(self.content), **self.options)
self.state.nested_parse(self.content, self.content_offset, node)
self.add_name(node)
return [node]
return TheoremDirective
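# Illustrative use of the factory (the names below are examples only; the real
# registrations happen in newtheorem()/setup() further down):
#
#   LemmaDirective = TheoremDirectiveFactory('lemma', 'Lemma', thmnode, 'lemma')
#   app.add_directive('lemma', LemmaDirective)
#
# Passing a counter name makes every ".. lemma::" occurrence step the shared
# Counter('lemma') instance; passing None leaves the theorem uncounted.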
def visit_theorem_latex(self, node):
if 'thmtitle' in node:
self.body.append('\n\\begin{%(thmname)s}[{%(thmtitle)s}]' % node)
else:
self.body.append('\n\\begin{%(thmname)s}' % node)
def depart_theorem_latex(self, node):
self.body.append('\\end{%(thmname)s}' % node)
def visit_theorem_html(self, node):
"""\
This visit method produces the following html:
The 'theorem' below will be substituted with node['envname'] and title with
    node['title'] (environment node's option). Note that it differs slightly
    from how LaTeX works.
    For how it is constructed see the __doc__ of TheoremDirectiveFactory.
XXX: you cannot use math in the title"""
if 'label' in node:
ids = [ node['label'] ]
else:
ids = []
self.body.append(self.starttag(node, 'div', CLASS='theoremenv %(thmname)s' % node, IDS = ids))
self.body.append('<div class="theoremenv_caption %(thmname)s_caption">%(thmcaption)s<span class="theoremenv_counter %(thmname)s_counter">%(counter)s</span>' % node)
if 'thmtitle' in node:
self.body.append('<span class="theoremenv_title %(thmname)s_title">%(thmtitle)s</span>' % node)
self.body.append('</div>')
self.body.append('<div class="theoremenv_body %(thmname)s_body">' % node)
self.set_first_last(node)
def depart_theorem_html(self, node):
self.body.append('</div>')
self.body.append('</div>')
class Counter(object):
"""\
Base class for counters. There is only one instance for a given name.
>>> c=Counter('counter')
>>> d=Counter('counter')
    >>> c is d
    True
    This is done using the __new__ method.
"""
registered_counters = {}
def __new__(cls, name, value=0, within=None):
if name in cls.registered_counters:
instance = cls.registered_counters[name]
instance._init = False
return instance
else:
instance = object.__new__(cls)
instance._init = True
return instance
def __init__(self, name, value=0, within=None):
if not self._init:
# __init__ once
return
self.name = name
self.value = value
self.register()
def register(self):
Counter.registered_counters[self.name] = self
def stepcounter(self):
self.value += 1
def addtocounter(self, value=1):
self.value += value
def setcounter(self, value):
self.value = value
def __str__(self):
return str(self.value)
def __unicode__(self):
return str(self.value)
class TheoremNode(nodes.Element):
pass
# newtheorem:
def newtheorem(app, thmname, thmcaption, counter=None):
"""\
    Add a new theorem. It is intended as an analog of:
\\newtheorem{theorem_name}{caption}
counter is an instance of Counter. If None (the default) the
constructed theorem will not be counted.
"""
nodename = 'thmnode_%s' % thmname
thmnode = type(nodename, (TheoremNode,), {})
globals()[nodename]=thmnode # important for pickling
app.add_node(thmnode,
html = (visit_theorem_html, depart_theorem_html),
latex = (visit_theorem_latex, depart_theorem_latex),
)
TheoremDirective = TheoremDirectiveFactory(thmname, thmcaption, thmnode, counter)
app.add_directive(thmname, TheoremDirective)
# setup:
def setup(app):
# app.add_directive('begin', EnvironmentDirective)
# app.add_node(environment,
# html = (visit_environment_html, depart_environment_html),
# latex = (visit_environment_latex, depart_environment_latex),
# )
app.add_directive('environment', EnvironmentDirective)
app.add_node(environment,
html = (visit_environment_html, depart_environment_html),
latex = (visit_environment_latex, depart_environment_latex),
)
app.add_directive('align', AlignDirective)
app.add_node(align,
html = (visit_align_html, depart_align_html),
latex = (visit_align_latex, depart_align_latex),
)
app.add_directive('textsc', TextSCDirective)
app.add_role('textsc', textsc_role)
app.add_node(textsc,
html = (visit_textsc_html, depart_textsc_html),
latex = (visit_textsc_latex, depart_textsc_latex)
)
app.add_directive('textcolor', TextColorDirective)
app.add_role('textcolor', textcolor_role)
app.add_node(textcolor,
html = (visit_textcolor_html, depart_textcolor_html),
latex = (visit_textcolor_latex, depart_textcolor_latex)
)
app.add_directive('endpar', EndParDirective)
app.add_node(endpar,
html = (visit_endpar_html, depart_endpar_html),
latex = (visit_endpar_latex, depart_endpar_latex)
)
# Add standard theorems:
newtheorem(app, 'theorem', 'Theorem', None)
newtheorem(app, 'corollary', 'Corollary', None)
newtheorem(app, 'proposition', 'Proposition', None)
newtheorem(app, 'definition', 'Definition', None)
newtheorem(app, 'lemma', 'Lemma', None)
newtheorem(app, 'axiom', 'Axiom', None)
newtheorem(app, 'example', 'Example', None)
newtheorem(app, 'exercise', 'Exercise', None)
newtheorem(app, 'remark', 'Remark', None)
newtheorem(app, 'proof', 'Proof', None)
newtheorem(app, 'think', 'Think', None)
newtheorem(app, 'formula', 'Formula', None)
newtheorem(app, 'result', 'Result', None)
# test if there is no global name which starts with 'thmnode_',
# these names are reserved for thmnodes (newtheorem()).
for name in globals().copy():
if name.startswith('thmnode_'):
raise CLaTeXException('CLaTeX Internal Error: "%s" in globals()' % name)
|
|
from generator.actions import Actions
import random
import string
import struct
import ctypes
import sys
MAX_BOARD_WIDTH = 8192
NUM_RUNNERS = 25
NUM_STEPS = 10000
NUM_BOMBS = 5
NUM_SHOTS = 125
class ConwayPixel():
def __init__(self, x=-1, y=-1, is_alive=0, bomb_set=0):
self.x = x
self.y = y
self.is_alive = is_alive
self.bomb_set = bomb_set
def __eq__(self, b):
if isinstance(b, ConwayPixel):
return (self.x == b.x and self.y == b.y and self.is_alive == b.is_alive and self.bomb_set == b.bomb_set)
return False
def __hash__(self):
return hash((self.x, self.y, self.is_alive, self.bomb_set))
def get_adjacent_coords_set(x,y):
coords = [(x-1, y-1), (x, y-1), (x+1, y-1), (x-1, y), (x+1, y), (x-1, y+1), (x, y+1), (x+1, y+1)]
coords_set = set()
for (cx, cy) in coords:
if cx < MAX_BOARD_WIDTH and cy < MAX_BOARD_WIDTH:
coords_set.add((cx, cy))
return coords_set
def random_seed():
return random.randint(1, 4294967295)
def random_steps():
return random.randint(100,1000)
def random_coordinate():
    x = random.randint(0, MAX_BOARD_WIDTH - 1)
    y = random.randint(0, MAX_BOARD_WIDTH - 1)
return (x,y)
class GameOfLifeAndDeath(Actions):
new_seed_text = ("Before we begin. Enter a number [1 - 4294967295]\n" +
"Or any number. We'll figure it out: ")
game_ended_menu = (" --GAME OVER--\n" +
"1. New Game\n" +
"2. Quit\n\n")
board_hidden_menu = (" --Board: HIDDEN--\n" +
"1. Shoot!\n" +
"2. Reveal Board\n" +
"3. New Game\n" +
"4. Quit\n")
board_revealed_menu = (" --Board: REVEALED--\n" +
"1. Place Bomb\n" +
"2. Reveal Board\n" +
"3. Let them Run!\n" +
"4. New Game\n" +
"5. Quit\n")
shoot_coordinate_text = "Enter Shot Coordinates Ex: 100, 100:: "
bomb_coordinate_text = "Enter Bomb Coordinates Ex: 100, 100:: "
shot_miss_text = "You missed. Better luck next time.\n"
shot_empty_text = "You're out of bullets.\n"
shot_hit_text = ("Great shot! You got one\n" +
"Scored: 100 points\n")
bomb_planted_text = "Successfully planted bomb.\n"
bomb_empty_text = "You're out of bombs.\n"
    bomb_failed_text = "There is already a bomb here. Try somewhere else.\n"
run_steps_text = "Enter amount of time to run: [100-1000]:: "
new_game_text = "New Game -- Ready, set, go!\n"
high_score_text = "A new high score!!\n\n"
game_over_text = "GAME OVER!\n"
exit_text = "Thanks for Playing\nEXITING...\n"
def _read(self, string):
self.read(length=len(string), expect=string)
def _read_menu(self):
if self.steps_left <= 0:
menu = self.game_ended_menu
self._read(menu)
self._read_highscore()
else:
if self.state['board_revealed']:
menu = self.board_revealed_menu
else:
menu = self.board_hidden_menu
self._read(menu)
self._read_game_info()
self._read("Selection-: ")
def _read_highscore(self):
self._read("High Score = {0}\n\n".format(self.high_score))
def _read_game_info(self):
self._read("Current Score = {0} | Total Time Remaining: {1} | High Score = {2} \n".format(
self.current_score, self.steps_left, self.high_score))
self._read("Shots Remaining: {0} | Bombs Remaining: {1}\n\n".format(
self.shots_left, self.bombs_left))
    def _living_pixels_append(self, pixel):
        if 0 <= pixel.x < MAX_BOARD_WIDTH and 0 <= pixel.y < MAX_BOARD_WIDTH:
            self.living_pixels.append(pixel)
    def _bomb_pixels_append(self, pixel):
        if 0 <= pixel.x < MAX_BOARD_WIDTH and 0 <= pixel.y < MAX_BOARD_WIDTH:
            self.bomb_pixels.append(pixel)
def _place_runner(self, x, y):
self._living_pixels_append(ConwayPixel(x, y, 1))
if (x < MAX_BOARD_WIDTH/2 and y < MAX_BOARD_WIDTH/2):
self._living_pixels_append(ConwayPixel(x, y+1, 1))
self._living_pixels_append(ConwayPixel(x, y+2, 1))
self._living_pixels_append(ConwayPixel(x-1, y+2, 1))
self._living_pixels_append(ConwayPixel(x-2, y+1, 1))
elif (x >= MAX_BOARD_WIDTH/2 and y < MAX_BOARD_WIDTH/2):
self._living_pixels_append(ConwayPixel(x, y+1, 1))
self._living_pixels_append(ConwayPixel(x, y+2, 1))
self._living_pixels_append(ConwayPixel(x+1, y+2, 1))
self._living_pixels_append(ConwayPixel(x+2, y+1, 1))
elif (x < MAX_BOARD_WIDTH/2 and y >= MAX_BOARD_WIDTH/2):
self._living_pixels_append(ConwayPixel(x, y+1, 1))
self._living_pixels_append(ConwayPixel(x, y+2, 1))
self._living_pixels_append(ConwayPixel(x-1, y, 1))
self._living_pixels_append(ConwayPixel(x-2, y+1, 1))
elif (x >= MAX_BOARD_WIDTH/2 and y >= MAX_BOARD_WIDTH/2):
self._living_pixels_append(ConwayPixel(x, y+1, 1))
self._living_pixels_append(ConwayPixel(x, y+2, 1))
self._living_pixels_append(ConwayPixel(x+1, y, 1))
self._living_pixels_append(ConwayPixel(x+2, y+1, 1))
self.living_pixels = list(set(self.living_pixels))
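    # _rand_int below appears to implement a xorshift128-style PRNG; the
    # intent is presumably to mirror the service's own generator so that,
    # given the same seed, the poller can predict where runners are placed
    # (an assumption -- the service binary is not shown here).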
def _rand_int(self):
temp = self.a ^ ctypes.c_uint32(self.a << 11).value
self.a = self.b
self.b = self.c
self.c = self.d
self.d = ctypes.c_uint32((self.d ^ (self.d >> 19)) ^ (temp ^ (temp >> 8))).value
return self.d
    def _create_runner(self):
        x = self._rand_int() % MAX_BOARD_WIDTH
        y = self._rand_int() % MAX_BOARD_WIDTH
        self._place_runner(x, y)
def _take_shot(self, x, y):
if self.shots_left == 0:
self._read(self.shot_empty_text)
return
self.shots_left -= 1
for i in xrange(len(self.living_pixels)):
if self.living_pixels[i].x == x and self.living_pixels[i].y == y:
self.living_pixels.pop(i)
self.current_score += 100
self._read(self.shot_hit_text)
return
self._read(self.shot_miss_text)
def _plant_bomb(self, x, y):
if self.bombs_left == 0:
self._read(self.bomb_empty_text)
return
for bomb in self.bomb_pixels:
if bomb.x == x and bomb.y == y:
self._read(self.bomb_failed_text)
return
self.bombs_left -= 1
self._bomb_pixels_append(ConwayPixel(x, y, 0, 10))
self._read(self.bomb_planted_text)
def _read_pixels_in_order(self):
ordered = sorted(self.living_pixels + self.bomb_pixels, key=lambda px: (px.x, px.y))
self._read("GAME BOARD\n")
for i in ordered:
self._read("--| (x,y) = ({0},{1}) | Alive={2} | Bomb={3} |\n".format(i.x, i.y, i.is_alive, i.bomb_set))
self._read("\n")
    # Main functionality:
def _bomb_collisions(self):
total_score = 0
ordered = sorted(self.bomb_pixels, key=lambda px: (px.x, px.y))
for bomb in ordered:
bomb.bomb_set -= 1
if bomb.bomb_set != 0:
continue
score = 0
pixels_hit = 0
living_pixels_set = self._get_living_pixels_set()
adj_coords_set = get_adjacent_coords_set(bomb.x, bomb.y)
adj_coords_set.add((bomb.x, bomb.y))
bombed_pixels = living_pixels_set & adj_coords_set
for i in reversed(xrange(len(self.living_pixels))):
for (x,y) in bombed_pixels:
if self.living_pixels[i].x == x and self.living_pixels[i].y == y:
self.living_pixels.pop(i)
pixels_hit += 1
score += 100 + pixels_hit * score
total_score += score
for i in reversed(xrange(len(self.bomb_pixels))):
if self.bomb_pixels[i].bomb_set == 0:
self.bomb_pixels.pop(i)
return total_score
def _get_living_pixels_set(self):
living_pixel_set = set()
for living_pixel in self.living_pixels:
living_pixel_set.add((living_pixel.x, living_pixel.y))
return living_pixel_set
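    # _run_single_generation applies Conway's Game of Life rules to the
    # tracked pixels: a live cell survives with 2 or 3 live neighbours, a dead
    # cell with exactly 3 live neighbours is born, and everything else dies or
    # stays dead. Only cells adjacent to live ones need to be examined.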
def _run_single_generation(self):
dead_pixels_to_check = set()
next_gen_pixels = []
living_pixels_set = self._get_living_pixels_set()
for pixel in self.living_pixels:
adj_coords_set = get_adjacent_coords_set(pixel.x, pixel.y)
adj_living_pixels = living_pixels_set & adj_coords_set
if len(adj_living_pixels) == 2 or len(adj_living_pixels) == 3:
next_gen_pixels.append(pixel)
dead_pixels_to_check |= adj_coords_set - living_pixels_set
for (deadx, deady) in dead_pixels_to_check:
adj_coords_set = get_adjacent_coords_set(deadx, deady)
adj_living_pixels = living_pixels_set & adj_coords_set
if len(adj_living_pixels) == 3:
next_gen_pixels.append(ConwayPixel(deadx, deady, 1))
self.living_pixels = list(set(next_gen_pixels))
def _new_game(self):
self.high_score = self.current_score if self.high_score < self.current_score else self.high_score
self.living_pixels = []
self.bomb_pixels = []
for x in xrange(NUM_RUNNERS):
self._create_runner()
self.state['board_revealed'] = False
self.current_score = 0
self.steps_left = NUM_STEPS
self.shots_left = NUM_SHOTS
self.bombs_left = NUM_BOMBS
self._read(self.new_game_text)
def start(self):
self.a = ctypes.c_uint32(111111111).value
self.b = ctypes.c_uint32(222222222).value
self.c = ctypes.c_uint32(333333333).value
self.d = ctypes.c_uint32(444444444).value
self.high_score = 0
self.current_score = 0
self.seed = str(random_seed())
self.a = ctypes.c_uint32(int(self.seed)).value
self._read(self.new_seed_text)
self.write(self.seed + '\n')
self._new_game()
def shoot(self):
self._read_menu()
self.write("1\n")
        self._read(self.shoot_coordinate_text)
x, y = random_coordinate()
self.write("{0}, {1}\n".format(x, y))
self._take_shot(x,y)
def reveal_board(self):
self._read_menu()
self.write("2\n")
self._read_pixels_in_order()
self.state['board_revealed'] = True
def place_bomb(self):
self._read_menu()
self.write("1\n")
self._read(self.bomb_coordinate_text)
x, y = random_coordinate()
self.write("{0}, {1}\n".format(x, y))
self._plant_bomb(x,y)
def let_run(self):
score = 0
self._read_menu()
self.write("3\n")
self._read(self.run_steps_text)
num_steps = random_steps()
self.write("{0}\n".format(num_steps))
num_steps = self.steps_left if self.steps_left <= num_steps else num_steps
for x in xrange(num_steps):
self._run_single_generation()
score += self._bomb_collisions()
self.current_score += score
if score != 0:
score_str = "Nice bombing! You racked up {0} points.\n".format(score)
self._read(score_str)
if (self.steps_left <= num_steps):
self.steps_left = 0
self._read(self.game_over_text)
if(self.current_score > self.high_score):
self._read(self.high_score_text)
self._read_menu()
self.write("1\n")
else:
self.steps_left -= num_steps
self.state['board_revealed'] = False
def new_game(self):
self._read_menu()
if self.state['board_revealed']:
self.write("4\n")
else:
self.write("3\n")
self._new_game()
def quit(self):
self._read_menu()
if self.state['board_revealed']:
self.write("5\n")
else:
self.write("4\n")
self._read(self.exit_text)
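# The Actions subclass above mirrors the service's game state (board, score,
# shots, bombs, PRNG) so the poller can predict the exact text the service
# will emit and verify it with the expect-style _read helpers.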
|
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=unidiomatic-typecheck
"""Prototype decorator for defining graph functions with eager semantics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import weakref
from tensorflow.python.eager import context
from tensorflow.python.eager import function as function_lib
from tensorflow.python.eager import lift_to_graph
from tensorflow.python.framework import func_graph as func_graph_module
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.tf_export import tf_export
class UnliftedInitializerVariable(resource_variable_ops.UninitializedVariable):
"""Variable which does not lift its initializer out of function context.
Instances of this variable, when created, build a graph which runs their
initializer inside a tf.cond(is_initialized) block.
This can only be created inside a defun called from (eventually) eager
mode. That is, non-function-building graphs are not supported.
"""
def __init__(self,
initial_value=None,
trainable=None,
caching_device=None,
name=None,
dtype=None,
constraint=None,
add_initializers_to=None,
lifted_initializer_graph=None,
synchronization=None,
aggregation=None,
shape=None,
**unused_kwargs):
"""Creates a variable.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called.
(Note that initializer functions from init_ops.py must first be bound
to a shape before being used here.)
trainable: If `True`, GradientTapes automatically watch uses of this
Variable.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If None, either the datatype will be kept (if initial_value is
a Tensor) or float32 will be used (if it is a Python object convertible
to a Tensor).
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
add_initializers_to: if not None and not in legacy graph mode, the
initializer tensor will be added to this map in addition to adding the
assignment to the function.
lifted_initializer_graph: FuncGraph to try to lift initializers to.
      synchronization: Indicates when a distributed variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
shape: (optional) The shape of this variable. If None, the shape of
`initial_value` will be used. When setting this argument to
`tf.TensorShape(None)` (representing an unspecified shape), the variable
can be assigned with values of different shapes.
Raises:
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
RuntimeError: If called outside of a function definition.
"""
if not ops.inside_function():
      # If we've been init_scope()d out of the function definition, there's
      # nothing to do here; we can't really do the capturing or conditional logic.
resource_variable_ops.ResourceVariable.__init__(
self, initial_value=initial_value, trainable=trainable,
caching_device=caching_device, name=name, dtype=dtype,
constraint=constraint)
return
if initial_value is None:
raise ValueError("initial_value must be specified.")
init_from_fn = callable(initial_value)
if constraint is not None and not callable(constraint):
raise ValueError("The `constraint` argument must be a callable.")
if isinstance(initial_value, trackable.CheckpointInitialValue):
self._maybe_initialize_trackable()
self._update_uid = initial_value.checkpoint_position.restore_uid
initial_value = initial_value.wrapped_value
with ops.name_scope(name, "Variable", []
if init_from_fn else [initial_value]) as scope_name:
with ops.name_scope("Initializer"), ops.device(None):
initial_value = ops.convert_to_tensor(
initial_value() if init_from_fn else initial_value,
name="initial_value", dtype=dtype)
assert initial_value is not None
# Don't use `shape or initial_value.shape` since TensorShape has
# overridden `__bool__`.
if shape is None:
shape = initial_value.shape
# Use the constructor for UninitializedVariable to start. Outside the name
# scope so we don't double up the prefix.
super(UnliftedInitializerVariable, self).__init__(
trainable=trainable,
caching_device=caching_device,
name=name,
shape=shape,
dtype=initial_value.dtype,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation,
extra_handle_data=initial_value,
**unused_kwargs)
with ops.name_scope(scope_name):
if self._in_graph_mode:
with ops.init_scope():
outer_graph = ops.get_default_graph()
func_graph = ops.get_default_graph()
function_placeholders = (
func_graph.inputs + func_graph.internal_captures)
placeholder_ops = set(
[tensor.op for tensor in function_placeholders])
lifted_initializer = lift_to_graph.lift_to_graph(
[initial_value], outer_graph,
disallowed_placeholders=placeholder_ops)[initial_value]
with ops.init_scope():
self._initial_value = lifted_initializer
with ops.name_scope("IsInitialized"):
self._is_initialized_op = (
resource_variable_ops.var_is_initialized_op(self._handle))
if initial_value is not None:
with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
self._initializer_op = resource_variable_ops.assign_variable_op(
self._handle, lifted_initializer, name=n)
else:
if add_initializers_to is not None:
add_initializers_to[self] = initial_value
def assign_fn():
with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
resource_variable_ops.assign_variable_op(
self._handle,
initial_value,
name=n)
# Returning values to keep tf.cond happy.
return ops.convert_to_tensor(1)
def not_assign_fn():
return ops.convert_to_tensor(0)
# Note: this cond is always guaranteed to run because we're inside a
# defun which will insert automatic control dependencies.
control_flow_ops.cond(
resource_variable_ops.var_is_initialized_op(self._handle),
not_assign_fn, assign_fn)
RUN_FUNCTIONS_EAGERLY = False
@tf_export("config.experimental_run_functions_eagerly")
def run_functions_eagerly(run_eagerly):
"""Enables / disables eager execution of `tf.function`s.
After calling `tf.config.experimental_run_functions_eagerly(True)` all
invocations of tf.function will run eagerly instead of running through a graph
function.
This can be useful for debugging or profiling.
Similarly, calling `tf.config.experimental_run_functions_eagerly(False)` will
revert the behavior of all functions to graph functions.
Args:
run_eagerly: Boolean. Whether to run functions eagerly.
"""
global RUN_FUNCTIONS_EAGERLY
RUN_FUNCTIONS_EAGERLY = bool(run_eagerly)
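# Illustrative use of the switch above (a sketch): wrap a debugging session so
# that `tf.function`s execute their Python bodies eagerly, then restore graph
# execution afterwards.
#
#   tf.config.experimental_run_functions_eagerly(True)
#   ...  # step through the Python function in a debugger
#   tf.config.experimental_run_functions_eagerly(False)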
class FunctionDeleter(object):
def __init__(self, func_graph):
self.func_graph = func_graph
def __del__(self):
try:
func_graph_module.dismantle_func_graph(self.func_graph)
except: # pylint: disable=bare-except
# Note: bare except here because this can be noisy at shutdown time.
pass
class Function(object):
"""Wrapper class for the graph functions defined for a Python function.
See the documentation for `tf.function` for more information on the semantics
of defined functions.
`Function` is thread-compatible.
"""
def __init__(self,
python_function,
name,
input_signature=None,
autograph=True,
experimental_autograph_options=None,
experimental_relax_shapes=False):
"""Initializes a `Function`.
Args:
python_function: the function to be wrapped.
name: the name given to it.
input_signature: a possibly nested sequence of `TensorSpec` objects
specifying the input signature of this function. If `None`, a separate
function is instantiated for each inferred input signature.
autograph: whether `python_function` should be converted to graph mode.
See https://www.tensorflow.org/guide/autograph for more information.
experimental_autograph_options: optional tuple of
tensorflow.autograph.Feature values. Allows enabling additional
conversion options when autograph is set to True.
experimental_relax_shapes: When true, argument shapes may be relaxed to
        avoid unnecessary retracing.
Raises:
ValueError: if `input_signature` is not None and the `python_function`'s
argspec has keyword arguments.
"""
self._python_function = python_function
self._function_spec = function_lib.FunctionSpec.from_function_and_signature(
python_function, input_signature)
self._autograph = autograph
self._experimental_autograph_options = experimental_autograph_options
self.experimental_relax_shapes = experimental_relax_shapes
self._created_variables = None
self._stateful_fn = None
self._stateless_fn = None
self._descriptor_cache = weakref.WeakKeyDictionary()
self._name = name
def _defun_with_scope(self, scope):
"""Creates a defun wrapped inside a variable creator scope."""
weak_wrapped_fn = None
def wrapped_fn(*args, **kwds):
"""Wraps `self._python_function` in a variable creator scope."""
# We register a variable creator with reduced priority. If an outer
# variable creator is just modifying keyword arguments to the variable
# constructor, this will work harmoniously. Since the `scope` registered
# here actually creates the variable, it taking priority would otherwise
# ignore the outer creator.
#
# If an outer variable creator calls the variable constructor manually,
# for example creating a MirroredVariable, then they won't call our
# creator. This means we won't be able to trace the initialization graph,
# and so variable initializers can't depend on function arguments. This is
# better than the alternative, tracing the initialization graph but giving
# the user a variable type they didn't want.
with ops.get_default_graph()._variable_creator_scope(scope, priority=50): # pylint: disable=protected-access
# __wrapped__ allows AutoGraph to swap in a converted function. We give
# the function a weak reference to itself to avoid a reference cycle.
return weak_wrapped_fn().__wrapped__(*args, **kwds)
weak_wrapped_fn = weakref.ref(wrapped_fn)
return self._defun(tf_decorator.make_decorator(
self._python_function,
wrapped_fn))
def _defun(self, fn):
"""Returns a defun generated from the input function."""
return function_lib.defun(
fn,
input_signature=self.input_signature,
autograph=self._autograph,
experimental_autograph_options=self._experimental_autograph_options,
experimental_relax_shapes=self.experimental_relax_shapes)
def _initialize(self, args, kwds, add_initializers_to=None):
"""Initializes, on the first call.
Creates two `Function`s, one that will allow creation of variables
and one that won't.
Additionally runs a trace for the `Function` that allows creation
of variables.
Args:
args: Arguments to the underlying python callable.
kwds: Keyword arguments to the python callable.
add_initializers_to: Where to collect variable initializers, if not None.
"""
created_variables = []
lifted_initializer_graph = func_graph_module.FuncGraph("initializer")
def variable_capturing_scope(unused_next_creator, **kwds):
"""Creates UnliftedInitializerVariables and saves references to them."""
v = UnliftedInitializerVariable(
add_initializers_to=add_initializers_to,
lifted_initializer_graph=lifted_initializer_graph, **kwds)
created_variables.append(weakref.ref(v))
return v
self._created_variables = created_variables
self._stateful_fn = self._defun_with_scope(variable_capturing_scope)
self._stateful_fn._name = self._name # pylint: disable=protected-access
# Force the definition of the function for these arguments
self._lifted_initializer_graph = lifted_initializer_graph
self._graph_deleter = FunctionDeleter(self._lifted_initializer_graph)
self._concrete_stateful_fn = (
self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
*args, **kwds))
def invalid_creator_scope(*unused_args, **unused_kwds):
"""Disables variable creation."""
raise ValueError(
"tf.function-decorated function tried to create "
"variables on non-first call.")
self._stateless_fn = self._defun_with_scope(invalid_creator_scope)
self._stateless_fn._name = self._name # pylint: disable=protected-access
def _decorate(self, decorator):
"""Allows the captured Python function to be decorated in place.
This method is only safe to call when the Function has not been called by a
user. It makes sense to use this method to push a decorator into the
function rather than wrapping the function in the decorator.
We use this in tf.Module to allow user annotated `tf.functions` to remain as
`Function` objects but still automatically enter the Module name_scope
when they are evaluated like all other methods.
Args:
decorator: A callable accepting a single argument which is the function
to decorate and returning a callable result.
Raises:
ValueError: If the function has been called a ValueError is raised.
"""
if self._stateful_fn is not None or self._stateless_fn is not None:
raise ValueError(
"Functions cannot be decorated after they have been traced.")
self._python_function = decorator(self._python_function)
self._function_spec = function_lib.FunctionSpec.from_function_and_signature(
self._python_function, self.input_signature)
def __call__(self, *args, **kwds):
"""Calls the graph function."""
context.ensure_initialized()
if RUN_FUNCTIONS_EAGERLY:
return self._python_function(*args, **kwds)
if self._created_variables:
# In this case we have created variables on the first call, so we run the
# defunned version which is guaranteed to never create variables.
return self._stateless_fn(*args, **kwds) # pylint: disable=not-callable
elif self._stateful_fn is not None:
# In this case we have not created variables on the first call. So we can
# run the first trace but we should fail if variables are created.
results = self._stateful_fn(*args, **kwds)
if self._created_variables:
raise ValueError("Creating variables on a non-first call to a function"
" decorated with tf.function.")
return results
# This is the first call of __call__, so we have to initialize.
initializer_map = {}
self._initialize(args, kwds, add_initializers_to=initializer_map)
if self._created_variables:
try:
# Attempt to initialize variables eagerly and without conds by lifting
# out initialization graphs. This is the only initialization strategy
# compatible with XLA at the moment.
self._initialize_uninitialized_variables(initializer_map)
except lift_to_graph.UnliftableError:
pass # Fall through to cond-based initialization.
else:
# Lifting succeeded, so variables are initialized and we can run the
# stateless function.
return self._stateless_fn(*args, **kwds)
else:
canon_args, canon_kwds = \
self._stateful_fn._function_spec.canonicalize_function_inputs( # pylint: disable=protected-access
*args, **kwds)
# If we did not create any variables the trace we have is good enough.
return self._concrete_stateful_fn._filtered_call(canon_args, canon_kwds) # pylint: disable=protected-access
def fn_with_cond(*inner_args, **inner_kwds):
"""Conditionally runs initialization if it's needed."""
condition = True
for wr in self._created_variables:
variable = wr()
if variable is None:
raise ValueError(
"A tf.Variable created inside your tf.function has been"
" garbage-collected. Your code needs to keep Python references"
" to variables created inside `tf.function`s.\n"
"\n"
"A common way to raise this error is to create and return a"
" variable only referenced inside your function:\n"
"\n"
"@tf.function\n"
"def f():\n"
" v = tf.Variable(1.0)\n"
" return v\n"
"\n"
"v = f() # Crashes with this error message!\n"
"\n"
"The reason this crashes is that @tf.function annotated"
" function returns a **`tf.Tensor`** with the **value** of the"
" variable when the function is called rather than the"
" variable instance itself. As such there is no code holding a"
" reference to the `v` created inside the function and Python"
" garbage collects it.\n"
"\n"
"The simplest way to fix this issue is to create variables"
" outside the function and capture them:\n"
"\n"
"v = tf.Variable(1.0)\n"
"\n"
"@tf.function\n"
"def f():\n"
" return v\n"
"\n"
"f() # <tf.Tensor: ... numpy=1.>\n"
"v.assign_add(1.)\n"
"f() # <tf.Tensor: ... numpy=2.>")
condition = math_ops.logical_and(
condition, resource_variable_ops.var_is_initialized_op(
variable.handle))
# We want to call stateless_fn if possible because it avoids recomputing
# potentially expensive initializers.
return control_flow_ops.cond(
condition,
lambda: self._stateless_fn(*inner_args, **inner_kwds),
functools.partial(self._concrete_stateful_fn._filtered_call, # pylint: disable=protected-access
inner_args, inner_kwds))
# We've created variables and are unable to lift the initialization graphs,
# so we fall back to initializing with conds while running the function.
canon_args, canon_kwds = \
self._stateful_fn._function_spec.canonicalize_function_inputs( # pylint: disable=protected-access
*args, **kwds)
return function_lib.defun(fn_with_cond)(*canon_args, **canon_kwds)
@property
def python_function(self):
"""The python function wrapped in this tf.function."""
return self._python_function
@property
def input_signature(self):
return self._function_spec.input_signature
@property
def function_spec(self):
return self._function_spec
def _initialize_uninitialized_variables(self, initializer_map):
"""Make and call a `ConcreteFunction` which initializes variables."""
# Note: using defun here avoids an infinite recursion.
@function_lib.defun
def initialize_variables():
for v, init in initializer_map.items():
with ops.init_scope():
if resource_variable_ops.var_is_initialized_op(v.handle):
# Ignore variables which are already initialized at trace time.
continue
v.assign(lift_to_graph.lift_to_graph(
[init], ops.get_default_graph())[init])
with ops.init_scope():
return initialize_variables.get_concrete_function()()
def get_initialization_function(self, *args, **kwargs):
"""Returns a `ConcreteFunction` which initializes this function's variables.
Requires that this function hasn't been accessed yet through either calling
it or calling get_concrete_function. Fails if we cannot build an initializer
function which does not depend on the concrete values of the inputs to this
function.
Note that running this function will overwrite any values currently assigned
to variables, for example restores from a checkpoint.
Args:
*args: arguments to the underlying python callable.
**kwargs: keyword arguments to the python callable.
Returns:
A `ConcreteFunction` object which initializes the variables of this
function.
Raises:
RuntimeError: if called after the variables have been initialized.
"""
if self._stateful_fn is not None:
raise RuntimeError(
"get_initialization_function cannot be called after the function "
"has been used")
# Here we trace the function, collect the initializers, and attempt to
# extract them and run them eagerly. Fail only if we cannot do so.
initializer_map = {}
self._initialize(args, kwargs, add_initializers_to=initializer_map)
# Note: using defun here avoids an infinite recursion.
@function_lib.defun
def initialize_variables():
for v, init in initializer_map.items():
v.assign(lift_to_graph.lift_to_graph(
[init], ops.get_default_graph())[init])
return initialize_variables.get_concrete_function()
def _list_all_concrete_functions_for_serialization(self):
"""Returns all concrete functions for serialization.
Returns:
A list of instances of `Function`.
"""
if self.input_signature is not None:
self.get_concrete_function()
concrete_functions = []
# pylint: disable=protected-access
if self._stateful_fn:
concrete_functions.extend(
self._stateful_fn._function_cache.all_values())
if self._stateless_fn:
concrete_functions.extend(
self._stateless_fn._function_cache.all_values())
# pylint: enable=protected-access
deduplicated_concrete_functions = []
seen_signatures = []
# We are using a list so that:
# - the returned collection is deterministic, and
# - we can use a custom equality operator (is_same_structure).
# This is run only at serialization time on likely very small inputs so we
# are not concerned about O(n^2) runtime.
for concrete_function in concrete_functions:
signature = concrete_function.structured_input_signature
flattened = nest.flatten(signature)
if any(
isinstance(arg, func_graph_module.UnknownArgument)
for arg in flattened):
logging.info("Unsupported signature for serialization: %s.", signature)
continue
equal_to_signature = functools.partial(
function_lib.is_same_structure, signature, check_values=True)
if not any(equal_to_signature(s) for s in seen_signatures):
deduplicated_concrete_functions.append(concrete_function)
seen_signatures.append(signature)
return deduplicated_concrete_functions
def get_concrete_function(self, *args, **kwargs):
"""Returns a `ConcreteFunction` specialized to inputs and execution context.
If this `Function` was created with an `input_signature`, `args` and
`kwargs` may be omitted. With an input signature there is only one
concrete function associated with this `Function`.
If there is no fixed `input_signature` associated with this
`Function`, positional and keyword arguments to `get_concrete_function`
follow the same rules as input signature specification, with `tf.TensorSpec`
objects describing `tf.Tensor`s which will be passed to the concrete
function.
Each `tf.Tensor` argument to the concrete function must have a unique name,
either because it is the only one associated with a named argument of the
Python function or because an explicit `name=` was passed to its
`tf.TensorSpec` object. These names become the argument names for the
concrete function.
Arguments to the concrete function may always be specified as keyword
arguments, naming the Tensor input. Positional arguments may be used instead
when each preceding argument to the Python function is a Tensor.
```python
@tf.function
def f(x):
return x
f_concrete = f.get_concrete_function(tf.TensorSpec([], tf.float64))
f_concrete(tf.constant(1.))
f_concrete(x=tf.constant(1.))
```
Nested structures containing Tensors may be specified when retrieving
concrete functions. Structures with multiple Tensors are expanded into
multiple arguments of the concrete function. Since multiple concrete
function arguments are associated with one argument to the original
function, these Tensors must be named explicitly. Tensors in nested
structures may not be passed using positional arguments when calling the
concrete function.
```python
f_concrete2 = f.get_concrete_function(
(tf.TensorSpec(None, tf.float64, name="first"),
tf.TensorSpec([], tf.float32, name="second")))
# Keyword arguments are required when identifying Tensors in nested
# structures.
f_concrete2(first=tf.constant([1.]), second=tf.constant(0.))
```
Functions with fixed input signatures have only one concrete function
associated with them, which can be retrieved without specifying any
arguments. As before Tensors must have unique names, either inferred from
the argument names in the original Python function or specified
explicitly.
```python
    @tf.function(input_signature=(tf.TensorSpec(None, tf.float32),))
def f_sig(y):
return y
    f_sig_concrete = f_sig.get_concrete_function()
f_sig_concrete(tf.constant(1.))
f_sig_concrete(y=tf.constant(1.))
```
Args:
*args: inputs to specialize on.
**kwargs: inputs to specialize on.
Returns:
A TensorFlow function which takes exactly one `tf.Tensor` per argument.
Raises:
ValueError: if this object has not yet been called on concrete values.
"""
if self._stateful_fn is None:
initializer_map = {}
self._initialize(args, kwargs, add_initializers_to=initializer_map)
self._initialize_uninitialized_variables(initializer_map)
if self._created_variables:
# In this case we have created variables on the first call, so we run the
# defunned version which is guaranteed to never create variables.
return self._stateless_fn.get_concrete_function(*args, **kwargs)
elif self._stateful_fn is not None:
# In this case we have not created variables on the first call. So we can
# run the first trace but we should fail if variables are created.
concrete = self._stateful_fn.get_concrete_function(*args, **kwargs)
if self._created_variables:
raise ValueError("Creating variables on a non-first call to a function"
" decorated with tf.function.")
return concrete
def __get__(self, instance, owner):
"""Makes it possible to defun instance methods."""
del owner
# `instance` here is the instance that this `Function` was accessed through
# e.g., for
#
# class Foo(object):
#
# @function.defun
# def bar(self):
# ...
#
# foo = Foo()
# foo.bar() # `foo.bar` is a `Function` instance
#
# then `instance` will be `foo` (and `owner` will be `Foo`). We create a
# new instance of `Function` here to allow different instances each
# to create variables once, thereby allowing methods to be decorated with
# tf.function. Keeps a cache to avoid retracing the function every time the
# descriptor is accessed.
if instance not in self._descriptor_cache:
if instance is None:
return self
self._descriptor_cache[instance] = (
function_lib.class_method_to_instance_method(self, instance))
return self._descriptor_cache[instance]
@tf_export("function")
def function(func=None,
input_signature=None,
autograph=True,
experimental_autograph_options=None,
experimental_relax_shapes=False):
"""Creates a callable TensorFlow graph from a Python function.
`function` constructs a callable that executes a TensorFlow graph
(`tf.Graph`) created by tracing the TensorFlow operations in `func`.
This allows the TensorFlow runtime to apply optimizations and exploit
parallelism in the computation defined by `func`.
_Example Usage_
```python
def f(x, y):
return tf.reduce_mean(tf.multiply(x ** 2, 3) + y)
g = tf.function(f)
x = tf.constant([[2.0, 3.0]])
y = tf.constant([[3.0, -2.0]])
# `f` and `g` will return the same value, but `g` will be executed as a
# TensorFlow graph.
assert f(x, y).numpy() == g(x, y).numpy()
# Tensors and tf.Variables used by the Python function are captured in the
# graph.
@tf.function
def h():
return f(x, y)
assert (h().numpy() == f(x, y).numpy()).all()
# Data-dependent control flow is also captured in the graph. Supported
# control flow statements include `if`, `for`, `while`, `break`, `continue`,
# `return`.
@tf.function
def g(x):
if tf.reduce_sum(x) > 0:
return x * x
else:
return -x // 2
# print and TensorFlow side effects are supported, but exercise caution when
# using Python side effects like mutating objects, saving to files, etc.
l = []
@tf.function
def g(x):
for i in x:
print(i) # Works
tf.compat.v1.assign(v, i) # Works
tf.compat.v1.py_func(lambda i: l.append(i))(i) # Works
l.append(i) # Caution! Doesn't work.
```
Note that unlike other TensorFlow operations, we don't convert python
numerical inputs to tensors. Moreover, a new graph is generated for each
distinct python numerical value, for example calling `g(2)` and `g(3)` will
generate two new graphs (while only one is generated if you call
`g(tf.constant(2))` and `g(tf.constant(3))`). Therefore, python numerical
inputs should be restricted to arguments that will have few distinct values,
such as hyperparameters like the number of layers in a neural network. This
allows TensorFlow to optimize each variant of the neural network.
_Referencing `tf.Variable`s_
The Python function `func` may reference stateful objects (such as
`tf.Variable`).
These are captured as implicit inputs to the callable returned by `function`.
For example:
```python
c = tf.Variable(0)
@tf.function
def f(x):
c.assign_add(1)
return x + tf.compat.v1.to_float(c)
assert int(c) == 0
assert f(1.0) == 2.0
assert int(c) == 1
assert f(1.0) == 3.0
assert int(c) == 2
```
`function` can be applied to methods of an object. For example:
```python
class Dense(object):
def __init__(self):
self.W = tf.Variable(tf.compat.v1.glorot_uniform_initializer()((10, 10)))
self.b = tf.Variable(tf.zeros(10))
@tf.function
def compute(self, x):
return tf.matmul(x, self.W) + self.b
d1 = Dense()
d2 = Dense()
x = tf.random.uniform((10, 10))
# d1 and d2 are using distinct variables
assert not (d1.compute(x).numpy() == d2.compute(x).numpy()).all()
```
_Usage with `tf.keras`_
The `call` methods of a `tf.keras.Model` subclass can be decorated with
`function` in order to apply graph execution optimizations on it.
For example:
```python
class MyModel(tf.keras.Model):
def __init__(self, keep_probability=0.2):
super(MyModel, self).__init__()
self.dense1 = tf.keras.layers.Dense(4)
self.dense2 = tf.keras.layers.Dense(5)
self.keep_probability = keep_probability
@tf.function
def call(self, inputs, training=True):
y = self.dense2(self.dense1(inputs))
if training:
return tf.nn.dropout(y, self.keep_probability)
else:
return y
model = MyModel()
model(x, training=True) # executes a graph, with dropout
model(x, training=False) # executes a graph, without dropout
```
_Input Signatures_
`function` instantiates a separate graph for every unique set of input
shapes and datatypes. For example, the following code snippet will result
in three distinct graphs being traced, as each input has a different
shape.
```python
@tf.function
def f(x): return tf.add(x, 1.)
scalar = tf.constant(1.0)
vector = tf.constant([1.0, 1.0])
matrix = tf.constant([[3.0]])
f(scalar)
f(vector)
f(matrix)
```
An "input signature" can be optionally provided to `function` to control
the graphs traced. The input signature specifies the shape and type of each
`Tensor` argument to the function using a `tf.TensorSpec` object. For example,
the following code snippet ensures that a single graph is created where the
input `Tensor` is required to be a floating point tensor with no restrictions
on shape.
```python
@tf.function(input_signature=[tf.TensorSpec(shape=None, dtype=tf.float32)])
def f(x): return tf.add(x, 1.)
```
When an `input_signature` is specified, the callable will convert the inputs
to the specified TensorSpecs.
_Tracing and staging_
When `autograph` is `True`, all Python control flow that depends on `Tensor`
values is staged into a TensorFlow graph. When `autograph` is `False`, the
function is traced and control flow is not allowed to depend on data.
  Note that `function` only stages TensorFlow operations; any Python code that
  `func` executes and that does not depend on data only shapes the
  _construction_ of the graph.
For example, consider the following:
```python
import numpy as np
def add_noise():
return tf.eye(5) + np.random.randn(5, 5)
traced = tf.function(add_noise)
```
`add_noise()` will return a different output every time it is invoked.
However, `traced()` will return the same value every time it is called,
since a particular random value generated by the `np.random.randn` call will
be inserted in the traced/staged TensorFlow graph as a constant. In this
particular example, replacing `np.random.randn(5, 5)` with
`tf.random.normal((5, 5))` will result in the same behavior for `add_noise()`
and `traced()`.
_Python Side-Effects_
A corollary of the previous discussion on tracing is the following: If a
Python function `func` has Python side-effects, then executing `func` multiple
times may not be semantically equivalent to executing `F = tf.function(func)`
multiple times; this difference is due to the fact that `function` only
captures the subgraph of TensorFlow operations that is constructed when `func`
is invoked to trace a graph.
The same is true if code with Python side effects is used inside control flow,
such as a loop. If your code uses side effects that are not intended to
control graph construction, wrap them inside `tf.compat.v1.py_func`.
_Retracing_
A single tf.function object might need to map to multiple computation graphs
under the hood. This should be visible only as performance (tracing graphs has
a nonzero computational and memory cost) but should not affect the correctness
of the program. A traced function should return the same result as it would
when run eagerly, assuming no unintended Python side-effects.
Calling a `tf.function` with tensor arguments of different dtypes should lead
to at least one computational graph per distinct set of dtypes. Alternatively,
always calling a `tf.function` with tensor arguments of the same shapes and
dtypes and the same non-tensor arguments should not lead to additional
retracings of your function.
Other than that, TensorFlow reserves the right to retrace functions as many
times as needed, to ensure that traced functions behave as they would when run
eagerly and to provide the best end-to-end performance. For example, the
behavior of how many traces TensorFlow will do when the function is repeatedly
called with different python scalars as arguments is left undefined to allow
for future optimizations.
To control the tracing behavior, use the following tools:
- different `tf.function` objects are guaranteed to not share traces; and
- specifying a signature or using concrete function objects returned from
get_concrete_function() guarantees that only one function graph will be
built.
Args:
func: function to be compiled. If `func` is None, returns a decorator that
can be invoked with a single argument - `func`. The end result is
equivalent to providing all the arguments up front. In other words,
`tf.function(input_signature=...)(func)` is equivalent to
`tf.function(func, input_signature=...)`. The former can be used to
decorate Python functions, for example:
@tf.function(input_signature=...)
def foo(...): ...
input_signature: A possibly nested sequence of `tf.TensorSpec` objects
specifying the shapes and dtypes of the Tensors that will be supplied to
this function. If `None`, a separate function is instantiated for each
inferred input signature. If input_signature is specified, every input to
`func` must be a `Tensor`, and `func` cannot accept `**kwargs`.
autograph: Whether autograph should be applied on `func` before tracing a
graph. This allows for dynamic control flow (Python if's, loops etc.)
in the traced graph. See https://www.tensorflow.org/guide/autograph for
more information.
experimental_autograph_options: Experimental knobs (in the form of a tuple
of tensorflow.autograph.Feature values) to control behavior when
autograph=True.
experimental_relax_shapes: When true, argument shapes may be relaxed to
      avoid unnecessary retracing.
Returns:
If `func` is not None, returns a callable that will execute the compiled
function (and return zero or more `tf.Tensor` objects).
If `func` is None, returns a decorator that, when invoked with a single
`func` argument, returns a callable equivalent to the case above.
Raises:
TypeError: If `input_signature` is neither `None` nor a sequence of
`TensorSpec` objects.
"""
if input_signature is not None:
function_lib.validate_signature(input_signature)
def decorated(inner_function):
try:
name = inner_function.__name__
except AttributeError:
name = "function"
return tf_decorator.make_decorator(
inner_function,
Function(
inner_function,
name,
input_signature=input_signature,
autograph=autograph,
experimental_autograph_options=experimental_autograph_options,
experimental_relax_shapes=experimental_relax_shapes))
# This code path is for the `foo = tf.function(foo, ...)` use case
if func is not None:
return decorated(func)
# This code path is for the
#
# @tf.function(...)
# def foo(...):
# ...
#
# use case, which is equivalent to `foo = tf.function(...)(foo)`
return decorated
|
|
import sublime
import sublime_plugin
try:
import thread
st_version = 2
except ImportError:
from threading import Thread
st_version = 3
import subprocess
import os
import stat
import functools
import re
sublime_guard_controller = None
class GuardController(object):
def __init__(self):
self.proc = None
self.running = False
self.auto_show_enabled = True
self.clear_when_find_this_text = None
def set_listener(self, listener):
self.listener = listener
if st_version == 2:
self.output_view = self.listener.window.get_output_panel('guard')
else:
self.output_view = self.listener.window.create_output_panel('guard')
self.enable_word_wrap()
self.set_color_scheme()
self.load_config()
return self
def open_file_paths(self):
return [view.file_name() for view in self.listener.window.views() if view.file_name()]
def open_folder_paths(self):
return self.listener.window.folders()
def path_has_guardfile(self, path):
return os.path.exists(path + '/Guardfile')
def find_project_root_path(self):
project_root_path = None
for path in self.open_folder_paths():
            print("Checking ... " + path)
if (self.path_has_guardfile(path)):
project_root_path = path
break
return project_root_path
def enable_word_wrap(self):
self.output_view.settings().set("word_wrap", True)
def set_color_scheme(self):
self.output_view.settings().set("syntax", "Packages/Guard/GuardOutput.tmLanguage")
self.output_view.settings().set("color_scheme", "Packages/Guard/GuardOutput.tmTheme")
def enable_auto_show(self):
self.auto_show_enabled = True
def disable_auto_show(self):
self.auto_show_enabled = False
def set_permissions(self, path):
os.chmod(path, stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IXOTH | stat.S_IROTH)
def start_guard(self):
project_root_path = self.find_project_root_path()
        if (project_root_path is None):
sublime.error_message("Failed to find Guardfile in any of the open folders.")
else:
package_path = sublime.packages_path()
self.set_permissions(package_path + "/Guard/guard_wrapper")
self.set_permissions(package_path + "/Guard/run_guard.sh")
cmd_array = [package_path + "/Guard/guard_wrapper", package_path + "/Guard/run_guard.sh", project_root_path]
self.proc = subprocess.Popen(cmd_array, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.running = True
self.show_guard_view_and_enable_autoshow()
if self.proc.stdout:
if st_version == 2:
thread.start_new_thread(self.read_stdout, ())
else:
Thread(target=self.read_stdout, args=()).start()
if self.proc.stderr:
if st_version == 2:
thread.start_new_thread(self.read_stderr, ())
else:
Thread(target=self.read_stderr, args=()).start()
def read_stdout(self):
while True:
data = os.read(self.proc.stdout.fileno(), 2 ** 15)
            if data:
sublime.set_timeout(functools.partial(self.append_data, data), 0)
else:
self.proc.stdout.close()
self.running = False
break
def read_stderr(self):
while True:
data = os.read(self.proc.stderr.fileno(), 2 ** 15)
            if data:
sublime.set_timeout(functools.partial(self.append_data, data), 0)
else:
self.proc.stderr.close()
self.running = False
break
def append_data(self, data):
if (self.auto_show_enabled):
self.show_guard_view()
clean_data = data.decode("utf-8")
clean_data = self.normalize_line_endings(clean_data)
clean_data = self.remove_terminal_color_codes(clean_data)
# actually append the data
if st_version == 2:
self.output_view.set_read_only(False)
edit = self.output_view.begin_edit()
# clear the output window when a predefined text is found.
if (self.clear_when_find_this_text and self.clear_when_find_this_text.search(clean_data)):
self.output_view.erase(edit, sublime.Region(0, self.output_view.size()))
self.output_view.insert(edit, self.output_view.size(), clean_data)
# scroll to the end of the new insert
self.scroll_to_end_of_guard_view()
self.output_view.end_edit(edit)
self.output_view.set_read_only(True)
else:
self.output_view.run_command('guard_message', {'string': clean_data})
self.scroll_to_end_of_guard_view()
def normalize_line_endings(self, data):
return data.replace('\r\n', '\n').replace('\r', '\n')
def remove_terminal_color_codes(self, data):
        color_regex = re.compile("\\033\\[[0-9;m]*", re.UNICODE)
return color_regex.sub("", data)
def scroll_to_end_of_guard_view(self):
(cur_row, _) = self.output_view.rowcol(self.output_view.size())
self.output_view.show(self.output_view.text_point(cur_row, 0))
def show_guard_view_and_enable_autoshow(self):
self.enable_auto_show()
self.show_guard_view()
def show_guard_view(self):
self.listener.window.run_command('show_panel', {'panel': 'output.guard'})
def hide_guard_view(self):
self.disable_auto_show()
self.listener.window.run_command('hide_panel', {'panel': 'output.guard'})
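    # The methods below drive Guard's interactive console by writing its
    # single-key commands to the subprocess's stdin ('e' exit, 'r' reload,
    # a bare newline to run all tests, 'h' help, 'n' toggle notifications,
    # 'p' pause); the mappings are inferred from the method names here.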
def stop_guard(self):
self.proc.stdin.write(b'e\n')
self.proc.stdin.flush()
self.running = False
def is_guard_running(self):
return self.running
def reload_guard(self):
self.proc.stdin.write(b'r\n')
self.proc.stdin.flush()
def run_all_tests(self):
self.proc.stdin.write(b'\n')
self.proc.stdin.flush()
def output_help(self):
self.proc.stdin.write(b'h\n')
self.proc.stdin.flush()
def toggle_notifications(self):
self.proc.stdin.write(b'n\n')
self.proc.stdin.flush()
def pause(self):
self.proc.stdin.write(b'p\n')
self.proc.stdin.flush()
def load_config(self):
s = sublime.load_settings("Guard.sublime-settings")
clear_text = s.get("clear_when_find_this_text")
if (clear_text):
self.clear_when_find_this_text = re.compile(clear_text)
else:
self.clear_when_find_this_text = None
def GuardControllerSingleton():
    global sublime_guard_controller
    if sublime_guard_controller is None:
        sublime_guard_controller = GuardController()
    return sublime_guard_controller
class StartGuardCommand(sublime_plugin.WindowCommand):
def run(self):
GuardControllerSingleton().set_listener(self).start_guard()
def is_enabled(self):
return not GuardControllerSingleton().is_guard_running()
class StopGuardCommand(sublime_plugin.WindowCommand):
def run(self):
GuardControllerSingleton().set_listener(self).stop_guard()
def is_enabled(self):
return GuardControllerSingleton().is_guard_running()
class HideGuardCommand(sublime_plugin.WindowCommand):
def run(self):
GuardControllerSingleton().set_listener(self).hide_guard_view()
def is_enabled(self):
return True
class ShowGuardCommand(sublime_plugin.WindowCommand):
def run(self):
GuardControllerSingleton().set_listener(self).show_guard_view_and_enable_autoshow()
def is_enabled(self):
return True
class ReloadGuardCommand(sublime_plugin.WindowCommand):
def run(self):
GuardControllerSingleton().set_listener(self).reload_guard()
def is_enabled(self):
return GuardControllerSingleton().is_guard_running()
class RunAllTestsGuardCommand(sublime_plugin.WindowCommand):
def run(self):
GuardControllerSingleton().set_listener(self).run_all_tests()
def is_enabled(self):
return GuardControllerSingleton().is_guard_running()
class RunAllTestsAndShowGuardCommand(sublime_plugin.WindowCommand):
def run(self):
GuardControllerSingleton().set_listener(self).show_guard_view()
GuardControllerSingleton().set_listener(self).run_all_tests()
def is_enabled(self):
return GuardControllerSingleton().is_guard_running()
class OutputHelpGuardCommand(sublime_plugin.WindowCommand):
def run(self):
GuardControllerSingleton().set_listener(self).output_help()
def is_enabled(self):
return GuardControllerSingleton().is_guard_running()
class ToggleNotificationsGuardCommand(sublime_plugin.WindowCommand):
def run(self):
GuardControllerSingleton().set_listener(self).toggle_notifications()
def is_enabled(self):
return GuardControllerSingleton().is_guard_running()
class PauseGuardCommand(sublime_plugin.WindowCommand):
def run(self):
GuardControllerSingleton().set_listener(self).pause()
def is_enabled(self):
return GuardControllerSingleton().is_guard_running()
class GuardMessageCommand(sublime_plugin.TextCommand):
"""
A command to write a message to the Guard messaging buffer
"""
def run(self, edit, string=''):
self.view.insert(edit, self.view.size(), string)
|
|
import os
from attacksurfacemeter.environments import Environments
from attacksurfacemeter.granularity import Granularity
from attacksurfacemeter.loaders.cflow_line_parser import CflowLineParser
from attacksurfacemeter.loaders.gprof_line_parser import GprofLineParser
from attacksurfacemeter.loaders.javacg_line_parser import JavaCGLineParser
class Call():
"""Represents a function or method in a system."""
_android_input_methods = []
_android_output_methods = []
_c_std_lib_functions = []
_c_input_functions = []
_c_output_functions = []
_c_dangerous_sys_calls = []
def __init__(self, name, signature, environment,
granularity=Granularity.FUNC):
"""Call constructor.
Parameters
----------
name : str
The name of the function represented by the object.
signature : str
A piece of information associated with the function represented by
this object. In the current implementation, it is the name of the
file where the function is defined.
environment : str
The environment of the function. See
attacksurfacemeter.environments.Environments for available choices.
granularity : str
The granularity of the call graph into which the instance of Call
will be added to. See attacksurfacemeter.granularity.Granularity
for available choices.
Returns
-------
call : Call
An instance of Call.
"""
self._function_name = name
self._function_signature = signature
self._environment = environment
if granularity not in [Granularity.FILE, Granularity.FUNC]:
raise Exception('Unsupported granularity {}'.format(granularity))
self._granularity = granularity
@classmethod
def from_cflow(cls, cflow_line, granularity=Granularity.FUNC):
"""Instantiate Call by parsing a line from cflow call graph.
Parameters
----------
cflow_line : str
A line of string from the cflow call graph.
granularity : str
The granularity of the call graph into which the instance of Call
will be added to. See attacksurfacemeter.granularity.Granularity
for available choices.
Returns
-------
new_instance : Call
An instance of Call.
"""
cflow_line_parser = CflowLineParser.get_instance(cflow_line)
new_instance = cls(
cflow_line_parser.get_function_name(),
cflow_line_parser.get_function_signature(),
Environments.C,
granularity
)
new_instance.level = cflow_line_parser.get_level()
return new_instance
@classmethod
def from_gprof(cls, gprof_line, granularity=Granularity.FUNC):
"""Instantiate Call by parsing a line from gprof call graph.
Parameters
----------
gprof_line : str
A line of string from the gprof call graph.
granularity : str
            The granularity of the call graph to which the instance of Call
            will be added. See attacksurfacemeter.granularity.Granularity
for available choices.
Returns
-------
new_instance : Call
An instance of Call.
"""
gprof_line_parser = GprofLineParser.get_instance(gprof_line)
new_instance = cls(
gprof_line_parser.get_function_name(),
gprof_line_parser.get_function_signature(),
Environments.C,
granularity
)
return new_instance
@classmethod
def from_javacg(cls, javacg_line, granularity=Granularity.FUNC):
"""Instantiate Call by parsing a line from Java call graph.
Parameters
----------
javacg_line : str
A line of string from the Java call graph.
granularity : str
            The granularity of the call graph to which the instance of Call
            will be added. See attacksurfacemeter.granularity.Granularity
for available choices.
Returns
-------
new_instance : Call
An instance of Call.
"""
javacg_line_parser = JavaCGLineParser.get_instance(javacg_line)
new_instance = cls(
javacg_line_parser.get_function_name(),
javacg_line_parser.get_function_signature(),
Environments.ANDROID,
granularity
)
new_instance.class_name = javacg_line_parser.get_class()
new_instance.package_name = javacg_line_parser.get_package()
return new_instance
def __repr__(self):
"""Return a string representation of the Call.
Returns
-------
call : str
A String representation of the Call.
"""
if self._environment == Environments.ANDROID:
return self._function_signature + '.' + self._function_name
else:
return self.identity
def __str__(self):
"""Return a string representation of the Call.
Returns
-------
call : str
A String representation of the Call.
"""
return self.__repr__()
def __hash__(self):
"""Return a number that uniquely identifies this instance.
Returns
-------
hash : int
A number that represents the calculated hash of this instance.
"""
return hash(self.identity)
def __eq__(self, other):
"""Override == operator to allow comparing two Call instances.
Parameters
----------
other : Call
An instance of Call to compare this instance to.
Returns
-------
is_equal : bool
True if this instance is equal to other, False otherwise.
"""
return self.identity == other.identity
def __ne__(self, other):
"""Override != operator to allow comparing two Call instances.
Parameters
----------
other : Call
An instance of Call to compare this instance to.
Returns
-------
is_notequal : bool
True if this instance is not equal to other, False otherwise.
"""
return self.identity != other.identity
@staticmethod
def _get_android_input_methods():
if not Call._android_input_methods:
Call._android_input_methods = Call._load_function_list(
'android_input_methods'
)
return Call._android_input_methods
@staticmethod
def _get_android_output_methods():
if not Call._android_output_methods:
Call._android_output_methods = Call._load_function_list(
'android_output_methods'
)
return Call._android_output_methods
@staticmethod
def _get_c_input_functions():
if not Call._c_input_functions:
Call._c_input_functions = Call._load_function_list(
'c_input_functions'
)
return Call._c_input_functions
@staticmethod
def _get_c_output_functions():
if not Call._c_output_functions:
Call._c_output_functions = Call._load_function_list(
'c_output_functions'
)
return Call._c_output_functions
@staticmethod
def _get_c_std_lib_functions():
if not Call._c_std_lib_functions:
Call._c_std_lib_functions = Call._load_function_list(
'c_std_lib_functions'
)
return Call._c_std_lib_functions
@staticmethod
def _get_c_dangerous_sys_calls():
if not Call._c_dangerous_sys_calls:
Call._c_dangerous_sys_calls = Call._load_function_list(
'c_dangerous_sys_calls'
)
return Call._c_dangerous_sys_calls
@staticmethod
def _load_function_list(function_list_file):
file_name = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'data',
function_list_file
)
with open(file_name) as f:
functions = f.read().splitlines()
return functions
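    # Illustrative sketch of the expected data file layout (an assumption, not
    # taken from this repository): each file under data/ lists one function or
    # method name per line, so a hypothetical data/c_input_functions containing
    #
    #     scanf
    #     fgets
    #     getchar
    #
    # would make _load_function_list('c_input_functions') return
    # ['scanf', 'fgets', 'getchar'].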
def is_input(self):
"""Return True if the function is standard input, False otherwise.
The list of standard input functions is taken from Appendix A of
"Pratyusa, K. Manadhata, and M. Wing Jeannette. "An Attack Surface
Metric." PhD diss., PhD thesis, Carnegie Mellon University, 2008."
See file data/c_input_functions for the list of input functions.
Parameters
----------
None
Returns
-------
is_input : bool
True if the function is standard input, False otherwise.
"""
is_input = False
if self._environment == Environments.C:
input_functions = Call._get_c_input_functions()
is_input = (
not self._function_signature and
self._function_name in input_functions
)
elif self._environment == Environments.ANDROID:
input_functions = Call._get_android_input_methods()
is_input = (
self._function_signature + "." + self._function_name
) in input_functions
return is_input
def is_output(self):
"""Return True if the function is standard output, False otherwise.
The list of standard output functions is taken from Appendix A of
"Pratyusa, K. Manadhata, and M. Wing Jeannette. "An Attack Surface
Metric." PhD diss., PhD thesis, Carnegie Mellon University, 2008."
See file data/c_output_functions for the list of output functions.
Parameters
----------
None
Returns
-------
is_output : bool
            True if the function is standard output, False otherwise.
"""
is_output = False
if self._environment == Environments.C:
output_functions = Call._get_c_output_functions()
is_output = (
not self._function_signature and
self._function_name in output_functions
)
elif self._environment == Environments.ANDROID:
output_functions = Call._get_android_output_methods()
is_output = (
self._function_signature + "." + self._function_name
) in output_functions
return is_output
def is_dangerous(self):
"""Return True if the function is a dangerous, False otherwise.
The list of dangerous system calls is taken from "Bernaschi, M.,
Gabrielli, E., & Mancini, L. V. (2000, November). Operating system
enhancements to prevent the misuse of system calls. In Proceedings of
the 7th ACM conference on Computer and communications security (pp.
174-183). ACM."
See file data/c_dangerous_sys_calls for the list of dangerous system
calls available in the C programming language.
Parameters
----------
None
Returns
-------
is_dangerous : bool
True if the function is dangerous, False otherwise.
"""
c_dangerous_sys_calls = Call._get_c_dangerous_sys_calls()
is_dangerous = (
not self._function_signature and
self._function_name in c_dangerous_sys_calls
)
return is_dangerous
def in_stdlib(self):
"""Return True if the function is part of C library, False otherwise.
The list of C standard library functions is taken from the list at
http://www.gnu.org/software/libc/manual/html_node/Function-Index.html
See file data/c_std_lib_functions for the list of C standard library
functions.
Parameters
----------
None
Returns
-------
in_stdlib : bool
            True if the function is part of the C library, False otherwise.
"""
c_std_lib_functions = Call._get_c_std_lib_functions()
in_stdlib = (
not self._function_signature and
self._function_name in c_std_lib_functions
)
return in_stdlib
@property
def identity(self):
"""Return a string that uniquely identifies this object.
Parameters
----------
None
Returns
-------
identity : str
The unique representation of this object.
"""
identity = None
if self._granularity == Granularity.FUNC:
identity = self._function_name
if self._function_signature:
identity += ' ' + self._function_signature
elif self._granularity == Granularity.FILE:
identity = self._function_signature
return identity
@property
def function_name(self):
"""Return the name of the function represented by this Call.
Parameters
----------
None
Returns
-------
name : str
The name of the function represented by this object.
"""
return self._function_name
@property
def function_signature(self):
"""Return the signature of the function represented by this object.
Parameters
----------
None
Returns
-------
signature : str
The signature of the function represented by this object.
"""
return self._function_signature
@property
def environment(self):
"""Return the environment of the function represented by this object.
Parameters
----------
None
Returns
-------
environment : str
The environment of the function represented by this object.
"""
return self._environment
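# Hedged usage sketch (not part of the original module). It shows how the
# granularity chosen at construction time changes a Call's identity and
# therefore its equality; the function and file names below are made up.
def _example_call_identity():
    func_a = Call('greet', './src/greetings.c', Environments.C,
                  granularity=Granularity.FUNC)
    func_b = Call('farewell', './src/greetings.c', Environments.C,
                  granularity=Granularity.FUNC)
    file_a = Call('greet', './src/greetings.c', Environments.C,
                  granularity=Granularity.FILE)
    file_b = Call('farewell', './src/greetings.c', Environments.C,
                  granularity=Granularity.FILE)
    # At function granularity the identity is "<name> <signature>", so the two
    # calls are distinct nodes; at file granularity only the defining file is
    # used, so they collapse into the same node.
    assert func_a != func_b
    assert func_a.identity == 'greet ./src/greetings.c'
    assert file_a == file_b
    assert file_a.identity == './src/greetings.c'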
|
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main pipeline code.
"""
import datetime
import urllib
from googleapiclient import discovery
from googleapiclient.http import MediaFileUpload
import httplib2
from oauth2client.service_account import ServiceAccountCredentials
import params
import requests
from retrying import retry
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from google.cloud import bigquery
GA_ACCOUNT_ID = params.GA_ACCOUNT_ID
GA_PROPERTY_ID = params.GA_PROPERTY_ID
GA_DATASET_ID = params.GA_DATASET_ID
GA_IMPORT_METHOD = params.GA_IMPORT_METHOD
BQML_PREDICT_QUERY = params.BQML_PREDICT_QUERY
GA_MP_STANDARD_HIT_DETAILS = params.GA_MP_STANDARD_HIT_DETAILS
ENABLED_LOGGING = params.ENABLE_BQ_LOGGING
ENABLED_EMAIL = params.ENABLE_SENDGRID_EMAIL_REPORTING
LOGS_BQ_TABLE = "{0}.{1}.{2}".format(params.GCP_PROJECT_ID,
params.BQ_DATASET_NAME,
params.BQ_TABLE_NAME)
SENDGRID_API_KEY = params.SENDGRID_API_KEY
TO_EMAIL = params.TO_EMAIL
FROM_EMAIL = params.FROM_EMAIL
SUBJECT = params.SUBJECT
HTML_CONTENT = params.HTML_CONTENT
SERVICE_ACCOUNT_FILE = "svc_key.json"
CSV_LOCATION = "/tmp/data.csv"
GA_MP_ENDPOINT = "https://www.google-analytics.com/batch"
GA_SCOPES = ["https://www.googleapis.com/auth/analytics.readonly",
"https://www.googleapis.com/auth/analytics.edit",
"https://www.googleapis.com/auth/analytics"]
GA_API_NAME = "analytics"
GA_API_VERSION = "v3"
CLOUD_SCOPES = ["https://www.googleapis.com/auth/cloud-platform"]
def authorize_ga_api():
"""Fetches the GA API obj.
Returns:
ga_api: GA API obj.
"""
ga_credentials = ServiceAccountCredentials.from_json_keyfile_name(
SERVICE_ACCOUNT_FILE, GA_SCOPES)
http = ga_credentials.authorize(http=httplib2.Http())
ga_api = discovery.build(GA_API_NAME, GA_API_VERSION, http=http)
return ga_api
def read_from_bq():
"""Reads the prediction query from Bigquery using BQML.
Returns:
dataframe: BQML model results dataframe.
"""
bq_client = bigquery.Client()
query_job = bq_client.query(BQML_PREDICT_QUERY)
results = query_job.result()
dataframe = results.to_dataframe()
if GA_IMPORT_METHOD == "di":
# assumes columns in BQ are named as ga_<name> e.g. ga_dimension1
# converts them to ga:clientId, ga:dimension1
dataframe.columns = [col_name.replace("_", ":")
for col_name in dataframe.columns.values]
return dataframe
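# Illustrative note on the rename above (the column names are hypothetical):
# with GA_IMPORT_METHOD == "di", result columns are renamed as follows:
#
#     ["ga_clientId", "ga_dimension1"]  ->  ["ga:clientId", "ga:dimension1"]
#
# Note that replace("_", ":") rewrites every underscore, so a name like
# "ga_custom_field" would become "ga:custom:field".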
def write_df_to_csv(df):
"""Converts BQML model results to CSV.
Args:
df: final results dataframe for GA export.
"""
csv_string = df.to_csv(index=False)
with open(CSV_LOCATION, "w+") as f:
f.write(csv_string)
def write_to_ga_via_di(ga_api):
"""Write the prediction results into GA via data import.
Args:
ga_api: Google Analytics Management API object.
"""
media = MediaFileUpload(CSV_LOCATION,
mimetype="application/octet-stream",
resumable=False)
ga_api.management().uploads().uploadData(
accountId=GA_ACCOUNT_ID,
webPropertyId=GA_PROPERTY_ID,
customDataSourceId=GA_DATASET_ID,
media_body=media).execute()
def delete_ga_prev_uploads(ga_api):
"""Delete previous GA data import files.
Args:
ga_api: Google Analytics Management API object.
"""
response = ga_api.management().uploads().list(
accountId=GA_ACCOUNT_ID,
webPropertyId=GA_PROPERTY_ID,
customDataSourceId=GA_DATASET_ID).execute()
uploads = response["items"]
cids = [upload["id"] for upload in uploads[1:]]
delete_request_body = {"customDataImportUids": cids}
ga_api.management().uploads().deleteUploadData(
accountId=GA_ACCOUNT_ID,
webPropertyId=GA_PROPERTY_ID,
customDataSourceId=GA_DATASET_ID,
body=delete_request_body).execute()
def send_mp_hit(payload_send, success_requests, failed_requests):
"""Send hit to Measurement Protocol endpoint.
Args:
payload_send: Measurement Protocol hit package to send
success_requests: list of successful batch requests to GA
failed_requests: list of failed batch requests to GA
Returns:
        A (success_requests, failed_requests) tuple with payload_send appended
        to the appropriate list.
"""
# Submit a POST request to Measurement Protocol endpoint
prepared = requests.Request("POST",
GA_MP_ENDPOINT,
data=payload_send).prepare()
print("Sending measurement protcol request to url " + prepared.url)
response = requests.Session().send(prepared)
if response.status_code not in range(200, 299):
print("Measurement Protocol submission unsuccessful status code: " +
str(response.status_code))
failed_requests.append(payload_send)
return success_requests, failed_requests
print("Measurement Protocol submission status code: " +
str(response.status_code))
success_requests.append(payload_send)
return success_requests, failed_requests
def prepare_payloads_for_batch_request(payloads):
"""Merges payloads to send them in a batch request.
Args:
payloads: list of payload, each payload being a dictionary.
Returns:
concatenated url-encoded payloads. For example:
            param1=value10&param2=value20
            param1=value11&param2=value21
"""
assert isinstance(payloads, list) or isinstance(payloads, tuple)
payloads_utf8 = [sorted([(k, str(p[k]).encode("utf-8")) for k in p],
key=lambda t: t[0]) for p in payloads]
return "\n".join(map(lambda p: urllib.parse.urlencode(p), payloads_utf8))
def write_to_ga_via_mp(df):
"""Write the prediction results into GA via Measurement Protocol.
Args:
df: BQML model results dataframe
"""
i = 0
success_requests, failed_requests, payload_list = list(), list(), list()
for row_index, bq_results_row in df.iterrows():
i += 1
hit_data = {}
for (ga_key, value) in GA_MP_STANDARD_HIT_DETAILS.items():
if value:
hit_data[ga_key] = value
# add additional information from BQ
for (column_name, column_value) in bq_results_row.iteritems():
hit_data[column_name] = column_value
payload_list.append(hit_data)
if i%20 == 0:
# batch the hits up
payload_send = prepare_payloads_for_batch_request(payload_list)
print("Payload to send: " + payload_send)
success_requests, failed_requests = send_mp_hit(payload_send,
success_requests,
failed_requests)
payload_list = list()
i = 0
# Issue last batch call
if i > 0:
print("Sending remaining items to GA")
payload_send = prepare_payloads_for_batch_request(payload_list)
print("Payload to send: " + payload_send)
success_requests, failed_requests = send_mp_hit(payload_send,
success_requests,
failed_requests)
print("Completed all GA calls. Total successful batches: " +
str(len(success_requests)) + " and failed batches: " +
str(len(failed_requests)))
if failed_requests:
print("Failed request details: " + failed_requests)
# Retry 2^x * 1000 milliseconds between each retry, up to 10 seconds,
# then 10 seconds afterwards - for 5 attempts
@retry(stop_max_attempt_number=5,
wait_exponential_multiplier=1000, wait_exponential_max=10000)
def write_to_bq_logs(status, message):
"""Write to BQ Logs.
Args:
status: status of the workflow run - SUCCESS or ERROR
message: Error message, if there's an error
"""
bq_client = bigquery.Client()
timestamp_utc = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
bq_insert_template_query = """INSERT INTO `{0}` VALUES ("{1}","{2}","{3}")"""
write_logs_query = bq_insert_template_query.format(LOGS_BQ_TABLE,
timestamp_utc,
status, message)
bq_client.query(write_logs_query)
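# Illustrative rendering of the statement above (table name and values are
# hypothetical): with LOGS_BQ_TABLE == "my-project.my_dataset.logs" the query
# sent to BigQuery would be
#     INSERT INTO `my-project.my_dataset.logs` VALUES ("2024-01-01 00:00:00","SUCCESS","")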
# Retry 2^x * 1000 milliseconds between each retry, up to 10 seconds,
# then 10 seconds afterwards - for 5 attempts
@retry(stop_max_attempt_number=5,
wait_exponential_multiplier=1000, wait_exponential_max=10000)
def send_email(error_message):
"""Delete previous GA data import files.
Args:
error_message: Error message.
Raises:
Exception: An exception for failed emails.
"""
timestamp_utc = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
email_content = HTML_CONTENT.format(timestamp_utc, error_message)
message = Mail(from_email=FROM_EMAIL,
to_emails=TO_EMAIL,
subject=SUBJECT,
html_content=email_content)
sg = SendGridAPIClient(SENDGRID_API_KEY)
response = sg.send(message)
if str(response.status_code)[0] != "2":
raise Exception("Email not sent.")
def trigger_workflow(request):
"""Code to trigger workflow.
Args:
request: HTTP request object.
Returns:
workflow_status: Success or Error.
"""
timestamp_utc = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
try:
df = read_from_bq()
if GA_IMPORT_METHOD == "di":
write_df_to_csv(df)
ga_api = authorize_ga_api()
write_to_ga_via_di(ga_api)
delete_ga_prev_uploads(ga_api)
elif GA_IMPORT_METHOD == "mp":
write_to_ga_via_mp(df)
else:
raise Exception("GA Export Method not found.")
if ENABLED_LOGGING:
write_to_bq_logs(status="SUCCESS", message="")
message = "{0},SUCCESS".format(timestamp_utc)
return message
except Exception as e:
if ENABLED_LOGGING:
write_to_bq_logs(status="ERROR", message=str(e))
if ENABLED_EMAIL:
send_email(error_message=str(e))
message = "{0},ERROR,{1}".format(timestamp_utc, str(e))
return message
print(trigger_workflow(request=None))
|
|
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
from functools import partial
from inspect import isclass
from operator import attrgetter
import six
import sqlalchemy as sa
from sqlalchemy.engine.interfaces import Dialect
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import mapperlib
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.exc import UnmappedInstanceError
from sqlalchemy.orm.properties import ColumnProperty, RelationshipProperty
from sqlalchemy.orm.query import _ColumnEntity
from sqlalchemy.orm.session import object_session
from sqlalchemy.orm.util import AliasedInsp
from sqlalchemy_utils.utils import is_sequence
def get_class_by_table(base, table, data=None):
"""
Return declarative class associated with given table. If no class is found
this function returns `None`. If multiple classes were found (polymorphic
cases) additional `data` parameter can be given to hint which class
to return.
::
class User(Base):
__tablename__ = 'entity'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
get_class_by_table(Base, User.__table__) # User class
This function also supports models using single table inheritance.
    An additional data parameter should be provided in these cases.
::
class Entity(Base):
__tablename__ = 'entity'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
type = sa.Column(sa.String)
__mapper_args__ = {
'polymorphic_on': type,
'polymorphic_identity': 'entity'
}
class User(Entity):
__mapper_args__ = {
'polymorphic_identity': 'user'
}
# Entity class
get_class_by_table(Base, Entity.__table__, {'type': 'entity'})
# User class
get_class_by_table(Base, Entity.__table__, {'type': 'user'})
:param base: Declarative model base
:param table: SQLAlchemy Table object
:param data: Data row to determine the class in polymorphic scenarios
:return: Declarative class or None.
"""
found_classes = set(
c for c in base._decl_class_registry.values()
if hasattr(c, '__table__') and c.__table__ is table
)
if len(found_classes) > 1:
if not data:
raise ValueError(
"Multiple declarative classes found for table '{0}'. "
"Please provide data parameter for this function to be able "
"to determine polymorphic scenarios.".format(
table.name
)
)
else:
for cls in found_classes:
mapper = sa.inspect(cls)
polymorphic_on = mapper.polymorphic_on.name
if polymorphic_on in data:
if data[polymorphic_on] == mapper.polymorphic_identity:
return cls
raise ValueError(
"Multiple declarative classes found for table '{0}'. Given "
"data row does not match any polymorphic identity of the "
"found classes.".format(
table.name
)
)
elif found_classes:
return found_classes.pop()
return None
def get_type(expr):
"""
Return the associated type with given Column, InstrumentedAttribute,
ColumnProperty, RelationshipProperty or other similar SQLAlchemy construct.
For constructs wrapping columns this is the column type. For relationships
this function returns the relationship mapper class.
:param expr:
SQLAlchemy Column, InstrumentedAttribute, ColumnProperty or other
similar SA construct.
::
class User(Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
class Article(Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
author_id = sa.Column(sa.Integer, sa.ForeignKey(User.id))
author = sa.orm.relationship(User)
get_type(User.__table__.c.name) # sa.String()
get_type(User.name) # sa.String()
get_type(User.name.property) # sa.String()
get_type(Article.author) # User
.. versionadded: 0.30.9
"""
if hasattr(expr, 'type'):
return expr.type
elif isinstance(expr, InstrumentedAttribute):
expr = expr.property
if isinstance(expr, ColumnProperty):
return expr.columns[0].type
elif isinstance(expr, RelationshipProperty):
return expr.mapper.class_
raise TypeError("Couldn't inspect type.")
def get_column_key(model, column):
"""
Return the key for given column in given model.
:param model: SQLAlchemy declarative model object
::
class User(Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column('_name', sa.String)
get_column_key(User, User.__table__.c._name) # 'name'
.. versionadded: 0.26.5
.. versionchanged: 0.27.11
Throws UnmappedColumnError instead of ValueError when no property was
found for given column. This is consistent with how SQLAlchemy works.
"""
mapper = sa.inspect(model)
try:
return mapper.get_property_by_column(column).key
except sa.orm.exc.UnmappedColumnError:
for key, c in mapper.columns.items():
if c.name == column.name and c.table is column.table:
return key
raise sa.orm.exc.UnmappedColumnError(
'No column %s is configured on mapper %s...' %
(column, mapper)
)
def get_mapper(mixed):
"""
Return related SQLAlchemy Mapper for given SQLAlchemy object.
:param mixed: SQLAlchemy Table / Alias / Mapper / declarative model object
::
from sqlalchemy_utils import get_mapper
get_mapper(User)
get_mapper(User())
get_mapper(User.__table__)
get_mapper(User.__mapper__)
get_mapper(sa.orm.aliased(User))
get_mapper(sa.orm.aliased(User.__table__))
Raises:
ValueError: if multiple mappers were found for given argument
.. versionadded: 0.26.1
"""
if isinstance(mixed, sa.orm.query._MapperEntity):
mixed = mixed.expr
elif isinstance(mixed, sa.Column):
mixed = mixed.table
elif isinstance(mixed, sa.orm.query._ColumnEntity):
mixed = mixed.expr
if isinstance(mixed, sa.orm.Mapper):
return mixed
if isinstance(mixed, sa.orm.util.AliasedClass):
return sa.inspect(mixed).mapper
if isinstance(mixed, sa.sql.selectable.Alias):
mixed = mixed.element
if isinstance(mixed, AliasedInsp):
return mixed.mapper
if isinstance(mixed, sa.orm.attributes.InstrumentedAttribute):
mixed = mixed.class_
if isinstance(mixed, sa.Table):
mappers = [
mapper for mapper in mapperlib._mapper_registry
if mixed in mapper.tables
]
if len(mappers) > 1:
raise ValueError(
"Multiple mappers found for table '%s'." % mixed.name
)
elif not mappers:
raise ValueError(
"Could not get mapper for table '%s'." % mixed.name
)
else:
return mappers[0]
if not isclass(mixed):
mixed = type(mixed)
return sa.inspect(mixed)
def get_bind(obj):
"""
Return the bind for given SQLAlchemy Engine / Connection / declarative
model object.
:param obj: SQLAlchemy Engine / Connection / declarative model object
::
from sqlalchemy_utils import get_bind
get_bind(session) # Connection object
get_bind(user)
"""
if hasattr(obj, 'bind'):
conn = obj.bind
else:
try:
conn = object_session(obj).bind
except UnmappedInstanceError:
conn = obj
if not hasattr(conn, 'execute'):
raise TypeError(
'This method accepts only Session, Engine, Connection and '
'declarative model objects.'
)
return conn
def get_primary_keys(mixed):
"""
Return an OrderedDict of all primary keys for given Table object,
declarative class or declarative class instance.
:param mixed:
SA Table object, SA declarative class or SA declarative class instance
::
get_primary_keys(User)
get_primary_keys(User())
get_primary_keys(User.__table__)
get_primary_keys(User.__mapper__)
get_primary_keys(sa.orm.aliased(User))
get_primary_keys(sa.orm.aliased(User.__table__))
.. versionchanged: 0.25.3
Made the function return an ordered dictionary instead of generator.
This change was made to support primary key aliases.
Renamed this function to 'get_primary_keys', formerly 'primary_keys'
.. seealso:: :func:`get_columns`
"""
return OrderedDict(
(
(key, column) for key, column in get_columns(mixed).items()
if column.primary_key
)
)
def get_tables(mixed):
"""
Return a set of tables associated with given SQLAlchemy object.
Let's say we have three classes which use joined table inheritance
TextItem, Article and BlogPost. Article and BlogPost inherit TextItem.
::
get_tables(Article) # set([Table('article', ...), Table('text_item')])
get_tables(Article())
get_tables(Article.__mapper__)
If the TextItem entity is using with_polymorphic='*' then this function
returns all child tables (article and blog_post) as well.
::
get_tables(TextItem) # set([Table('text_item', ...)], ...])
.. versionadded: 0.26.0
:param mixed:
SQLAlchemy Mapper, Declarative class, Column, InstrumentedAttribute or
a SA Alias object wrapping any of these objects.
"""
if isinstance(mixed, sa.Table):
return [mixed]
elif isinstance(mixed, sa.Column):
return [mixed.table]
elif isinstance(mixed, sa.orm.attributes.InstrumentedAttribute):
return mixed.parent.tables
elif isinstance(mixed, sa.orm.query._ColumnEntity):
mixed = mixed.expr
mapper = get_mapper(mixed)
polymorphic_mappers = get_polymorphic_mappers(mapper)
if polymorphic_mappers:
tables = sum((m.tables for m in polymorphic_mappers), [])
else:
tables = mapper.tables
return tables
def get_columns(mixed):
"""
Return a collection of all Column objects for given SQLAlchemy
object.
The type of the collection depends on the type of the object to return the
columns from.
::
get_columns(User)
get_columns(User())
get_columns(User.__table__)
get_columns(User.__mapper__)
get_columns(sa.orm.aliased(User))
        get_columns(sa.orm.aliased(User.__table__))
:param mixed:
SA Table object, SA Mapper, SA declarative class, SA declarative class
instance or an alias of any of these objects
"""
if isinstance(mixed, sa.Table):
return mixed.c
if isinstance(mixed, sa.orm.util.AliasedClass):
return sa.inspect(mixed).mapper.columns
if isinstance(mixed, sa.sql.selectable.Alias):
return mixed.c
if isinstance(mixed, sa.orm.Mapper):
return mixed.columns
if not isclass(mixed):
mixed = mixed.__class__
return sa.inspect(mixed).columns
def table_name(obj):
"""
Return table name of given target, declarative class or the
table name where the declarative attribute is bound to.
"""
class_ = getattr(obj, 'class_', obj)
try:
return class_.__tablename__
except AttributeError:
pass
try:
return class_.__table__.name
except AttributeError:
pass
def getattrs(obj, attrs):
return map(partial(getattr, obj), attrs)
def quote(mixed, ident):
"""
Conditionally quote an identifier.
::
from sqlalchemy_utils import quote
engine = create_engine('sqlite:///:memory:')
quote(engine, 'order')
# '"order"'
quote(engine, 'some_other_identifier')
# 'some_other_identifier'
:param mixed: SQLAlchemy Session / Connection / Engine / Dialect object.
:param ident: identifier to conditionally quote
"""
if isinstance(mixed, Dialect):
dialect = mixed
else:
dialect = get_bind(mixed).dialect
return dialect.preparer(dialect).quote(ident)
def query_labels(query):
"""
Return all labels for given SQLAlchemy query object.
Example::
query = session.query(
Category,
db.func.count(Article.id).label('articles')
)
query_labels(query) # ['articles']
:param query: SQLAlchemy Query object
"""
return [
entity._label_name for entity in query._entities
if isinstance(entity, _ColumnEntity) and entity._label_name
]
def get_query_entities(query):
"""
Return a list of all entities present in given SQLAlchemy query object.
Examples::
from sqlalchemy_utils import get_query_entities
query = session.query(Category)
get_query_entities(query) # [<Category>]
query = session.query(Category.id)
get_query_entities(query) # [<Category>]
This function also supports queries with joins.
::
query = session.query(Category).join(Article)
get_query_entities(query) # [<Category>, <Article>]
.. versionchanged: 0.26.7
This function now returns a list instead of generator
:param query: SQLAlchemy Query object
"""
exprs = [
d['expr']
if is_labeled_query(d['expr']) or isinstance(d['expr'], sa.Column)
else d['entity']
for d in query.column_descriptions
]
return [
get_query_entity(expr) for expr in exprs
] + [
get_query_entity(entity) for entity in query._join_entities
]
def is_labeled_query(expr):
return (
isinstance(expr, sa.sql.elements.Label) and
isinstance(
list(expr.base_columns)[0],
(sa.sql.selectable.Select, sa.sql.selectable.ScalarSelect)
)
)
def get_query_entity(expr):
if isinstance(expr, sa.orm.attributes.InstrumentedAttribute):
return expr.parent.class_
elif isinstance(expr, sa.Column):
return expr.table
elif isinstance(expr, AliasedInsp):
return expr.entity
return expr
def get_query_entity_by_alias(query, alias):
entities = get_query_entities(query)
if not alias:
return entities[0]
for entity in entities:
if isinstance(entity, sa.orm.util.AliasedClass):
name = sa.inspect(entity).name
else:
name = get_mapper(entity).tables[0].name
if name == alias:
return entity
def get_polymorphic_mappers(mixed):
if isinstance(mixed, AliasedInsp):
return mixed.with_polymorphic_mappers
else:
return mixed.polymorphic_map.values()
def get_query_descriptor(query, entity, attr):
if attr in query_labels(query):
return attr
else:
entity = get_query_entity_by_alias(query, entity)
if entity:
descriptor = get_descriptor(entity, attr)
if (
hasattr(descriptor, 'property') and
isinstance(descriptor.property, sa.orm.RelationshipProperty)
):
return
return descriptor
def get_descriptor(entity, attr):
mapper = sa.inspect(entity)
for key, descriptor in get_all_descriptors(mapper).items():
if attr == key:
prop = (
descriptor.property
if hasattr(descriptor, 'property')
else None
)
if isinstance(prop, ColumnProperty):
if isinstance(entity, sa.orm.util.AliasedClass):
for c in mapper.selectable.c:
if c.key == attr:
return c
else:
# If the property belongs to a class that uses
# polymorphic inheritance we have to take into account
# situations where the attribute exists in child class
# but not in parent class.
return getattr(prop.parent.class_, attr)
else:
# Handle synonyms, relationship properties and hybrid
# properties
try:
return getattr(mapper.class_, attr)
except AttributeError:
pass
def get_all_descriptors(expr):
insp = sa.inspect(expr)
polymorphic_mappers = get_polymorphic_mappers(insp)
if polymorphic_mappers:
attrs = dict(get_mapper(expr).all_orm_descriptors)
for submapper in polymorphic_mappers:
for key, descriptor in submapper.all_orm_descriptors.items():
if key not in attrs:
attrs[key] = descriptor
return attrs
return get_mapper(expr).all_orm_descriptors
def get_hybrid_properties(model):
"""
Returns a dictionary of hybrid property keys and hybrid properties for
given SQLAlchemy declarative model / mapper.
Consider the following model
::
from sqlalchemy.ext.hybrid import hybrid_property
class Category(Base):
__tablename__ = 'category'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.Unicode(255))
@hybrid_property
def lowercase_name(self):
return self.name.lower()
@lowercase_name.expression
def lowercase_name(cls):
return sa.func.lower(cls.name)
You can now easily get a list of all hybrid property names
::
from sqlalchemy_utils import get_hybrid_properties
get_hybrid_properties(Category).keys() # ['lowercase_name']
.. versionchanged: 0.26.7
This function now returns a dictionary instead of generator
:param model: SQLAlchemy declarative model or mapper
"""
return dict(
(key, prop)
for key, prop in sa.inspect(model).all_orm_descriptors.items()
if isinstance(prop, hybrid_property)
)
def get_declarative_base(model):
"""
Returns the declarative base for given model class.
:param model: SQLAlchemy declarative model
"""
for parent in model.__bases__:
try:
parent.metadata
return get_declarative_base(parent)
except AttributeError:
pass
return model
def getdotattr(obj_or_class, dot_path, condition=None):
"""
Allow dot-notated strings to be passed to `getattr`.
::
getdotattr(SubSection, 'section.document')
getdotattr(subsection, 'section.document')
:param obj_or_class: Any object or class
:param dot_path: Attribute path with dot mark as separator
"""
last = obj_or_class
for path in str(dot_path).split('.'):
getter = attrgetter(path)
if is_sequence(last):
tmp = []
for element in last:
value = getter(element)
if is_sequence(value):
tmp.extend(value)
else:
tmp.append(value)
last = tmp
elif isinstance(last, InstrumentedAttribute):
last = getter(last.property.mapper.class_)
elif last is None:
return None
else:
last = getter(last)
if condition is not None:
if is_sequence(last):
last = [v for v in last if condition(v)]
else:
if not condition(last):
return None
return last
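# Hedged usage sketch (not part of sqlalchemy_utils). It walks a dot path over
# plain objects; the class and attribute names below are made up. getdotattr
# also accepts SQLAlchemy constructs and sequences, as the implementation
# above shows.
def _example_getdotattr():
    class Document(object):
        name = 'manual'
    class Section(object):
        document = Document()
    class SubSection(object):
        section = Section()
    assert getdotattr(SubSection(), 'section.document.name') == 'manual'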
def is_deleted(obj):
return obj in sa.orm.object_session(obj).deleted
def has_changes(obj, attrs=None, exclude=None):
"""
Simple shortcut function for checking if given attributes of given
declarative model object have changed during the session. Without
    parameters this checks if the given object has any modifications. Additionally
exclude parameter can be given to check if given object has any changes
in any attributes other than the ones given in exclude.
::
from sqlalchemy_utils import has_changes
user = User()
has_changes(user, 'name') # False
user.name = u'someone'
has_changes(user, 'name') # True
has_changes(user) # True
You can check multiple attributes as well.
::
has_changes(user, ['age']) # True
has_changes(user, ['name', 'age']) # True
This function also supports excluding certain attributes.
::
has_changes(user, exclude=['name']) # False
has_changes(user, exclude=['age']) # True
.. versionchanged: 0.26.6
Added support for multiple attributes and exclude parameter.
:param obj: SQLAlchemy declarative model object
:param attrs: Names of the attributes
:param exclude: Names of the attributes to exclude
"""
if attrs:
if isinstance(attrs, six.string_types):
return (
sa.inspect(obj)
.attrs
.get(attrs)
.history
.has_changes()
)
else:
return any(has_changes(obj, attr) for attr in attrs)
else:
if exclude is None:
exclude = []
return any(
attr.history.has_changes()
for key, attr in sa.inspect(obj).attrs.items()
if key not in exclude
)
def is_loaded(obj, prop):
"""
Return whether or not given property of given object has been loaded.
::
class Article(Base):
__tablename__ = 'article'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String)
content = sa.orm.deferred(sa.Column(sa.String))
article = session.query(Article).get(5)
        # name gets loaded since it's not a deferred property
assert is_loaded(article, 'name')
        # content has not yet been loaded since it's a deferred property
assert not is_loaded(article, 'content')
.. versionadded: 0.27.8
:param obj: SQLAlchemy declarative model object
:param prop: Name of the property or InstrumentedAttribute
"""
return not isinstance(
getattr(sa.inspect(obj).attrs, prop).loaded_value,
sa.util.langhelpers._symbol
)
def identity(obj_or_class):
"""
Return the identity of given sqlalchemy declarative model class or instance
    as a tuple. This differs from obj._sa_instance_state.identity in that it
    always returns the identity even if the object is still in a transient
    state (a new object that is not yet persisted into the database). For
    classes it returns the identity attributes.
::
from sqlalchemy import inspect
from sqlalchemy_utils import identity
user = User(name=u'John Matrix')
session.add(user)
identity(user) # None
inspect(user).identity # None
session.flush() # User now has id but is still in transient state
identity(user) # (1,)
inspect(user).identity # None
session.commit()
identity(user) # (1,)
inspect(user).identity # (1, )
You can also use identity for classes::
identity(User) # (User.id, )
.. versionadded: 0.21.0
:param obj: SQLAlchemy declarative model object
"""
return tuple(
getattr(obj_or_class, column_key)
for column_key in get_primary_keys(obj_or_class).keys()
)
def naturally_equivalent(obj, obj2):
"""
Returns whether or not two given SQLAlchemy declarative instances are
naturally equivalent (all their non primary key properties are equivalent).
::
from sqlalchemy_utils import naturally_equivalent
user = User(name=u'someone')
user2 = User(name=u'someone')
user == user2 # False
naturally_equivalent(user, user2) # True
:param obj: SQLAlchemy declarative model object
:param obj2: SQLAlchemy declarative model object to compare with `obj`
"""
for column_key, column in sa.inspect(obj.__class__).columns.items():
if column.primary_key:
continue
if not (getattr(obj, column_key) == getattr(obj2, column_key)):
return False
return True
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Function for loading TensorFlow plugins."""
import errno
import hashlib
import importlib
import os
import platform
import sys
from tensorflow.python.client import pywrap_tf_session as py_tf
from tensorflow.python.eager import context
from tensorflow.python.framework import _pywrap_python_op_gen
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export('load_op_library')
def load_op_library(library_filename):
"""Loads a TensorFlow plugin, containing custom ops and kernels.
Pass "library_filename" to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here. When the
library is loaded, ops and kernels registered in the library via the
`REGISTER_*` macros are made available in the TensorFlow process. Note
that ops with the same name as an existing op are rejected and not
registered with the process.
Args:
library_filename: Path to the plugin.
Relative or absolute filesystem path to a dynamic library file.
Returns:
A python module containing the Python wrappers for Ops defined in
the plugin.
Raises:
RuntimeError: when unable to load the library or get the python wrappers.
"""
lib_handle = py_tf.TF_LoadLibrary(library_filename)
try:
wrappers = _pywrap_python_op_gen.GetPythonWrappers(
py_tf.TF_GetOpList(lib_handle))
finally:
# Delete the library handle to release any memory held in C
# that are no longer needed.
py_tf.TF_DeleteLibraryHandle(lib_handle)
# Get a unique name for the module.
module_name = hashlib.sha1(wrappers).hexdigest()
if module_name in sys.modules:
return sys.modules[module_name]
module_spec = importlib.machinery.ModuleSpec(module_name, None)
module = importlib.util.module_from_spec(module_spec)
# pylint: disable=exec-used
exec(wrappers, module.__dict__)
# Allow this to be recognized by AutoGraph.
setattr(module, '_IS_TENSORFLOW_PLUGIN', True)
sys.modules[module_name] = module
return module
@deprecation.deprecated(date=None,
instructions='Use `tf.load_library` instead.')
@tf_export(v1=['load_file_system_library'])
def load_file_system_library(library_filename):
"""Loads a TensorFlow plugin, containing file system implementation.
Pass `library_filename` to a platform-specific mechanism for dynamically
loading a library. The rules for determining the exact location of the
library are platform-specific and are not documented here.
Args:
library_filename: Path to the plugin.
Relative or absolute filesystem path to a dynamic library file.
Returns:
None.
Raises:
RuntimeError: when unable to load the library.
"""
py_tf.TF_LoadLibrary(library_filename)
def _is_shared_object(filename):
"""Check the file to see if it is a shared object, only using extension."""
if platform.system() == 'Linux':
if filename.endswith('.so'):
return True
else:
index = filename.rfind('.so.')
if index == -1:
return False
else:
# A shared object with the API version in filename
return filename[index + 4].isdecimal()
elif platform.system() == 'Darwin':
return filename.endswith('.dylib')
elif platform.system() == 'Windows':
return filename.endswith('.dll')
else:
return False
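# Illustrative expectations for the helper above (the filenames are
# hypothetical):
#     Linux:   'libfoo.so' -> True, 'libfoo.so.2' -> True, 'libfoo.txt' -> False
#     Darwin:  'libfoo.dylib' -> True
#     Windows: 'foo.dll' -> True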
@tf_export('load_library')
def load_library(library_location):
"""Loads a TensorFlow plugin.
"library_location" can be a path to a specific shared object, or a folder.
If it is a folder, all shared objects that are named "libtfkernel*" will be
loaded. When the library is loaded, kernels registered in the library via the
`REGISTER_*` macros are made available in the TensorFlow process.
Args:
library_location: Path to the plugin or the folder of plugins.
Relative or absolute filesystem path to a dynamic library file or folder.
Returns:
None
Raises:
OSError: When the file to be loaded is not found.
RuntimeError: when unable to load the library.
"""
if os.path.exists(library_location):
if os.path.isdir(library_location):
directory_contents = os.listdir(library_location)
kernel_libraries = [
os.path.join(library_location, f) for f in directory_contents
if _is_shared_object(f)]
else:
kernel_libraries = [library_location]
for lib in kernel_libraries:
py_tf.TF_LoadLibrary(lib)
else:
raise OSError(
errno.ENOENT,
'The file or folder to load kernel libraries from does not exist.',
library_location)
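# Hedged usage sketch (the path below is hypothetical): load_library accepts
# either a single shared object or a directory containing shared objects.
#
#     import tensorflow as tf
#     tf.load_library('/opt/custom_kernels/libtfkernel_my_op.so')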
def load_pluggable_device_library(library_location):
"""Loads a TensorFlow PluggableDevice plugin.
"library_location" can be a path to a specific shared object, or a folder.
If it is a folder, all shared objects will be loaded. when the library is
loaded, devices/kernels registered in the library via StreamExecutor C API
and Kernel/Op Registration C API are made available in TensorFlow process.
Args:
library_location: Path to the plugin or folder of plugins. Relative or
absolute filesystem path to a dynamic library file or folder.
Raises:
OSError: When the file to be loaded is not found.
RuntimeError: when unable to load the library.
"""
if os.path.exists(library_location):
if os.path.isdir(library_location):
directory_contents = os.listdir(library_location)
pluggable_device_libraries = [
os.path.join(library_location, f)
for f in directory_contents
if _is_shared_object(f)
]
else:
pluggable_device_libraries = [library_location]
for lib in pluggable_device_libraries:
py_tf.TF_LoadPluggableDeviceLibrary(lib)
# Reinitialized physical devices list after plugin registration.
context.context().reinitialize_physical_devices()
else:
raise OSError(
errno.ENOENT,
'The file or folder to load pluggable device libraries from does not '
'exist.', library_location)
@tf_export('experimental.register_filesystem_plugin')
def register_filesystem_plugin(plugin_location):
"""Loads a TensorFlow FileSystem plugin.
Args:
plugin_location: Path to the plugin. Relative or absolute filesystem plugin
path to a dynamic library file.
Returns:
None
Raises:
OSError: When the file to be loaded is not found.
RuntimeError: when unable to load the library.
"""
if os.path.exists(plugin_location):
py_tf.TF_RegisterFilesystemPlugin(plugin_location)
else:
raise OSError(errno.ENOENT,
'The file to load file system plugin from does not exist.',
plugin_location)
|
|
import os
import bento.core.node
from bento.errors import \
InvalidPackage
from bento.core import \
PackageDescription
from bento.core.pkg_objects import \
Executable
from bento.utils.utils import \
is_string
_PKG_TO_DIST = {
"ext_modules": lambda pkg: [v for v in \
pkg.extensions.values()],
"platforms": lambda pkg: [v for v in pkg.platforms],
"packages": lambda pkg: [v for v in pkg.packages],
"py_modules": lambda pkg: [v for v in pkg.py_modules],
}
_META_PKG_TO_DIST = {}
def _setup():
for k in ["name", "url", "author", "author_email", "maintainer",
"maintainer_email", "license", "download_url"]:
def _f(attr):
return lambda pkg: getattr(pkg, attr)
_META_PKG_TO_DIST[k] = _f(k)
_META_PKG_TO_DIST["long_description"] = lambda pkg: pkg.description
_META_PKG_TO_DIST["description"] = lambda pkg: pkg.summary
def _version_(pkg):
if pkg.version is None:
return "UNKNOWN"
else:
return pkg.version
_META_PKG_TO_DIST["version"] = _version_
_setup()
_PKG_TO_DIST.update(_META_PKG_TO_DIST)
def pkg_to_distutils_meta(pkg):
"""Obtain meta data information from pkg into a dictionary which may be
used directly as an argument for setup function in distutils."""
d = {}
for k, v in _META_PKG_TO_DIST.items():
d[k] = v(pkg)
return d
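# Illustrative result (the field values are hypothetical): for a package with
# name='foo', no version, summary='A tool' and description='Long text',
# pkg_to_distutils_meta() yields
#     {'name': 'foo', 'version': 'UNKNOWN', 'description': 'A tool',
#      'long_description': 'Long text', ...}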
def pkg_to_distutils_meta_pkg_info(pkg):
meta = pkg_to_distutils_meta(pkg)
meta["summary"] = meta.pop("description")
meta["description"] = meta.pop("long_description")
return meta
def pkg_to_distutils(pkg):
"""Convert PackageDescription instance to a dict which may be used
as argument to distutils/setuptools setup function."""
d = {}
for k, v in _PKG_TO_DIST.items():
d[k] = v(pkg)
return d
def validate_package(pkg_name, base_node):
"""Given a python package name, check whether it is indeed an existing
package.
    The package is looked up relative to base_node.
# XXX: this function is wrong - use the code from setuptools
pkg_dir = pkg_name.replace(".", os.path.sep)
pkg_node = base_node.find_node(pkg_dir)
if pkg_node is None:
raise InvalidPackage("directory %s in %s does not exist" % (pkg_dir, base_node.abspath()))
init = pkg_node.find_node('__init__.py')
if init is None:
raise InvalidPackage(
"Missing __init__.py in package %s (in directory %s)"
% (pkg_name, base_node.abspath()))
return pkg_node
def find_package(pkg_name, base_node):
"""Given a python package name, find all its modules relatively to
base_node."""
pkg_node = validate_package(pkg_name, base_node)
ret = []
for f in pkg_node.listdir():
if f.endswith(".py"):
node = pkg_node.find_node(f)
ret.append(node.path_from(base_node))
return ret
def validate_packages(pkgs, top):
ret_pkgs = []
for pkg in pkgs:
try:
validate_package(pkg, top)
except InvalidPackage:
# FIXME: add the package as data here
pass
else:
ret_pkgs.append(pkg)
return ret_pkgs
def distutils_to_package_description(dist):
root = bento.core.node.Node("", None)
top = root.find_dir(os.getcwd())
data = {}
data['name'] = dist.get_name()
data['version'] = dist.get_version()
data['author'] = dist.get_author()
data['author_email'] = dist.get_author_email()
data['maintainer'] = dist.get_contact()
data['maintainer_email'] = dist.get_contact_email()
data['summary'] = dist.get_description()
    data['description'] = dist.get_long_description().replace("#", "\\#")
data['license'] = dist.get_license()
data['platforms'] = dist.get_platforms()
data['download_url'] = dist.get_download_url()
data['url'] = dist.get_url()
# XXX: reliable way to detect whether Distribution was monkey-patched by
# setuptools
try:
reqs = getattr(dist, "install_requires")
# FIXME: how to detect this correctly
if is_string(reqs):
reqs = [reqs]
data['install_requires'] = reqs
except AttributeError:
pass
if dist.py_modules is None:
data['py_modules'] = []
else:
data['py_modules'] = dist.py_modules
if dist.packages is None:
packages = []
else:
packages = dist.packages
data['packages'] = validate_packages(packages, top)
if dist.ext_modules:
data['extensions'] = dict([(e.name, e) for e in dist.ext_modules])
else:
data['extensions'] = {}
data['classifiers'] = dist.get_classifiers()
data["executables"] = {}
entry_points = entry_points_from_dist(dist)
if entry_points:
console_scripts = entry_points.get("console_scripts", [])
for entry in console_scripts:
exe = Executable.from_representation(entry)
data["executables"][exe.name] = exe
return PackageDescription(**data)
_DIST_CONV_DICT = {
"long_description": lambda meta: meta.description,
"description": lambda meta: meta.summary,
# TODO: keywords not implemented yet
"keywords": lambda meta: [],
"fullname": lambda meta: "%s-%s" % (meta.name, meta.version),
"contact": lambda meta: (meta.maintainer or
meta.author or
"UNKNOWN"),
"contact_email": lambda meta: (meta.maintainer_email or
meta.author_email or
"UNKNOWN"),
"requires": lambda meta: meta.install_requires,
"provides": lambda meta: [],
"obsoletes": lambda meta: []
}
def to_distutils_meta(meta):
from bento.compat.dist \
import \
DistributionMetadata
ret = DistributionMetadata()
for m in ret._METHOD_BASENAMES:
try:
val = _DIST_CONV_DICT[m](meta)
except KeyError:
val = getattr(meta, m)
setattr(ret, m, val)
return ret
def write_pkg_info(pkg, file):
dist_meta = to_distutils_meta(pkg)
dist_meta.write_pkg_file(file)
def entry_points_from_dist(dist):
if hasattr(dist, "entry_points"):
from pkg_resources import split_sections
if is_string(dist.entry_points):
entry_points = {}
sections = split_sections(dist.entry_points)
for group, lines in sections:
group = group.strip()
entry_points[group] = lines
else:
entry_points = dist.entry_points
else:
entry_points = {}
return entry_points
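# Hedged usage sketch (the distribution object below is a stand-in for a
# setuptools Distribution; the entry point is made up).
def _example_entry_points_from_dist():
    class _FakeDist(object):
        entry_points = "[console_scripts]\nfoo = foo.cli:main\n"
    # The string form is parsed into {'console_scripts': ['foo = foo.cli:main']};
    # a dict-valued entry_points attribute would be returned unchanged.
    return entry_points_from_dist(_FakeDist())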
|
|
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_log import log as logging
import six
import webob.exc
from neutron.api import extensions
from neutron.api.v2 import base
from neutron.api.v2 import resource
from neutron.common import exceptions
from neutron.common import rpc as n_rpc
from neutron.extensions import agent
from neutron.i18n import _LE
from neutron import manager
from neutron import policy
from neutron import wsgi
from networking_cisco.plugins.cisco.common import cisco_constants
from networking_cisco.plugins.cisco.extensions import ciscohostingdevicemanager
PATH_PREFIX = "/dev_mgr"
LOG = logging.getLogger(__name__)
class InvalidCfgAgent(agent.AgentNotFound):
message = _("Agent %(agent_id)s is not a Cisco cfg agent or has been "
"disabled")
class HostingDeviceAssignedToCfgAgent(exceptions.Conflict):
message = _("The hosting device %(hosting_device_id)s is already assigned "
"to Cisco cfg agent %(agent_id)s.")
class HostingDeviceSchedulingFailed(exceptions.Conflict):
message = _("Failed to assign hosting device %(hosting_device_id)s to "
"Cisco cfg agent %(agent_id)s.")
class HostingDeviceNotAssignedToCfgAgent(exceptions.NotFound):
message = _("The hosting device %(hosting_device_id)s is currently not "
"assigned to Cisco cfg agent %(agent_id)s.")
CFG_AGENT_SCHEDULER_ALIAS = 'cisco-cfg-agent-scheduler'
CFG_AGENT_HOSTING_DEVICE = 'cfg-agent-hosting-device'
CFG_AGENT_HOSTING_DEVICES = CFG_AGENT_HOSTING_DEVICE + 's'
HOSTING_DEVICE_CFG_AGENT = 'hosting-device-cfg-agent'
HOSTING_DEVICE_CFG_AGENTS = HOSTING_DEVICE_CFG_AGENT + 's'
class HostingDeviceSchedulerController(wsgi.Controller):
def get_plugin(self):
plugin = manager.NeutronManager.get_service_plugins().get(
cisco_constants.DEVICE_MANAGER)
if not plugin:
LOG.error(_LE('No Device manager service plugin registered to '
'handle hosting device scheduling'))
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
return plugin
def index(self, request, **kwargs):
plugin = self.get_plugin()
policy.enforce(request.context, "get_%s" % CFG_AGENT_HOSTING_DEVICES,
{})
return plugin.list_hosting_devices_handled_by_cfg_agent(
request.context, kwargs['agent_id'])
def create(self, request, body, **kwargs):
plugin = self.get_plugin()
policy.enforce(request.context, "create_%s" % CFG_AGENT_HOSTING_DEVICE,
{})
cfg_agent_id = kwargs['agent_id']
hosting_device_id = body['hosting_device_id']
result = plugin.assign_hosting_device_to_cfg_agent(
request.context, cfg_agent_id, hosting_device_id)
notify(request.context, 'agent.hosting_device.add', hosting_device_id,
cfg_agent_id)
return result
def delete(self, request, **kwargs):
plugin = self.get_plugin()
policy.enforce(request.context, "delete_%s" % CFG_AGENT_HOSTING_DEVICE,
{})
cfg_agent_id = kwargs['agent_id']
hosting_device_id = kwargs['id']
result = plugin.unassign_hosting_device_from_cfg_agent(
request.context, cfg_agent_id, hosting_device_id)
notify(request.context, 'agent.hosting_device.remove',
hosting_device_id, cfg_agent_id)
return result
class CfgAgentsHandlingHostingDeviceController(wsgi.Controller):
def get_plugin(self):
plugin = manager.NeutronManager.get_service_plugins().get(
cisco_constants.DEVICE_MANAGER)
if not plugin:
LOG.error(_LE('No device manager service plugin registered to '
'handle hosting device scheduling'))
msg = _('The resource could not be found.')
raise webob.exc.HTTPNotFound(msg)
return plugin
def index(self, request, **kwargs):
plugin = self.get_plugin()
policy.enforce(request.context, "get_%s" % HOSTING_DEVICE_CFG_AGENTS,
{})
return plugin.list_cfg_agents_handling_hosting_device(
request.context, kwargs['hosting_device_id'])
class Ciscocfgagentscheduler(extensions.ExtensionDescriptor):
"""Extension class supporting configuration agent scheduler."""
@classmethod
def get_name(cls):
return "Cisco Configuration Agent Scheduler"
@classmethod
def get_alias(cls):
return CFG_AGENT_SCHEDULER_ALIAS
@classmethod
def get_description(cls):
return "Schedule hosting devices among Cisco configuration agents"
@classmethod
def get_namespace(cls):
return ("http://docs.openstack.org/ext/" +
CFG_AGENT_SCHEDULER_ALIAS + "/api/v1.0")
@classmethod
def get_updated(cls):
return "2014-03-31T10:00:00-00:00"
@classmethod
def get_resources(cls):
"""Returns Ext Resources."""
exts = []
parent = dict(member_name="agent",
collection_name="agents")
controller = resource.Resource(HostingDeviceSchedulerController(),
base.FAULT_MAP)
exts.append(extensions.ResourceExtension(CFG_AGENT_HOSTING_DEVICES,
controller, parent))
parent = dict(member_name=ciscohostingdevicemanager.DEVICE,
collection_name=ciscohostingdevicemanager.DEVICES)
controller = resource.Resource(
CfgAgentsHandlingHostingDeviceController(), base.FAULT_MAP)
exts.append(extensions.ResourceExtension(HOSTING_DEVICE_CFG_AGENTS,
controller, parent,
PATH_PREFIX))
return exts
def get_extended_resources(self, version):
return {}
@six.add_metaclass(abc.ABCMeta)
class CfgAgentSchedulerPluginBase(object):
"""REST API to operate the cfg agent scheduler.
    All of these methods must be executed in an admin context.
"""
@abc.abstractmethod
def assign_hosting_device_to_cfg_agent(self, context, id,
hosting_device_id):
pass
@abc.abstractmethod
def unassign_hosting_device_from_cfg_agent(self, context, id,
hosting_device_id):
pass
@abc.abstractmethod
def list_hosting_devices_handled_by_cfg_agent(self, context, id):
pass
@abc.abstractmethod
def list_cfg_agents_handling_hosting_device(self, context,
hosting_device_id):
pass
def notify(context, action, hosting_device_id, cfg_agent_id):
info = {'id': cfg_agent_id, 'hosting_device_id': hosting_device_id}
notifier = n_rpc.get_notifier('hosting_device')
notifier.info(context, action, {'cfg_agent': info})
|
|
import os,sys,re
import copy
import random
from collections import namedtuple
from loguru import logger
from uuparser import utils
class OptionsManager(object):
def __init__(self,options):
"""
input: parser options
object to harmonise the way we deal with the parser
"""
# load these straight away to make sure they're always available
#TODO: options compatibility TB vs GB
utils.load_iso_dict(options.json_isos)
utils.load_reverse_iso_dict(options.json_isos)
if options.include:
if not options.predict and not options.datadir:
raise Exception("You need to specify --datadir")
elif options.shared_task and not options.testdir:
raise Exception("You need to specify --testdir")
if options.predict and not (options.datadir or options.testdir or
options.testfile):
raise Exception("You need to specify --testdir")
if not options.predict:
if not options.include and not options.trainfile:
raise Exception("If not using the --include option, you must specify your training data with --trainfile")
else:
if not options.include and not options.testfile:
raise Exception("If not using the --include option, you must specify your test data with --testfile")
if not options.modeldir:
options.modeldir = options.outdir # set model directory to output directory by default
model = os.path.join(options.modeldir,options.model)
# in monoling case we check later on language by language basis
if options.multiling and not os.path.exists(model):
raise Exception(f"Model not found. Path tried: {model}")
if not options.outdir:
raise Exception("You must specify an output directory via the --outdir option")
elif not os.path.exists(options.outdir): # create output directory if it doesn't exist
logger.info(f"Creating output directory {options.outdir}")
os.mkdir(options.outdir)
if not options.graph_based and (not options.predict and not
(options.rlFlag or options.rlMostFlag or
options.headFlag)):
raise Exception("Must include either head, rl or rlmost (For example, if you specified --disable-head and --disable-rlmost, you must specify --userl)")
if not options.graph_based and (options.rlFlag and options.rlMostFlag):
logger.warning('Switching off rlMostFlag to allow rlFlag to take precedence')
options.rlMostFlag = False
if options.word_emb_size == 0 and options.pos_emb_size == 0 and\
options.char_lstm_output_size == 0 and not options.external_embedding:
raise Exception("All embeddings switched off: must use one of words, pos tags, chars, or external embeddings")
if not options.multiling:
options.tbank_emb_size = 0
options.conllu = True #default
def create_experiment_list(self,options):
"""
Create a list of experiments.
This list is designed to be looped over in the main body of our program.
"""
experiment = namedtuple('Experiment','treebanks, outdir, modeldir')
experiments = [] # will be a list of namedtuples
if not options.include:
treebanks = self.create_vanila_treebank_list(options)
experiments.append(experiment(treebanks, options.outdir, options.modeldir))
else:
treebanks = self.create_UD_treebank_list(options)
if options.multiling: # one experiment with several treebanks
experiments.append(experiment(treebanks, options.outdir, options.modeldir))
else: # several experiments with one treebank each
for treebank in treebanks:
experiments.append(experiment([treebank],treebank.outdir,treebank.modeldir))
return experiments
def create_vanila_treebank_list(self,options):
"""
Create list of vanilla (i.e. non-UD) treebanks. Currently only one treebank is supported, so the list will always
        have one element. This is for consistency with the UD treebanks case where multi-monolingual experiments are allowed
"""
treebank = utils.Treebank(options.trainfile, \
options.devfile, options.testfile)
treebank.iso_id = None
treebank.outdir = options.outdir
treebank.modeldir = options.modeldir
#just one model specified by train/dev and/or test
if options.predict:
if not os.path.exists(options.testfile):
raise Exception("Test file " + options.testfile + " not found")
else:
options.conllu = (os.path.splitext(options.testfile.lower())[1] == '.conllu') # test if file in conllu format
treebank.test_gold = options.testfile
else:
self.prepareDev(treebank,options)
if options.devfile:
options.conllu = (os.path.splitext(options.devfile.lower())[1] == '.conllu')
elif options.create_dev:
options.conllu = (os.path.splitext(options.trainfile.lower())[1] == '.conllu')
if options.debug:
self.createDebugData(treebank,options)
return [treebank] # make it a list of one element just for the sake of consistency with the "include" case
def create_UD_treebank_list(self,options):
"""
Create list of UD Treebanks for experiments.
Output will either be a list where each element is a single treebank (monolingual or multi-monolingual case)
or a list where the first element is a list of treebanks (multilingual case).
This makes it easier to loop over the outer list in our main parser function
"""
options.conllu = True # file is in conllu format
all_treebanks = utils.get_all_treebanks(options) # returns a UD treebank for all possible UD languages
treebank_dict = {treebank.iso_id: treebank for treebank in all_treebanks}
treebanks = [] # the treebanks we need
iso_list = utils.parse_list_arg(options.include) # languages requested by the user via the include flag
for iso in iso_list:
proxy_tbank = None
m = re.search(r'^(.*):(.*)$',iso)
if m:
iso = m.group(1)
proxy_tbank = m.group(2)
if iso in treebank_dict:
treebank = treebank_dict[iso]
treebank.proxy_tbank = proxy_tbank
if not options.shared_task:
treebank.outdir= os.path.join(options.outdir,treebank.iso_id)
else:
treebank.outdir = options.outdir
if not os.path.exists(treebank.outdir): # create language-specific output folder if it doesn't exist
logger.info(f"Creating language-specific output directory {treebank.outdir}")
os.mkdir(treebank.outdir)
else:
logger.info(
f"Warning: language-specific subdirectory {treebank.outdir} already exists, contents may be overwritten"
)
if not options.predict:
self.prepareDev(treebank,options)
if options.debug: # it is important that prepareDev be called before createDebugData
self.createDebugData(treebank,options)
if options.predict and not options.multiling:
treebank.modeldir = os.path.join(options.modeldir,treebank.iso_id)
model = os.path.join(treebank.modeldir,options.model)
if not os.path.exists(model):
raise Exception(f"Model not found. Path tried: {model}")
else:
treebank.modeldir = None
treebanks.append(treebank)
else:
logger.warning(f"Skipping invalid language code {iso}")
return treebanks
# creates dev data by siphoning off a portion of the training data (when necessary)
# sets up treebank for prediction and model selection on dev data
def prepareDev(self,treebank,options):
treebank.pred_dev = options.pred_dev # even if options.pred_dev is True, might change treebank.pred_dev to False later if no dev data available
if not treebank.devfile or not os.path.exists(treebank.devfile):
if options.create_dev: # create some dev data from the training data
train_data = list(utils.read_conll(treebank.trainfile))
tot_sen = len(train_data)
if tot_sen > options.min_train_sents: # need to have at least min_train_sents to move forward
dev_file = os.path.join(treebank.outdir,'dev-split' + '.conllu') # location for the new dev file
train_file = os.path.join(treebank.outdir,'train-split' + '.conllu') # location for the new train file
dev_len = int(0.01*options.dev_percent*tot_sen)
logger.info(f"Taking {dev_len} of {tot_sen} sentences from training data as new dev data for {treebank.name}")
random.shuffle(train_data)
dev_data = train_data[:dev_len]
utils.write_conll(dev_file,dev_data) # write the new dev data to file
train_data = train_data[dev_len:] # put the rest of the training data in a new file too
utils.write_conll(train_file,train_data)
# update some variables with the new file locations
treebank.dev_gold = dev_file
treebank.devfile = dev_file
treebank.trainfile = train_file
else: # not enough sentences
logger.warning(
f"Not enough sentences in training data to create dev set for {treebank.name}"
f" (minimum required --min-train-size: {options.min_train_sents})"
)
treebank.pred_dev = False
else: # option --create-dev not set
logger.warning(f"No dev data for {treebank.name}, consider adding option --create-dev to create dev data from training set")
treebank.pred_dev = False
if options.model_selection and not treebank.pred_dev:
logger.warning(f"Can't do model selection for {treebank.name} as prediction on dev data is off")
    # if the debug option is set, we read in the training, dev and test files as appropriate, cap the number of sentences and store
# new files with these smaller data sets
def createDebugData(self,treebank,options):
ext = '.conllu' if options.conllu else '.conll'
logger.info('Creating smaller data sets for debugging')
if not options.predict:
train_data = list(utils.read_conll(treebank.trainfile,maxSize=options.debug_train_sents,hard_lim=True))
train_file = os.path.join(treebank.outdir,'train-debug' + ext) # location for the new train file
            utils.write_conll(train_file,train_data) # write the new train data to file
treebank.trainfile = train_file
if treebank.devfile and os.path.exists(treebank.devfile) and options.pred_dev:
dev_data = list(utils.read_conll(treebank.devfile,maxSize=options.debug_dev_sents,hard_lim=True))
dev_file = os.path.join(treebank.outdir,'dev-debug' + ext) # location for the new dev file
utils.write_conll(dev_file,dev_data) # write the new dev data to file
# have to create a separate debug gold file if not the same as input file
if treebank.dev_gold != treebank.devfile:
dev_gold_data = list(utils.read_conll(treebank.dev_gold,maxSize=options.debug_dev_sents,hard_lim=True))
                    dev_gold_file = os.path.join(treebank.outdir,'dev-gold-debug' + ext) # location for the new dev gold file
utils.write_conll(dev_gold_file,dev_gold_data) # write the new dev gold data to file
treebank.dev_gold = dev_gold_file
else:
treebank.dev_gold = dev_file
treebank.devfile = dev_file # important to do this last
else:
test_data = list(utils.read_conll(treebank.testfile,maxSize=options.debug_test_sents,hard_lim=True))
            test_file = os.path.join(treebank.outdir,'test-debug' + ext) # location for the new test file
            utils.write_conll(test_file,test_data) # write the new test data to file
if treebank.test_gold != treebank.testfile:
test_gold_data = list(utils.read_conll(treebank.test_gold,maxSize=options.debug_test_sents,hard_lim=True))
                test_gold_file = os.path.join(treebank.outdir,'test-gold-debug' + ext) # location for the new test gold file
                utils.write_conll(test_gold_file,test_gold_data) # write the new test gold data to file
treebank.test_gold = test_gold_file
else:
treebank.test_gold = test_file
treebank.testfile = test_file
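# Hedged usage sketch (not part of the original module): how the manager is
# typically driven once command-line options have been parsed elsewhere. No
# attribute names are assumed beyond those already read by __init__ and
# create_experiment_list above.
def _example_iterate_experiments(options):
    """Build the experiment list for parsed ``options`` and yield each one."""
    manager = OptionsManager(options)
    for experiment in manager.create_experiment_list(options):
        # Each experiment is a namedtuple of (treebanks, outdir, modeldir).
        yield experiment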
|
|
'''
Copyright 2019 Trustees of the University of Pennsylvania
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import xml.etree.ElementTree as ET
import requests
from ieeg.ieeg_auth import IeegAuth
class IeegApi:
"""
The IEEG REST API
"""
_get_id_by_dataset_name_path = "/timeseries/getIdByDataSnapshotName/"
_get_time_series_details_path = '/timeseries/getDataSnapshotTimeSeriesDetails/'
_get_counts_by_layer_path = '/timeseries/getCountsByLayer/'
_get_annotations_path = '/timeseries/getTsAnnotations/'
_get_data_path = '/timeseries/getUnscaledTimeSeriesSetBinaryRaw/'
_derive_dataset_path = '/timeseries/deriveDataSnapshotFull/'
_get_montages_path = '/datasets/%s/montages'
_add_annotations_path = '/timeseries/addAnnotationsToDataSnapshot/'
_delete_annotation_layer_path = '/timeseries/removeTsAnnotationsByLayer/'
_json_content = 'application/json'
_xml_content = 'application/xml'
_send_json = {'Content-Type': _json_content}
_send_xml = {'Content-Type': _xml_content}
_accept_json = {'Accept': _json_content}
_accept_xml = {'Accept': _xml_content}
_send_accept_json = {
'Content-Type': _json_content, 'Accept': _json_content}
def __init__(self, username, password,
use_https=True, host='www.ieeg.org', port=None, verify_ssl=True):
self.http = requests.Session()
self.http.hooks['response'].append(
IeegApi.raise_ieeg_exception)
self.http.auth = IeegAuth(username, password)
self.http.verify = verify_ssl
self.scheme = 'https' if use_https else 'http'
self.host = host
self.port = port
authority = host + ':' + str(port) if port else host
self.base_url = self.scheme + '://' + authority + '/services'
@staticmethod
def raise_ieeg_exception(response, *args, **kwargs):
"""
Raises error if http status code is not 200
"""
# Get a runtime error if the unused args are removed from sig.
#pylint: disable=unused-argument
if response.status_code != requests.codes.ok:
content_type = response.headers.get('Content-Type')
if content_type == IeegApi._json_content:
raise IeegServiceError.from_json(
response.status_code, response.json())
if content_type == IeegApi._xml_content:
raise IeegServiceError.from_xml(
response.status_code, response.text)
raise IeegConnectionError(response.text)
def close(self):
"""
Closes HTTP resources
"""
self.http.close()
def get_dataset_id_by_name(self, dataset_name):
"""
Returns a Response with a dataset's id given its name
"""
url = self.base_url + IeegApi._get_id_by_dataset_name_path + dataset_name
response = self.http.get(url, headers=IeegApi._accept_json)
return response
def get_time_series_details(self, dataset_id):
"""
Returns Response with time series details in XML format
"""
url = self.base_url + IeegApi._get_time_series_details_path + dataset_id
response = self.http.get(url, headers=IeegApi._accept_xml)
return response
def get_annotation_layers(self, dataset):
"""
Returns Response with Annotation layers and counts in JSON format.
"""
url_str = self.base_url + IeegApi._get_counts_by_layer_path + dataset.snap_id
response = self.http.get(url_str, headers=IeegApi._accept_json)
return response
def get_annotations(self, dataset, layer_name,
start_offset_usecs=None, first_result=None, max_results=None):
"""
Returns a Response containing a JSON formatted list of annotations in the given
layer ordered by start time.
Given a Dataset ds with no new annotations being added, if ds.get_annotations('my_layer')
returns 152 annotations, then ds.get_annotations('my_layer', max_results=100) will return
the first 100 of those and ds.get_annotations('my_layer', first_result=100, max_results=100)
will return the final 52.
:param layer_name: The annotation layer to return
        :param start_offset_usecs:
            If specified, all returned annotations will have a start offset >= start_offset_usecs
:param first_result: If specified, the zero-based index of the first annotation to return.
:param max_results: If specified, the maximum number of annotations to return.
:returns: a list of annotations in the given layer ordered by start offset.
"""
url_str = self.base_url + IeegApi._get_annotations_path + \
dataset.snap_id + '/' + layer_name
params = {'startOffsetUsec': start_offset_usecs,
'firstResult': first_result, 'maxResults': max_results}
response = self.http.get(
url_str, headers=IeegApi._accept_json, params=params)
return response
def derive_dataset(self, dataset, derived_dataset_name, tool_name):
"""
Returns a Response containing the portal id of a new Dataset.
The new Dataset will have the name given in the derived_dataset_name
and be a copy of the given dataset.
:param dataset: The dataset to copy
:param derived_dataset_name: The name of the new dataset
:param tool_name: The name of the tool creating the new dataset
:returns: the portal id of the new dataset
"""
url_str = self.base_url + IeegApi._derive_dataset_path + dataset.snap_id
params = {'friendlyName': derived_dataset_name,
'toolName': tool_name}
response = self.http.post(
url_str, headers=IeegApi._accept_json, params=params)
return response
def get_data(self, dataset, start, duration, channels):
"""
Returns data from the IEEG platform
:param start: Start time (usec)
:param duration: Number of usec to request samples from
:param channels: Integer indices of the channels we want
:return: a Response with binary content.
"""
# Build Data Content XML
wrapper1 = ET.Element('timeSeriesIdAndDChecks')
wrapper2 = ET.SubElement(wrapper1, 'timeSeriesIdAndDChecks')
i = 0
for ts_details in dataset.ts_array:
if i in channels:
el1 = ET.SubElement(wrapper2, 'timeSeriesIdAndCheck')
el2 = ET.SubElement(el1, 'dataCheck')
el2.text = ts_details.findall('dataCheck')[0].text
el3 = ET.SubElement(el1, 'id')
el3.text = ts_details.findall('revisionId')[0].text
i += 1
data = ET.tostring(wrapper1, encoding="us-ascii",
method="xml").decode('utf-8')
data = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>' + data
params = {'start': start, 'duration': duration}
url_str = self.base_url + IeegApi._get_data_path + dataset.snap_id
response = self.http.post(url_str,
params=params, data=data, headers=IeegApi._send_xml)
return response
def get_montages(self, dataset_id):
"""
Returns the montages for the given dataset.
"""
url_str = self.base_url + IeegApi._get_montages_path % dataset_id
response = self.http.get(url_str, headers=IeegApi._accept_json)
return response
def add_annotations(self, dataset, annotations):
"""
Adds annotations to the given snapshot.
        :returns: a Response with String body (the dataset id)
"""
# request_body is oddly verbose because it was originally designed as XML.
ts_revids = set()
ts_annotations = []
for annotation in annotations:
if annotation.parent != dataset:
raise ValueError(
'Annotation does not belong to this dataset. It belongs to dataset '
+ annotation.parent.snap_id)
annotated_revids = [
detail.portal_id for detail in annotation.annotated]
ts_annotation = {
'timeseriesRevIds': {'timeseriesRevId': annotated_revids},
'annotator': annotation.annotator,
'type': annotation.type,
'description': annotation.description,
'layer': annotation.layer,
'startTimeUutc': annotation.start_time_offset_usec,
'endTimeUutc': annotation.end_time_offset_usec
}
if annotation.portal_id:
ts_annotation['revId'] = annotation.portal_id
ts_annotations.append(ts_annotation)
ts_revids.update(annotated_revids)
timeseries = [{'revId': ts_revid, 'label': dataset.ts_details_by_id[ts_revid].channel_label}
for ts_revid in ts_revids]
request_body = {'timeseriesannotations': {
'timeseries': {
'timeseries': timeseries
},
'annotations': {
'annotation': ts_annotations
}
}}
url_str = self.base_url + IeegApi._add_annotations_path + dataset.snap_id
response = self.http.post(url_str,
json=request_body,
headers=IeegApi._send_accept_json)
return response
def move_annotation_layer(self, dataset, from_layer, to_layer):
"""
Moves annotations in the given dataset from from_layer to to_layer.
:returns: a Response with JSON body. Has number of moved annotations.
"""
req_path = ('/timeseries/datasets/'
+ dataset.snap_id
+ '/tsAnnotations/'
+ from_layer)
url_str = self.base_url + req_path
query_params = {'toLayerName': to_layer}
response = self.http.post(
url_str, params=query_params, headers=IeegApi._accept_json)
return response
def delete_annotation_layer(self, dataset, layer):
"""
Deletes annotations in layer from the given dataset.
        :returns: a Response with JSON body. Has number of deleted annotations.
"""
url_str = self.base_url + IeegApi._delete_annotation_layer_path + \
dataset.snap_id + '/' + layer
response = self.http.post(url_str, headers=IeegApi._accept_json)
return response
class IeegConnectionError(Exception):
"""
A simple exception for connectivity errors
"""
class IeegServiceError(IeegConnectionError):
"""
    An error response was received from the server.
"""
def __init__(self, http_status_code, ieeg_error_code, message):
self.http_status_code = http_status_code
self.ieeg_error_code = ieeg_error_code
super(IeegServiceError, self).__init__(message)
@staticmethod
def from_json(http_status, json_ieeg_ws_exception_body):
"""
Returns IeegServiceError from the given json content
"""
content = json_ieeg_ws_exception_body.get('IeegWsException')
if not content:
return IeegConnectionError(json_ieeg_ws_exception_body)
ieeg_error_code = content['errorCode']
message = content['message']
return IeegServiceError(http_status, ieeg_error_code, message)
@staticmethod
def from_xml(http_status, xml_ieeg_ws_exception_body):
"""
Returns IeegServiceError from the given xml content
"""
content = ET.fromstring(xml_ieeg_ws_exception_body)
ieeg_error_code_element = content.find('errorCode')
        # ElementTree elements with no children are falsy, so compare with None
        # explicitly to detect a missing errorCode element.
        if ieeg_error_code_element is None:
return IeegConnectionError(xml_ieeg_ws_exception_body)
ieeg_error_code = ieeg_error_code_element.text
message = content.find('message').text
return IeegServiceError(http_status, ieeg_error_code, message)
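# Hedged usage sketch (not part of the original module): the typical request
# flow against this low-level client. Credentials and the dataset name are
# placeholders; every method returns a requests.Response, so callers unpack
# .text / .json() / .content themselves (higher-level ieeg classes do so).
def _example_lookup_dataset_id(username, password, dataset_name):
    """Return the portal id for ``dataset_name`` as plain text (sketch)."""
    api = IeegApi(username, password)
    try:
        # Non-200 responses raise IeegServiceError/IeegConnectionError via the
        # raise_ieeg_exception response hook installed in __init__.
        response = api.get_dataset_id_by_name(dataset_name)
        return response.text
    finally:
        api.close()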
|
|
import copy
import types
import sys
import os
from itertools import izip
import django.db.models.manipulators # Imported to register signal handler.
import django.db.models.manager # Ditto.
from django.core import validators
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, FieldError
from django.db.models.fields import AutoField, ImageField, FieldDoesNotExist
from django.db.models.fields.related import OneToOneRel, ManyToOneRel, OneToOneField
from django.db.models.query import delete_objects, Q
from django.db.models.options import Options
from django.db import connection, transaction
from django.db.models import signals
from django.db.models.loading import register_models, get_model
from django.dispatch import dispatcher
from django.utils.datastructures import SortedDict
from django.utils.functional import curry
from django.utils.encoding import smart_str, force_unicode, smart_unicode
from django.conf import settings
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
class ModelBase(type):
"Metaclass for all models"
def __new__(cls, name, bases, attrs):
# If this isn't a subclass of Model, don't do anything special.
try:
parents = [b for b in bases if issubclass(b, Model)]
except NameError:
# 'Model' isn't defined yet, meaning we're looking at Django's own
# Model class, defined below.
parents = []
if not parents:
return super(ModelBase, cls).__new__(cls, name, bases, attrs)
# Create the class.
module = attrs.pop('__module__')
new_class = type.__new__(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
new_class.add_to_class('_meta', Options(meta))
if not abstract:
new_class.add_to_class('DoesNotExist',
subclass_exception('DoesNotExist', ObjectDoesNotExist, module))
new_class.add_to_class('MultipleObjectsReturned',
subclass_exception('MultipleObjectsReturned', MultipleObjectsReturned, module))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
old_default_mgr = None
if getattr(new_class, '_default_manager', None):
# We have a parent who set the default manager.
if new_class._default_manager.model._meta.abstract:
old_default_mgr = new_class._default_manager
new_class._default_manager = None
if getattr(new_class._meta, 'app_label', None) is None:
# Figure out the app_label by looking one level up.
# For 'django.contrib.sites.models', this would be 'sites'.
model_module = sys.modules[new_class.__module__]
new_class._meta.app_label = model_module.__name__.split('.')[-2]
# Bail out early if we have already created this class.
m = get_model(new_class._meta.app_label, name, False)
if m is not None:
return m
# Add all attributes to the class.
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# Do the appropriate setup for any model parents.
o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
if isinstance(f, OneToOneField)])
for base in parents:
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
if not base._meta.abstract:
if base in o2o_map:
field = o2o_map[base]
field.primary_key = True
new_class._meta.setup_pk(field)
else:
attr_name = '%s_ptr' % base._meta.module_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
new_class.add_to_class(attr_name, field)
new_class._meta.parents[base] = field
else:
# The abstract base class case.
names = set([f.name for f in new_class._meta.local_fields + new_class._meta.many_to_many])
for field in base._meta.local_fields + base._meta.local_many_to_many:
if field.name in names:
raise FieldError('Local field %r in class %r clashes with field of similar name from abstract base class %r'
% (field.name, name, base.__name__))
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
if old_default_mgr and not new_class._default_manager:
new_class._default_manager = old_default_mgr._copy_to_model(new_class)
new_class._prepare()
register_models(new_class._meta.app_label, new_class)
# Because of the way imports happen (recursively), we may or may not be
# the first time this model tries to register with the framework. There
# should only be one class for each model, so we always return the
# registered version.
return get_model(new_class._meta.app_label, name, False)
def add_to_class(cls, name, value):
if hasattr(value, 'contribute_to_class'):
value.contribute_to_class(cls, name)
else:
setattr(cls, name, value)
def _prepare(cls):
# Creates some methods once self._meta has been populated.
opts = cls._meta
opts._prepare(cls)
if opts.order_with_respect_to:
cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
setattr(opts.order_with_respect_to.rel.to, 'get_%s_order' % cls.__name__.lower(), curry(method_get_order, cls))
setattr(opts.order_with_respect_to.rel.to, 'set_%s_order' % cls.__name__.lower(), curry(method_set_order, cls))
# Give the class a docstring -- its definition.
if cls.__doc__ is None:
cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join([f.attname for f in opts.fields]))
if hasattr(cls, 'get_absolute_url'):
cls.get_absolute_url = curry(get_absolute_url, opts, cls.get_absolute_url)
dispatcher.send(signal=signals.class_prepared, sender=cls)
class Model(object):
__metaclass__ = ModelBase
def __init__(self, *args, **kwargs):
dispatcher.send(signal=signals.pre_init, sender=self.__class__, args=args, kwargs=kwargs)
        # There is a rather weird disparity here; if kwargs is set, then args
        # overrides it. It should be one or the other; don't duplicate the work.
        # The reason for the kwargs check is that the standard iterator passes
        # values in by args, and instantiation for iteration is 33% faster.
args_len = len(args)
if args_len > len(self._meta.fields):
# Daft, but matches old exception sans the err msg.
raise IndexError("Number of args exceeds number of fields")
fields_iter = iter(self._meta.fields)
if not kwargs:
# The ordering of the izip calls matter - izip throws StopIteration
# when an iter throws it. So if the first iter throws it, the second
# is *not* consumed. We rely on this, so don't change the order
# without changing the logic.
for val, field in izip(args, fields_iter):
setattr(self, field.attname, val)
else:
# Slower, kwargs-ready version.
for val, field in izip(args, fields_iter):
setattr(self, field.attname, val)
kwargs.pop(field.name, None)
# Maintain compatibility with existing calls.
if isinstance(field.rel, ManyToOneRel):
kwargs.pop(field.attname, None)
# Now we're left with the unprocessed fields that *must* come from
# keywords, or default.
for field in fields_iter:
if kwargs:
if isinstance(field.rel, ManyToOneRel):
try:
# Assume object instance was passed in.
rel_obj = kwargs.pop(field.name)
except KeyError:
try:
# Object instance wasn't passed in -- must be an ID.
val = kwargs.pop(field.attname)
except KeyError:
val = field.get_default()
else:
# Object instance was passed in. Special case: You can
# pass in "None" for related objects if it's allowed.
if rel_obj is None and field.null:
val = None
else:
try:
val = getattr(rel_obj, field.rel.get_related_field().attname)
except AttributeError:
raise TypeError("Invalid value: %r should be a %s instance, not a %s" %
(field.name, field.rel.to, type(rel_obj)))
else:
val = kwargs.pop(field.attname, field.get_default())
else:
val = field.get_default()
setattr(self, field.attname, val)
if kwargs:
for prop in kwargs.keys():
try:
if isinstance(getattr(self.__class__, prop), property):
setattr(self, prop, kwargs.pop(prop))
except AttributeError:
pass
if kwargs:
raise TypeError, "'%s' is an invalid keyword argument for this function" % kwargs.keys()[0]
dispatcher.send(signal=signals.post_init, sender=self.__class__, instance=self)
def from_sequence(cls, values):
"""
An alternate class constructor, primarily for internal use.
Creates a model instance from a sequence of values (which corresponds
        to all the non-many-to-many fields in creation order). If there are more
fields than values, the remaining (final) fields are given their
default values.
ForeignKey fields can only be initialised using id values, not
instances, in this method.
"""
dispatcher.send(signal=signals.pre_init, sender=cls, args=values,
kwargs={})
obj = Empty()
obj.__class__ = cls
field_iter = iter(obj._meta.fields)
for val, field in izip(values, field_iter):
setattr(obj, field.attname, val)
for field in field_iter:
setattr(obj, field.attname, field.get_default())
dispatcher.send(signal=signals.post_init, sender=cls, instance=obj)
return obj
from_sequence = classmethod(from_sequence)
def __repr__(self):
return smart_str(u'<%s: %s>' % (self.__class__.__name__, unicode(self)))
def __str__(self):
if hasattr(self, '__unicode__'):
return force_unicode(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __eq__(self, other):
return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._get_pk_val())
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def save(self):
"""
Save the current instance. Override this in a subclass if you want to
control the saving process.
"""
self.save_base()
save.alters_data = True
def save_base(self, raw=False, cls=None):
"""
Does the heavy-lifting involved in saving. Subclasses shouldn't need to
override this method. It's separate from save() in order to hide the
need for overrides of save() to pass around internal-only parameters
('raw' and 'cls').
"""
if not cls:
cls = self.__class__
meta = self._meta
signal = True
dispatcher.send(signal=signals.pre_save, sender=self.__class__,
instance=self, raw=raw)
else:
meta = cls._meta
signal = False
for parent, field in meta.parents.items():
self.save_base(raw, parent)
setattr(self, field.attname, self._get_pk_val(parent._meta))
non_pks = [f for f in meta.local_fields if not f.primary_key]
# First, try an UPDATE. If that doesn't update anything, do an INSERT.
pk_val = self._get_pk_val(meta)
# Note: the comparison with '' is required for compatibility with
# oldforms-style model creation.
pk_set = pk_val is not None and smart_unicode(pk_val) != u''
record_exists = True
manager = cls._default_manager
if pk_set:
# Determine whether a record with the primary key already exists.
if manager.filter(pk=pk_val).extra(select={'a': 1}).values('a').order_by():
# It does already exist, so do an UPDATE.
if non_pks:
values = [(f, None, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
manager.filter(pk=pk_val)._update(values)
else:
record_exists = False
if not pk_set or not record_exists:
if not pk_set:
values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True))) for f in meta.local_fields if not isinstance(f, AutoField)]
else:
values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True))) for f in meta.local_fields]
if meta.order_with_respect_to:
field = meta.order_with_respect_to
values.append((meta.get_field_by_name('_order')[0], manager.filter(**{field.name: getattr(self, field.attname)}).count()))
record_exists = False
update_pk = bool(meta.has_auto_field and not pk_set)
if values:
# Create a new record.
result = manager._insert(values, return_id=update_pk)
else:
# Create a new record with defaults for everything.
result = manager._insert([(meta.pk, connection.ops.pk_default_value())], return_id=update_pk, raw_values=True)
if update_pk:
setattr(self, meta.pk.attname, result)
transaction.commit_unless_managed()
if signal:
dispatcher.send(signal=signals.post_save, sender=self.__class__,
instance=self, created=(not record_exists), raw=raw)
def validate(self):
"""
First coerces all fields on this instance to their proper Python types.
Then runs validation on every field. Returns a dictionary of
field_name -> error_list.
"""
error_dict = {}
invalid_python = {}
for f in self._meta.fields:
try:
setattr(self, f.attname, f.to_python(getattr(self, f.attname, f.get_default())))
except validators.ValidationError, e:
error_dict[f.name] = e.messages
invalid_python[f.name] = 1
for f in self._meta.fields:
if f.name in invalid_python:
continue
errors = f.validate_full(getattr(self, f.attname, f.get_default()), self.__dict__)
if errors:
error_dict[f.name] = errors
return error_dict
def _collect_sub_objects(self, seen_objs):
"""
Recursively populates seen_objs with all objects related to this object.
When done, seen_objs will be in the format:
{model_class: {pk_val: obj, pk_val: obj, ...},
model_class: {pk_val: obj, pk_val: obj, ...}, ...}
"""
pk_val = self._get_pk_val()
if pk_val in seen_objs.setdefault(self.__class__, {}):
return
seen_objs.setdefault(self.__class__, {})[pk_val] = self
for related in self._meta.get_all_related_objects():
rel_opts_name = related.get_accessor_name()
if isinstance(related.field.rel, OneToOneRel):
try:
sub_obj = getattr(self, rel_opts_name)
except ObjectDoesNotExist:
pass
else:
sub_obj._collect_sub_objects(seen_objs)
else:
for sub_obj in getattr(self, rel_opts_name).all():
sub_obj._collect_sub_objects(seen_objs)
def delete(self):
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (self._meta.object_name, self._meta.pk.attname)
        # Find all the objects that need to be deleted
seen_objs = SortedDict()
self._collect_sub_objects(seen_objs)
# Actually delete the objects
delete_objects(seen_objs)
delete.alters_data = True
def _get_FIELD_display(self, field):
value = getattr(self, field.attname)
return force_unicode(dict(field.choices).get(value, value), strings_only=True)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
op = is_next and 'gt' or 'lt'
order = not is_next and '-' or ''
param = smart_str(getattr(self, field.attname))
q = Q(**{'%s__%s' % (field.name, op): param})
q = q|Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order)
try:
return qs[0]
except IndexError:
raise self.DoesNotExist, "%s matching query does not exist." % self.__class__._meta.object_name
def _get_next_or_previous_in_order(self, is_next):
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
qn = connection.ops.quote_name
op = is_next and '>' or '<'
order = not is_next and '-_order' or '_order'
order_field = self._meta.order_with_respect_to
# FIXME: When querysets support nested queries, this can be turned
# into a pure queryset operation.
where = ['%s %s (SELECT %s FROM %s WHERE %s=%%s)' % \
(qn('_order'), op, qn('_order'),
qn(self._meta.db_table), qn(self._meta.pk.column))]
params = [self.pk]
obj = self._default_manager.filter(**{order_field.name: getattr(self, order_field.attname)}).extra(where=where, params=params).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
def _get_FIELD_filename(self, field):
if getattr(self, field.attname): # value is not blank
return os.path.join(settings.MEDIA_ROOT, getattr(self, field.attname))
return ''
def _get_FIELD_url(self, field):
if getattr(self, field.attname): # value is not blank
import urlparse
return urlparse.urljoin(settings.MEDIA_URL, getattr(self, field.attname)).replace('\\', '/')
return ''
def _get_FIELD_size(self, field):
return os.path.getsize(self._get_FIELD_filename(field))
def _save_FIELD_file(self, field, filename, raw_contents, save=True):
directory = field.get_directory_name()
try: # Create the date-based directory if it doesn't exist.
os.makedirs(os.path.join(settings.MEDIA_ROOT, directory))
except OSError: # Directory probably already exists.
pass
filename = field.get_filename(filename)
# If the filename already exists, keep adding an underscore to the name of
# the file until the filename doesn't exist.
while os.path.exists(os.path.join(settings.MEDIA_ROOT, filename)):
try:
dot_index = filename.rindex('.')
except ValueError: # filename has no dot
filename += '_'
else:
filename = filename[:dot_index] + '_' + filename[dot_index:]
# Write the file to disk.
setattr(self, field.attname, filename)
full_filename = self._get_FIELD_filename(field)
fp = open(full_filename, 'wb')
fp.write(raw_contents)
fp.close()
# Save the width and/or height, if applicable.
if isinstance(field, ImageField) and (field.width_field or field.height_field):
from django.utils.images import get_image_dimensions
width, height = get_image_dimensions(full_filename)
if field.width_field:
setattr(self, field.width_field, width)
if field.height_field:
setattr(self, field.height_field, height)
# Save the object because it has changed unless save is False
if save:
self.save()
_save_FIELD_file.alters_data = True
def _get_FIELD_width(self, field):
return self._get_image_dimensions(field)[0]
def _get_FIELD_height(self, field):
return self._get_image_dimensions(field)[1]
def _get_image_dimensions(self, field):
cachename = "__%s_dimensions_cache" % field.name
if not hasattr(self, cachename):
from django.utils.images import get_image_dimensions
filename = self._get_FIELD_filename(field)
setattr(self, cachename, get_image_dimensions(filename))
return getattr(self, cachename)
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list):
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
for i, j in enumerate(id_list):
ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
transaction.commit_unless_managed()
def method_get_order(ordered_obj, self):
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
order_name = ordered_obj._meta.order_with_respect_to.name
pk_name = ordered_obj._meta.pk.name
return [r[pk_name] for r in
ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)]
##############################################
# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
##############################################
def get_absolute_url(opts, func, self, *args, **kwargs):
return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.module_name), func)(self, *args, **kwargs)
########
# MISC #
########
class Empty(object):
pass
if sys.version_info < (2, 5):
# Prior to Python 2.5, Exception was an old-style class
def subclass_exception(name, parent, unused):
return types.ClassType(name, (parent,), {})
else:
def subclass_exception(name, parent, module):
return type(name, (parent,), {'__module__': module})
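# Hedged illustration (not part of the original module): what an ordinary model
# declaration looks like from the metaclass's point of view. Declaring the
# class runs ModelBase.__new__ above, which attaches _meta and the
# DoesNotExist/MultipleObjectsReturned exceptions, registers the class with the
# app cache and fires class_prepared (which is what installs the default
# manager). Requires a configured settings module; the names below are
# illustrative only.
def _example_declare_model():
    from django.db.models.fields import CharField
    class Article(Model):
        headline = CharField(max_length=100)
        class Meta:
            app_label = 'example'
    return Article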
|
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Structured flags commonly used in experiment binaries.
Structured flags are often used to construct complex structures via multiple
simple flags (e.g. an optimizer can be created by controlling learning rate and
other hyper parameters).
"""
import sys
from typing import Optional, Sequence
from absl import flags
from fedjax.core import client_datasets
from fedjax.core import optimizers
from fedjax.training import federated_experiment
from fedjax.training import tasks
FLAGS = flags.FLAGS
class NamedFlags:
"""A group of flags with an optional named prefix."""
def __init__(self, name: Optional[str]):
self._name = name
if name is None:
self._prefix = ''
self._description = ''
else:
self._prefix = name + '_'
self._description = f' for {name}'
# Two special things:
# - Add prefix/description when registering flags.
# - Setting module_name so that these flags become visible for -help.
def _enum(self, name: str, default: Optional[str], enum_values: Sequence[str],
help_: str):
flags.DEFINE_enum(
self._prefix + name,
default,
enum_values,
help_ + self._description,
module_name=sys.argv[0])
def _string(self, name: str, default: Optional[str], help_: str):
flags.DEFINE_string(
self._prefix + name,
default,
help_ + self._description,
module_name=sys.argv[0])
def _float(self, name: str, default: Optional[float], help_: str):
flags.DEFINE_float(
self._prefix + name,
default,
help_ + self._description,
module_name=sys.argv[0])
def _integer(self, name: str, default: Optional[int], help_: str):
flags.DEFINE_integer(
self._prefix + name,
default,
help_ + self._description,
module_name=sys.argv[0])
def _get_flag(self, flag):
return getattr(FLAGS, self._prefix + flag)
class OptimizerFlags(NamedFlags):
"""Constructs a fedjax.Optimizer from flags."""
SUPPORTED = ('sgd', 'momentum', 'adam', 'rmsprop', 'adagrad')
def __init__(self,
name: Optional[str] = None,
default_optimizer: str = 'sgd'):
super().__init__(name)
self._enum('optimizer', default_optimizer, self.SUPPORTED, 'Optimizer')
self._float('learning_rate', 0.005, 'Server step size')
# Momentum parameters.
self._float('momentum', 0.0, 'Momentum parameter')
# Adam parameters
self._float('adam_beta1', 0.9, 'Adam beta 1 parameter')
self._float('adam_beta2', 0.99, 'Adam beta 2 parameter')
self._float('adam_epsilon', 1e-3, 'Adam epsilon parameter')
# RMSprop parameters.
self._float('rmsprop_decay', 0.9, 'RMSProp decay parameter')
self._float('rmsprop_epsilon', 1e-3, 'RMSprop epsilon parameter')
# Adagrad parameters.
self._float(
'adagrad_epsilon', 1e-6,
'Adagrad epsilon parameter that is added to second moment' +
self._description)
def get(self) -> optimizers.Optimizer:
"""Gets the specified optimizer."""
optimizer_name = self._get_flag('optimizer')
learning_rate = self._get_flag('learning_rate')
if optimizer_name == 'sgd':
return optimizers.sgd(learning_rate)
elif optimizer_name == 'momentum':
return optimizers.sgd(learning_rate, self._get_flag('momentum'))
elif optimizer_name == 'adam':
return optimizers.adam(learning_rate, self._get_flag('adam_beta1'),
self._get_flag('adam_beta2'),
self._get_flag('adam_epsilon'))
elif optimizer_name == 'rmsprop':
return optimizers.rmsprop(learning_rate, self._get_flag('rmsprop_decay'),
self._get_flag('rmsprop_epsilon'))
elif optimizer_name == 'adagrad':
return optimizers.adagrad(
learning_rate, eps=self._get_flag('adagrad_epsilon'))
else:
raise ValueError(f'Unsupported optimizer {optimizer_name!r} from '
f'--{self._prefix}optimizer.')
class ShuffleRepeatBatchHParamsFlags(NamedFlags):
"""Constructs ShuffleRepeatBatchHParams from flags."""
def __init__(self, name: Optional[str] = None, default_batch_size: int = 128):
super().__init__(name)
defaults = client_datasets.ShuffleRepeatBatchHParams(batch_size=-1)
# TODO(wuke): Support other fields.
self._integer('batch_size', default_batch_size, 'Batch size')
self._integer('num_epochs', defaults.num_epochs, 'Number of epochs')
self._integer('num_steps', defaults.num_steps, 'Number of steps')
def get(self):
return client_datasets.ShuffleRepeatBatchHParams(
batch_size=self._get_flag('batch_size'),
num_epochs=self._get_flag('num_epochs'),
num_steps=self._get_flag('num_steps'))
class PaddedBatchHParamsFlags(NamedFlags):
"""Constructs PaddedBatchHParams from flags."""
def __init__(self, name: Optional[str] = None, default_batch_size: int = 128):
super().__init__(name)
# TODO(wuke): Support other fields.
self._integer('batch_size', default_batch_size, 'Batch size')
def get(self):
return client_datasets.PaddedBatchHParams(
batch_size=self._get_flag('batch_size'))
class BatchHParamsFlags(NamedFlags):
"""Constructs BatchHParams from flags."""
def __init__(self, name: Optional[str] = None, default_batch_size: int = 128):
super().__init__(name)
# TODO(wuke): Support other fields.
self._integer('batch_size', default_batch_size, 'Batch size')
def get(self):
return client_datasets.BatchHParams(batch_size=self._get_flag('batch_size'))
class FederatedExperimentConfigFlags(NamedFlags):
"""Constructs FederatedExperimentConfig from flags."""
def __init__(self, name: Optional[str] = None):
super().__init__(name)
defaults = federated_experiment.FederatedExperimentConfig(
root_dir='', num_rounds=-1)
self._string('root_dir', None, 'Root directory of experiment outputs')
self._integer('num_rounds', None, 'Number of federated training rounds')
self._integer(
'checkpoint_frequency', defaults.checkpoint_frequency,
'Checkpoint frequency in rounds' +
'. If <= 0, no checkpointing is done.')
self._integer('num_checkpoints_to_keep', defaults.num_checkpoints_to_keep,
'Maximum number of checkpoints to keep')
self._integer(
'eval_frequency', defaults.eval_frequency,
'Evaluation frequency in rounds' + '. If <= 0, no evaluation is done.')
def get(self):
return federated_experiment.FederatedExperimentConfig(
root_dir=self._get_flag('root_dir'),
num_rounds=self._get_flag('num_rounds'),
checkpoint_frequency=self._get_flag('checkpoint_frequency'),
num_checkpoints_to_keep=self._get_flag('num_checkpoints_to_keep'),
eval_frequency=self._get_flag('eval_frequency'))
class TaskFlags(NamedFlags):
"""Constructs a standard task tuple from flags."""
def __init__(self, name: Optional[str] = None):
super().__init__(name)
self._enum('task', None, tasks.ALL_TASKS, 'Which task to run')
self._string('data_mode', 'sqlite', 'Data loading mode')
self._string('cache_dir', None,
'Cache directory when loading SQLite federated data')
def get(self):
return tasks.get_task(
self._get_flag('task'), self._get_flag('data_mode'),
self._get_flag('cache_dir'))
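# Hedged usage sketch (not part of the original module): the shape of an
# experiment binary built on these flag groups. The constructors register the
# (optionally prefixed) flags at import time; .get() is only called after absl
# has parsed argv inside app.run(main). The 'server'/'client' prefixes and the
# module import path are illustrative assumptions.
#
#     from absl import app
#     from fedjax.training import structured_flags
#
#     SERVER_OPTIMIZER = structured_flags.OptimizerFlags('server')
#     CLIENT_BATCH = structured_flags.ShuffleRepeatBatchHParamsFlags('client')
#     TASK = structured_flags.TaskFlags()
#
#     def main(_):
#         optimizer = SERVER_OPTIMIZER.get()   # e.g. --server_optimizer=adam
#         batch_hparams = CLIENT_BATCH.get()   # e.g. --client_batch_size=20
#         task = TASK.get()                    # the standard task tuple
#         ...
#
#     if __name__ == '__main__':
#         app.run(main)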
|
|
import datetime
from django.core.cache import cache
from django.db import models
from django.utils import timezone
from django.contrib.auth.hashers import make_password
from seahub.base.fields import LowerCaseCharField
from seahub.utils import normalize_file_path, normalize_dir_path, gen_token, \
normalize_cache_key
from seahub.utils.ip import get_remote_ip
from seahub.settings import SHARE_ACCESS_PASSWD_TIMEOUT
class AnonymousShare(models.Model):
"""
Model used for sharing repo to unregistered email.
"""
repo_owner = LowerCaseCharField(max_length=255)
repo_id = models.CharField(max_length=36)
anonymous_email = LowerCaseCharField(max_length=255)
token = models.CharField(max_length=25, unique=True)
def _get_cache_key(request, prefix, token):
"""Return cache key of certain ``prefix``. If user is logged in, use
username and token, otherwise use combination of request ip and user agent
and token.
Arguments:
- `prefix`:
"""
if request.user.is_authenticated():
key = normalize_cache_key(request.user.username, 'SharedLink_', token)
else:
ip = get_remote_ip(request)
        # Memcached key length limit is 250 chars, and the user agent may
        # sometimes be long, which will cause an error.
agent = request.META.get('HTTP_USER_AGENT', '')[:150]
key = normalize_cache_key(ip + agent, 'SharedLink_', token)
return key
def set_share_link_access(request, token):
"""Remember which share download/upload links user can access without
providing password.
"""
key = _get_cache_key(request, 'SharedLink_', token)
cache.set(key, True, SHARE_ACCESS_PASSWD_TIMEOUT)
def check_share_link_access(request, token):
"""Check whether user can access share link without providing password.
"""
key = _get_cache_key(request, 'SharedLink_', token)
return cache.get(key, False)
class FileShareManager(models.Manager):
def _add_file_share(self, username, repo_id, path, s_type,
password=None, expire_date=None):
if password is not None:
password_enc = make_password(password)
else:
password_enc = None
token = gen_token(max_length=10)
fs = super(FileShareManager, self).create(
username=username, repo_id=repo_id, path=path, token=token,
s_type=s_type, password=password_enc, expire_date=expire_date)
fs.save()
return fs
def _get_file_share_by_path(self, username, repo_id, path):
fs = list(super(FileShareManager, self).filter(repo_id=repo_id).filter(
username=username).filter(path=path))
if len(fs) > 0:
return fs[0]
else:
return None
def _get_valid_file_share_by_token(self, token):
"""Return share link that exists and not expire, otherwise none.
"""
try:
fs = self.get(token=token)
except self.model.DoesNotExist:
return None
if fs.expire_date is None:
return fs
else:
if timezone.now() > fs.expire_date:
return None
else:
return fs
########## public methods ##########
def create_file_link(self, username, repo_id, path, password=None,
expire_date=None):
"""Create download link for file.
"""
path = normalize_file_path(path)
return self._add_file_share(username, repo_id, path, 'f', password,
expire_date)
def get_file_link_by_path(self, username, repo_id, path):
path = normalize_file_path(path)
return self._get_file_share_by_path(username, repo_id, path)
def get_valid_file_link_by_token(self, token):
return self._get_valid_file_share_by_token(token)
def create_dir_link(self, username, repo_id, path, password=None,
expire_date=None):
"""Create download link for directory.
"""
path = normalize_dir_path(path)
return self._add_file_share(username, repo_id, path, 'd', password,
expire_date)
def get_dir_link_by_path(self, username, repo_id, path):
path = normalize_dir_path(path)
return self._get_file_share_by_path(username, repo_id, path)
def get_valid_dir_link_by_token(self, token):
return self._get_valid_file_share_by_token(token)
class FileShare(models.Model):
"""
Model used for file or dir shared link.
"""
username = LowerCaseCharField(max_length=255, db_index=True)
repo_id = models.CharField(max_length=36, db_index=True)
path = models.TextField()
token = models.CharField(max_length=10, unique=True)
ctime = models.DateTimeField(default=datetime.datetime.now)
view_cnt = models.IntegerField(default=0)
s_type = models.CharField(max_length=2, db_index=True, default='f') # `f` or `d`
password = models.CharField(max_length=128, null=True)
expire_date = models.DateTimeField(null=True)
objects = FileShareManager()
def is_file_share_link(self):
return True if self.s_type == 'f' else False
def is_dir_share_link(self):
return False if self.is_file_share_link() else True
def is_encrypted(self):
return True if self.password is not None else False
class OrgFileShareManager(models.Manager):
def set_org_file_share(self, org_id, file_share):
"""Set a share link as org share link.
Arguments:
- `org_id`:
- `file_share`:
"""
ofs = self.model(org_id=org_id, file_share=file_share)
ofs.save(using=self._db)
return ofs
class OrgFileShare(models.Model):
"""
Model used for organization file or dir shared link.
"""
org_id = models.IntegerField(db_index=True)
file_share = models.OneToOneField(FileShare)
    objects = OrgFileShareManager()
class UploadLinkShareManager(models.Manager):
def create_upload_link_share(self, username, repo_id, path,
password=None, expire_date=None):
path = normalize_dir_path(path)
token = gen_token(max_length=10)
if password is not None:
password_enc = make_password(password)
else:
password_enc = None
uls = super(UploadLinkShareManager, self).create(
username=username, repo_id=repo_id, path=path, token=token,
password=password_enc, expire_date=expire_date)
uls.save()
return uls
def get_valid_upload_link_by_token(self, token):
"""Return upload link that exists and not expire, otherwise none.
"""
try:
fs = self.get(token=token)
except self.model.DoesNotExist:
return None
if fs.expire_date is None:
return fs
else:
if timezone.now() > fs.expire_date:
return None
else:
return fs
class UploadLinkShare(models.Model):
"""
Model used for shared upload link.
"""
username = LowerCaseCharField(max_length=255, db_index=True)
repo_id = models.CharField(max_length=36, db_index=True)
path = models.TextField()
token = models.CharField(max_length=10, unique=True)
ctime = models.DateTimeField(default=datetime.datetime.now)
view_cnt = models.IntegerField(default=0)
password = models.CharField(max_length=128, null=True)
expire_date = models.DateTimeField(null=True)
objects = UploadLinkShareManager()
def is_encrypted(self):
return True if self.password is not None else False
class PrivateFileDirShareManager(models.Manager):
def add_private_file_share(self, from_user, to_user, repo_id, path, perm):
"""
"""
path = normalize_file_path(path)
token = gen_token(max_length=10)
pfs = self.model(from_user=from_user, to_user=to_user, repo_id=repo_id,
path=path, s_type='f', token=token, permission=perm)
pfs.save(using=self._db)
return pfs
def add_read_only_priv_file_share(self, from_user, to_user, repo_id, path):
"""
"""
return self.add_private_file_share(from_user, to_user, repo_id,
path, 'r')
def get_private_share_in_file(self, username, repo_id, path):
"""Get a file that private shared to ``username``.
"""
path = normalize_file_path(path)
ret = super(PrivateFileDirShareManager, self).filter(
to_user=username, repo_id=repo_id, path=path, s_type='f')
return ret[0] if len(ret) > 0 else None
def add_private_dir_share(self, from_user, to_user, repo_id, path, perm):
"""
"""
path = normalize_dir_path(path)
token = gen_token(max_length=10)
pfs = self.model(from_user=from_user, to_user=to_user, repo_id=repo_id,
path=path, s_type='d', token=token, permission=perm)
pfs.save(using=self._db)
return pfs
def get_private_share_in_dir(self, username, repo_id, path):
"""Get a directory that private shared to ``username``.
"""
path = normalize_dir_path(path)
ret = super(PrivateFileDirShareManager, self).filter(
to_user=username, repo_id=repo_id, path=path, s_type='d')
return ret[0] if len(ret) > 0 else None
def get_priv_file_dir_share_by_token(self, token):
return super(PrivateFileDirShareManager, self).get(token=token)
def delete_private_file_dir_share(self, from_user, to_user, repo_id, path):
"""
"""
super(PrivateFileDirShareManager, self).filter(
from_user=from_user, to_user=to_user, repo_id=repo_id,
path=path).delete()
def list_private_share_out_by_user(self, from_user):
"""List files/directories private shared from ``from_user``.
"""
return super(PrivateFileDirShareManager, self).filter(
from_user=from_user)
def list_private_share_in_by_user(self, to_user):
"""List files/directories private shared to ``to_user``.
"""
return super(PrivateFileDirShareManager, self).filter(
to_user=to_user)
def list_private_share_in_dirs_by_user_and_repo(self, to_user, repo_id):
"""List directories private shared to ``to_user`` base on ``repo_id``.
"""
return super(PrivateFileDirShareManager, self).filter(
to_user=to_user, repo_id=repo_id, s_type='d')
class PrivateFileDirShare(models.Model):
from_user = LowerCaseCharField(max_length=255, db_index=True)
to_user = LowerCaseCharField(max_length=255, db_index=True)
repo_id = models.CharField(max_length=36, db_index=True)
path = models.TextField()
token = models.CharField(max_length=10, unique=True)
permission = models.CharField(max_length=5) # `r` or `rw`
s_type = models.CharField(max_length=5, default='f') # `f` or `d`
objects = PrivateFileDirShareManager()
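# Hedged usage sketch (not part of the original module): creating a download
# link for a file and later resolving its token, using only the manager
# methods defined above. All arguments are placeholders.
def _example_create_file_link(username, repo_id, path, password=None):
    """Create a file download link and return its token (sketch)."""
    fs = FileShare.objects.create_file_link(username, repo_id, path,
                                            password=password)
    return fs.token
def _example_resolve_token(token):
    """Return the FileShare for ``token`` if it exists and has not expired."""
    return FileShare.objects.get_valid_file_link_by_token(token)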
|
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
Copyright (c) 2011 Tyler Kennedy <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .topping import Topping
from jawa.constants import String
import traceback
# We can identify almost every class we need just by
# looking for consistent strings.
MATCHES = (
(['Fetching addPacket for removed entity', 'Fetching packet for removed entity'], 'entity.trackerentry'),
(['#%04d/%d%s', 'attribute.modifier.equals.'], 'itemstack'),
(['disconnect.lost'], 'nethandler.client'),
(['Outdated server!', 'multiplayer.disconnect.outdated_client'],
'nethandler.server'),
(['Corrupt NBT tag'], 'nbtcompound'),
([' is already assigned to protocol '], 'packet.connectionstate'),
(
['The received encoded string buffer length is ' \
'less than zero! Weird string!'],
'packet.packetbuffer'
),
(['Data value id is too big'], 'metadata'),
(['X#X'], 'recipe.superclass'),
(['Skipping BlockEntity with id '], 'tileentity.superclass'),
(
['ThreadedAnvilChunkStorage ({}): All chunks are saved'],
'anvilchunkloader'
),
(['has invalidly named property'], 'blockstatecontainer'),
((['HORIZONTAL'], True), 'enumfacing.plane'),
((['bubble'], True), 'particletypes')
)
# Enforce a lower priority on some matches: a class may contain both one of
# these strings and one of the strings above.  When the higher-priority string
# is present we group the class with it, and only fall back to these entries
# when it is not.
MAYBE_MATCHES = (
(['Skipping Entity with id'], 'entity.list'),
)
# In some cases there really isn't a good way to verify that it's a specific
# class and we need to just depend on it coming first (bad!)
# The biome class specifically is an issue because in 18w06a, the old name is
# present in the biome's own class, but the ID is still in the register class.
# This stops being an issue later into 1.13 when biome names become translatable.
# Similarly, in 1.13, "bubble" is ambiguous between the particle class and
# particle list, but the particletypes topping works with the first result in that case.
# In 1.18-pre8, the "Getting block state" message now appears in both rendering
# code and world code, but in both cases the return type is correct.
IGNORE_DUPLICATES = [ "biome.register", "particletypes", "blockstate" ]
def check_match(value, match_list):
exact = False
if isinstance(match_list, tuple):
match_list, exact = match_list
for match in match_list:
if exact:
if value != match:
continue
else:
if match not in value:
continue
return True
return False
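# Illustrative behaviour of check_match (example strings, not real constants):
# in the default substring mode, check_match('Corrupt NBT tag at offset 3',
# ['Corrupt NBT tag']) is True, while an exact entry such as (['HORIZONTAL'], True)
# only matches a constant that is exactly 'HORIZONTAL'.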
def identify(classloader, path, verbose):
"""
    The first pass across the jar will identify all possible classes it
    can, mapping them by the 'type' it implements.
We have limited information available to us on this pass. We can only
check for known signatures and predictable constants. In the next pass,
we'll have the initial mapping from this pass available to us.
"""
possible_match = None
for c in classloader.search_constant_pool(path=path, type_=String):
value = c.string.value
for match_list, match_name in MATCHES:
if check_match(value, match_list):
class_file = classloader[path]
return match_name, class_file.this.name.value
for match_list, match_name in MAYBE_MATCHES:
if check_match(value, match_list):
class_file = classloader[path]
possible_match = (match_name, class_file.this.name.value)
# Continue searching through the other constants in the class
if 'BaseComponent' in value:
class_file = classloader[path]
# We want the interface for chat components, but it has no
# string constants, so we need to use the abstract class and then
# get its first implemented interface.
# As of 20w17a, there is another interface in the middle that we don't
# want, but the interface we do want extends Brigadier's Message interface.
# So, loop up until a good-looking interface is present.
# In other versions, the interface extends Iterable. In some versions, it extends both.
while len(class_file.interfaces) in (1, 2):
parent = class_file.interfaces[0].name.value
if "com/mojang/brigadier" in parent or "java/lang/Iterable" == parent:
break
class_file = classloader[parent]
else:
                # The interface count fell outside the expected range, so we
                # can't walk any further up the hierarchy.
if verbose:
print(class_file, "(parent of " + path + ", BaseComponent) has an unexpected number of interfaces:", class_file.interfaces)
# Just hope for the best with the current class file
return 'chatcomponent', class_file.this.name.value
if value == 'ambient.cave':
# This is found in both the sounds list class and sounds event class.
# However, the sounds list class also has a constant specific to it.
# Note that this method will not work in 1.8, but the list class doesn't exist then either.
class_file = classloader[path]
for c2 in class_file.constants.find(type_=String):
if c2 == 'Accessed Sounds before Bootstrap!':
return 'sounds.list', class_file.this.name.value
else:
return 'sounds.event', class_file.this.name.value
if value == 'piston_head':
# piston_head is a technical block, which is important as that means it has no item form.
# This constant is found in both the block list class and the class containing block registrations.
class_file = classloader[path]
for c2 in class_file.constants.find(type_=String):
if c2 == 'Accessed Blocks before Bootstrap!':
return 'block.list', class_file.this.name.value
else:
return 'block.register', class_file.this.name.value
if value == 'diamond_pickaxe':
# Similarly, diamond_pickaxe is only an item. This exists in 3 classes, though:
# - The actual item registration code
# - The item list class
# - The item renderer class (until 1.13), which we don't care about
class_file = classloader[path]
for c2 in class_file.constants.find(type_=String):
if c2 == 'textures/misc/enchanted_item_glint.png':
# Item renderer, which we don't care about
return
if c2 == 'Accessed Items before Bootstrap!':
return 'item.list', class_file.this.name.value
else:
return 'item.register', class_file.this.name.value
if value in ('Ice Plains', 'mutated_ice_flats', 'ice_spikes'):
            # Finally, biomes. There are several different names that were used for this one biome.
            # The only classes containing them are the list class and the one with registration. Note that the list didn't exist in 1.8.
class_file = classloader[path]
for c2 in class_file.constants.find(type_=String):
if c2 == 'Accessed Biomes before Bootstrap!':
return 'biome.list', class_file.this.name.value
else:
return 'biome.register', class_file.this.name.value
if value == 'minecraft':
class_file = classloader[path]
# Look for two protected final strings
def is_protected_final(m):
return m.access_flags.acc_protected and m.access_flags.acc_final
find_args = {
"type_": "Ljava/lang/String;",
"f": is_protected_final
}
fields = class_file.fields.find(**find_args)
if len(list(fields)) == 2:
return 'identifier', class_file.this.name.value
if value == 'PooledMutableBlockPosition modified after it was released.':
# Keep on going up the class hierarchy until we find a logger,
# which is declared in the main BlockPos class
# We can't hardcode a specific number of classes to go up, as
# in some versions PooledMutableBlockPos extends BlockPos directly,
# but in others have PooledMutableBlockPos extend MutableBlockPos.
# Also, this is the _only_ string constant available to us.
# Finally, note that PooledMutableBlockPos was introduced in 1.9.
# This technique will not work in 1.8.
cf = classloader[path]
logger_type = "Lorg/apache/logging/log4j/Logger;"
while not cf.fields.find_one(type_=logger_type):
if cf.super_.name == "java/lang/Object":
cf = None
break
cf = classloader[cf.super_.name.value]
if cf:
return 'position', cf.this.name.value
if value == 'Getting block state':
# This message is found in Chunk, in the method getBlockState.
# We could also theoretically identify BlockPos from this method,
# but currently identify only allows marking one class at a time.
class_file = classloader[path]
for method in class_file.methods:
for ins in method.code.disassemble():
if ins.mnemonic in ("ldc", "ldc_w"):
if ins.operands[0] == 'Getting block state':
return 'blockstate', method.returns.name
else:
if verbose:
print("Found chunk as %s, but didn't find the method that returns blockstate" % path)
if value == 'particle.notFound':
# This is in ParticleArgument, which is used for commands and
# implements brigadier's ArgumentType<IParticleData>.
class_file = classloader[path]
if len(class_file.interfaces) == 1 and class_file.interfaces[0].name == "com/mojang/brigadier/arguments/ArgumentType":
sig = class_file.attributes.find_one(name="Signature").signature.value
inner_type = sig[sig.index("<") + 1 : sig.rindex(">")][1:-1]
return "particle", inner_type
elif verbose:
print("Found ParticleArgument as %s, but it didn't implement the expected interface" % path)
# May (will usually) be None
return possible_match
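# identify() either returns a (key, class name) pair -- e.g. ('nbtcompound', 'xy'),
# where the obfuscated class name shown here is purely illustrative -- or None when
# nothing in the class could be recognised.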
class IdentifyTopping(Topping):
"""Finds important superclasses needed by other toppings."""
PROVIDES = [
"identify.anvilchunkloader",
"identify.biome.list",
"identify.biome.register",
"identify.block.list",
"identify.block.register",
"identify.blockstatecontainer",
"identify.blockstate",
"identify.chatcomponent",
"identify.entity.list",
"identify.entity.trackerentry",
"identify.enumfacing.plane",
"identify.identifier",
"identify.item.list",
"identify.item.register",
"identify.itemstack",
"identify.metadata",
"identify.nbtcompound",
"identify.nethandler.client",
"identify.nethandler.server",
"identify.packet.connectionstate",
"identify.packet.packetbuffer",
"identify.particle",
"identify.particletypes",
"identify.position",
"identify.recipe.superclass",
"identify.resourcelocation",
"identify.sounds.event",
"identify.sounds.list",
"identify.tileentity.superclass"
]
DEPENDS = []
@staticmethod
def act(aggregate, classloader, verbose=False):
classes = aggregate.setdefault("classes", {})
for path in classloader.path_map.keys():
if not path.endswith(".class"):
continue
result = identify(classloader, path[:-len(".class")], verbose)
if result:
if result[0] in classes:
if result[0] in IGNORE_DUPLICATES:
continue
raise Exception(
"Already registered %(value)s to %(old_class)s! "
"Can't overwrite it with %(new_class)s" % {
"value": result[0],
"old_class": classes[result[0]],
"new_class": result[1]
})
classes[result[0]] = result[1]
if len(classes) == len(IdentifyTopping.PROVIDES):
# If everything has been found, we don't need to keep
# searching, so stop early for performance
break
# Add classes that might not be recognized in some versions
# since the registration class is also the list class
if "sounds.list" not in classes and "sounds.event" in classes:
classes["sounds.list"] = classes["sounds.event"]
if "block.list" not in classes and "block.register" in classes:
classes["block.list"] = classes["block.register"]
if "item.list" not in classes and "item.register" in classes:
classes["item.list"] = classes["item.register"]
if "biome.list" not in classes and "biome.register" in classes:
classes["biome.list"] = classes["biome.register"]
if verbose:
print("identify classes: %s" % classes)
|
|
#!/usr/bin/python
# Purpose: accept table and arguments for run_oozie_workflow.py
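# Example invocation (illustrative values only; the flags are defined in arg_handle()):
#   python run_ingest.py -i incr -t my_table -f updt_ts#timestamp \
#       -l "2016-01-01 00:00:00.000000" -u "2016-01-31 00:00:00.000000" \
#       --cmmn_dt 2016-02-01_00:00:00.000000 -a app -s subapp -e dev -v 1 -g daily_group
# The -f value is parsed as name#type[#rdbms_format[#hadoop_format]].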
import sys, os, fileinput, errno, commands, re, string, envvars, time,getpass
from datetime import datetime
from datetime import timedelta
import shutil
from optparse import OptionParser
import subprocess
from subprocess import Popen, PIPE
def main():
global return_code
return_code = 0
start_line = "".join('*' for i in range(100))
print(start_line)
print("run_ingest.py -> Started : " + datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
table,field,field_type,field_rdbms_format,field_hadoop_format,lower_bound,upper_bound,common_properties,app,sub_app,env,env_ver,group,ingest_type, common_date = arg_handle()
print "field_type=" , field_type
# Get envvars from oozie_common_properties file
envvars.populate(env,env_ver,app,sub_app)
    # Get the final properties file name and path from variables
final_properties = envvars.list['lfs_app_wrk'] + '/' + env + '_' + app.replace("/","_") + '_' + table + '.properties'
# Remove if the file exists
silentremove(final_properties)
# open the final properties file in write append mode
properties_file = open(final_properties, 'wb')
    # Build the table properties file name and path from variables; run_ingest only calls the wf_db_ingest workflow
table_properties = envvars.list['lfs_app_workflows'] + '/wf_db_ingest/' + table + '.properties'
rm_ctlM = "sed -i -e 's/\r$//' "+table_properties
rc,status = commands.getstatusoutput(rm_ctlM)
print("run_ingest.py -> removing ^M characters in file: "+rm_ctlM+" Status:"+str(rc))
# get time stamp to load the table
hdfs_load_ts = "'" + str(common_date).replace("_"," ") +"'"
common_date_tfmt = datetime.strptime(common_date,'%Y-%m-%d_%H:%M:%S.%f')
log_time = common_date_tfmt.strftime('%Y-%m-%d_%H-%M-%S')
log_date = common_date_tfmt.strftime('%Y-%m-%d')
log_folder = envvars.list['lfs_app_logs'] + "/"+log_date
log_file = log_folder +"/run_job-" + group + '_' + log_time + '.log'
envvars.list['hdfs_load_ts'] = hdfs_load_ts
    # Load app-specific environment variables
envvars.load_file(table_properties)
# Concatenate global properties file and table properties file
shutil.copyfileobj(open(common_properties, 'rb'), properties_file)
shutil.copyfileobj(open(table_properties, 'rb'), properties_file)
    # Get database and table names from environment variables
db = envvars.list['hv_db']
table = envvars.list['hv_table']
sys.stdout.flush()
if ingest_type == 'sync':
sourceStats = get_stats_sqoop(table,envvars.list['where_column'])
targetStats = get_stats_impala(db,table, envvars.list['where_column'])
#print("Source Result:"+str(sourceStats))
#print("Target Result:"+str(targetStats))
whereClause = ""
whereHClause = ""
for key in sourceStats:
if key in targetStats:
if sourceStats[key] != targetStats[key]:
if whereClause == "":
whereClause = whereClause + envvars.list['where_column']+"=to_timestamp('"+key+"', 'yyyy-mm-dd hh24:mi:ss.FF')"
else:
whereClause = whereClause + " or " + envvars.list['where_column']+"=to_timestamp('"+key+"', 'yyyy-mm-dd hh24:mi:ss.FF')"
else:
if whereClause == "":
whereClause = whereClause + envvars.list['where_column']+"=to_timestamp('"+key+"', 'yyyy-mm-dd hh24:mi:ss.FF')"
else:
whereClause = whereClause + " or " + envvars.list['where_column']+"=to_timestamp('"+key+"', 'yyyy-mm-dd hh24:mi:ss.FF')"
dynamic_properties = '\n'.join(['\nenv=' + env,
'app=' + app,
'sub_app=' +sub_app,
'group=' +group,
'happ=' + envvars.list['happ'],
'where='+whereClause,
'log_file='+log_file,
'hdfs_load_ts=' + hdfs_load_ts])
elif ingest_type == 'incr':
if field is None:
print("run_ingest.py -> ERROR: Incremental SQOOP cannot be performed with out where column ")
return_code = 2
sys.exit(return_code)
print("run_ingest.py -> DownloadTyp: Partial Download based on where condition ")
        # Check if the lower_bound range is passed from the jobnames.list file
        if lower_bound is None or lower_bound == "":
            # lower_bound range not found; check for the presence of an exclusions file
            lower_bound,upper_bound = get_exception_args(envvars.list['lfs_app_config'],table)
            # lower_bound is still None; get the lower date from the Impala table
            if lower_bound is None and field is not None and db is not None:
lower_bound = get_min_bound_impala(db, table, field,field_type)
if lower_bound is None or lower_bound == "":
print("run_ingest.py -> LowerBound: Cannot be determined. Use Sync option")
return_code = 2
sys.exit(return_code)
else:
print("run_ingest.py -> LowerBound: Min date "+lower_bound+" is determined from Impala table")
elif lower_bound is None and field is None:
print("run_ingest.py -> Arguments error: lower_bound or field or entry in exception file is expected")
return_code = 2
sys.exit(return_code)
else:
print("run_ingest.py -> LowerBound : Min date is determined from exclusions file")
else:
print("run_ingest.py -> LowerBound : Min date is determined from jobnames.list file")
if upper_bound is None or upper_bound == "":
curr_dt = str(datetime.now().date())
if field.strip().lower() == "msrmnt_prd_id":
print "run_ingest.py -> Upper_bound : BDW table date used "+str(curr_dt)
upper_bound = get_bdw_date_from_id(db, curr_dt)
elif field_type.lower() == "timestamp":
upper_bound = str(datetime.strptime(str(datetime.now()), "%Y-%m-%d %H:%M:%S.%f"))
elif field_type.lower() == "int":
upper_bound = '99999999'
print("run_ingest.py -> UpperBound : is 99999999")
else:
upper_bound = curr_dt
print("run_ingest.py -> UpperBound : Max Date is current date")
else:
print("run_ingest.py -> UpperBound : Max Date source is same as Min date")
if field_type.strip().lower() == "timestamp" or field_type.lower() == "":
ingest_special_args = get_ingest_special_args(envvars.list['lfs_app_config'],table)
if "lower_bound_modifier_days" in ingest_special_args:
try:
val = int(ingest_special_args["lower_bound_modifier_days"].strip())
print("run_ingest.py -> LowerBound Modifier:"+str(val))
lower_bound = datetime.strptime(lower_bound, "%Y-%m-%d %H:%M:%S.%f") + timedelta(days=val)
lower_bound = str(lower_bound)
print("run_ingest.py -> LowerBound : updated to "+lower_bound+" from ingest_special.properties file")
except ValueError:
print("lower_bound_modifier is not an int! "+str(ingest_special_args["lower_bound_modifier_days"])+"!")
if field_type.lower() == "timestamp" and envvars.list['datasource'] == "oracle":
lower_bound_f = "to_timestamp('"+lower_bound+"','YYYY-MM-DD HH24:MI:SS.FF')"
upper_bound_f = "to_timestamp('"+upper_bound+"','YYYY-MM-DD HH24:MI:SS.FF')"
else:
lower_bound_f = lower_bound
upper_bound_f = upper_bound
dynamic_properties = '\n'.join(['\nenv=' + env,
'app=' + app,
'sub_app=' +sub_app,
'group=' +group,
'log_file='+log_file,
'happ=' + envvars.list['happ'] ,
'min_bound=' + lower_bound_f,
'max_bound=' + upper_bound_f,
'hdfs_load_ts=' + hdfs_load_ts])
if field_type.lower() == "int":
dynamic_properties = dynamic_properties + '\n ' + "where=${where_column} between ${min_bound} and ${max_bound}"
dynamic_properties = dynamic_properties + '\n ' + "where_hadoop=${where_column} between ${min_bound} and ${max_bound}"
abc_parameter = env+','+env_ver+','+app+','+sub_app+','+group+","+table+','+field+ lower_bound+"to"+upper_bound
elif field_type == None or field_type=="" or field_type.lower() == "date" or field_type.lower() == "timestamp":
field_rdbms_format=determine_default_field_format(field_rdbms_format)
field_hadoop_format=determine_default_field_format(field_hadoop_format)
if field_type.lower() == "timestamp":
field_rdbms_format = '%Y-%m-%d %H:%M:%S.%f'
field_hadoop_format = '%Y-%m-%d %H:%M:%S.%f'
lower_bound_validated=validate_date_format(lower_bound,field_rdbms_format)
upper_bound_validated=validate_date_format(upper_bound,field_rdbms_format)
lower_bound_hadoop=lower_bound_validated.strftime(field_hadoop_format)
upper_bound_hadoop=upper_bound_validated.strftime(field_hadoop_format)
dynamic_properties = '\n'.join([dynamic_properties,
'min_bound_hadoop=' + lower_bound_hadoop,
'max_bound_hadoop=' + upper_bound_hadoop])
if field_type.lower() == "timestamp" and envvars.list['datasource'] == "oracle":
dynamic_properties = dynamic_properties + '\n ' + "where=${where_column} between ${min_bound} and ${max_bound}"
dynamic_properties = dynamic_properties + '\n ' + "where_hadoop=${where_column} between '${min_bound_hadoop}' and '${max_bound_hadoop}'"
else:
dynamic_properties = dynamic_properties + '\n ' + "where=${where_column} between '${min_bound}' and '${max_bound}'"
dynamic_properties = dynamic_properties + '\n ' + "where_hadoop=${where_column} between '${min_bound_hadoop}' and '${max_bound_hadoop}'"
abc_parameter = env+','+env_ver+','+app+','+sub_app+','+group+","+table+','+field+ lower_bound_hadoop +"to"+upper_bound_hadoop
else:
dynamic_properties = dynamic_properties + '\n ' + "where=${where_column} between ${min_bound} and ${max_bound}"
dynamic_properties = dynamic_properties + '\n ' + "where_hadoop=${where_column} between ${min_bound} and ${max_bound}"
abc_parameter = env+','+env_ver+','+app+','+sub_app+','+group+","+table+','+field+ lower_bound+"to"+upper_bound
else:
print("run_ingest.py -> DownloadTyp: Full Download of table ")
dynamic_properties = '\n'.join(['\nenv=' + env,
'app=' + app,
'sub_app=' +sub_app,
'group=' +group,
'log_file='+log_file,
'happ=' + envvars.list['happ'] ,
'min_bound=' + "''",
'max_bound=' + "''",
'min_bound_hadoop=' + "''",
'max_bound_hadoop=' + "''",
'hdfs_load_ts=' + hdfs_load_ts])
dynamic_properties = dynamic_properties + '\n ' + "where=1=1"
if envvars.list['hive_query'].strip().lower() =='hv_ins_stg_fnl_audit.hql':
dynamic_properties = dynamic_properties + '\n ' + "where_hadoop=as_of_date="+hdfs_load_ts
else:
dynamic_properties = dynamic_properties + '\n ' + "where_hadoop=1=1"
abc_parameter = env+','+env_ver+','+app+','+sub_app+','+group+","+table +","
#ABC logging parameter for oozie
#print "env"+ env
#abc_parameter = env+','+env_ver+','+app+','+sub_app+','+group+","+table+','+field+ lower_bound_hadoop +"to"+upper_bound_hadoop
properties_file.write(dynamic_properties)
properties_file.close()
print("run_ingest.py -> CommnPrpty : " + common_properties)
print("run_ingest.py -> TablePrpty : " + table_properties)
print("run_ingest.py -> DynmcPrpty : " + dynamic_properties.replace("\n",", "))
print("run_ingest.py -> FinalPrpty : " + final_properties)
sys.stdout.flush()
# ABC Logging Started
parameter_string=""
if lower_bound is not None and lower_bound != "":
parameter_string = field +" "+lower_bound+ " "+upper_bound
comments = "Properties file name :" +final_properties
abc_line = "|".join([group,"run_ingest.py","python","run_job.py",str(table),parameter_string,"RUNNING",
getpass.getuser(),comments,str(datetime.today())])
print("**ABC_log**->"+abc_line)
abc_parameter = env+','+env_ver+','+app+','+sub_app+','+group+",run_ingest.py"
sys.stdout.flush()
rc = runoozieworkflow(final_properties,abc_parameter)
print "Return-Code:" + str(rc)
if rc > return_code:
return_code = rc
abc_line = "|".join([group,"run_ingest.py","python","run_job.py",str(table),parameter_string,"ENDED",
getpass.getuser(),"return-code:"+str(return_code),str(datetime.today())])
print("**ABC_log**->"+abc_line)
sys.stdout.flush()
print("run_ingest.py -> Ended : " + datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'))
print start_line
print "Return-Code:" + str(return_code)
sys.exit(return_code)
def runoozieworkflow(final_properties,abc_parameter):
#command to trigger oozie script
workflow = envvars.list['hdfs_app_workflows'] + '/wf_db_ingest'
oozie_wf_script = "python " + envvars.list['lfs_global_scripts'] + "/run_oozie_workflow.py " + workflow + ' ' + final_properties +' '+abc_parameter
print("run_ingest.py -> Invoked : " + oozie_wf_script)
sys.stdout.flush()
#rc,status = commands.getstatusoutput(oozie_wf_script)
#print(status)
call = subprocess.Popen(oozie_wf_script.split(' '),stdout=subprocess.PIPE,stderr=subprocess.STDOUT)
while True:
line = call.stdout.readline()
if not line:
break
print line.strip()
sys.stdout.flush()
call.communicate()
print "call returned"+str(call.returncode)
return call.returncode
def silentremove(filename):
try:
os.remove(filename)
except OSError as e: # this would be "except OSError, e:" before Python 2.6
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
            raise # re-raise exception if a different error occurred
def determine_default_field_format(field_format):
#print "field_format = ", field_format
if field_format==None or field_format.strip()=="":
field_format="%Y-%m-%d"
return field_format
def arg_handle():
usage = "usage: run_ingest.py [options]"
parser = OptionParser(usage)
parser.add_option("-i", "--op0",dest="ingest_type",
help="ingest type")
parser.add_option("-t", "--op1", dest="table",
help="environment name")
parser.add_option("-f", "--op2",dest="field",
help="increment field")
parser.add_option("-l", "--op3",dest="lower_bound",
help="increment field min bound")
parser.add_option("-u", "--op4",dest="upper_bound",
help="increment field max bound")
parser.add_option("--cmmn_dt", dest="common_date",
help="application name")
parser.add_option("-a", "--app", dest="app",
help="application name")
parser.add_option("-s", "--subapp", dest="sub_app",
help="application name")
parser.add_option("-e", "--env", dest="env",
help="environment name")
parser.add_option("-v", "--env_ver", dest="env_ver",
help="environment name")
parser.add_option("-g", "--group", dest="group",
help="environment name")
(options, args) = parser.parse_args()
print("run_ingest.py -> Input : " + str(options))
if options.table == "":
parser.error("Argument, table_name, is required.")
return_code = 10
sys.exit(return_code)
table = options.table.lower()
field = options.field
field_name_type_fmt = None
field_name = None
field_type = None
field_rdbms_format=None
field_hadoop_format=None
field_delimiter = "#"
#print "field = ", field
if field is not None:
field = field.replace('\s',' ')
field_name_type_fmt=field.split(field_delimiter)
#print "field_name_type_fmt = ", field_name_type_fmt
field_name=field_name_type_fmt[0]
field_type=""
#print "len field_name_type_fmt = ", len(field_name_type_fmt)
if len(field_name_type_fmt) >=2:
field_type=field_name_type_fmt[1]
if len(field_name_type_fmt) >=3:
field_rdbms_format=field_name_type_fmt[2]
if len(field_name_type_fmt) >=4:
field_hadoop_format=field_name_type_fmt[3]
source = '/cloudera_nfs1/config/oozie_global.properties'
lower_bound = options.lower_bound
if lower_bound is not None:
lower_bound = lower_bound.replace('\s',' ')
upper_bound = options.upper_bound
if upper_bound is not None:
upper_bound = upper_bound.replace('\s',' ')
group = options.group
abc_line = "|".join([group,"run_ingest.py","python","run_job.py",str(table),str(options),"STARTED",
getpass.getuser(),"run_ingest started..",str(datetime.today())])
print("**ABC_log**->"+abc_line)
sys.stdout.flush()
return table,field_name,field_type,field_rdbms_format,field_hadoop_format,lower_bound,upper_bound,source,options.app,options.sub_app,options.env,options.env_ver,group, options.ingest_type.lower(), options.common_date.strip()
def get_ingest_special_args(app_config_folder,table_name):
print("run_ingest.py -> Checking for special properties for table "+table_name+' in '+ app_config_folder+'/ingest_special.properties')
ingest_special_args = {}
try:
with open(app_config_folder+'/ingest_special.properties') as fin:
for line in fin:
args = line.split('|')
if args[0].strip().lower() == table_name:
print("run_ingest.py -> Spl Prp Fnd: "+line)
if len(args) >= 3:
ingest_special_args[args[1]] = args[2]
elif len(args) == 2:
ingest_special_args[args[1]] = ''
else:
print("run_ingest.py -> Spl Prp NF :No argument found for table"+table_name)
except IOError as e:
if e.errno != errno.ENOENT:
return_code = 10
raise IOError("exception file reading error")
sys.exit(return_code)
return ingest_special_args
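# Based on the parsing above, ingest_special.properties is pipe-delimited, one
# table per line (illustrative): my_table|lower_bound_modifier_days|-2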
def get_exception_args(app_config_folder,table_name):
try:
with open(app_config_folder+'/exception.properties') as fin:
for line in fin:
args = line.split(':')
if args[0].strip().lower() == table_name:
if len(args) < 2:
print("lower_bound: not defined in exception file")
return None, None
if len(args) == 2:
return args[1].strip(), args[1].strip()
else:
return args[1].strip(), args[2].strip()
return None, None
except IOError as e:
if e.errno != errno.ENOENT:
return_code = 10
raise IOError("exception file reading error")
sys.exit(return_code)
else:
return None, None
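# Based on the parsing above, exception.properties is colon-delimited (illustrative):
#   my_table:2016-01-01              -> lower bound only (upper bound defaults to it)
#   my_table:2016-01-01:2016-01-31   -> explicit lower and upper bounds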
def get_bdw_date_from_id(db_name,curr_dt):
impala_cmd = envvars.list['impalaConnect'] +' "invalidate metadata ' + db_name + '.msrmnt_prd;select msrmnt_prd_id from '+db_name + ".msrmnt_prd where msrmnt_prd_dt = '"+curr_dt+"'"+';"'
rc, output = commands.getstatusoutput(impala_cmd)
outputlist = output.split('\n')
if rc == 0:
max_date_str = outputlist[-1].strip()
validate_int(max_date_str)
return max_date_str
else:
print "run_ingest.py -> ERROR : " + db_name + ".msrmnt_prd table needs to be sqooped first before determining the current partition value for BDW tables"
sys.exit(9)
return None
def get_bdw_id_from_date(db_name,prd_id):
impala_cmd = envvars.list['impalaConnect'] +' "invalidate metadata ' + db_name + '.msrmnt_prd;select msrmnt_prd_dt from '+db_name + ".msrmnt_prd where msrmnt_prd_id = "+prd_id+';"'
rc, output = commands.getstatusoutput(impala_cmd)
outputlist = output.split('\n')
if rc == 0:
max_date_str = outputlist[-1].strip()
validate_date(max_date_str)
return max_date_str
else:
print "run_ingest.py -> ERROR : " + db_name + ".msrmnt_prd table needs to be sqooped first before determining the current partition value for BDW tables"
sys.exit(9)
return None
def get_stats_sqoop(table,field):
#table_sqoop_query = envvars.list['lfs_app_workflows'] + '/wf_db_ingest/' + table + '.sqoop'
#query = ""
#with open (table_sqoop_query, "r") as myfile:
# query=myfile.read()
query = " ".join(['select count(*),' + field ,
'from ' + envvars.list['table'] ,
'where \$CONDITIONS'
'group by '+field,
'order by '+field])
targetDir = "temp/sqoop/datasource/"
status = os.system("hdfs dfs -rm -r " + targetDir)
sqoopParams="sqoop^import^-Dhadoop.security.credential.provider.path=" + envvars.list['password_key_provider'] + \
"^-Doozie.sqoop.log.level=ERROR" \
"^--connect^\"" + envvars.list['jdbc_connect'] + \
"\"^--query^\"" + query + "\"" \
"^--m^1^" + \
"--target-dir^" + targetDir + \
"^--username^" + envvars.list['username'] + \
"^--password-alias^\"" + envvars.list['password_alias'] + "\""
print("getmetadata.py -> Invoked : " + " ".join(sqoopParams.split('^')))
rc, status = commands.getstatusoutput(" ".join(sqoopParams.split('^')))
#while True:
# line = call.stdout.readline()
# if not line:
# break
# print line.strip()
# sys.stdout.flush()
#call.communicate()
#status=call.returncode
print status
print "getmetadata.py -> sqoop status = ", rc
if rc != 0:
print "getmetadata.py -> Getting Metadata failed..."
sys.exit(1)
#Get Final Properties final name and path from variables
outputFilePath = envvars.list['lfs_app_wrk'] + '/' + envvars.list['app'] + '_' + table + '.stats'
silentremove(outputFilePath)
#os.system( "rm " + outputFilePath);
os.system( "hdfs dfs -get " + targetDir + "/part-m-00000 " + outputFilePath)
os.system( "chmod 777 " + outputFilePath)
print("getmetadata.py -> MetadataLocation : " + outputFilePath)
result={}
with open(outputFilePath, "r") as f:
for line in f:
(val, key) = line.replace('\n','').split(',')
result[str(key)] = val
return result
def get_stats_impala(db_name, table_name,field):
impala_cmd = " ".join([envvars.list['impalaConnect'] ,
' "invalidate metadata ' ,
db_name + '.' + table_name ,
'; select count(*),' + field ,
'from ' + db_name + '.' + table_name ,
'group by '+field,
'order by '+field+'"'])
rc, output = commands.getstatusoutput(impala_cmd)
result={}
f = output.split('\n')
print("impala output:"+output+str(f))
f=f[4:]
for line in f:
(val, key) = line.split('\t')
result[str(key)] = val
return result
def get_min_bound_impala(db_name, table_name,field,field_type):
#if field_type == "timestamp":
# field_f = "to_date("+field+")"
#else:
field_f = field
impala_cmd = envvars.list['impalaConnect'] +' "invalidate metadata ' + db_name + '.' + table_name + '; select Max(' + field_f + ') from ' + db_name + '.' + table_name + '"'
rc, output = commands.getstatusoutput(impala_cmd)
outputlist = output.split('\n')
if rc == 0:
max_date_str = outputlist[-1].strip()
if field_type == 'int' and max_date_str == "":
max_date_str = "0"
return max_date_str
elif field_type == "" and max_date_str =="":
max_date_str = "1900-01-01"
return max_date_str
if field.strip().lower() == "msrmnt_prd_id":
min_bound = datetime.strptime(get_bdw_id_from_date(db_name, max_date_str), "%Y-%m-%d") + timedelta(days=1)
print "run_ingest.py -> Lower_bound : BDW table date used "+str(min_bound.date())
return get_bdw_date_from_id(db_name, str(min_bound.date()))
if field_type == 'int':
validate_int(max_date_str)
return max_date_str
else:
if field_type == "timestamp":
validate_timestamp(max_date_str)
min_bound = datetime.strptime(max_date_str, "%Y-%m-%d %H:%M:%S.%f") + timedelta(milliseconds=.001)
else:
validate_date(max_date_str)
min_bound = datetime.strptime(max_date_str, "%Y-%m-%d") + timedelta(days=1)
min_bound =min_bound.date()
return str(min_bound)
else:
return None
def validate_int(int_text):
try:
num=int(int_text)
except:
return_code = 10
raise ValueError("Incorrect data format, should be int but is " + max_date_str)
sys.exit(return_code)
def validate_ped(date_text):
try:
datetime.strptime(date_text, '%Y%m')
except ValueError:
return_code = 10
raise ValueError("Incorrect data format, should be YYYYMM/YYYY-MM-DD but is " + date_text)
sys.exit(return_code)
def validate_date(date_text):
try:
datetime.strptime(date_text, '%Y-%m-%d')
except ValueError:
validate_date_format(date_text,"%d-%b-%Y")
def validate_timestamp(date_text):
try:
datetime.strptime(date_text, '%Y-%m-%d %H:%M:%S.%f')
except ValueError:
return_code = 10
raise ValueError("Incorrect data format, should be YYYY-MM-DD HH:MI:SS.FF but is " + date_text)
sys.exit(return_code)
def validate_date_format(date_text,dateFormat):
try:
return datetime.strptime(date_text, dateFormat)
except ValueError:
print "validate_date_format():: ", date_text, " dateFormat=" , dateFormat
validate_ped(date_text)
if __name__ == "__main__":
main()
|
|
#!/usr/bin/env python
# Standard packages
import os
import sys
import getpass
import argparse
import configuration
import subprocess as sub
from toil.job import Job
from toil.job import JobException
from cassandra.auth import PlainTextAuthProvider
def run_and_log_command(command, logfile):
"""This function uses the python subprocess method to run the specified command and writes all error to the
specified logfile
:param command: The command-line command to execute.
:type name: str.
:param logfile: The logfile to output error messages to.
:type logfile: str.
:returns: Nothing
:raises: RuntimeError
"""
with open(logfile, "wb") as err:
sys.stdout.write("Executing {} and writing to logfile {}\n".format(command, logfile))
err.write("Command: {}\n".format(command))
p = sub.Popen(command, stdout=sub.PIPE, stderr=err, shell=True)
output = p.communicate()
code = p.returncode
if code:
raise RuntimeError("An error occurred when executing the commandline: {}. "
"Please check the logfile {} for details\n".format(command, logfile))
def spawn_batch_jobs(job):
"""
This is simply a placeholder root job for the workflow
"""
job.fileStore.logToMaster("Initializing workflow\n")
def run_fastqc(job, config, samples):
"""Run FastQC on provided FastQ files
:param config: The configuration dictionary.
:type config: dict.
:param samples: Samples dictionary
    :type samples: dict.
"""
job.fileStore.logToMaster("Running FastQC for all samples\n")
logfile = "fastqc.log"
fastq_files_list = list()
for sample in samples:
fastq_files_list.append(samples[sample]['fastq1'])
fastq_files_list.append(samples[sample]['fastq2'])
fastq_files_string = " ".join(fastq_files_list)
command = ["{}".format(config['fastqc']['bin']),
"{}".format(fastq_files_string),
"--extract"]
job.fileStore.logToMaster("FastQC Command: {}\n".format(command))
run_and_log_command(" ".join(command), logfile)
def run_bwa_mem(job, config, name, samples):
"""Run GATK's DiagnoseTargets against the supplied region
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param fastq1: Input FastQ File.
:type fastq1: str.
:param fastq2: Input FastQ File.
:type fastq2: str.
:returns: str -- Aligned and sorted BAM file name.
"""
job.fileStore.logToMaster("Running BWA for sample {}\n".format(name))
output_bam = "{}.bwa.sorted.bam".format(name)
temp = "{}.bwa.sort.temp".format(name)
logfile = "{}.bwa-align.log".format(name)
bwa_cmd = ["{}".format(config['bwa']['bin']),
"mem",
"-t",
"{}".format(config['bwa']['num_cores']),
"-M",
"-v",
"2",
"{}".format(config['reference']),
"{}".format(samples[name]['fastq1']),
"{}".format(samples[name]['fastq2'])]
view_cmd = ["{}".format(config['samtools']['bin']),
"view",
"-u",
"-"]
sort_cmd = ["{}".format(config['samtools']['bin']),
"sort",
"-@",
"{}".format(config['bwa']['num_cores']),
"-O",
"bam",
"-o",
"{}".format(output_bam),
"-T",
"{}".format(temp),
"-"]
command = "{} | {} | {}".format(" ".join(bwa_cmd), " ".join(view_cmd), " ".join(sort_cmd))
job.fileStore.logToMaster("BWA Command: {}\n".format(command))
run_and_log_command(command, logfile)
return output_bam
def add_or_replace_readgroups(job, config, name, input_bam):
"""Run Picard's AddOrReplaceReadGroups on the specified BAM
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The output bam file name.
"""
job.fileStore.logToMaster("Running AddOrReplaceReadGroups in sample: {}".format(name))
output_bam = "{}.rg.sorted.bam".format(name)
logfile = "{}.addreadgroups.log".format(name)
index_log = "{}.buildindex.log".format(name)
command = ["{}".format(config['picard-add']['bin']),
"AddOrReplaceReadGroups",
"INPUT={}".format(input_bam),
"OUTPUT={}".format(output_bam),
"RGID={}".format(name),
"RGSM={}".format(name),
"RGLB={}".format(name),
"RGPL=illumina",
"RGPU=miseq"]
command2 = ["{}".format(config['picard-add']['bin']),
"BuildBamIndex",
"INPUT={}".format(output_bam)]
job.fileStore.logToMaster("GATK AddOrReplaceReadGroupsCommand Command: {}\n".format(command))
run_and_log_command(" ".join(command), logfile)
job.fileStore.logToMaster("GATK BuildBamIndex Command: {}\n".format(command2))
run_and_log_command(" ".join(command2), index_log)
return output_bam
def realign_target_creator(job, config, name, input_bam):
"""Run GATK TargetCreator on the specified BAM to identify targets for realignment
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The file name of the targets file.
"""
targets = "{}.targets.intervals".format(name)
targets_log = "{}.targetcreation.log".format(name)
command = ["{}".format(config['gatk-realign']['bin']),
"-T",
"RealignerTargetCreator",
"-R",
"{}".format(config['reference']),
"-I",
"{}".format(input_bam),
"-o",
"{}".format(targets),
"-known",
"{}".format(config['indel1']),
"-known",
"{}".format(config['indel2']),
"-nt",
"{}".format(config['gatk-realign']['num_cores'])
]
job.fileStore.logToMaster("GATK RealignerTargetCreator Command: {}\n".format(command))
run_and_log_command(" ".join(command), targets_log)
return targets
def realign_indels(job, config, name, input_bam, targets):
"""Run GATK Indel Realignment on the specified BAM
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:param targets: The file name of targets to realign.
:type targets: str.
:returns: str -- The output bam file name.
"""
output_bam = "{}.realigned.sorted.bam".format(name)
realign_log = "{}.realignindels.log".format(name)
command = ["{}".format(config['gatk-realign']['bin']),
"-T",
"IndelRealigner",
"-R",
"{}".format(config['reference']),
"-I",
"{}".format(input_bam),
"-known",
"{}".format(config['indel1']),
"-known",
"{}".format(config['indel2']),
"-targetIntervals",
"{}".format(targets),
"--read_filter",
"NotPrimaryAlignment",
"-o",
"{}".format(output_bam)]
job.fileStore.logToMaster("GATK IndelRealigner Command: {}\n".format(command))
run_and_log_command(" ".join(command), realign_log)
return output_bam
def recalibrator(job, config, name, input_bam):
"""Run GATK Recalibrator on the specified BAM
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The output bam file name.
"""
output_bam = "{}.recalibrated.sorted.bam".format(name)
recal_config = "{}.recal".format(name)
recal_log = "{}.recalibrate.log".format(name)
print_log = "{}.printrecalibrated.log".format(name)
cp_log = "{}.copy.log".format(name)
# Calculate covariates
recal_commands = ["java -Xmx8g -jar /mnt/shared-data/anaconda2/envs/ddb/bin/GenomeAnalysisTK.jar",
#"{}".format(config['gatk-recal']['bin']),
"-T",
"BaseRecalibrator",
"-R",
"{}".format(config['reference']),
"-I",
"{}".format(input_bam),
"-o",
"{}".format(recal_config),
"--knownSites",
"{}".format(config['dbsnp']),
"-nct",
"{}".format(config['gatk-recal']['num_cores'])]
# Print recalibrated BAM
print_reads_command = ["{}".format(config['gatk-recal']['bin']),
"-T",
"PrintReads",
"-R",
"{}".format(config['reference']),
"-I",
"{}".format(input_bam),
"-o",
"{}".format(output_bam),
"-BQSR",
"{}".format(recal_config),
"-nct",
"{}".format(config['gatk-recal']['num_cores'])]
# Copy index to alternative name
cp_command = ["cp",
"{}.recalibrated.sorted.bai".format(name),
"{}.recalibrated.sorted.bam.bai".format(name)]
job.fileStore.logToMaster("GATK BaseRecalibrator Command: {}\n".format(recal_commands))
run_and_log_command(" ".join(recal_commands), recal_log)
job.fileStore.logToMaster("GATK PrintReads Command: {}\n".format(print_reads_command))
run_and_log_command(" ".join(print_reads_command), print_log)
job.fileStore.logToMaster("GATK Copy Command: {}\n".format(cp_command))
run_and_log_command(" ".join(cp_command), cp_log)
return output_bam
def sambamba_region_coverage(job, config, name, samples, input_bam):
"""Run SamBambam to calculate the coverage of targeted regions
:param config: The configuration dictionary.
:type config: dict.
:param name: sample/library name.
:type name: str.
:param input_bam: The input_bam file name to process.
:type samples: dict
:param samples: The samples configuration dictionary
:type input_bam: str.
:returns: str -- The output BED file name.
"""
output = "{}.sambamba_coverage.bed".format(name)
logfile = "{}.sambamba_coverage.log".format(name)
command = ["{}".format(config['sambamba']['bin']),
"depth region",
"-L",
"{}".format(samples[name]['regions']),
"-t",
"{}".format(config['sambamba']['num_cores']),
"-T",
"{}".format(config['coverage_threshold']),
"-T",
"{}".format(config['coverage_threshold2']),
"{}".format(input_bam),
">",
"{}".format(output)]
job.fileStore.logToMaster("SamBamba Coverage Command: {}\n".format(command))
run_and_log_command(" ".join(command), logfile)
return output
def freebayes_single(job, config, name, input_bam):
"""Run FreeBayes without a matched normal sample
:param config: The configuration dictionary.
:type config: dict.
:param name: sample name.
:type name: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The output vcf file name.
"""
freebayes_vcf = "{}.freebayes.vcf".format(name)
logfile = "{}.freebayes.log".format(name)
command = ["{}".format(config['freebayes']['bin']),
"--fasta-reference",
"{}".format(config['reference']),
"--min-alternate-fraction",
"{}".format(config['min_alt_af']),
"--pooled-discrete",
"--pooled-continuous",
"--genotype-qualities",
"--report-genotype-likelihood-max",
"--allele-balance-priors-off",
"--use-duplicate-reads",
"--min-repeat-entropy 1",
"-v",
"{}".format(freebayes_vcf),
"{}".format(input_bam)]
job.fileStore.logToMaster("FreeBayes Command: {}\n".format(command))
run_and_log_command(" ".join(command), logfile)
return freebayes_vcf
def mutect_single(job, config, name, samples, input_bam):
"""Run MuTect on an an unmatched tumour sample and call somatic variants
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param samples: samples configuration dictionary
:type samples: dict
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The output vcf file name.
"""
mutect_vcf = "{}.mutect.vcf".format(name)
temp_mutect = "{}.tempmutect.vcf".format(name)
output_stats = "{}.mutectstats.txt".format(name)
sample_coverage = "{}.mutectcoverage.wig.txt".format(name)
mutect_logfile = "{}.mutect.log".format(name)
subset_log = "{}.mutect_subset.log".format(name)
mutect_command = ["{}".format(config['mutect']['bin']),
"-T",
"MuTect",
"-R",
"{}".format(config['reference']),
"--dbsnp",
"{}".format(config['dbsnp']),
"--cosmic",
"{}".format(config['cosmic']),
"--enable_extended_output",
"-I:tumor",
"{}".format(input_bam),
"--coverage_file",
"{}".format(sample_coverage),
"-L",
"{}".format(samples[name]['regions']),
"-isr",
"INTERSECTION",
"-im",
"ALL",
"-dt",
"NONE",
"-o",
"{}".format(output_stats),
"-vcf",
"{}".format(temp_mutect)]
subset_command = ["cat",
"{}".format(temp_mutect),
"|",
"{}".format(config['vcftools_subset']['bin']),
"-e",
"-c",
"{}".format(name),
">",
"{}".format(mutect_vcf)]
job.fileStore.logToMaster("MuTect Command: {}\n".format(mutect_command))
run_and_log_command(" ".join(mutect_command), mutect_logfile)
job.fileStore.logToMaster("Subset Command: {}\n".format(subset_command))
run_and_log_command(" ".join(subset_command), subset_log)
return mutect_vcf
def platypus_single(job, config, name, samples, input_bam):
"""Run Platypus on an an unmatched tumour sample and call somatic variants
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The output vcf file name.
"""
platypus_vcf = "{}.platypus.vcf".format(name)
platypus_log = "{}.platypus.log".format(name)
internal_log = "{}.platypus_internal.log".format(name)
platypus_command = ["{}".format(config['platypus']['bin']),
"callVariants",
"--refFile={}".format(config['reference']),
"--regions={}".format(samples[name]['regions']),
"--assemble=1",
"--assembleBadReads=1",
"--assembleBrokenPairs=1",
"--filterDuplicates=0",
"--minVarFreq={}".format(config['min_alt_af']),
"--nCPU={}".format(config['platypus']['num_cores']),
"--logFileName={}".format(internal_log),
"--bamFiles={}".format(input_bam),
"--output={}".format(platypus_vcf)]
job.fileStore.logToMaster("Platypus Command: {}\n".format(platypus_command))
run_and_log_command(" ".join(platypus_command), platypus_log)
return platypus_vcf
def scalpel_single(job, config, name, samples, input_bam):
"""Run Scalpel on an an unmatched tumour sample and call somatic variants
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The output vcf file name.
"""
cwd = os.getcwd()
output_dir = os.path.join(cwd, "{}-scalpel-output".format(name))
scalpel_vcf = os.path.join(output_dir, "variants.indel.vcf")
fixed_vcf = "{}.scalpel.vcf".format(name)
logfile = "{}.scalpel.log".format(name)
logfile2 = "{}.scalpel_fix.log".format(name)
scalpel_command = ["{}".format(config['scalpel']['bin']),
"--single",
"--intarget",
# "--covthr",
# "3",
# "--lowcov",
# "1",
"--ref",
"{}".format(config['reference']),
"--bed",
"{}".format(samples[name]['regions']),
"--format",
"vcf",
"--numprocs",
"{}".format(config['scalpel']['num_cores']),
"--bam",
"{}".format(input_bam),
"--dir",
"{}".format(output_dir)]
fix_sample_name_command = ["cat",
"{}".format(scalpel_vcf),
"|",
"sed",
"'s/sample/{}/g'".format(name),
">",
"{}".format(fixed_vcf)]
job.fileStore.logToMaster("Scalpel Command: {}\n".format(scalpel_command))
run_and_log_command(" ".join(scalpel_command), logfile)
job.fileStore.logToMaster("Scalpel Fix Command: {}\n".format(fix_sample_name_command))
run_and_log_command(" ".join(fix_sample_name_command), logfile2)
file_path = os.path.join(cwd, fixed_vcf)
if os.path.exists(file_path) and os.path.getsize(file_path) > 0:
return scalpel_vcf
else:
job.fileStore.logToMaster("Scalpel ran into a problem and no output was generated for file {}. Check logfile"
"{} for details\n".format(scalpel_vcf, logfile))
return JobException("Scalpel ran into a problem and no output was generated for file {}. Check logfile"
"{} for details\n".format(scalpel_vcf, logfile))
def vardict_single(job, config, name, samples, input_bam):
"""Run VarDict on an an unmatched tumour sample and call somatic variants
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The output vcf file name.
"""
vardict_vcf = "{}.vardict.vcf".format(name)
logfile = "{}.vardict.log".format(name)
vardict = ["{}".format(config['vardict']['bin']),
"-G",
"{}".format(config['reference']),
"-z",
"-c",
"1",
"-S",
"2",
"-E",
"3",
"-g",
"4",
"-B",
"{}".format(config['vardict']['num_cores']),
# "-a", the amplicon flag seems to be creating errors
# "-F 0", Probably don't need this as duplicates aren't marked and ignoring secondary alignment good
"-f",
"{}".format(config['min_alt_af']),
"-N",
"{}".format(name),
"-b",
"{}".format(input_bam),
"{}".format(samples[name]['regions'])]
vardict2vcf = ["{}".format(config['vardict2vcf']['bin']),
"-E",
"-f",
"{}".format(config['min_alt_af']),
"-N",
"{}".format(name)]
vcfsort = ["{}".format(config['vcftools_sort']['bin']),
"-c"]
command = ("{vardict} | {strandbias} | {vardict2vcf} | "
"{sort} > {vcf}".format(vardict=" ".join(vardict), strandbias=config['vardict_strandbias']['bin'],
vardict2vcf=" ".join(vardict2vcf), sort=" ".join(vcfsort), vcf=vardict_vcf))
job.fileStore.logToMaster("VarDict Command: {}\n".format(command))
run_and_log_command(command, logfile)
return vardict_vcf
def run_pindel(job, config, name, input_bam):
"""Run Pindel caller for InDel Detection
:param config: The configuration dictionary.
:type config: dict.
:param name: sample name.
:type name: str..
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The output vcf file name.
"""
pindel_config = "{}.pindel_config.txt".format(name)
output_dir = "{}_pindel".format(name)
output_vcf = "{}.pindel.vcf".format(name)
logfile = "{}.pindel.log".format(name)
vcf_logfile = "{}.pindel2vcf.log".format(name)
with open(pindel_config, 'w') as bam_config:
bam_config.write("%s %s %s\n" % (input_bam, config['insert_size'], name))
command = ("{}".format(config['pindel']['bin']),
"-f",
"{}".format(config['reference']),
"-c",
"ALL",
"-w",
"{}".format(config['pindel']['window']),
"-E",
"{}".format(config['pindel']['sensitivity']),
"-T",
"{}".format(config['pindel']['num_cores']),
"-o",
"{}".format(output_dir),
"-i",
"{}".format(pindel_config))
pindel2vcf_command = ("{}".format(config['pindel2vcf']['bin']),
"-r",
"{}".format(config['reference']),
"-R",
"{}".format(config['snpeff']['reference']),
"-d",
"{}".format(config['snpeff']['reference']),
"-he",
"0.01",
"-G",
"-P",
"{}".format(output_dir),
"-v",
"{}".format(output_vcf))
job.fileStore.logToMaster("Pindel Command: {}\n".format(command))
run_and_log_command(" ".join(command), logfile)
job.fileStore.logToMaster("Pindel2vcf Command: {}\n".format(pindel2vcf_command))
run_and_log_command(" ".join(pindel2vcf_command), vcf_logfile)
return output_vcf
def vt_normalization(job, config, sample, caller, input_vcf):
"""Decompose and left normalize variants
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
    :param caller: caller name.
    :type caller: str.
:param input_vcf: The input_vcf file name to process.
:type input_vcf: str.
:returns: str -- The output vcf file name.
"""
output_vcf = "{}.{}.normalized.vcf".format(sample, caller)
logfile = "{}.{}.vt_normalization.log".format(sample, caller)
normalization = ["zless",
"{}".format(input_vcf),
"|",
"sed",
"'s/ID=AD,Number=./ID=AD,Number=R/'",
"|",
"{}".format(config['vt']['bin']),
"decompose",
"-s",
"-",
"|",
"{}".format(config['vt']['bin']),
"normalize",
"-r",
"{}".format(config['reference']),
"-",
">",
"{}".format(output_vcf)]
job.fileStore.logToMaster("VT Command: {}\n".format(normalization))
run_and_log_command(" ".join(normalization), logfile)
return output_vcf
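# Note: in the pipeline above, "vt decompose -s" splits multi-allelic records into
# bi-allelic ones and "vt normalize" left-aligns and trims indels against the
# reference, which keeps the per-caller VCFs comparable before merging.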
def merge_variant_calls(job, config, sample, callers, vcf_files):
"""Merge variant calls from multiple variant callers
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param callers: Comma-separated list of VCF callers to tag the ensemble output. Must be in same order as vcf_files.
    :type callers: str.
:param vcf_files: List of input vcf files for merging.
:type vcf_files: list.
:returns: str -- The output vcf file name.
"""
merged_vcf = "{}.merged.vcf.gz".format(sample)
uncompressed_vcf = "{}.merged.vcf".format(sample)
sorted_vcf = "{}.merged.sorted.vcf".format(sample)
logfile1 = "{}.merging.log".format(sample)
logfile2 = "{}.uncompress-merging.log".format(sample)
logfile3 = "{}.merged_sort.log".format(sample)
vcf_files_string = " ".join(vcf_files)
command = ["{}".format(config['ensemble']['bin']),
"ensemble",
"-c",
"{}".format(config['ensemble']['num_cores']),
"--numpass",
"1",
"--names",
"{}".format(callers),
"{}".format(merged_vcf),
"{}".format(config['reference']),
"{}".format(vcf_files_string)]
command2 = ["bgzip",
"-cd",
"{}".format(merged_vcf),
">",
"{}".format(uncompressed_vcf)]
command3 = ["{}".format(config['picard']['bin']),
"SortVcf",
"SEQUENCE_DICTIONARY={}".format(config['dict']),
"OUTPUT={}".format(sorted_vcf),
"INPUT={}".format(uncompressed_vcf)]
sys.stderr.write("Running commands: \n")
sys.stderr.write("bcbio-variation-recall Command: {}\n".format(command))
sys.stderr.write("Uncompression Command: {}\n".format(command2))
sys.stderr.write("Sort Command: {}\n".format(command3))
job.fileStore.logToMaster("bcbio-variation-recall Command: {}\n".format(command))
run_and_log_command(" ".join(command), logfile1)
job.fileStore.logToMaster("Uncompression Command: {}\n".format(command2))
run_and_log_command(" ".join(command2), logfile2)
job.fileStore.logToMaster("Sort Command: {}\n".format(command3))
run_and_log_command(" ".join(command3), logfile3)
# The Index file created by Picard often causes problems with the GATK
index_file = "{}.idx".format(sorted_vcf)
os.remove(index_file)
return sorted_vcf
def annotate_vcf(job, config, name, input_vcf, input_bam):
"""Run GATK's VariantAnnotation on the specified VCF
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param input_vcf: The input_vcf file name to process.
:type input_vcf: str.
:param input_bam: The input_bam file name to process.
:type input_bam: str.
:returns: str -- The output vcf file name.
"""
output_vcf = "{}.annotated.vcf".format(name)
annotation_logfile = "{}.variantannotation.log".format(name)
annotation_command = ["{}".format(config['gatk-annotate']['bin']),
"-T",
"VariantAnnotator",
"-R",
"{}".format(config['reference']),
"-nt",
"{}".format(config['gatk-annotate']['num_cores']),
"--group",
"StandardAnnotation",
"--dbsnp",
"{}".format(config['dbsnp']),
"-I",
"{}".format(input_bam),
"--variant",
"{}".format(input_vcf),
"-L",
"{}".format(input_vcf),
"-o",
"{}".format(output_vcf)]
job.fileStore.logToMaster("GATK VariantAnnotator Command: {}\n".format(annotation_command))
run_and_log_command(" ".join(annotation_command), annotation_logfile)
return output_vcf
def filter_variants(job, config, name, input_vcf):
"""Run GATK's VariantFilter on the specified VCF
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param input_vcf: The input_vcf file name to process.
:type input_vcf: str.
:returns: str -- The output vcf file name.
"""
output_vcf = "{}.filtered.vcf".format(name)
filter_log = "{}.variantfiltration.log".format(name)
filter_command = ["{}".format(config['gatk-filter']['bin']),
"-T",
"VariantFiltration",
"-R",
"{}".format(config['reference']),
"--filterExpression",
"'MQ0 > {}'".format(config['mq0_threshold']),
"--filterName",
"'HighMQ0'",
"--filterExpression",
"'DP < {}'".format(config['coverage_threshold']),
"--filterName",
"'LowDepth'",
"--filterExpression",
"'QUAL < {}'".format(config['var_qual_threshold']),
"--filterName",
"'LowQual'",
"--filterExpression",
"'MQ < {}'".format(config['map_qual_threshold']),
"--filterName",
"'LowMappingQual'",
"--variant",
"{}".format(input_vcf),
"-o",
"{}".format(output_vcf)]
job.fileStore.logToMaster("GATK VariantFiltration Command: {}\n".format(filter_command))
run_and_log_command(" ".join(filter_command), filter_log)
return output_vcf
def snpeff(job, config, name, input_vcf):
"""Annotate the specified VCF using snpEff
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param input_vcf: The input_vcf file name to process.
:type input_vcf: str.
:returns: str -- The output vcf file name.
"""
output_vcf = "{}.snpEff.{}.vcf".format(name, config['snpeff']['reference'])
logfile = "{}.snpeff.log".format(name)
snpeff_command = ["{}".format(config['snpeff']['bin']),
"-Xmx{}g".format(config['snpeff']['max_mem']),
"-onlyTr {}".format(config['transcripts']),
"-v",
"{}".format(config['snpeff']['reference']),
"{}".format(input_vcf),
">"
"{}".format(output_vcf)]
job.fileStore.logToMaster("snpEff Command: {}\n".format(snpeff_command))
run_and_log_command(" ".join(snpeff_command), logfile)
return output_vcf
def vcfanno(job, config, name, samples, input_vcf):
"""Take the specified VCF and use vcfanno to add additional annotations
:param config: The configuration dictionary.
:type config: dict.
:param sample: sample name.
:type sample: str.
:param input_vcf: The input_vcf file name to process.
:type input_vcf: str.
:returns: str -- The output vcf file name.
"""
output_vcf = "{}.vcfanno.snpEff.{}.vcf".format(name, config['snpeff']['reference'])
logfile = "{}.vcfanno.log".format(name)
command = ["{}".format(config['vcfanno']['bin']),
"-p",
"{}".format(config['vcfanno']['num_cores']),
"--lua",
"{}".format(config['vcfanno']['lua']),
"{}".format(samples[name]['vcfanno_config']),
"{}".format(input_vcf),
">",
"{}".format(output_vcf)]
job.fileStore.logToMaster("VCFAnno Command: {}\n".format(command))
run_and_log_command(" ".join(command), logfile)
return output_vcf
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--samples_file', help="Input configuration file for samples")
parser.add_argument('-c', '--configuration', help="Configuration file for various settings")
parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
Job.Runner.addToilOptions(parser)
args = parser.parse_args()
args.logLevel = "INFO"
cwd = os.getcwd()
sys.stdout.write("Setting up analysis directory\n")
# if not os.path.exists("Logs"):
# os.makedirs("Logs")
# if not os.path.exists("FinalVCFs"):
# os.makedirs("FinalVCFs")
# if not os.path.exists("FinalBAMs"):
# os.makedirs("FinalBAMs")
# if not os.path.exists("Intermediates"):
# os.makedirs("Intermediates")
# if not os.path.exists("Coverage"):
# os.makedirs("Coverage")
# if not os.path.exists("Reports"):
# os.makedirs("Reports")
sys.stdout.write("Parsing configuration data\n")
config = configuration.configure_runtime(args.configuration)
sys.stdout.write("Parsing sample data\n")
samples = configuration.configure_samples(args.samples_file, config)
if args.username:
password = getpass.getpass()
auth_provider = PlainTextAuthProvider(username=args.username, password=password)
else:
auth_provider = None
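    # A minimal sketch (an assumption; the actual connection code is not shown
    # here) of how the credentials gathered above would typically be used with
    # the DataStax Cassandra driver. The keyspace name is hypothetical.
    #
    #     from cassandra.cluster import Cluster
    #     cluster = Cluster([args.address], auth_provider=auth_provider)
    #     connection = cluster.connect('variantstore')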
# Workflow Graph definition. The following workflow definition should create a valid Directed Acyclic Graph (DAG)
root_job = Job.wrapJobFn(spawn_batch_jobs, cores=1)
fastqc_job = Job.wrapJobFn(run_fastqc, config, samples)
# Per sample jobs
for sample in samples:
# Alignment and Refinement Stages
align_job = Job.wrapJobFn(run_bwa_mem, config, sample, samples,
cores=int(config['bwa']['num_cores']),
memory="{}G".format(config['bwa']['max_mem']))
add_job = Job.wrapJobFn(add_or_replace_readgroups, config, sample, align_job.rv(),
cores=1,
memory="{}G".format(config['picard-add']['max_mem']))
creator_job = Job.wrapJobFn(realign_target_creator, config, sample, add_job.rv(),
cores=int(config['gatk-realign']['num_cores']),
memory="{}G".format(config['gatk-realign']['max_mem']))
realign_job = Job.wrapJobFn(realign_indels, config, sample, add_job.rv(), creator_job.rv(),
cores=1,
memory="{}G".format(config['gatk-realign']['max_mem']))
recal_job = Job.wrapJobFn(recalibrator, config, sample, realign_job.rv(),
cores=int(config['gatk-recal']['num_cores']),
memory="{}G".format(config['gatk-recal']['max_mem']))
coverage_job = Job.wrapJobFn(sambamba_region_coverage, config, sample, samples,
"{}.recalibrated.sorted.bam".format(sample),
cores=int(config['gatk']['num_cores']),
memory="{}G".format(config['gatk']['max_mem']))
# Variant Calling
spawn_variant_job = Job.wrapJobFn(spawn_batch_jobs)
freebayes_job = Job.wrapJobFn(freebayes_single, config, sample,
"{}.recalibrated.sorted.bam".format(sample),
cores=1,
memory="{}G".format(config['freebayes']['max_mem']))
mutect_job = Job.wrapJobFn(mutect_single, config, sample, samples,
"{}.recalibrated.sorted.bam".format(sample),
cores=1,
memory="{}G".format(config['mutect']['max_mem']))
vardict_job = Job.wrapJobFn(vardict_single, config, sample, samples,
"{}.recalibrated.sorted.bam".format(sample),
cores=int(config['vardict']['num_cores']),
memory="{}G".format(config['vardict']['max_mem']))
scalpel_job = Job.wrapJobFn(scalpel_single, config, sample, samples,
"{}.recalibrated.sorted.bam".format(sample),
cores=int(config['scalpel']['num_cores']),
memory="{}G".format(config['scalpel']['max_mem']))
platypus_job = Job.wrapJobFn(platypus_single, config, sample, samples,
"{}.recalibrated.sorted.bam".format(sample),
cores=int(config['platypus']['num_cores']),
memory="{}G".format(config['platypus']['max_mem']))
pindel_job = Job.wrapJobFn(run_pindel, config, sample,
"{}.recalibrated.sorted.bam".format(sample),
cores=int(config['pindel']['num_cores']),
memory="{}G".format(config['pindel']['max_mem']))
# Need to filter for on target only results somewhere as well
spawn_normalization_job = Job.wrapJobFn(spawn_batch_jobs)
normalization_job1 = Job.wrapJobFn(vt_normalization, config, sample, "freebayes",
"{}.freebayes.vcf".format(sample),
cores=1,
memory="{}G".format(config['gatk']['max_mem']))
normalization_job2 = Job.wrapJobFn(vt_normalization, config, sample, "mutect",
"{}.mutect.vcf".format(sample),
cores=1,
memory="{}G".format(config['gatk']['max_mem']))
normalization_job3 = Job.wrapJobFn(vt_normalization, config, sample, "vardict",
"{}.vardict.vcf".format(sample),
cores=1,
memory="{}G".format(config['gatk']['max_mem']))
normalization_job4 = Job.wrapJobFn(vt_normalization, config, sample, "scalpel",
"{}.scalpel.vcf".format(sample),
cores=1,
memory="{}G".format(config['gatk']['max_mem']))
normalization_job5 = Job.wrapJobFn(vt_normalization, config, sample, "platypus",
"{}.platypus.vcf".format(sample),
cores=1,
memory="{}G".format(config['gatk']['max_mem']))
normalization_job6 = Job.wrapJobFn(vt_normalization, config, sample, "pindel",
"{}.pindel.vcf".format(sample),
cores=1,
memory="{}G".format(config['gatk']['max_mem']))
callers = "freebayes,mutect,vardict,scalpel,platypus,pindel"
merge_job = Job.wrapJobFn(merge_variant_calls, config, sample, callers, (normalization_job1.rv(),
normalization_job2.rv(),
normalization_job3.rv(),
normalization_job4.rv(),
normalization_job5.rv(),
normalization_job6.rv()))
gatk_annotate_job = Job.wrapJobFn(annotate_vcf, config, sample, merge_job.rv(),
"{}.recalibrated.sorted.bam".format(sample),
cores=int(config['gatk-annotate']['num_cores']),
memory="{}G".format(config['gatk-annotate']['max_mem']))
gatk_filter_job = Job.wrapJobFn(filter_variants, config, sample, gatk_annotate_job.rv(),
cores=1,
memory="{}G".format(config['gatk-filter']['max_mem']))
snpeff_job = Job.wrapJobFn(snpeff, config, sample, "{}.filtered.vcf".format(sample),
cores=int(config['snpeff']['num_cores']),
memory="{}G".format(config['snpeff']['max_mem']))
vcfanno_job = Job.wrapJobFn(vcfanno, config, sample, samples,
"{}.snpEff.{}.vcf".format(sample, config['snpeff']['reference']),
cores=int(config['vcfanno']['num_cores']),
memory="{}G".format(config['vcfanno']['max_mem']))
# Create workflow from created jobs
root_job.addChild(align_job)
# align_job.addChild(add_job)
# add_job.addChild(creator_job)
# creator_job.addChild(realign_job)
# realign_job.addChild(recal_job)
#
# recal_job.addChild(spawn_variant_job)
#
# spawn_variant_job.addChild(coverage_job)
# spawn_variant_job.addChild(freebayes_job)
# spawn_variant_job.addChild(mutect_job)
# spawn_variant_job.addChild(vardict_job)
# spawn_variant_job.addChild(scalpel_job)
# spawn_variant_job.addChild(platypus_job)
# spawn_variant_job.addChild(pindel_job)
#
# spawn_variant_job.addFollowOn(spawn_normalization_job)
#
# spawn_normalization_job.addChild(normalization_job1)
# spawn_normalization_job.addChild(normalization_job2)
# spawn_normalization_job.addChild(normalization_job3)
# spawn_normalization_job.addChild(normalization_job4)
# spawn_normalization_job.addChild(normalization_job5)
# spawn_normalization_job.addChild(normalization_job6)
#
# spawn_normalization_job.addFollowOn(merge_job)
#
# merge_job.addChild(gatk_annotate_job)
# gatk_annotate_job.addChild(gatk_filter_job)
# gatk_filter_job.addChild(snpeff_job)
# snpeff_job.addChild(vcfanno_job)
# root_job.addFollowOn(fastqc_job)
# Start workflow execution
Job.Runner.startToil(root_job, args)
|
|
"""Timeseries plotting functions."""
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats, interpolate
import matplotlib as mpl
import matplotlib.pyplot as plt
from six import string_types
import moss
from seaborn.utils import color_palette
def tsplot(data, time=None, unit=None, condition=None, value=None,
err_style="ci_band", ci=68, interpolate=True, color=None,
estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,
legend=True, ax=None, **kwargs):
"""Plot one or more timeseries with flexible representation of uncertainty.
This function can take data specified either as a long-form (tidy)
DataFrame or as an ndarray with dimensions for sampling unit, time, and
(optionally) condition. The interpretation of some of the other parameters
changes depending on the type of object passed as data.
Parameters
----------
data : DataFrame or ndarray
Data for the plot. Should either be a "long form" dataframe or an
array with dimensions (unit, time, condition). In both cases, the
condition field/dimension is optional. The type of this argument
determines the interpretation of the next few parameters.
time : string or series-like
Either the name of the field corresponding to time in the data
DataFrame or x values for a plot when data is an array. If a Series,
the name will be used to label the x axis.
value : string
Either the name of the field corresponding to the data values in
the data DataFrame (i.e. the y coordinate) or a string that forms
the y axis label when data is an array.
unit : string
Field in the data DataFrame identifying the sampling unit (e.g.
subject, neuron, etc.). The error representation will collapse over
units at each time/condition observation. This has no role when data
is an array.
condition : string or Series-like
Either the name of the field identifying the condition an observation
falls under in the data DataFrame, or a sequence of names with a length
equal to the size of the third dimension of data. There will be a
separate trace plotted for each condition. If condition is a Series
with a name attribute, the name will form the title for the plot
legend (unless legend is set to False).
err_style : string or list of strings or None
Names of ways to plot uncertainty across units from set of
        {ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.
Can use one or more than one method.
ci : float or list of floats in [0, 100]
        Confidence interval size(s). If a list, it will stack the error
plots for each confidence interval. Only relevant for error styles
with "ci" in the name.
interpolate : boolean
Whether to do a linear interpolation between each timepoint when
plotting. The value of this parameter also determines the marker
used for the main plot traces, unless marker is specified as a keyword
argument.
color : seaborn palette or matplotlib color name or dictionary
Palette or color for the main plots and error representation (unless
plotting by unit, which can be separately controlled with err_palette).
If a dictionary, should map condition name to color spec.
estimator : callable
        Function to determine central tendency and to pass to bootstrap;
        must take an ``axis`` argument.
n_boot : int
Number of bootstrap iterations.
    err_palette : seaborn palette
Palette name or list of colors used when plotting data for each unit.
err_kws : dict, optional
        Keyword argument dictionary passed through to the matplotlib function
        generating the error plot.
ax : axis object, optional
Plot in given axis; if None creates a new figure
kwargs :
Other keyword arguments are passed to main plot() call
Returns
-------
ax : matplotlib axis
axis with plot data
"""
# Sort out default values for the parameters
if ax is None:
ax = plt.gca()
if err_kws is None:
err_kws = {}
# Handle different types of input data
if isinstance(data, pd.DataFrame):
xlabel = time
ylabel = value
# Condition is optional
if condition is None:
condition = pd.Series(np.ones(len(data)))
legend = False
legend_name = None
n_cond = 1
else:
legend = True and legend
legend_name = condition
n_cond = len(data[condition].unique())
else:
data = np.asarray(data)
# Data can be a timecourse from a single unit or
# several observations in one condition
if data.ndim == 1:
data = data[np.newaxis, :, np.newaxis]
elif data.ndim == 2:
data = data[:, :, np.newaxis]
n_unit, n_time, n_cond = data.shape
# Units are experimental observations. Maybe subjects, or neurons
if unit is None:
units = np.arange(n_unit)
unit = "unit"
units = np.repeat(units, n_time * n_cond)
ylabel = None
# Time forms the xaxis of the plot
if time is None:
times = np.arange(n_time)
else:
times = np.asarray(time)
xlabel = None
if hasattr(time, "name"):
xlabel = time.name
time = "time"
times = np.tile(np.repeat(times, n_cond), n_unit)
# Conditions split the timeseries plots
if condition is None:
conds = range(n_cond)
legend = False
if isinstance(color, dict):
err = "Must have condition names if using color dict."
raise ValueError(err)
else:
conds = np.asarray(condition)
legend = True and legend
if hasattr(condition, "name"):
legend_name = condition.name
else:
legend_name = None
condition = "cond"
conds = np.tile(conds, n_unit * n_time)
# Value forms the y value in the plot
if value is None:
ylabel = None
else:
ylabel = value
value = "value"
# Convert to long-form DataFrame
data = pd.DataFrame(dict(value=data.ravel(),
time=times,
unit=units,
cond=conds))
    # Set up the err_style and ci arguments for the loop below
if isinstance(err_style, string_types):
err_style = [err_style]
elif err_style is None:
err_style = []
if not hasattr(ci, "__iter__"):
ci = [ci]
# Set up the color palette
if color is None:
colors = color_palette()
elif isinstance(color, dict):
colors = [color[c] for c in data[condition].unique()]
else:
try:
colors = color_palette(color, n_cond)
except ValueError:
color = mpl.colors.colorConverter.to_rgb(color)
colors = [color] * n_cond
# Do a groupby with condition and plot each trace
for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):
df_c = df_c.pivot(unit, time, value)
        x = df_c.columns.values.astype(float)
# Bootstrap the data for confidence intervals
boot_data = moss.bootstrap(df_c.values, n_boot=n_boot,
axis=0, func=estimator)
cis = [moss.ci(boot_data, v, axis=0) for v in ci]
central_data = estimator(df_c.values, axis=0)
# Get the color for this condition
color = colors[c]
# Use subroutines to plot the uncertainty
for style in err_style:
# Allow for null style (only plot central tendency)
if style is None:
continue
# Grab the function from the global environment
try:
plot_func = globals()["_plot_%s" % style]
except KeyError:
raise ValueError("%s is not a valid err_style" % style)
# Possibly set up to plot each observation in a different color
if err_palette is not None and "unit" in style:
orig_color = color
color = color_palette(err_palette, len(df_c.values))
# Pass all parameters to the error plotter as keyword args
plot_kwargs = dict(ax=ax, x=x, data=df_c.values,
boot_data=boot_data,
central_data=central_data,
color=color, err_kws=err_kws)
# Plot the error representation, possibly for multiple cis
for ci_i in cis:
plot_kwargs["ci"] = ci_i
plot_func(**plot_kwargs)
if err_palette is not None and "unit" in style:
color = orig_color
# Plot the central trace
marker = kwargs.pop("marker", "" if interpolate else "o")
linestyle = kwargs.pop("linestyle", "-" if interpolate else "")
label = kwargs.pop("label", cond if legend else "_nolegend_")
ax.plot(x, central_data, color=color, label=label,
marker=marker, linestyle=linestyle, **kwargs)
# Pad the sides of the plot only when not interpolating
ax.set_xlim(x.min(), x.max())
x_diff = x[1] - x[0]
if not interpolate:
ax.set_xlim(x.min() - x_diff, x.max() + x_diff)
# Add the plot labels
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if legend:
ax.legend(loc=0, title=legend_name)
return ax
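# A minimal usage sketch, not part of the original module: the helper below is
# hypothetical and only illustrates the ndarray code path documented above --
# an array shaped (unit, time) with a single implicit condition, drawn with a
# confidence band plus bootstrap traces for the error representation.
def _tsplot_example():
    """Plot a toy (unit, time) array with a CI band and bootstrap traces."""
    rs = np.random.RandomState(0)
    x = np.linspace(0, 15, 31)
    # 10 sampling units observed at 31 timepoints
    data = np.sin(x) + rs.rand(10, 31) + rs.randn(10, 1)
    return tsplot(data, time=x, err_style=["ci_band", "boot_traces"],
                  n_boot=500, value="signal")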
# Subroutines for tsplot errorbar plotting
# ----------------------------------------
def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):
"""Plot translucent error bands around the central tendancy."""
low, high = ci
if "alpha" not in err_kws:
err_kws["alpha"] = 0.2
ax.fill_between(x, low, high, color=color, **err_kws)
def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):
"""Plot error bars at each data point."""
for x_i, y_i, (low, high) in zip(x, central_data, ci.T):
ax.plot([x_i, x_i], [low, high], color=color,
solid_capstyle="round", **err_kws)
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
"""Plot 250 traces from bootstrap."""
if "alpha" not in err_kws:
err_kws["alpha"] = 0.25
if "lw" in err_kws:
err_kws["linewidth"] = err_kws.pop("lw")
if "linewidth" not in err_kws:
err_kws["linewidth"] = 0.25
ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):
"""Plot a trace for each observation in the original data."""
if isinstance(color, list):
if "alpha" not in err_kws:
err_kws["alpha"] = .5
for i, obs in enumerate(data):
ax.plot(x, obs, color=color[i], label="_nolegend_", **err_kws)
else:
if "alpha" not in err_kws:
err_kws["alpha"] = .2
ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_points(ax, x, data, color, err_kws, **kwargs):
"""Plot each original data point discretely."""
if isinstance(color, list):
for i, obs in enumerate(data):
ax.plot(x, obs, "o", color=color[i], alpha=0.8, markersize=4,
label="_nolegend_", **err_kws)
else:
ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4,
label="_nolegend_", **err_kws)
def _plot_boot_kde(ax, x, boot_data, color, **kwargs):
"""Plot the kernal density estimate of the bootstrap distribution."""
kwargs.pop("data")
_ts_kde(ax, x, boot_data, color, **kwargs)
def _plot_unit_kde(ax, x, data, color, **kwargs):
"""Plot the kernal density estimate over the sample."""
_ts_kde(ax, x, data, color, **kwargs)
def _ts_kde(ax, x, data, color, **kwargs):
"""Upsample over time and plot a KDE of the bootstrap distribution."""
kde_data = []
y_min, y_max = data.min(), data.max()
y_vals = np.linspace(y_min, y_max, 100)
upsampler = interpolate.interp1d(x, data)
data_upsample = upsampler(np.linspace(x.min(), x.max(), 100))
for pt_data in data_upsample.T:
pt_kde = stats.kde.gaussian_kde(pt_data)
kde_data.append(pt_kde(y_vals))
kde_data = np.transpose(kde_data)
rgb = mpl.colors.ColorConverter().to_rgb(color)
img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))
img[:, :, :3] = rgb
kde_data /= kde_data.max(axis=0)
kde_data[kde_data > 1] = 1
img[:, :, 3] = kde_data
ax.imshow(img, interpolation="spline16", zorder=2,
extent=(x.min(), x.max(), y_min, y_max),
aspect="auto", origin="lower")
|
|
import networkx as nx
import numpy as np
from numpy import log, exp
import scipy as sp
import pymc
from scipy import stats
import emcee
import random
import matplotlib as mpl
import matplotlib.pyplot as plt
from itertools import product
from sklearn.metrics import mutual_info_score
from scipy.stats import gaussian_kde
from mpl_toolkits.mplot3d import Axes3D
from itertools import *
#from NPEET import mi
from math import sqrt
import math
from itertools import repeat
import heapq
import pandas as pd
from collections import deque
from .information import mutual_information
from .utility import *
from .information import pairwise_mutual_info, mutual_information, conditional_mutual_information, discrete_mutual_information
from .representation import DGM
def build_pmap_skeleton(names, ci_test, d=None):
"""
Build the skeleton of the P-map using the witness sets.
:param names: variable names
:param ci_test: a conditional independence oracle of the form (name1, name2, witness -> bool)
:param d: maximum number of parents in graph
:return: P-map skeleton graph, set of witnesses
"""
if d is None:
d = len(names) - 1
G = nx.Graph()
G.add_nodes_from(names)
G.add_edges_from(combinations(names, 2))
witnesses = {}
for x, y in combinations(names, 2):
print(x, y)
x_neigh = list(G.neighbors(x))
y_neigh = list(G.neighbors(y))
for witness in chain(*([combinations(x_neigh, i) for i in range(1, 1 + min(len(x_neigh), d))] + \
[combinations(y_neigh, i) for i in range(1, 1 + min(len(y_neigh), d))])):
if ci_test(x, y, witness):
witnesses[x, y] = witness
G.remove_edge(x, y)
break
return G, witnesses
def info_ci_test(x, y, witness, threshold=None, data=None):
    """
    Conditional independence test based on conditional mutual information
    :param x: first variable name
    :param y: second variable name
    :param witness: witness set
    :param threshold: threshold for the conditional mutual information
    :param data: the dataset
    :return: are the variables independent
    """
    return conditional_mutual_information(data, x, witness, y) < threshold
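# A minimal usage sketch (hypothetical, not part of the original module): the
# ci_test oracle expected by build_pmap_skeleton only needs the signature
# (name1, name2, witness) -> bool, so info_ci_test can be bound to a dataset
# and a threshold with functools.partial. The dataframe argument is supplied
# by the caller.
def _pmap_skeleton_example(df, threshold=0.05):
    """Hypothetical helper: recover a P-map skeleton from a discrete dataframe."""
    from functools import partial
    ci_test = partial(info_ci_test, threshold=threshold, data=df)
    return build_pmap_skeleton(list(df.columns), ci_test, d=3)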
def chow_liu(X):
"""
Chow-Liu structure learning algorithm.
:param X: dataset
:return: the learned graph (tree)
"""
n_objects = X.shape[0]
n_vars = X.shape[1]
g = nx.complete_graph(n_vars)
for i, j in g.edges():
        g.edges[i, j]['mutual information'] = mutual_info_score(X[:, i], X[:, j])
g = maximum_spanning_tree(g, weight='mutual information')
return g
class BIC_score:
"""
BIC (Bayes Information Criterion) score of DGM graph. All terms which don't depend
on the network structure are thrown out.
"""
def __init__(self, G, data):
self.G = G
self.data = data
self.cache = { }
self.m = self.data.shape[0]
self.cardinality = { x : max(self.data[x]) + 1 for x in self.G.nodes() }
print(self.cardinality)
self.value = sum([self.famscore(node) for node in self.G.nodes()])
def famscore(self, node):
"""
Family score of a single node. BIC score is decomposable, so total score is the sum
of family scores of all nodes.
"""
def dimension(node):
"""
Number of free parameters in node CPD.
"""
result = self.cardinality[node]
for parent in self.G.predecessors(node):
result *= self.cardinality[parent]
return result - 1
if node in self.cache:
return self.cache[node]
dim = dimension(node)
result = - dim * log(self.m) / 2
if self.G.in_degree(node) > 0:
result += self.m * discrete_mutual_information(self.data[[node]].values, self.data[list(self.G.predecessors(node))].values)
self.cache[node] = result
return result
def invalidate(self, node):
"""
Notifies score calculator that the family of the node has changed, in order to update its
score.
"""
if node in self.cache:
self.value -= self.cache[node]
self.cache.pop(node)
self.value += self.famscore(node)
def reset(self):
"""
Recalculate all the scores.
:return: None
"""
for node in self.G.nodes():
self.invalidate(node)
return None
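# For reference, the family score computed by BIC_score.famscore above is, up
# to terms that do not depend on the structure, the decomposable BIC term
#     FamScore(X | Pa(X)) = m * I_hat(X; Pa(X)) - (1/2) * Dim[X | Pa(X)] * log(m)
# where m is the sample count, I_hat the empirical mutual information and Dim
# the number of free CPD parameters; the mutual-information term vanishes for
# parentless nodes, matching the in_degree check in famscore().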
class StructureSearchEdgeOp:
def __init__(self, G, scoring):
self.G = G
self.scoring = scoring
def __call__(self):
G = self.G
for s, t in combinations(self.G.nodes(), 2):
yield (self, (s, t))
def score(self, s, t):
prev = self.scoring.value
if self.apply(s, t):
result = self.scoring.value - prev
self.cancel(s, t)
return result
return 0.
def apply(self, s, t):
return True
def cancel(self, s, t):
pass
class EdgeAddOp(StructureSearchEdgeOp):
def __init__(self, G, scoring):
super().__init__(G, scoring)
def _apply(self, s, t):
self.G.add_edge(s, t)
def apply(self, s, t):
if self.G.has_edge(s, t):
return False
self._apply(s, t)
if not nx.is_directed_acyclic_graph(self.G):
self._cancel(s, t)
return False
self.scoring.invalidate(t)
return True
def _cancel(self, s, t):
self.G.remove_edge(s, t)
def cancel(self, s, t):
self._cancel(s, t)
self.scoring.invalidate(t)
def is_affected(self, nodes, s, t):
return t in nodes
def affects(self, s, t):
return [t]
class EdgeRemoveOp(StructureSearchEdgeOp):
def __init__(self, G, scoring):
super().__init__(G, scoring)
def apply(self, s, t):
if not self.G.has_edge(s, t):
return False
self._apply(s, t)
self.scoring.invalidate(t)
return True
def _apply(self, s, t):
self.G.remove_edge(s, t)
def cancel(self, s, t):
self._cancel(s, t)
self.scoring.invalidate(t)
def _cancel(self, s, t):
self.G.add_edge(s, t)
def is_affected(self, nodes, s, t):
return t in nodes
def affects(self, s, t):
return [t]
class EdgeReverseOp(StructureSearchEdgeOp):
def __init__(self, G, scoring):
super().__init__(G, scoring)
def apply(self, s, t):
if not self.G.has_edge(s, t):
return False
self._apply(s, t)
if nx.is_directed_acyclic_graph(self.G):
self.scoring.invalidate(s)
self.scoring.invalidate(t)
return True
else:
self._cancel(s, t)
return False
def _apply(self, s, t):
self.G.remove_edge(s, t)
self.G.add_edge(t, s)
def cancel(self, s, t):
self.apply(t, s)
def _cancel(self, s, t):
return self._apply(t, s)
def is_affected(self, nodes, s, t):
return t in nodes or s in nodes
def affects(self, s, t):
return [s, t]
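# The three edge operations above share a small protocol that StructureSearch
# relies on below: score(s, t) evaluates a move by tentatively applying and
# then cancelling it, apply()/cancel() mutate the graph (rejecting changes
# that would introduce a directed cycle), and affects()/is_affected() report
# which node families need their scores re-evaluated after a move is kept.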
def bagging(data):
headers = list(data.columns.values)
data = data.values
result = data[np.random.randint(data.shape[0], size=data.shape[0])]
return pd.DataFrame(data=result, columns=headers)
class StructureSearch:
"""
Class for performing local structure search.
"""
def __init__(self, data: pd.DataFrame, scoring, operations: list):
self.data = data
self.scoring = scoring
self.operations = operations
def __call__(self, G, n_iterations=1000, do_bagging=True, taboo_len=0):
"""
Does the structure search.
:param G: target graph
:param n_iterations: maximum number of iterations
:return: result graph
"""
data = self.data
score = self.scoring(G, data)
bagging_iter = -50
operations = list(map(lambda Op: Op(G, score), self.operations))
opdata = sum([list(op()) for op in operations], [])
# operations_heap = []
# for i in range(len(opdata)):
# operations_heap.append((-opdata[i][0].score(*opdata[i][1]), i))
operations_heap = lmap(lambda i: (-opdata[i][0].score(*opdata[i][1]), i), range(len(opdata)))
taboo = deque([-1 for i in range(taboo_len)])
for n_iter in range(n_iterations):
print('Iteration', n_iter)
affects = []
best_score = -np.inf
operations_heap.sort()
for i in range(len(operations_heap)):
best_score, idx = operations_heap[i]
op, args = opdata[idx]
best_score = -best_score
prev_score = score.value
if idx not in taboo and op.apply(*args):
taboo.append(idx)
taboo.popleft()
if abs(score.value - prev_score - best_score) > 0.00001:
op.cancel(*args)
continue
affects = op.affects(*args)
break
if best_score <= 0:
op.cancel(*args)
if do_bagging and n_iter - bagging_iter > 10:
print('bagging data')
score.data = bagging(score.data)
score.reset()
best_score = score.value
bagging_iter = n_iter
else:
break
else:
for i in range(len(operations_heap)):
neg_score, idx = operations_heap[i]
op, (s, t) = opdata[idx]
if op.is_affected(affects, s, t):
operations_heap[i] = -op.score(s, t), idx
return G
class LocalSearchStructureLearner:
def __init__(self):
pass
def learn(self, data: pd.DataFrame, **options) -> DGM:
searcher = StructureSearch(data, BIC_score, [EdgeAddOp, EdgeRemoveOp, EdgeReverseOp])
G = DGM()
G.add_nodes_from(data.columns.values)
return searcher(G, 100, do_bagging=False, taboo_len=10)
class ChowLiuStructureLearner:
def __init__(self):
pass
def learn(self, data: pd.DataFrame, **options) -> DGM:
g = chow_liu(data.values)
header = data.columns.values
result = DGM()
result.add_nodes_from(header)
for u, v in g.edges():
if u > v:
u, v = v, u
result.add_edge(header[u], header[v])
return result
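# A minimal usage sketch (hypothetical, not part of the original module): both
# learners consume a pandas DataFrame of integer-coded discrete observations,
# one column per variable, and return a DGM over the column names.
def _structure_learning_example(data: pd.DataFrame):
    """Hypothetical helper contrasting the two structure learners on one dataset."""
    tree = ChowLiuStructureLearner().learn(data)     # spanning tree over mutual information
    dag = LocalSearchStructureLearner().learn(data)  # greedy local search with the BIC score
    return tree, dag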
|
|
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for normals."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from six.moves import range
import tensorflow as tf
from tensorflow_graphics.geometry.representation.mesh import normals
from tensorflow_graphics.util import test_case
class MeshTest(test_case.TestCase):
@parameterized.parameters(
(((None, 3), (None, 3)), (tf.float32, tf.int32)),
(((3, 6, 3), (3, 5, 4)), (tf.float32, tf.int32)),
)
def test_gather_faces_exception_not_raised(self, shapes, dtypes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(normals.gather_faces, shapes, dtypes)
@parameterized.parameters(
("Not all batch dimensions are identical", (3, 5, 4, 4), (1, 2, 4, 4)),
("Not all batch dimensions are identical", (5, 4, 4), (1, 2, 4, 4)),
("Not all batch dimensions are identical", (3, 5, 4, 4), (2, 4, 4)),
("vertices must have a rank greater than 1", (4,), (1, 2, 4, 4)),
("indices must have a rank greater than 1", (3, 5, 4, 4), (4,)),
)
def test_gather_faces_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(normals.gather_faces, error_msg, shapes)
def test_gather_faces_jacobian_random(self):
"""Test the Jacobian of the face extraction function."""
tensor_size = np.random.randint(2, 5)
tensor_shape = np.random.randint(1, 5, size=tensor_size).tolist()
vertex_init = np.random.random(size=tensor_shape)
indices_init = np.random.randint(0, tensor_shape[-2], size=tensor_shape)
indices_tensor = tf.convert_to_tensor(value=indices_init)
def gather_faces(vertex_tensor):
return normals.gather_faces(vertex_tensor, indices_tensor)
self.assert_jacobian_is_correct_fn(gather_faces, [vertex_init])
@parameterized.parameters(
((((0.,), (1.,)), ((1, 0),)), ((((1.,), (0.,)),),)),
((((0., 1.), (2., 3.)), ((1, 0),)), ((((2., 3.), (0., 1.)),),)),
((((0., 1., 2.), (3., 4., 5.)), ((1, 0),)), ((((3., 4., 5.),
(0., 1., 2.)),),)),
)
def test_gather_faces_preset(self, test_inputs, test_outputs):
"""Tests the extraction of mesh faces."""
self.assert_output_is_correct(
normals.gather_faces, test_inputs, test_outputs, tile=False)
def test_gather_faces_random(self):
"""Tests the extraction of mesh faces."""
tensor_size = np.random.randint(3, 5)
tensor_shape = np.random.randint(1, 5, size=tensor_size).tolist()
vertices = np.random.random(size=tensor_shape)
indices = np.arange(tensor_shape[-2])
indices = indices.reshape([1] * (tensor_size - 1) + [-1])
indices = np.tile(indices, tensor_shape[:-2] + [1, 1])
expected = np.expand_dims(vertices, -3)
self.assertAllClose(
normals.gather_faces(vertices, indices), expected, rtol=1e-3)
@parameterized.parameters(
(((None, 4, 3),), (tf.float32,)),
(((4, 3),), (tf.float32,)),
(((3, 4, 3),), (tf.float32,)),
)
def test_face_normals_exception_not_raised(self, shapes, dtypes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(normals.face_normals, shapes, dtypes)
@parameterized.parameters(
("faces must have a rank greater than 1.", (3,)),
("faces must have greater than 2 dimensions in axis -2", (2, 3)),
("faces must have exactly 3 dimensions in axis -1.", (5, 2)),
)
def test_face_normals_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(normals.face_normals, error_msg, shapes)
def test_face_normals_jacobian_random(self):
"""Test the Jacobian of the face normals function."""
tensor_vertex_size = np.random.randint(1, 3)
tensor_out_shape = np.random.randint(1, 5, size=tensor_vertex_size)
tensor_out_shape = tensor_out_shape.tolist()
tensor_vertex_shape = list(tensor_out_shape)
tensor_vertex_shape[-1] *= 3
tensor_index_shape = tensor_out_shape[-1]
vertex_init = np.random.random(size=tensor_vertex_shape + [3])
index_init = np.arange(tensor_vertex_shape[-1])
np.random.shuffle(index_init)
index_init = np.reshape(index_init, newshape=[1] * \
(tensor_vertex_size - 1) + \
[tensor_index_shape, 3])
index_init = np.tile(index_init, tensor_vertex_shape[:-1] + [1, 1])
index_tensor = tf.convert_to_tensor(value=index_init)
def face_normals(vertex_tensor):
face_tensor = normals.gather_faces(vertex_tensor, index_tensor)
return normals.face_normals(face_tensor)
self.assert_jacobian_is_correct_fn(
face_normals, [vertex_init], atol=1e-4, delta=1e-9)
@parameterized.parameters(
((((0., 0., 0.), (1., 0., 0.), (0., 1., 0.)), ((0, 1, 2),)),
(((0., 0., 1.),),)),
((((0., 0., 0.), (0., 0., 1.), (1., 0., 0.)), ((0, 1, 2),)),
(((0., 1., 0.),),)),
((((0., 0., 0.), (0., 1., 0.), (0., 0., 1.)), ((0, 1, 2),)),
(((1., 0., 0.),),)),
((((0., -2., -2.), (0, -2., 2.), (0., 2., 2.), (0., 2., -2.)),
((0, 1, 2, 3),)), (((-1., 0., 0.),),)),
)
def test_face_normals_preset(self, test_inputs, test_outputs):
"""Tests the computation of mesh face normals."""
faces = normals.gather_faces(*test_inputs[:2])
test_inputs = [faces] + list(test_inputs[2:])
self.assert_output_is_correct(
normals.face_normals, test_inputs, test_outputs, tile=False)
def test_face_normals_random(self):
"""Tests the computation of mesh face normals in each axis."""
tensor_vertex_size = np.random.randint(1, 3)
tensor_out_shape = np.random.randint(1, 5, size=tensor_vertex_size)
tensor_out_shape = tensor_out_shape.tolist()
tensor_vertex_shape = list(tensor_out_shape)
tensor_vertex_shape[-1] *= 3
tensor_index_shape = tensor_out_shape[-1]
for i in range(3):
vertices = np.random.random(size=tensor_vertex_shape + [3])
indices = np.arange(tensor_vertex_shape[-1])
np.random.shuffle(indices)
indices = np.reshape(indices,
newshape=[1] * (tensor_vertex_size - 1) \
+ [tensor_index_shape, 3])
indices = np.tile(indices, tensor_vertex_shape[:-1] + [1, 1])
vertices[..., i] = 0.
expected = np.zeros(shape=tensor_out_shape + [3], dtype=vertices.dtype)
expected[..., i] = 1.
faces = normals.gather_faces(vertices, indices)
self.assertAllClose(
tf.abs(normals.face_normals(faces)), expected, rtol=1e-3)
@parameterized.parameters(
(((4, 3), (5, 3)), (tf.float32, tf.int32)),
(((None, 3), (None, 3)), (tf.float32, tf.int32)),
(((3, None, 3), (3, None, 5)), (tf.float32, tf.int32)),
(((3, 6, 3), (3, 5, 5)), (tf.float32, tf.int32)),
)
def test_vertex_normals_exception_not_raised(self, shapes, dtypes):
"""Tests that the shape exceptions are not raised."""
self.assert_exception_is_not_raised(normals.vertex_normals, shapes, dtypes)
@parameterized.parameters(
("Not all batch dimensions are broadcast-compatible.", (3, 5, 4, 3),
(1, 2, 4, 3)),
("Not all batch dimensions are broadcast-compatible.", (2, 200, 3),
(4, 100, 3)),
("Not all batch dimensions are broadcast-compatible.", (5, 4, 3),
(1, 2, 4, 3)),
("Not all batch dimensions are broadcast-compatible.", (3, 5, 4, 3),
(2, 4, 3)),
("vertices must have a rank greater than 1.", (3,), (1, 2, 4, 3)),
("indices must have a rank greater than 1.", (3, 5, 4, 3), (3,)),
("vertices must have exactly 3 dimensions in axis -1.", (3, 5, 4, 2),
(3, 5, 4, 3)),
("indices must have greater than 2 dimensions in axis -1.", (3, 5, 4, 3),
(3, 5, 4, 2)),
("'indices' must have specified batch dimensions.", (None, 6, 3),
(None, 5, 5)),
)
def test_vertex_normals_exception_raised(self, error_msg, *shapes):
"""Tests that the shape exceptions are properly raised."""
self.assert_exception_is_raised(normals.vertex_normals, error_msg, shapes)
def test_vertex_normals_jacobian_random(self):
"""Test the Jacobian of the vertex normals function."""
tensor_vertex_size = np.random.randint(1, 3)
tensor_out_shape = np.random.randint(1, 5, size=tensor_vertex_size)
tensor_out_shape = tensor_out_shape.tolist()
vertex_axis = np.array(((0., 0., 1), (1., 0., 0.), (0., 1., 0.),
(0., 0., -1.), (-1., 0., 0.), (0., -1., 0.)),
dtype=np.float32)
vertex_axis = vertex_axis.reshape([1] * tensor_vertex_size + [6, 3])
faces = np.array(((0, 1, 2), (0, 2, 4), (0, 4, 5), (0, 5, 1), (3, 2, 1),
(3, 4, 2), (3, 5, 4), (3, 1, 5)),
dtype=np.int32)
faces = faces.reshape([1] * tensor_vertex_size + [8, 3])
index_init = np.tile(faces, tensor_out_shape + [1, 1])
vertex_scale = np.random.uniform(0.5, 5., tensor_out_shape + [1] * 2)
vertex_init = vertex_axis * vertex_scale
index_tensor = tf.convert_to_tensor(value=index_init)
def vertex_normals(vertex_tensor):
return normals.vertex_normals(vertex_tensor, index_tensor)
self.assert_jacobian_is_correct_fn(vertex_normals, [vertex_init])
@parameterized.parameters(
(((((-1., -1., 1.), (-1., 1., 1.), (-1., -1., -1.), (-1., 1., -1.),
(1., -1., 1.), (1., 1., 1.), (1., -1., -1.), (1., 1., -1.)),),
(((1, 2, 0), (3, 6, 2), (7, 4, 6), (5, 0, 4), (6, 0, 2), (3, 5, 7),
(1, 3, 2), (3, 7, 6), (7, 5, 4), (5, 1, 0), (6, 4, 0), (3, 1, 5)),)),
((((-0.3333333134651184, -0.6666666269302368, 0.6666666269302368),
(-0.8164965510368347, 0.40824827551841736, 0.40824827551841736),
(-0.8164965510368347, -0.40824827551841736, -0.40824827551841736),
(-0.3333333134651184, 0.6666666269302368, -0.6666666269302368),
(0.8164965510368347, -0.40824827551841736, 0.40824827551841736),
(0.3333333134651184, 0.6666666269302368, 0.6666666269302368),
(0.3333333134651184, -0.6666666269302368, -0.6666666269302368),
(0.8164965510368347, 0.40824827551841736, -0.40824827551841736)),),)),
)
def test_vertex_normals_preset(self, test_inputs, test_outputs):
"""Tests the computation of vertex normals."""
self.assert_output_is_correct(
normals.vertex_normals, test_inputs, test_outputs, tile=False)
def test_vertex_normals_random(self):
"""Tests the computation of vertex normals for a regular octahedral."""
tensor_vertex_size = np.random.randint(1, 3)
tensor_out_shape = np.random.randint(1, 5, size=tensor_vertex_size)
tensor_out_shape = tensor_out_shape.tolist()
with self.subTest(name="triangular_faces"):
vertex_on_axes = np.array(((0., 0., 1), (1., 0., 0.), (0., 1., 0.),
(0., 0., -1.), (-1., 0., 0.), (0., -1., 0.)),
dtype=np.float32)
vertex_on_axes = vertex_on_axes.reshape([1] * tensor_vertex_size + [6, 3])
index_init = np.array(((0, 1, 2), (0, 2, 4), (0, 4, 5), (0, 5, 1),
(3, 2, 1), (3, 4, 2), (3, 5, 4), (3, 1, 5)),
dtype=np.int32)
index_init = index_init.reshape([1] * tensor_vertex_size + [8, 3])
index_init = np.tile(index_init, tensor_out_shape + [1, 1])
vertex_scale = np.random.uniform(0.5, 5., tensor_out_shape + [1] * 2)
vertex_init = vertex_on_axes * vertex_scale
expected = vertex_on_axes * (vertex_scale * 0. + 1.)
vertex_tensor = tf.convert_to_tensor(value=vertex_init)
index_tensor = tf.convert_to_tensor(value=index_init)
self.assertAllClose(
normals.vertex_normals(vertex_tensor, index_tensor), expected)
with self.subTest(name="polygon_faces"):
num_vertices = np.random.randint(4, 8)
poly_vertices = []
rad_step = np.pi * 2. / num_vertices
for i in range(num_vertices):
poly_vertices.append([np.cos(i * rad_step), np.sin(i * rad_step), 0])
vertex_init = np.array(poly_vertices, dtype=np.float32)
vertex_init = vertex_init.reshape([1] * tensor_vertex_size + [-1, 3])
vertex_init = vertex_init * vertex_scale
index_init = np.arange(num_vertices, dtype=np.int32)
index_init = index_init.reshape([1] * tensor_vertex_size + [1, -1])
index_init = np.tile(index_init, tensor_out_shape + [1, 1])
expected = np.array((0., 0., 1.), dtype=np.float32)
expected = expected.reshape([1] * tensor_vertex_size + [1, 3])
expected = np.tile(expected, tensor_out_shape + [num_vertices, 1])
vertex_tensor = tf.convert_to_tensor(value=vertex_init)
index_tensor = tf.convert_to_tensor(value=index_init)
self.assertAllClose(
normals.vertex_normals(vertex_tensor, index_tensor), expected)
if __name__ == "__main__":
test_case.main()
|
|
#!/usr/bin/env python
#
# Copyright 2015 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""\
Dynamic inventory for Terraform - finds all `.tfstate` files below the working
directory and generates an inventory based on them.
"""
from __future__ import unicode_literals, print_function
import argparse
from collections import defaultdict
from functools import wraps
import json
import os
import re
VERSION = '0.3.0pre'
def tfstates(root=None):
root = root or os.getcwd()
for dirpath, _, filenames in os.walk(root):
for name in filenames:
if os.path.splitext(name)[-1] == '.tfstate':
yield os.path.join(dirpath, name)
def iterresources(filenames):
for filename in filenames:
with open(filename, 'r') as json_file:
state = json.load(json_file)
for module in state['modules']:
name = module['path'][-1]
for key, resource in module['resources'].items():
yield name, key, resource
## READ RESOURCES
PARSERS = {}
def _clean_dc(dcname):
# Consul DCs are strictly alphanumeric with underscores and hyphens -
# ensure that the consul_dc attribute meets these requirements.
    return re.sub(r'[^\w_\-]', '-', dcname)
def iterhosts(resources):
'''yield host tuples of (name, attributes, groups)'''
for module_name, key, resource in resources:
resource_type, name = key.split('.', 1)
try:
parser = PARSERS[resource_type]
except KeyError:
continue
yield parser(resource, module_name)
def parses(prefix):
def inner(func):
PARSERS[prefix] = func
return func
return inner
def calculate_mi_vars(func):
"""calculate microservices-infrastructure vars"""
@wraps(func)
def inner(*args, **kwargs):
name, attrs, groups = func(*args, **kwargs)
# attrs
if attrs.get('role', '') == 'control':
attrs['consul_is_server'] = True
else:
attrs['consul_is_server'] = False
# groups
if attrs.get('publicly_routable', False):
groups.append('publicly_routable')
return name, attrs, groups
return inner
def _parse_prefix(source, prefix, sep='.'):
for compkey, value in source.items():
try:
curprefix, rest = compkey.split(sep, 1)
except ValueError:
continue
if curprefix != prefix or rest == '#':
continue
yield rest, value
def parse_attr_list(source, prefix, sep='.'):
attrs = defaultdict(dict)
for compkey, value in _parse_prefix(source, prefix, sep):
idx, key = compkey.split(sep, 1)
attrs[idx][key] = value
return attrs.values()
def parse_dict(source, prefix, sep='.'):
return dict(_parse_prefix(source, prefix, sep))
def parse_list(source, prefix, sep='.'):
return [value for _, value in _parse_prefix(source, prefix, sep)]
def parse_bool(string_form):
token = string_form.lower()[0]
if token == 't':
return True
elif token == 'f':
return False
else:
raise ValueError('could not convert %r to a bool' % string_form)
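# A minimal illustration with made-up attribute data (not taken from a real
# tfstate) of how the helpers above unpack Terraform's flattened,
# dot-separated attribute keys into richer Python structures.
def _parse_helpers_example():
    """Show parse_attr_list / parse_dict / parse_list on a toy attribute dict."""
    raw_attrs = {
        'network.#': '1',
        'network.0.name': 'public',
        'network.0.fixed_ip_v4': '10.0.0.5',
        'tags.role': 'control',
        'tags.dc': 'dc1',
        'ssh_keys.#': '1',
        'ssh_keys.0': '12345',
    }
    networks = parse_attr_list(raw_attrs, 'network')  # -> [{'name': 'public', 'fixed_ip_v4': '10.0.0.5'}]
    tags = parse_dict(raw_attrs, 'tags')              # -> {'role': 'control', 'dc': 'dc1'}
    keys = parse_list(raw_attrs, 'ssh_keys')          # -> ['12345']
    return networks, tags, keys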
@parses('digitalocean_droplet')
@calculate_mi_vars
def digitalocean_host(resource, tfvars=None):
raw_attrs = resource['primary']['attributes']
name = raw_attrs['name']
groups = []
attrs = {
'id': raw_attrs['id'],
'image': raw_attrs['image'],
'ipv4_address': raw_attrs['ipv4_address'],
'locked': parse_bool(raw_attrs['locked']),
'metadata': json.loads(raw_attrs['user_data']),
'region': raw_attrs['region'],
'size': raw_attrs['size'],
'ssh_keys': parse_list(raw_attrs, 'ssh_keys'),
'status': raw_attrs['status'],
# ansible
'ansible_ssh_host': raw_attrs['ipv4_address'],
'ansible_ssh_port': 22,
'ansible_ssh_user': 'root', # it's always "root" on DO
# generic
'public_ipv4': raw_attrs['ipv4_address'],
'private_ipv4': raw_attrs['ipv4_address'],
'provider': 'digitalocean',
}
# attrs specific to microservices-infrastructure
attrs.update({
'consul_dc': _clean_dc(attrs['metadata'].get('dc', attrs['region'])),
'role': attrs['metadata'].get('role', 'none')
})
# add groups based on attrs
groups.append('do_image=' + attrs['image'])
groups.append('do_locked=%s' % attrs['locked'])
groups.append('do_region=' + attrs['region'])
groups.append('do_size=' + attrs['size'])
groups.append('do_status=' + attrs['status'])
groups.extend('do_metadata_%s=%s' % item
for item in attrs['metadata'].items())
# groups specific to microservices-infrastructure
groups.append('role=' + attrs['role'])
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
@parses('openstack_compute_instance_v2')
@calculate_mi_vars
def openstack_host(resource, module_name):
raw_attrs = resource['primary']['attributes']
name = raw_attrs['name']
groups = []
attrs = {
'access_ip_v4': raw_attrs['access_ip_v4'],
'access_ip_v6': raw_attrs['access_ip_v6'],
'flavor': parse_dict(raw_attrs, 'flavor',
sep='_'),
'id': raw_attrs['id'],
'image': parse_dict(raw_attrs, 'image',
sep='_'),
'key_pair': raw_attrs['key_pair'],
'metadata': parse_dict(raw_attrs, 'metadata'),
'network': parse_attr_list(raw_attrs, 'network'),
'region': raw_attrs.get('region', ''),
'security_groups': parse_list(raw_attrs, 'security_groups'),
# ansible
'ansible_ssh_port': 22,
'ansible_ssh_user': raw_attrs.get('metadata.ssh_user', 'centos'),
# workaround for an OpenStack bug where hosts have a different domain
# after they're restarted
'host_domain': 'novalocal',
'use_host_domain': True,
# generic
'public_ipv4': raw_attrs['access_ip_v4'],
'private_ipv4': raw_attrs['access_ip_v4'],
'provider': 'openstack',
}
if 'floating_ip' in raw_attrs:
attrs['private_ipv4'] = raw_attrs['network.0.fixed_ip_v4']
try:
attrs.update({
'ansible_ssh_host': raw_attrs['access_ip_v4'],
'publicly_routable': True,
})
except (KeyError, ValueError):
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
# attrs specific to microservices-infrastructure
attrs.update({
'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
'role': attrs['metadata'].get('role', 'none'),
})
# add groups based on attrs
groups.append('os_image=' + attrs['image']['name'])
groups.append('os_flavor=' + attrs['flavor']['name'])
groups.extend('os_metadata_%s=%s' % item
for item in attrs['metadata'].items())
groups.append('os_region=' + attrs['region'])
# groups specific to microservices-infrastructure
groups.append('role=' + attrs['metadata'].get('role', 'none'))
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
@parses('aws_instance')
@calculate_mi_vars
def aws_host(resource, module_name):
name = resource['primary']['attributes']['tags.Name']
raw_attrs = resource['primary']['attributes']
groups = []
attrs = {
'ami': raw_attrs['ami'],
'availability_zone': raw_attrs['availability_zone'],
'ebs_block_device': parse_attr_list(raw_attrs, 'ebs_block_device'),
'ebs_optimized': parse_bool(raw_attrs['ebs_optimized']),
'ephemeral_block_device': parse_attr_list(raw_attrs,
'ephemeral_block_device'),
'id': raw_attrs['id'],
'key_name': raw_attrs['key_name'],
'private': parse_dict(raw_attrs, 'private',
sep='_'),
'public': parse_dict(raw_attrs, 'public',
sep='_'),
'root_block_device': parse_attr_list(raw_attrs, 'root_block_device'),
'security_groups': parse_list(raw_attrs, 'security_groups'),
'subnet': parse_dict(raw_attrs, 'subnet',
sep='_'),
'tags': parse_dict(raw_attrs, 'tags'),
'tenancy': raw_attrs['tenancy'],
'vpc_security_group_ids': parse_list(raw_attrs,
'vpc_security_group_ids'),
# ansible-specific
'ansible_ssh_port': 22,
'ansible_ssh_user': raw_attrs['tags.sshUser'],
'ansible_ssh_host': raw_attrs['public_ip'],
# generic
'public_ipv4': raw_attrs['public_ip'],
'private_ipv4': raw_attrs['private_ip'],
'provider': 'aws',
}
# attrs specific to microservices-infrastructure
attrs.update({
'consul_dc': _clean_dc(attrs['tags'].get('dc', module_name)),
'role': attrs['tags'].get('role', 'none')
})
# groups specific to microservices-infrastructure
groups.extend(['aws_ami=' + attrs['ami'],
'aws_az=' + attrs['availability_zone'],
'aws_key_name=' + attrs['key_name'],
'aws_tenancy=' + attrs['tenancy']])
groups.extend('aws_tag_%s=%s' % item for item in attrs['tags'].items())
groups.extend('aws_vpc_security_group=' + group
for group in attrs['vpc_security_group_ids'])
groups.extend('aws_subnet_%s=%s' % subnet
for subnet in attrs['subnet'].items())
# groups specific to microservices-infrastructure
groups.append('role=' + attrs['role'])
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
@parses('google_compute_instance')
@calculate_mi_vars
def gce_host(resource, module_name):
name = resource['primary']['id']
raw_attrs = resource['primary']['attributes']
groups = []
# network interfaces
interfaces = parse_attr_list(raw_attrs, 'network_interface')
for interface in interfaces:
interface['access_config'] = parse_attr_list(interface,
'access_config')
for key in interface.keys():
if '.' in key:
del interface[key]
# general attrs
attrs = {
'can_ip_forward': raw_attrs['can_ip_forward'] == 'true',
'disks': parse_attr_list(raw_attrs, 'disk'),
'machine_type': raw_attrs['machine_type'],
'metadata': parse_dict(raw_attrs, 'metadata'),
'network': parse_attr_list(raw_attrs, 'network'),
'network_interface': interfaces,
'self_link': raw_attrs['self_link'],
'service_account': parse_attr_list(raw_attrs, 'service_account'),
'tags': parse_list(raw_attrs, 'tags'),
'zone': raw_attrs['zone'],
# ansible
'ansible_ssh_port': 22,
'ansible_ssh_user': raw_attrs.get('metadata.ssh_user', 'centos'),
'provider': 'gce',
}
# attrs specific to microservices-infrastructure
attrs.update({
'consul_dc': _clean_dc(attrs['metadata'].get('dc', module_name)),
'role': attrs['metadata'].get('role', 'none'),
})
try:
attrs.update({
'ansible_ssh_host': interfaces[0]['access_config'][0]['nat_ip'],
'public_ipv4': interfaces[0]['access_config'][0]['nat_ip'],
'private_ipv4': interfaces[0]['address'],
'publicly_routable': True,
})
except (KeyError, ValueError):
attrs.update({'ansible_ssh_host': '', 'publicly_routable': False})
# add groups based on attrs
groups.extend('gce_image=' + disk['image'] for disk in attrs['disks'])
groups.append('gce_machine_type=' + attrs['machine_type'])
groups.extend('gce_metadata_%s=%s' % (key, value)
for (key, value) in attrs['metadata'].items()
if key not in set(['sshKeys']))
groups.extend('gce_tag=' + tag for tag in attrs['tags'])
groups.append('gce_zone=' + attrs['zone'])
if attrs['can_ip_forward']:
groups.append('gce_ip_forward')
if attrs['publicly_routable']:
groups.append('gce_publicly_routable')
# groups specific to microservices-infrastructure
groups.append('role=' + attrs['metadata'].get('role', 'none'))
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
@parses('vsphere_virtual_machine')
@calculate_mi_vars
def vsphere_host(resource, module_name):
raw_attrs = resource['primary']['attributes']
name = raw_attrs['name']
groups = []
attrs = {
'id': raw_attrs['id'],
'ip_address': raw_attrs['ip_address'],
'metadata': parse_dict(raw_attrs, 'configuration_parameters'),
'ansible_ssh_port': 22,
'provider': 'vsphere',
}
try:
attrs.update({
'ansible_ssh_host': raw_attrs['ip_address'],
})
except (KeyError, ValueError):
attrs.update({'ansible_ssh_host': '', })
attrs.update({
'consul_dc': _clean_dc(attrs['metadata'].get('consul_dc', module_name)),
'role': attrs['metadata'].get('role', 'none'),
'ansible_ssh_user': attrs['metadata'].get('ssh_user', 'centos'),
})
groups.append('role=' + attrs['role'])
groups.append('dc=' + attrs['consul_dc'])
return name, attrs, groups
## QUERY TYPES
def query_host(hosts, target):
for name, attrs, _ in hosts:
if name == target:
return attrs
return {}
def query_list(hosts):
groups = defaultdict(dict)
meta = {}
for name, attrs, hostgroups in hosts:
for group in set(hostgroups):
groups[group].setdefault('hosts', [])
groups[group]['hosts'].append(name)
meta[name] = attrs
groups['_meta'] = {'hostvars': meta}
return groups
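# For reference, the `--list` output produced from query_list() follows
# Ansible's dynamic-inventory JSON convention: one object per group plus a
# `_meta` key holding per-host variables. Host and group names below are
# illustrative only.
#
#     {
#         "role=control": {"hosts": ["mi-control-01"]},
#         "dc=dc1": {"hosts": ["mi-control-01"]},
#         "_meta": {"hostvars": {"mi-control-01": {"ansible_ssh_host": "..."}}}
#     }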
def main():
parser = argparse.ArgumentParser(
        prog=__file__, description=__doc__,
formatter_class=argparse.ArgumentDefaultsHelpFormatter, )
modes = parser.add_mutually_exclusive_group(required=True)
modes.add_argument('--list',
action='store_true',
help='list all variables')
modes.add_argument('--host', help='list variables for a single host')
modes.add_argument('--version',
action='store_true',
help='print version and exit')
parser.add_argument('--pretty',
action='store_true',
help='pretty-print output JSON')
parser.add_argument('--nometa',
action='store_true',
help='with --list, exclude hostvars')
default_root = os.environ.get('TERRAFORM_STATE_ROOT',
os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', )))
parser.add_argument('--root',
default=default_root,
help='custom root to search for `.tfstate`s in')
args = parser.parse_args()
if args.version:
print('%s %s' % (__file__, VERSION))
parser.exit()
hosts = iterhosts(iterresources(tfstates(args.root)))
if args.list:
output = query_list(hosts)
if args.nometa:
del output['_meta']
else:
output = query_host(hosts, args.host)
print(json.dumps(output, indent=4 if args.pretty else None))
parser.exit()
if __name__ == '__main__':
main()
|
|
# Copyright (C) 2006-2011, University of Maryland
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/ or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: James Krycka
"""
This module implements the AppFrame class which creates the main frame of the
GUI for the Direct Inversion Reflectometry application including a basic menu,
tool bar, and status bar.
"""
#==============================================================================
from __future__ import print_function
import sys
import wx
from .utilities import resource
from .about import (AboutDialog, APP_TITLE, APP_DESCRIPTION, APP_LICENSE,
APP_CREDITS, APP_TUTORIAL)
from .app_panel import AppPanel
from .wx_utils import choose_fontsize, display_fontsize
# Resource files.
PROG_ICON = "direfl.ico"
#==============================================================================
class AppFrame(wx.Frame):
"""
This class creates the top-level frame for the application and populates it
with application specific panels and widgets.
"""
def __init__(self, parent=None, id=wx.ID_ANY, title=APP_TITLE,
pos=wx.DefaultPosition, size=wx.DefaultSize, name="AppFrame"):
wx.Frame.__init__(self, parent, id, title, pos, size, name=name)
# Display the application's icon in the title bar.
icon = wx.Icon(resource(PROG_ICON), wx.BITMAP_TYPE_ICO)
self.SetIcon(icon)
# Set the default font family and font size for the application.
self.set_default_font()
# Initialize the menu bar with common items.
self.add_menubar()
# Initialize the tool bar.
self.add_toolbar()
# Initialize the status bar.
self.add_statusbar()
# Build the application panels for the GUI on the frame.
AppPanel(frame=self)
# Note: Do not call self.Fit() as this will reduce the frame to its
# bare minimum size; we want it to keep its default size.
def init_GUI(self):
"""
Constructs the GUI for the application on top of the basic frame
already created. The GUI should be built after the splash screen
(if used) is displayed so that this work is done while the user is
viewing the splash screen.
"""
AppPanel(frame=self)
def set_default_font(self):
"""
Sets the default font family and font size for the frame which will be
inherited by all child windows subsequently created.
"""
# Save the system default font information before we make any changes.
fontname = default_fontname = self.GetFont().GetFaceName()
fontsize = default_fontsize = self.GetFont().GetPointSize()
# If requested, override the font name to use. Note that:
# - the MS Windows default font appears to be the same as Tahoma
# - Arial tends to be narrower and taller than Tahoma.
# - Verdana tends to be wider and shorter than Tahoma.
if len(sys.argv) > 1:
if '--tahoma' in sys.argv[1:]:
fontname = "Tahoma"
elif '--arial' in sys.argv[1:]:
fontname = "Arial"
elif '--verdana' in sys.argv[1:]:
fontname = "Verdana"
fontsize = choose_fontsize(fontname=fontname)
# If requested, override the font point size to use.
if len(sys.argv) > 1:
if '--12pt' in sys.argv[1:]:
fontsize = 12
elif '--11pt' in sys.argv[1:]:
fontsize = 11
elif '--10pt' in sys.argv[1:]:
fontsize = 10
elif '--9pt' in sys.argv[1:]:
fontsize = 9
elif '--8pt' in sys.argv[1:]:
fontsize = 8
elif '--7pt' in sys.argv[1:]:
fontsize = 7
elif '--6pt' in sys.argv[1:]:
fontsize = 6
# Set the default font for this and all child windows. The font of the
# frame's title bar is not affected (which is a good thing). However,
# setting the default font does not affect the font used in the frame's
# menu bar or menu items (which is not such a good thing because the
# menu text size may differ from the size used by the application's
# other widgets). The menu font cannot be changed by wxPython.
self.SetFont(wx.Font(fontsize, wx.SWISS, wx.NORMAL, wx.NORMAL, False,
fontname))
# If requested, display font and miscellaneous platform information.
if len(sys.argv) > 1 and '--platform' in sys.argv[1:]:
print("*** Platform =", wx.PlatformInfo)
print("*** Default font is %s Chosen font is %s"
% (default_fontname, self.GetFont().GetFaceName()))
print("*** Default point size = %d Chosen point size = %d"
% (default_fontsize, self.GetFont().GetPointSize()))
display_fontsize(fontname=fontname)
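# Example invocations exercising the options parsed above (illustrative only;
# the launcher script name is an assumption, not part of this module):
#
#     python direfl.py --arial --10pt
#     python direfl.py --tahoma --platform
#
# When no recognized font or size option is given, the system default font
# name and point size are kept.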
def add_menubar(self):
"""Creates a default menu bar, menus, and menu options."""
# Create the menu bar.
mb = wx.MenuBar()
# Add a 'File' menu to the menu bar and define its options.
file_menu = wx.Menu()
_item = file_menu.Append(wx.ID_ANY, "&Exit", "Exit the application")
self.Bind(wx.EVT_MENU, self.OnExit, _item)
mb.Append(file_menu, "&File")
# Add a 'Help' menu to the menu bar and define its options.
help_menu = wx.Menu()
_item = help_menu.Append(wx.ID_ANY, "&About",
"Get description of application")
self.Bind(wx.EVT_MENU, self.OnAbout, _item)
_item = help_menu.Append(wx.ID_ANY, "&Tutorial",
"Locate tutorial and documentation")
self.Bind(wx.EVT_MENU, self.OnTutorial, _item)
_item = help_menu.Append(wx.ID_ANY, "&License",
"Read license and copyright notice")
self.Bind(wx.EVT_MENU, self.OnLicense, _item)
_item = help_menu.Append(wx.ID_ANY, "&Credits",
"Get list of authors and sponsors")
self.Bind(wx.EVT_MENU, self.OnCredits, _item)
mb.Append(help_menu, "&Help")
# Attach the menu bar to the frame.
self.SetMenuBar(mb)
def add_toolbar(self):
"""Creates a default tool bar."""
#tb = self.CreateToolBar()
tb = wx.ToolBar(parent=self, style=wx.TB_HORIZONTAL|wx.NO_BORDER)
tb.Realize()
self.SetToolBar(tb)
def add_statusbar(self):
"""Creates a default status bar."""
sb = self.statusbar = self.CreateStatusBar()
sb.SetFieldsCount(1)
def OnAbout(self, event):
"""Shows the About dialog box."""
dlg = AboutDialog(parent=self, title="About", info=APP_DESCRIPTION,
show_name=True, show_notice=True, show_link=True,
show_link_docs=True)
dlg.ShowModal()
dlg.Destroy()
def OnCredits(self, event):
"""Shows the Credits dialog box."""
dlg = AboutDialog(parent=self, title="Credits", info=APP_CREDITS,
show_name=True, show_notice=True, show_link=False,
show_link_docs=False)
dlg.ShowModal()
dlg.Destroy()
def OnExit(self, event):
"""Terminates the program."""
self.Close()
def OnLicense(self, event):
"""Shows the License dialog box."""
dlg = AboutDialog(parent=self, title="License", info=APP_LICENSE,
show_name=True, show_notice=True, show_link=False,
show_link_docs=False)
dlg.ShowModal()
dlg.Destroy()
def OnTutorial(self, event):
"""Shows the Tutorial dialog box."""
dlg = AboutDialog(parent=self, title="Tutorial", info=APP_TUTORIAL,
show_name=False, show_notice=False, show_link=False,
show_link_docs=True)
dlg.ShowModal()
dlg.Destroy()
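# Illustrative launch sketch (not part of the original module; the real entry
# point, splash-screen handling, and option parsing may differ):
#
#     app = wx.App(redirect=False)
#     frame = AppFrame(title=APP_TITLE)
#     frame.Show(True)
#     app.MainLoop()
#
# When a splash screen is used, the panels can instead be built by calling
# frame.init_GUI() once the splash screen is on display, as described in the
# init_GUI docstring.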
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import uuid
import fixtures
import mock
from oslo_config import cfg
from six.moves import http_client
from testtools import matchers
from keystone.common import controller
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit import test_v3
CONF = cfg.CONF
class IdentityTestCase(test_v3.RestfulTestCase):
"""Test users and groups."""
def setUp(self):
super(IdentityTestCase, self).setUp()
self.group = self.new_group_ref(
domain_id=self.domain_id)
self.group = self.identity_api.create_group(self.group)
self.group_id = self.group['id']
self.credential_id = uuid.uuid4().hex
self.credential = self.new_credential_ref(
user_id=self.user['id'],
project_id=self.project_id)
self.credential['id'] = self.credential_id
self.credential_api.create_credential(
self.credential_id,
self.credential)
# user crud tests
def test_create_user(self):
"""Call ``POST /users``."""
ref = self.new_user_ref(domain_id=self.domain_id)
r = self.post(
'/users',
body={'user': ref})
return self.assertValidUserResponse(r, ref)
def test_create_user_without_domain(self):
"""Call ``POST /users`` without specifying domain.
According to the identity-api specification, if you do not
explicitly specify the domain_id in the entity, it should
take the domain scope of the token as the domain_id.
"""
# Create a user with a role on the domain so we can get a
# domain scoped token
domain = self.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
user = self.new_user_ref(domain_id=domain['id'])
password = user['password']
user = self.identity_api.create_user(user)
user['password'] = password
self.assignment_api.create_grant(
role_id=self.role_id, user_id=user['id'],
domain_id=domain['id'])
ref = self.new_user_ref(domain_id=domain['id'])
ref_nd = ref.copy()
ref_nd.pop('domain_id')
auth = self.build_authentication_request(
user_id=user['id'],
password=user['password'],
domain_id=domain['id'])
r = self.post('/users', body={'user': ref_nd}, auth=auth)
self.assertValidUserResponse(r, ref)
# Now try the same thing without a domain token - which should fail
ref = self.new_user_ref(domain_id=domain['id'])
ref_nd = ref.copy()
ref_nd.pop('domain_id')
auth = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
# TODO(henry-nash): Due to bug #1283539 we currently automatically
# use the default domain_id if a domain scoped token is not being
# used. For now we just check that a deprecation warning has been
# issued. Change the code below to expect a failure once this bug is
# fixed.
with mock.patch(
'oslo_log.versionutils.report_deprecated_feature') as mock_dep:
r = self.post('/users', body={'user': ref_nd}, auth=auth)
self.assertTrue(mock_dep.called)
ref['domain_id'] = CONF.identity.default_domain_id
return self.assertValidUserResponse(r, ref)
def test_create_user_bad_request(self):
"""Call ``POST /users``."""
self.post('/users', body={'user': {}},
expected_status=http_client.BAD_REQUEST)
def test_list_users(self):
"""Call ``GET /users``."""
resource_url = '/users'
r = self.get(resource_url)
self.assertValidUserListResponse(r, ref=self.user,
resource_url=resource_url)
def test_list_users_with_multiple_backends(self):
"""Call ``GET /users`` when multiple backends is enabled.
In this scenario, the controller requires a domain to be specified
either as a filter or by using a domain scoped token.
"""
self.config_fixture.config(group='identity',
domain_specific_drivers_enabled=True)
# Create a user with a role on the domain so we can get a
# domain scoped token
domain = self.new_domain_ref()
self.resource_api.create_domain(domain['id'], domain)
user = self.new_user_ref(domain_id=domain['id'])
password = user['password']
user = self.identity_api.create_user(user)
user['password'] = password
self.assignment_api.create_grant(
role_id=self.role_id, user_id=user['id'],
domain_id=domain['id'])
ref = self.new_user_ref(domain_id=domain['id'])
ref_nd = ref.copy()
ref_nd.pop('domain_id')
auth = self.build_authentication_request(
user_id=user['id'],
password=user['password'],
domain_id=domain['id'])
# First try using a domain scoped token
resource_url = '/users'
r = self.get(resource_url, auth=auth)
self.assertValidUserListResponse(r, ref=user,
resource_url=resource_url)
# Now try with an explicit filter
resource_url = ('/users?domain_id=%(domain_id)s' %
{'domain_id': domain['id']})
r = self.get(resource_url)
self.assertValidUserListResponse(r, ref=user,
resource_url=resource_url)
# Now try the same thing without a domain token or filter,
# which should fail
r = self.get('/users', expected_status=exception.Unauthorized.code)
def test_list_users_with_static_admin_token_and_multiple_backends(self):
# domain-specific operations with the bootstrap ADMIN token are
# disallowed when domain-specific drivers are enabled
self.config_fixture.config(group='identity',
domain_specific_drivers_enabled=True)
self.get('/users', token=CONF.admin_token,
expected_status=exception.Unauthorized.code)
def test_list_users_no_default_project(self):
"""Call ``GET /users`` making sure no default_project_id."""
user = self.new_user_ref(self.domain_id)
user = self.identity_api.create_user(user)
resource_url = '/users'
r = self.get(resource_url)
self.assertValidUserListResponse(r, ref=user,
resource_url=resource_url)
def test_get_user(self):
"""Call ``GET /users/{user_id}``."""
r = self.get('/users/%(user_id)s' % {
'user_id': self.user['id']})
self.assertValidUserResponse(r, self.user)
def test_get_user_with_default_project(self):
"""Call ``GET /users/{user_id}`` making sure of default_project_id."""
user = self.new_user_ref(domain_id=self.domain_id,
project_id=self.project_id)
user = self.identity_api.create_user(user)
r = self.get('/users/%(user_id)s' % {'user_id': user['id']})
self.assertValidUserResponse(r, user)
def test_add_user_to_group(self):
"""Call ``PUT /groups/{group_id}/users/{user_id}``."""
self.put('/groups/%(group_id)s/users/%(user_id)s' % {
'group_id': self.group_id, 'user_id': self.user['id']})
def test_list_groups_for_user(self):
"""Call ``GET /users/{user_id}/groups``."""
self.user1 = self.new_user_ref(
domain_id=self.domain['id'])
password = self.user1['password']
self.user1 = self.identity_api.create_user(self.user1)
self.user1['password'] = password
self.user2 = self.new_user_ref(
domain_id=self.domain['id'])
password = self.user2['password']
self.user2 = self.identity_api.create_user(self.user2)
self.user2['password'] = password
self.put('/groups/%(group_id)s/users/%(user_id)s' % {
'group_id': self.group_id, 'user_id': self.user1['id']})
# Scenarios below are written to test the default policy configuration
# One should be allowed to list one's own groups
auth = self.build_authentication_request(
user_id=self.user1['id'],
password=self.user1['password'])
resource_url = ('/users/%(user_id)s/groups' %
{'user_id': self.user1['id']})
r = self.get(resource_url, auth=auth)
self.assertValidGroupListResponse(r, ref=self.group,
resource_url=resource_url)
# Administrator is allowed to list others' groups
resource_url = ('/users/%(user_id)s/groups' %
{'user_id': self.user1['id']})
r = self.get(resource_url)
self.assertValidGroupListResponse(r, ref=self.group,
resource_url=resource_url)
# Ordinary users should not be allowed to list other's groups
auth = self.build_authentication_request(
user_id=self.user2['id'],
password=self.user2['password'])
r = self.get('/users/%(user_id)s/groups' % {
'user_id': self.user1['id']}, auth=auth,
expected_status=exception.ForbiddenAction.code)
def test_check_user_in_group(self):
"""Call ``HEAD /groups/{group_id}/users/{user_id}``."""
self.put('/groups/%(group_id)s/users/%(user_id)s' % {
'group_id': self.group_id, 'user_id': self.user['id']})
self.head('/groups/%(group_id)s/users/%(user_id)s' % {
'group_id': self.group_id, 'user_id': self.user['id']})
def test_list_users_in_group(self):
"""Call ``GET /groups/{group_id}/users``."""
self.put('/groups/%(group_id)s/users/%(user_id)s' % {
'group_id': self.group_id, 'user_id': self.user['id']})
resource_url = ('/groups/%(group_id)s/users' %
{'group_id': self.group_id})
r = self.get(resource_url)
self.assertValidUserListResponse(r, ref=self.user,
resource_url=resource_url)
self.assertIn('/groups/%(group_id)s/users' % {
'group_id': self.group_id}, r.result['links']['self'])
def test_remove_user_from_group(self):
"""Call ``DELETE /groups/{group_id}/users/{user_id}``."""
self.put('/groups/%(group_id)s/users/%(user_id)s' % {
'group_id': self.group_id, 'user_id': self.user['id']})
self.delete('/groups/%(group_id)s/users/%(user_id)s' % {
'group_id': self.group_id, 'user_id': self.user['id']})
def test_update_user(self):
"""Call ``PATCH /users/{user_id}``."""
user = self.new_user_ref(domain_id=self.domain_id)
del user['id']
r = self.patch('/users/%(user_id)s' % {
'user_id': self.user['id']},
body={'user': user})
self.assertValidUserResponse(r, user)
def test_admin_password_reset(self):
# bootstrap a user as admin
user_ref = self.new_user_ref(domain_id=self.domain['id'])
password = user_ref['password']
user_ref = self.identity_api.create_user(user_ref)
# auth as user should work before a password change
old_password_auth = self.build_authentication_request(
user_id=user_ref['id'],
password=password)
r = self.v3_authenticate_token(old_password_auth, expected_status=201)
old_token = r.headers.get('X-Subject-Token')
# auth as user with a token should work before a password change
old_token_auth = self.build_authentication_request(token=old_token)
self.v3_authenticate_token(old_token_auth, expected_status=201)
# administrative password reset
new_password = uuid.uuid4().hex
self.patch('/users/%s' % user_ref['id'],
body={'user': {'password': new_password}},
expected_status=200)
# auth as user with original password should not work after change
self.v3_authenticate_token(old_password_auth,
expected_status=http_client.UNAUTHORIZED)
# auth as user with an old token should not work after change
self.v3_authenticate_token(old_token_auth,
expected_status=http_client.NOT_FOUND)
# new password should work
new_password_auth = self.build_authentication_request(
user_id=user_ref['id'],
password=new_password)
self.v3_authenticate_token(new_password_auth, expected_status=201)
def test_update_user_domain_id(self):
"""Call ``PATCH /users/{user_id}`` with domain_id."""
user = self.new_user_ref(domain_id=self.domain['id'])
user = self.identity_api.create_user(user)
user['domain_id'] = CONF.identity.default_domain_id
r = self.patch('/users/%(user_id)s' % {
'user_id': user['id']},
body={'user': user},
expected_status=exception.ValidationError.code)
self.config_fixture.config(domain_id_immutable=False)
user['domain_id'] = self.domain['id']
r = self.patch('/users/%(user_id)s' % {
'user_id': user['id']},
body={'user': user})
self.assertValidUserResponse(r, user)
def test_delete_user(self):
"""Call ``DELETE /users/{user_id}``.
As well as making sure the delete succeeds, we ensure
that any credentials that reference this user are
also deleted, while other credentials are unaffected.
In addition, no tokens should remain valid for this user.
"""
# First check the credential for this user is present
r = self.credential_api.get_credential(self.credential['id'])
self.assertDictEqual(r, self.credential)
# Create a second credential with a different user
self.user2 = self.new_user_ref(
domain_id=self.domain['id'],
project_id=self.project['id'])
self.user2 = self.identity_api.create_user(self.user2)
self.credential2 = self.new_credential_ref(
user_id=self.user2['id'],
project_id=self.project['id'])
self.credential_api.create_credential(
self.credential2['id'],
self.credential2)
# Create a token for this user which we can check later
# gets deleted
auth_data = self.build_authentication_request(
user_id=self.user['id'],
password=self.user['password'],
project_id=self.project['id'])
token = self.get_requested_token(auth_data)
# Confirm token is valid for now
self.head('/auth/tokens',
headers={'X-Subject-Token': token},
expected_status=200)
# Now delete the user
self.delete('/users/%(user_id)s' % {
'user_id': self.user['id']})
# Deleting the user should have deleted any credentials
# that reference this user
self.assertRaises(exception.CredentialNotFound,
self.credential_api.get_credential,
self.credential['id'])
# And no tokens should remain valid for this user
tokens = self.token_provider_api._persistence._list_tokens(
self.user['id'])
self.assertEqual(0, len(tokens))
# But the credential for user2 is unaffected
r = self.credential_api.get_credential(self.credential2['id'])
self.assertDictEqual(r, self.credential2)
# group crud tests
def test_create_group(self):
"""Call ``POST /groups``."""
ref = self.new_group_ref(domain_id=self.domain_id)
r = self.post(
'/groups',
body={'group': ref})
return self.assertValidGroupResponse(r, ref)
def test_create_group_bad_request(self):
"""Call ``POST /groups``."""
self.post('/groups', body={'group': {}},
expected_status=http_client.BAD_REQUEST)
def test_list_groups(self):
"""Call ``GET /groups``."""
resource_url = '/groups'
r = self.get(resource_url)
self.assertValidGroupListResponse(r, ref=self.group,
resource_url=resource_url)
def test_get_group(self):
"""Call ``GET /groups/{group_id}``."""
r = self.get('/groups/%(group_id)s' % {
'group_id': self.group_id})
self.assertValidGroupResponse(r, self.group)
def test_update_group(self):
"""Call ``PATCH /groups/{group_id}``."""
group = self.new_group_ref(domain_id=self.domain_id)
del group['id']
r = self.patch('/groups/%(group_id)s' % {
'group_id': self.group_id},
body={'group': group})
self.assertValidGroupResponse(r, group)
def test_update_group_domain_id(self):
"""Call ``PATCH /groups/{group_id}`` with domain_id."""
group = self.new_group_ref(domain_id=self.domain['id'])
group = self.identity_api.create_group(group)
group['domain_id'] = CONF.identity.default_domain_id
r = self.patch('/groups/%(group_id)s' % {
'group_id': group['id']},
body={'group': group},
expected_status=exception.ValidationError.code)
self.config_fixture.config(domain_id_immutable=False)
group['domain_id'] = self.domain['id']
r = self.patch('/groups/%(group_id)s' % {
'group_id': group['id']},
body={'group': group})
self.assertValidGroupResponse(r, group)
def test_delete_group(self):
"""Call ``DELETE /groups/{group_id}``."""
self.delete('/groups/%(group_id)s' % {
'group_id': self.group_id})
def test_create_user_password_not_logged(self):
# When a user is created, the password isn't logged at any level.
log_fix = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
ref = self.new_user_ref(domain_id=self.domain_id)
self.post(
'/users',
body={'user': ref})
self.assertNotIn(ref['password'], log_fix.output)
def test_update_password_not_logged(self):
# When admin modifies user password, the password isn't logged at any
# level.
log_fix = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
# bootstrap a user as admin
user_ref = self.new_user_ref(domain_id=self.domain['id'])
password = user_ref['password']
user_ref = self.identity_api.create_user(user_ref)
# administrative password reset
new_password = uuid.uuid4().hex
self.patch('/users/%s' % user_ref['id'],
body={'user': {'password': new_password}},
expected_status=200)
self.assertNotIn(password, log_fix.output)
self.assertNotIn(new_password, log_fix.output)
class IdentityV3toV2MethodsTestCase(unit.TestCase):
"""Test users V3 to V2 conversion methods."""
def setUp(self):
super(IdentityV3toV2MethodsTestCase, self).setUp()
self.load_backends()
self.user_id = uuid.uuid4().hex
self.default_project_id = uuid.uuid4().hex
self.tenant_id = uuid.uuid4().hex
# User with only default_project_id in ref
self.user1 = {'id': self.user_id,
'name': self.user_id,
'default_project_id': self.default_project_id,
'domain_id': CONF.identity.default_domain_id}
# User without default_project_id or tenantId in ref
self.user2 = {'id': self.user_id,
'name': self.user_id,
'domain_id': CONF.identity.default_domain_id}
# User with both tenantId and default_project_id in ref
self.user3 = {'id': self.user_id,
'name': self.user_id,
'default_project_id': self.default_project_id,
'tenantId': self.tenant_id,
'domain_id': CONF.identity.default_domain_id}
# User with only tenantId in ref
self.user4 = {'id': self.user_id,
'name': self.user_id,
'tenantId': self.tenant_id,
'domain_id': CONF.identity.default_domain_id}
# Expected result if the user is meant to have a tenantId element
self.expected_user = {'id': self.user_id,
'name': self.user_id,
'username': self.user_id,
'tenantId': self.default_project_id}
# Expected result if the user is not meant to have a tenantId element
self.expected_user_no_tenant_id = {'id': self.user_id,
'name': self.user_id,
'username': self.user_id}
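# Taken together, these fixtures encode the expected v3-to-v2 mapping: 'name'
# is copied to 'username', a v3 'default_project_id' becomes the v2
# 'tenantId' (a stale v3-only 'tenantId' is dropped), and 'domain_id' is
# removed from the returned ref.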
def test_v3_to_v2_user_method(self):
updated_user1 = controller.V2Controller.v3_to_v2_user(self.user1)
self.assertIs(self.user1, updated_user1)
self.assertDictEqual(self.user1, self.expected_user)
updated_user2 = controller.V2Controller.v3_to_v2_user(self.user2)
self.assertIs(self.user2, updated_user2)
self.assertDictEqual(self.user2, self.expected_user_no_tenant_id)
updated_user3 = controller.V2Controller.v3_to_v2_user(self.user3)
self.assertIs(self.user3, updated_user3)
self.assertDictEqual(self.user3, self.expected_user)
updated_user4 = controller.V2Controller.v3_to_v2_user(self.user4)
self.assertIs(self.user4, updated_user4)
self.assertDictEqual(self.user4, self.expected_user_no_tenant_id)
def test_v3_to_v2_user_method_list(self):
user_list = [self.user1, self.user2, self.user3, self.user4]
updated_list = controller.V2Controller.v3_to_v2_user(user_list)
self.assertEqual(len(updated_list), len(user_list))
for i, ref in enumerate(updated_list):
# Order should not change.
self.assertIs(ref, user_list[i])
self.assertDictEqual(self.user1, self.expected_user)
self.assertDictEqual(self.user2, self.expected_user_no_tenant_id)
self.assertDictEqual(self.user3, self.expected_user)
self.assertDictEqual(self.user4, self.expected_user_no_tenant_id)
class UserSelfServiceChangingPasswordsTestCase(test_v3.RestfulTestCase):
def setUp(self):
super(UserSelfServiceChangingPasswordsTestCase, self).setUp()
self.user_ref = self.new_user_ref(domain_id=self.domain['id'])
password = self.user_ref['password']
self.user_ref = self.identity_api.create_user(self.user_ref)
self.user_ref['password'] = password
self.token = self.get_request_token(self.user_ref['password'], 201)
def get_request_token(self, password, expected_status):
auth_data = self.build_authentication_request(
user_id=self.user_ref['id'],
password=password)
r = self.v3_authenticate_token(auth_data,
expected_status=expected_status)
return r.headers.get('X-Subject-Token')
def change_password(self, expected_status, **kwargs):
"""Returns a test response for a change password request."""
return self.post('/users/%s/password' % self.user_ref['id'],
body={'user': kwargs},
token=self.token,
expected_status=expected_status)
def test_changing_password(self):
# original password works
token_id = self.get_request_token(self.user_ref['password'],
expected_status=201)
# original token works
old_token_auth = self.build_authentication_request(token=token_id)
self.v3_authenticate_token(old_token_auth, expected_status=201)
# change password
new_password = uuid.uuid4().hex
self.change_password(password=new_password,
original_password=self.user_ref['password'],
expected_status=204)
# old password fails
self.get_request_token(self.user_ref['password'],
expected_status=http_client.UNAUTHORIZED)
# old token fails
self.v3_authenticate_token(old_token_auth,
expected_status=http_client.NOT_FOUND)
# new password works
self.get_request_token(new_password, expected_status=201)
def test_changing_password_with_missing_original_password_fails(self):
r = self.change_password(password=uuid.uuid4().hex,
expected_status=http_client.BAD_REQUEST)
self.assertThat(r.result['error']['message'],
matchers.Contains('original_password'))
def test_changing_password_with_missing_password_fails(self):
r = self.change_password(original_password=self.user_ref['password'],
expected_status=http_client.BAD_REQUEST)
self.assertThat(r.result['error']['message'],
matchers.Contains('password'))
def test_changing_password_with_incorrect_password_fails(self):
self.change_password(password=uuid.uuid4().hex,
original_password=uuid.uuid4().hex,
expected_status=http_client.UNAUTHORIZED)
def test_changing_password_with_disabled_user_fails(self):
# disable the user account
self.user_ref['enabled'] = False
self.patch('/users/%s' % self.user_ref['id'],
body={'user': self.user_ref})
self.change_password(password=uuid.uuid4().hex,
original_password=self.user_ref['password'],
expected_status=http_client.UNAUTHORIZED)
def test_changing_password_not_logged(self):
# When a user changes their password, the password isn't logged at any
# level.
log_fix = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
# change password
new_password = uuid.uuid4().hex
self.change_password(password=new_password,
original_password=self.user_ref['password'],
expected_status=204)
self.assertNotIn(self.user_ref['password'], log_fix.output)
self.assertNotIn(new_password, log_fix.output)
|
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the \"License\"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gzip
import logging
import tarfile
import uuid
import xml.etree.cElementTree as elementTree
from xml.etree.cElementTree import Element
from io import StringIO
from io import BytesIO
from textwrap import dedent
import docker
import os
import yaml
from copy import copy
class Cluster(object):
"""
Base Cluster class. This is intended to be a generic interface
to different types of clusters. Clusters could be Kubernetes clusters,
Docker swarms, or cloud compute/container services.
"""
def deploy_flow(self, flow, name=None, vols=None):
"""
Deploys a flow to the cluster.
"""
def __enter__(self):
"""
Allocate ephemeral cluster resources.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Clean up ephemeral cluster resources.
"""
class SingleNodeDockerCluster(Cluster):
"""
A "cluster" which consists of a single docker node. Useful for
testing or use-cases which do not span multiple compute nodes.
"""
def __init__(self):
self.minifi_version = os.environ['MINIFI_VERSION']
self.nifi_version = '1.7.0'
self.minifi_root = '/opt/minifi/nifi-minifi-cpp-' + self.minifi_version
self.nifi_root = '/opt/nifi/nifi-' + self.nifi_version
self.network = None
self.containers = []
self.images = []
self.tmp_files = []
# Get docker client
self.client = docker.from_env()
def deploy_flow(self,
flow,
name=None,
vols=None,
engine='minifi-cpp'):
"""
Compiles the flow to a valid config file and overlays it into a new image.
"""
if vols is None:
vols = {}
logging.info('Deploying %s flow...%s', engine, name)
if name is None:
name = engine + '-' + str(uuid.uuid4())
logging.info('Flow name was not provided; using generated name \'%s\'', name)
# Create network if necessary
if self.network is None:
net_name = 'nifi-' + str(uuid.uuid4())
logging.info('Creating network: %s', net_name)
self.network = self.client.networks.create(net_name)
if engine == 'nifi':
self.deploy_nifi_flow(flow, name, vols)
elif engine == 'minifi-cpp':
self.deploy_minifi_cpp_flow(flow, name, vols)
else:
raise Exception('invalid flow engine: \'%s\'' % engine)
def deploy_minifi_cpp_flow(self, flow, name, vols):
# Build configured image
dockerfile = dedent("""FROM {base_image}
USER root
ADD config.yml {minifi_root}/conf/config.yml
RUN chown minificpp:minificpp {minifi_root}/conf/config.yml
USER minificpp
""".format(name=name,hostname=name,
base_image='apacheminificpp:' + self.minifi_version,
minifi_root=self.minifi_root))
test_flow_yaml = minifi_flow_yaml(flow)
logging.info('Using generated flow config yml:\n%s', test_flow_yaml)
conf_file_buffer = BytesIO()
try:
conf_file_buffer.write(test_flow_yaml.encode('utf-8'))
conf_file_len = conf_file_buffer.tell()
conf_file_buffer.seek(0)
context_files = [
{
'name': 'config.yml',
'size': conf_file_len,
'file_obj': conf_file_buffer
}
]
configured_image = self.build_image(dockerfile, context_files)
finally:
conf_file_buffer.close()
logging.info('Creating and running docker container for flow...')
container = self.client.containers.run(
configured_image[0],
detach=True,
name=name,
network=self.network.name,
volumes=vols)
logging.info('Started container \'%s\'', container.name)
self.containers.append(container)
def deploy_nifi_flow(self, flow, name, vols):
dockerfile = dedent("""FROM {base_image}
USER root
ADD flow.xml.gz {nifi_root}/conf/flow.xml.gz
RUN chown nifi:nifi {nifi_root}/conf/flow.xml.gz
RUN sed -i -e 's/^\(nifi.remote.input.host\)=.*/\\1={name}/' {nifi_root}/conf/nifi.properties
RUN sed -i -e 's/^\(nifi.remote.input.socket.port\)=.*/\\1=5000/' {nifi_root}/conf/nifi.properties
USER nifi
""".format(name=name,
base_image='apache/nifi:' + self.nifi_version,
nifi_root=self.nifi_root))
test_flow_xml = nifi_flow_xml(flow, self.nifi_version)
logging.info('Using generated flow config xml:\n%s', test_flow_xml)
conf_file_buffer = BytesIO()
try:
with gzip.GzipFile(mode='wb', fileobj=conf_file_buffer) as conf_gz_file_buffer:
conf_gz_file_buffer.write(test_flow_xml.encode())
conf_file_len = conf_file_buffer.tell()
conf_file_buffer.seek(0)
context_files = [
{
'name': 'flow.xml.gz',
'size': conf_file_len,
'file_obj': conf_file_buffer
}
]
configured_image = self.build_image(dockerfile, context_files)
finally:
conf_file_buffer.close()
logging.info('Creating and running docker container for flow...')
container = self.client.containers.run(
configured_image[0],
detach=True,
name=name,
hostname=name,
network=self.network.name,
volumes=vols)
logging.info('Started container \'%s\'', container.name)
self.containers.append(container)
def build_image(self, dockerfile, context_files):
conf_dockerfile_buffer = BytesIO()
docker_context_buffer = BytesIO()
try:
# Overlay conf onto base nifi image
conf_dockerfile_buffer.write(dockerfile.encode())
conf_dockerfile_buffer.seek(0)
with tarfile.open(mode='w', fileobj=docker_context_buffer) as docker_context:
dockerfile_info = tarfile.TarInfo('Dockerfile')
dockerfile_info.size = len(conf_dockerfile_buffer.getvalue())
docker_context.addfile(dockerfile_info,
fileobj=conf_dockerfile_buffer)
for context_file in context_files:
file_info = tarfile.TarInfo(context_file['name'])
file_info.size = context_file['size']
docker_context.addfile(file_info,
fileobj=context_file['file_obj'])
docker_context_buffer.seek(0)
logging.info('Creating configured image...')
configured_image = self.client.images.build(fileobj=docker_context_buffer,
custom_context=True,
rm=True,
forcerm=True)
self.images.append(configured_image)
finally:
conf_dockerfile_buffer.close()
docker_context_buffer.close()
return configured_image
def __enter__(self):
"""
Allocate ephemeral cluster resources.
"""
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Clean up ephemeral cluster resources
"""
# Clean up containers
for container in self.containers:
logging.info('Cleaning up container: %s', container.name)
container.remove(v=True, force=True)
# Clean up images
for image in self.images:
logging.info('Cleaning up image: %s', image[0].id)
self.client.images.remove(image[0].id, force=True)
# Clean up network
if self.network is not None:
logging.info('Cleaning up network: %s', self.network.name)
self.network.remove()
# Clean up tmp files
for tmp_file in self.tmp_files:
os.remove(tmp_file)
class Connectable(object):
def __init__(self,
name=None,
auto_terminate=None):
self.uuid = uuid.uuid4()
if name is None:
self.name = str(self.uuid)
else:
self.name = name
if auto_terminate is None:
self.auto_terminate = []
else:
self.auto_terminate = auto_terminate
self.connections = {}
self.out_proc = self
def connect(self, connections):
for rel in connections:
# Ensure that rel is not auto-terminated
if rel in self.auto_terminate:
del self.auto_terminate[self.auto_terminate.index(rel)]
# Add to set of output connections for this rel
if rel not in self.connections:
self.connections[rel] = []
self.connections[rel].append(connections[rel])
return self
def __rshift__(self, other):
"""
Right shift operator to support flow DSL, for example:
GetFile('/input') >> LogAttribute() >> PutFile('/output')
"""
connected = copy(self)
connected.connections = copy(self.connections)
if self.out_proc is self:
connected.out_proc = connected
else:
connected.out_proc = copy(connected.out_proc)
if isinstance(other, tuple):
if isinstance(other[0], tuple):
for rel_tuple in other:
rel = {rel_tuple[0]: rel_tuple[1]}
connected.out_proc.connect(rel)
else:
rel = {other[0]: other[1]}
connected.out_proc.connect(rel)
else:
connected.out_proc.connect({'success': other})
connected.out_proc = other
return connected
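# Fan-out sketch (illustrative): a tuple of (relationship, destination) pairs
# on the right-hand side routes named relationships to different downstream
# processors, while a bare processor is connected via the implicit 'success'
# relationship shown in the docstring above:
#
#     PutFile('/tmp/output') >> (('success', LogAttribute()),
#                                ('failure', DebugFlow()))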
class Processor(Connectable):
def __init__(self,
clazz,
properties=None,
schedule=None,
name=None,
controller_services=None,
auto_terminate=None):
super(Processor, self).__init__(name=name,
auto_terminate=auto_terminate)
if controller_services is None:
controller_services = []
if schedule is None:
schedule = {}
if properties is None:
properties = {}
self.clazz = clazz
self.properties = properties
self.controller_services = controller_services
self.schedule = {
'scheduling strategy': 'EVENT_DRIVEN',
'scheduling period': '1 sec',
'penalization period': '30 sec',
'yield period': '1 sec',
'run duration nanos': 0
}
self.schedule.update(schedule)
def nifi_property_key(self, key):
"""
Returns the Apache NiFi-equivalent property key for the given key. This is often, but not always, the same as
the internal key.
"""
return key
class InvokeHTTP(Processor):
def __init__(self, url,
method='GET',
ssl_context_service=None):
properties = {'Remote URL': url, 'HTTP Method': method}
controller_services = []
if ssl_context_service is not None:
properties['SSL Context Service'] = ssl_context_service.name
controller_services.append(ssl_context_service)
super(InvokeHTTP, self).__init__('InvokeHTTP',
properties=properties,
controller_services=controller_services,
auto_terminate=['success',
'response',
'retry',
'failure',
'no retry'])
class ListenHTTP(Processor):
def __init__(self, port, cert=None):
properties = {'Listening Port': port}
if cert is not None:
properties['SSL Certificate'] = cert
properties['SSL Verify Peer'] = 'no'
super(ListenHTTP, self).__init__('ListenHTTP',
properties=properties,
auto_terminate=['success'])
class LogAttribute(Processor):
def __init__(self):
super(LogAttribute, self).__init__('LogAttribute',
auto_terminate=['success'])
class DebugFlow(Processor):
def __init__(self):
super(DebugFlow, self).__init__('DebugFlow')
class AttributesToJSON(Processor):
def __init__(self, destination, attributes):
super(AttributesToJSON, self).__init__('AttributesToJSON',
properties={'Destination': destination, 'Attributes List': attributes},
schedule={'scheduling period': '0 sec'},
auto_terminate=['failure'])
class GetFile(Processor):
def __init__(self, input_dir):
super(GetFile, self).__init__('GetFile',
properties={'Input Directory': input_dir, 'Keep Source File': 'true'},
schedule={'scheduling period': '2 sec'},
auto_terminate=['success'])
class GenerateFlowFile(Processor):
def __init__(self, file_size):
super(GenerateFlowFile, self).__init__('GenerateFlowFile',
properties={'File Size': file_size},
schedule={'scheduling period': '0 sec'},
auto_terminate=['success'])
class PutFile(Processor):
def __init__(self, output_dir):
super(PutFile, self).__init__('PutFile',
properties={'Directory': output_dir},
auto_terminate=['success', 'failure'])
def nifi_property_key(self, key):
if key == 'Output Directory':
return 'Directory'
else:
return key
class InputPort(Connectable):
def __init__(self, name=None, remote_process_group=None):
super(InputPort, self).__init__(name=name)
self.remote_process_group = remote_process_group
class RemoteProcessGroup(object):
def __init__(self, url,
name=None):
self.uuid = uuid.uuid4()
if name is None:
self.name = str(self.uuid)
else:
self.name = name
self.url = url
class ControllerService(object):
def __init__(self, name=None, properties=None):
self.id = str(uuid.uuid4())
if name is None:
self.name = str(uuid.uuid4())
logging.info('Controller service name was not provided; using generated name \'%s\'', self.name)
else:
self.name = name
if properties is None:
properties = {}
self.properties = properties
class SSLContextService(ControllerService):
def __init__(self, name=None, cert=None, key=None, ca_cert=None):
super(SSLContextService, self).__init__(name=name)
self.service_class = 'SSLContextService'
if cert is not None:
self.properties['Client Certificate'] = cert
if key is not None:
self.properties['Private Key'] = key
if ca_cert is not None:
self.properties['CA Certificate'] = ca_cert
def minifi_flow_yaml(connectable, root=None, visited=None):
if visited is None:
visited = []
if root is None:
res = {
'Flow Controller': {
'name': 'MiNiFi Flow'
},
'Processors': [],
'Connections': [],
'Remote Processing Groups': [],
'Controller Services': []
}
else:
res = root
visited.append(connectable)
if hasattr(connectable, 'name'):
connectable_name = connectable.name
else:
connectable_name = str(connectable.uuid)
if isinstance(connectable, InputPort):
group = connectable.remote_process_group
res_group = None
for res_group_candidate in res['Remote Processing Groups']:
assert isinstance(res_group_candidate, dict)
if res_group_candidate['id'] == str(group.uuid):
res_group = res_group_candidate
if res_group is None:
res_group = {
'name': group.name,
'id': str(group.uuid),
'url': group.url,
'timeout': '30 sec',
'yield period': '10 sec',
'Input Ports': []
}
res['Remote Processing Groups'].append(res_group)
res_group['Input Ports'].append({
'id': str(connectable.uuid),
'name': connectable.name,
'max concurrent tasks': 1,
'Properties': {}
})
if isinstance(connectable, Processor):
res['Processors'].append({
'name': connectable_name,
'id': str(connectable.uuid),
'class': 'org.apache.nifi.processors.standard.' + connectable.clazz,
'scheduling strategy': connectable.schedule['scheduling strategy'],
'scheduling period': connectable.schedule['scheduling period'],
'penalization period': connectable.schedule['penalization period'],
'yield period': connectable.schedule['yield period'],
'run duration nanos': connectable.schedule['run duration nanos'],
'Properties': connectable.properties,
'auto-terminated relationships list': connectable.auto_terminate
})
for svc in connectable.controller_services:
if svc in visited:
continue
visited.append(svc)
res['Controller Services'].append({
'name': svc.name,
'id': svc.id,
'class': svc.service_class,
'Properties': svc.properties
})
for conn_name in connectable.connections:
conn_procs = connectable.connections[conn_name]
if isinstance(conn_procs, list):
for proc in conn_procs:
res['Connections'].append({
'name': str(uuid.uuid4()),
'source id': str(connectable.uuid),
'source relationship name': conn_name,
'destination id': str(proc.uuid)
})
if proc not in visited:
minifi_flow_yaml(proc, res, visited)
else:
res['Connections'].append({
'name': str(uuid.uuid4()),
'source id': str(connectable.uuid),
'source relationship name': conn_name,
'destination id': str(conn_procs.uuid)
})
if conn_procs not in visited:
minifi_flow_yaml(conn_procs, res, visited)
if root is None:
return yaml.dump(res, default_flow_style=False)
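# Illustrative call (the flow below is hypothetical):
#
#     config_yml = minifi_flow_yaml(GetFile('/tmp/input') >> LogAttribute())
#
# The returned YAML document has the top-level sections 'Flow Controller',
# 'Processors', 'Connections', 'Remote Processing Groups' and
# 'Controller Services'; deploy_minifi_cpp_flow() writes it to
# conf/config.yml inside the configured image.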
def nifi_flow_xml(connectable, nifi_version=None, root=None, visited=None):
if visited is None:
visited = []
position = Element('position')
position.set('x', '0.0')
position.set('y', '0.0')
comment = Element('comment')
styles = Element('styles')
bend_points = Element('bendPoints')
label_index = Element('labelIndex')
label_index.text = '1'
z_index = Element('zIndex')
z_index.text = '0'
if root is None:
res = Element('flowController')
max_timer_driven_thread_count = Element('maxTimerDrivenThreadCount')
max_timer_driven_thread_count.text = '10'
res.append(max_timer_driven_thread_count)
max_event_driven_thread_count = Element('maxEventDrivenThreadCount')
max_event_driven_thread_count.text = '5'
res.append(max_event_driven_thread_count)
root_group = Element('rootGroup')
root_group_id = Element('id')
root_group_id_text = str(uuid.uuid4())
root_group_id.text = root_group_id_text
root_group.append(root_group_id)
root_group_name = Element('name')
root_group_name.text = root_group_id_text
root_group.append(root_group_name)
res.append(root_group)
root_group.append(position)
root_group.append(comment)
res.append(Element('controllerServices'))
res.append(Element('reportingTasks'))
res.set('encoding-version', '1.2')
else:
res = root
visited.append(connectable)
if hasattr(connectable, 'name'):
connectable_name_text = connectable.name
else:
connectable_name_text = str(connectable.uuid)
if isinstance(connectable, InputPort):
input_port = Element('inputPort')
input_port_id = Element('id')
input_port_id.text = str(connectable.uuid)
input_port.append(input_port_id)
input_port_name = Element('name')
input_port_name.text = connectable_name_text
input_port.append(input_port_name)
input_port.append(position)
input_port.append(comment)
input_port_scheduled_state = Element('scheduledState')
input_port_scheduled_state.text = 'RUNNING'
input_port.append(input_port_scheduled_state)
input_port_max_concurrent_tasks = Element('maxConcurrentTasks')
input_port_max_concurrent_tasks.text = '1'
input_port.append(input_port_max_concurrent_tasks)
next(res.iterfind('rootGroup')).append(input_port)
if isinstance(connectable, Processor):
conn_destination = Element('processor')
proc_id = Element('id')
proc_id.text = str(connectable.uuid)
conn_destination.append(proc_id)
proc_name = Element('name')
proc_name.text = connectable_name_text
conn_destination.append(proc_name)
conn_destination.append(position)
conn_destination.append(styles)
conn_destination.append(comment)
proc_class = Element('class')
proc_class.text = 'org.apache.nifi.processors.standard.' + connectable.clazz
conn_destination.append(proc_class)
proc_bundle = Element('bundle')
proc_bundle_group = Element('group')
proc_bundle_group.text = 'org.apache.nifi'
proc_bundle.append(proc_bundle_group)
proc_bundle_artifact = Element('artifact')
proc_bundle_artifact.text = 'nifi-standard-nar'
proc_bundle.append(proc_bundle_artifact)
proc_bundle_version = Element('version')
proc_bundle_version.text = nifi_version
proc_bundle.append(proc_bundle_version)
conn_destination.append(proc_bundle)
proc_max_concurrent_tasks = Element('maxConcurrentTasks')
proc_max_concurrent_tasks.text = '1'
conn_destination.append(proc_max_concurrent_tasks)
proc_scheduling_period = Element('schedulingPeriod')
proc_scheduling_period.text = connectable.schedule['scheduling period']
conn_destination.append(proc_scheduling_period)
proc_penalization_period = Element('penalizationPeriod')
proc_penalization_period.text = connectable.schedule['penalization period']
conn_destination.append(proc_penalization_period)
proc_yield_period = Element('yieldPeriod')
proc_yield_period.text = connectable.schedule['yield period']
conn_destination.append(proc_yield_period)
proc_bulletin_level = Element('bulletinLevel')
proc_bulletin_level.text = 'WARN'
conn_destination.append(proc_bulletin_level)
proc_loss_tolerant = Element('lossTolerant')
proc_loss_tolerant.text = 'false'
conn_destination.append(proc_loss_tolerant)
proc_scheduled_state = Element('scheduledState')
proc_scheduled_state.text = 'RUNNING'
conn_destination.append(proc_scheduled_state)
proc_scheduling_strategy = Element('schedulingStrategy')
proc_scheduling_strategy.text = connectable.schedule['scheduling strategy']
conn_destination.append(proc_scheduling_strategy)
proc_execution_node = Element('executionNode')
proc_execution_node.text = 'ALL'
conn_destination.append(proc_execution_node)
proc_run_duration_nanos = Element('runDurationNanos')
proc_run_duration_nanos.text = str(connectable.schedule['run duration nanos'])
conn_destination.append(proc_run_duration_nanos)
for property_key, property_value in connectable.properties.items():
proc_property = Element('property')
proc_property_name = Element('name')
proc_property_name.text = connectable.nifi_property_key(property_key)
proc_property.append(proc_property_name)
proc_property_value = Element('value')
proc_property_value.text = property_value
proc_property.append(proc_property_value)
conn_destination.append(proc_property)
for auto_terminate_rel in connectable.auto_terminate:
proc_auto_terminated_relationship = Element('autoTerminatedRelationship')
proc_auto_terminated_relationship.text = auto_terminate_rel
conn_destination.append(proc_auto_terminated_relationship)
next(res.iterfind('rootGroup')).append(conn_destination)
# Python 2 equivalent: res.iterfind('rootGroup').next().append(conn_destination)
for svc in connectable.controller_services:
if svc in visited:
continue
visited.append(svc)
controller_service = Element('controllerService')
controller_service_id = Element('id')
controller_service_id.text = str(svc.id)
controller_service.append(controller_service_id)
controller_service_name = Element('name')
controller_service_name.text = svc.name
controller_service.append(controller_service_name)
controller_service.append(comment)
controller_service_class = Element('class')
controller_service_class.text = svc.service_class
controller_service.append(controller_service_class)
controller_service_bundle = Element('bundle')
controller_service_bundle_group = Element('group')
controller_service_bundle_group.text = svc.group
controller_service_bundle.append(controller_service_bundle_group)
controller_service_bundle_artifact = Element('artifact')
controller_service_bundle_artifact.text = svc.artifact
controller_service_bundle.append(controller_service_bundle_artifact)
controller_service_bundle_version = Element('version')
controller_service_bundle_version.text = nifi_version
controller_service_bundle.append(controller_service_bundle_version)
controller_service.append(controller_service_bundle)
controller_enabled = Element('enabled')
controller_enabled.text = 'true'
controller_service.append(controller_enabled)
for property_key, property_value in svc.properties.items():
controller_service_property = Element('property')
controller_service_property_name = Element('name')
controller_service_property_name.text = property_key
controller_service_property.append(controller_service_property_name)
controller_service_property_value = Element('value')
controller_service_property_value.text = property_value
controller_service_property.append(controller_service_property_value)
controller_service.append(controller_service_property)
next(res.iterfind('rootGroup')).append(controller_service)
# Python 2 equivalent: res.iterfind('rootGroup').next().append(controller_service)
for conn_name in connectable.connections:
conn_destinations = connectable.connections[conn_name]
if isinstance(conn_destinations, list):
for conn_destination in conn_destinations:
connection = nifi_flow_xml_connection(res,
bend_points,
conn_name,
connectable,
label_index,
conn_destination,
z_index)
next(res.iterfind('rootGroup')).append(connection)
# Python 2 equivalent: res.iterfind('rootGroup').next().append(connection)
if conn_destination not in visited:
nifi_flow_xml(conn_destination, nifi_version, res, visited)
else:
connection = nifi_flow_xml_connection(res,
bend_points,
conn_name,
connectable,
label_index,
conn_destinations,
z_index)
next(res.iterfind('rootGroup')).append(connection)
# Python 2 equivalent: res.iterfind('rootGroup').next().append(connection)
if conn_destinations not in visited:
nifi_flow_xml(conn_destinations, nifi_version, res, visited)
if root is None:
return ('<?xml version="1.0" encoding="UTF-8" standalone="no"?>'
+ "\n"
+ elementTree.tostring(res, encoding='utf-8').decode('utf-8'))
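# Illustrative call (mirrors minifi_flow_yaml above; the flow is hypothetical):
#
#     flow_xml = nifi_flow_xml(GetFile('/tmp/input') >> LogAttribute(), '1.7.0')
#
# The returned document is a standalone <flowController> XML string that
# deploy_nifi_flow() gzips into conf/flow.xml.gz for the NiFi image.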
def nifi_flow_xml_connection(res, bend_points, conn_name, connectable, label_index, destination, z_index):
connection = Element('connection')
connection_id = Element('id')
connection_id.text = str(uuid.uuid4())
connection.append(connection_id)
connection_name = Element('name')
connection.append(connection_name)
connection.append(bend_points)
connection.append(label_index)
connection.append(z_index)
connection_source_id = Element('sourceId')
connection_source_id.text = str(connectable.uuid)
connection.append(connection_source_id)
connection_source_group_id = Element('sourceGroupId')
connection_source_group_id.text = next(res.iterfind('rootGroup/id')).text
# Python 2 equivalent: connection_source_group_id.text = res.iterfind('rootGroup/id').next().text
connection.append(connection_source_group_id)
connection_source_type = Element('sourceType')
if isinstance(connectable, Processor):
connection_source_type.text = 'PROCESSOR'
elif isinstance(connectable, InputPort):
connection_source_type.text = 'INPUT_PORT'
else:
raise Exception('Unexpected source type: %s' % type(connectable))
connection.append(connection_source_type)
connection_destination_id = Element('destinationId')
connection_destination_id.text = str(destination.uuid)
connection.append(connection_destination_id)
connection_destination_group_id = Element('destinationGroupId')
connection_destination_group_id.text = next(res.iterfind('rootGroup/id')).text
""" connection_destination_group_id.text = res.iterfind('rootGroup/id').next().text """
connection.append(connection_destination_group_id)
connection_destination_type = Element('destinationType')
if isinstance(destination, Processor):
connection_destination_type.text = 'PROCESSOR'
elif isinstance(destination, InputPort):
connection_destination_type.text = 'INPUT_PORT'
else:
raise Exception('Unexpected destination type: %s' % type(destination))
connection.append(connection_destination_type)
connection_relationship = Element('relationship')
if not isinstance(connectable, InputPort):
connection_relationship.text = conn_name
connection.append(connection_relationship)
connection_max_work_queue_size = Element('maxWorkQueueSize')
connection_max_work_queue_size.text = '10000'
connection.append(connection_max_work_queue_size)
connection_max_work_queue_data_size = Element('maxWorkQueueDataSize')
connection_max_work_queue_data_size.text = '1 GB'
connection.append(connection_max_work_queue_data_size)
connection_flow_file_expiration = Element('flowFileExpiration')
connection_flow_file_expiration.text = '0 sec'
connection.append(connection_flow_file_expiration)
return connection
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license (see the COPYING file).
""" QiBuild """
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import pytest
import qisrc.review
import qisrc.manifest
def test_simple_read(tmpdir):
""" Test Simple Read """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<repo project="foo/bar.git" src="lib/bar" branch="next"
remotes="origin" />
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
assert len(manifest.repos) == 1
bar1 = manifest.repos[0]
assert bar1.src == "lib/bar"
assert bar1.clone_url == "[email protected]:foo/bar.git"
assert bar1.default_branch == "next"
def test_src_are_unique(tmpdir):
""" Test Src Are Unique """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<repo project="foo/bar.git" src="lib/bar" branch="next"
remotes="origin" />
<repo project="bar/bar.git" src="lib/bar" branch="next"
remotes="origin" />
</manifest>
""")
with pytest.raises(qisrc.manifest.ManifestError) as e:
qisrc.manifest.Manifest(manifest_xml.strpath)
assert "Found two projects sharing the same sources" in str(e.value)
def test_projects_are_unique(tmpdir):
""" Test Projects Are Unique """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<repo project="foo/bar.git" src="bar" remotes="origin" />
<repo project="foo/bar.git" src="bar2" remotes="origin" />
</manifest>
""")
with pytest.raises(qisrc.manifest.ManifestError) as e:
qisrc.manifest.Manifest(manifest_xml.strpath)
assert "foo/bar.git found twice" in str(e.value)
def test_empty_src(tmpdir):
""" Test Empty Src """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<repo project="foo/bar.git" branch="master" remotes="origin" />
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
bar1 = manifest.repos[0]
assert bar1.src == "foo/bar"
def test_no_remotes_attr(tmpdir):
""" Test No Remote Attr """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<repo project="foo/bar.git" src="lib/bar"/>
</manifest>
""")
with pytest.raises(qisrc.manifest.ManifestError) as e:
qisrc.manifest.Manifest(manifest_xml.strpath)
assert str(e.value) == "Missing 'remotes' attribute"
def test_several_reviews(tmpdir):
""" Test Several Reviews """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="review1" url="[email protected]" review="true" />
<remote name="review2" url="[email protected]" review="true" />
</manifest>
""")
with pytest.raises(qisrc.manifest.ManifestError) as e:
qisrc.manifest.Manifest(manifest_xml.strpath)
assert "Only one" in str(e.value)
def test_no_matching_remote(tmpdir):
""" Test No Matching Remote """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<repo project="foo/bar.git" src="lib/bar" remotes="invalid" />
</manifest>
""")
with pytest.raises(qisrc.manifest.ManifestError) as e:
qisrc.manifest.Manifest(manifest_xml.strpath)
assert str(e.value) == "No matching remote: invalid for repo foo/bar.git"
def test_repo_branch(tmpdir):
""" Test Repo Branch """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<repo project="bar.git" remotes="origin" />
<repo project="foo.git" branch="devel" remotes="origin" />
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
bar1 = manifest.repos[0]
foo1 = manifest.repos[1]
assert bar1.default_branch == "master"
assert foo1.default_branch == "devel"
def test_remote_branch(tmpdir):
""" Test Remote Branch """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" default_branch="release" />
<repo project="bar.git" remotes="origin" />
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
bar1 = manifest.repos[0]
assert bar1.default_branch == "release"
def test_invalid_group(tmpdir):
""" Test Invalid Group """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<repo project="foo.git" remotes="origin" />
<groups>
<group name="foo-group">
<project name="foo.git" />
<project name="bar.git" />
</group>
</groups>
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
with pytest.raises(qisrc.manifest.ManifestError) as e:
manifest.get_repos(groups=["foo-group"])
assert "foo-group" in str(e.value)
assert "bar.git" in str(e.value)
with pytest.raises(qisrc.manifest.ManifestError) as e:
manifest.get_repos(groups=["mygroup"])
assert "No such group: mygroup" in str(e.value)
def test_review_projects(tmpdir):
""" Test Review Project """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<remote name="gerrit" url="http://gerrit:8080" review="true" />
<repo project="foo/bar.git" src="lib/bar" remotes="gerrit" />
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
assert len(manifest.repos) == 1
bar1 = manifest.repos[0]
assert bar1.src == "lib/bar"
assert bar1.clone_url == "http://gerrit:8080/foo/bar.git"
assert bar1.review is True
def test_review_projects_with_two_remotes(tmpdir):
""" Test Review Projects With Two Remotes """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<remote name="gerrit" url="http://gerrit:8080" review="true" />
<repo project="foo/bar.git" src="lib/bar" remotes="origin gerrit" />
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
assert len(manifest.repos) == 1
bar1 = manifest.repos[0]
assert bar1.src == "lib/bar"
assert len(bar1.remotes) == 2
origin_remote = bar1.remotes[0]
gerrit_remote = bar1.remotes[1]
assert origin_remote.name == "origin"
assert gerrit_remote.name == "gerrit"
assert gerrit_remote.review is True
assert bar1.review_remote == gerrit_remote
assert bar1.review is True
assert bar1.default_remote.name == "origin"
def test_no_review(tmpdir):
""" Test No Review """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<remote name="gerrit" url="http://gerrit:8080" review="true" />
<repo project="foo/bar.git" src="lib/bar" remotes="origin gerrit" />
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath, review=False)
assert len(manifest.repos) == 1
[repo] = manifest.repos
assert repo.review is False
assert repo.default_remote.name == "origin"
assert len(repo.remotes) == 1
[remote] = repo.remotes
assert remote.name == "origin"
assert remote.review is False
def test_default_remote(tmpdir):
""" Test Default Remote """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<remote name="gerrit" url="http://gerrit:8080" review="true" />
<repo project="foo.git" src="foo" remotes="origin gerrit"
default_remote="gerrit" />
<repo project="bar.git" src="bar" remotes="origin gerrit" />
<repo project="baz.git" src="baz" remotes="origin" />
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
assert manifest.get_repo("foo.git").default_remote.name == "gerrit"
assert manifest.get_repo("bar.git").default_remote.name == "origin"
assert manifest.get_repo("baz.git").default_remote.name == "origin"
def test_groups(tmpdir):
""" Test Groups """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<repo project="qi/libqi.git" remotes="origin" />
<repo project="qi/libqimessaging.git" remotes="origin" />
<repo project="qi/naoqi.git" remotes="origin" />
<groups>
<group name="qim">
<project name="qi/libqi.git" />
<project name="qi/libqimessaging.git" />
</group>
</groups>
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
git_projects = manifest.get_repos(groups=["qim"])
assert len(git_projects) == 2
assert git_projects[0].clone_url == "[email protected]:qi/libqi.git"
assert git_projects[1].clone_url == "[email protected]:qi/libqimessaging.git"
def test_default_group(tmpdir):
""" Test Default Group """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<repo project="a.git" remotes="origin" />
<repo project="b.git" remotes="origin" />
<repo project="c.git" remotes="origin" />
<groups>
<group name="a_group" default="true" >
<project name="a.git" />
<project name="b.git" />
</group>
</groups>
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
git_projects = manifest.get_repos()
assert len(git_projects) == 2
def test_default_branch(tmpdir):
""" Test Default Branch """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<branch default="devel" />
<repo project="foo/bar.git" src="lib/bar" remotes="origin" />
<repo project="foo/foo.git" src="lib/foo" remotes="origin" branch="tutu" />
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
assert len(manifest.repos) == 2
assert manifest.default_branch == "devel"
bar1 = manifest.repos[0]
assert bar1.default_branch == "devel"
foo1 = manifest.repos[1]
assert foo1.default_branch == "tutu"
def test_multiple_remotes(tmpdir):
""" Test Multiple Remotes """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<repo project="foo/bar.git" src="lib/bar" remotes="origin">
<upstream name="kernel-lts" url="git.kernel.org" />
</repo>
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
assert len(manifest.repos) == 1
foo1 = manifest.repos[0]
assert len(foo1.remotes) == 2
def test_fixed_ref(tmpdir):
""" Test Fixed Ref """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<repo project="foo/bar.git"
src="lib/bar"
remotes="origin"
ref="v0.1" />
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
foo1 = manifest.repos[0]
assert foo1.default_branch is None
assert foo1.fixed_ref == "v0.1"
def test_fixed_ref_and_branch_are_exclusive(tmpdir):
""" Test Fixed Ref And Branch are Exclusive """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<repo project="foo/bar.git"
src="lib/bar"
remotes="origin"
ref="v0.1"
branch="master" />
</manifest>
""")
with pytest.raises(Exception) as e:
qisrc.manifest.Manifest(manifest_xml.strpath)
assert "'branch' and 'ref' are mutually exclusive" in e.value.args[0]
def test_from_git_repo(git_server):
""" Test From Git Repo """
git_server.create_repo("foo")
git_server.switch_manifest_branch("devel")
git_server.create_repo("bar")
manifest_repo = git_server.root.join("src", "manifest").strpath
manifest = qisrc.manifest.from_git_repo(manifest_repo, "master")
assert len(manifest.repos) == 1
manifest = qisrc.manifest.from_git_repo(manifest_repo, "devel")
assert len(manifest.repos) == 2
def test_all_repos(tmpdir):
""" Test All Repo """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<repo project="a.git" remotes="origin" />
<repo project="b.git" remotes="origin" />
<repo project="c.git" remotes="origin" />
<groups>
<group name="a_group" default="true" >
<project name="a.git" />
<project name="b.git" />
</group>
</groups>
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
git_projects = manifest.get_repos(get_all=True)
assert len(git_projects) == 3
def test_import_parser(tmpdir):
""" Test Import Parser """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<import manifest="a.git" remotes="origin" />
<import manifest="b.git" remotes="origin" />
</manifest>
""")
manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
import_manifest = manifest.import_manifest
assert len(import_manifest) == 2
assert len(import_manifest[0].remote_names) == 1
assert import_manifest[0].default_remote_name == "origin"
assert import_manifest[0].remotes[0].url == "[email protected]:a.git"
def test_import_parser_error_manifest(tmpdir):
""" Test Import Parser Error Manifest """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<import remotes="origin" />
</manifest>
""")
with pytest.raises(Exception) as e:
_manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
assert "Missing 'manifest' attribute" in e.value.args[0]
def test_import_parser_error_remote_empty(tmpdir):
""" Test Import Parser Error Remote Empty """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<import manifest="a.git" remotes="" />
</manifest>
""")
with pytest.raises(Exception) as e:
_manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
assert "Empty 'remotes' attribute" in e.value.args[0]
def test_import_parser_error_remote(tmpdir):
""" Test Import Parser Error Remote """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<remote name="origin" url="[email protected]" />
<import manifest="a.git"/>
</manifest>
""")
with pytest.raises(Exception) as e:
_manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
assert "Missing 'remotes' attribute" in e.value.args[0]
def test_import_parser_error_remote_missing(tmpdir):
""" Test Import Parsder Error Remote Missing """
manifest_xml = tmpdir.join("manifest.xml")
manifest_xml.write(""" \
<manifest>
<import manifest="a.git" remotes="origin" />
</manifest>
""")
with pytest.raises(Exception) as e:
_manifest = qisrc.manifest.Manifest(manifest_xml.strpath)
assert "No matching remote: origin for repo a.git" in e.value.args[0]
|
|
'''
Technical analysis with popular indicators
'''
import numpy as np
import pandas as pd
import json
import time
import pandas_datareader.data as web  # pandas.io.data was removed from pandas; pandas_datareader provides the equivalent DataReader
from datetime import date, datetime, timedelta
from collections import defaultdict
start = datetime(2010, 1, 1)
end = date.today()
df1 = pd.read_csv('data/companylist.csv')
df2 = pd.read_csv('data/companylist1.csv')
df3 = pd.read_csv('data/companylist2.csv')
c = web.DataReader("F", 'yahoo', start, end)
symbols = np.append(df1.Symbol.values, df2.Symbol.values)
symbols = np.append(symbols, df3.Symbol.values)
prev_er_date = date.today() - timedelta(days = 98)
current_er_date = date.today() - timedelta(days = 10)
symbol = 'AAPL'
class tech_analysis(object):
def __init__(self,symbol, prev_er_date, current_er_date):
self.data = web.DataReader(symbol, 'yahoo', prev_er_date + timedelta(days = 1), current_er_date)
self.prev_er_date = prev_er_date + timedelta(days = 1)
self.current_er_date = current_er_date
def on_balance_volume(self):
'''start_date is the date after the previous earning report and
end_date is the date before earning report'''
# use the data already fetched for this symbol instead of re-downloading "AAPL"
df = self.data
# start_date = self.prev_er_date + timedelta(days = 1)
# end_date = self.current_er_date - timedelta(days = 1)
# a = self.data.loc[start_date]
# df = self.data.reset_index()
# df = df[df['Date']<= end_date][df['Date']>= start_date]
# df = df.loc[lambda df1: df1.Date > start_date and df1.Date < end_date, :]
prev_obv = 0
p_price = 0
current_obv = 0
for i, value in df.iterrows():
if value['Close'] > p_price:
current_obv = prev_obv + value['Volume']
elif value['Close'] < p_price:
current_obv = prev_obv - value['Volume']
else:
current_obv = prev_obv
# carry the running total forward so OBV accumulates across the period
prev_obv = current_obv
p_price = value['Close']
return current_obv
def accumulation_distribution(self):
'''
There are three steps to calculating the Accumulation Distribution Line (ADL).
First, calculate the Money Flow Multiplier.
Second, multiply this value by volume to find the Money Flow Volume.
Third, create a running total of Money Flow Volume to form the Accumulation Distribution Line (ADL).
'''
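# Illustrative example (made-up numbers): with Close=24, Low=20, High=25 the
# multiplier is ((24-20)-(25-24))/(25-20) = 0.6, and that bar's Money Flow
# Volume would be 0.6 * Volume.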
money_flow_multiplier_day = (self.data.iloc[-1]['Close']-self.data.iloc[-1]['Low'] - (self.data.iloc[-1]['High']-self.data.iloc[-1]['Close'] ))/(self.data.iloc[-1]['High']-self.data.iloc[-1]['Low'])
money_flow_multiplier_week = (self.data.iloc[-1]['Close']-min(self.data['Low'][-5:]) - (max(self.data['High'][-5:])-self.data.iloc[-1]['Close'] ))/(max(self.data['High'][-5:])-min(self.data['Low'][-5:]))
money_flow_multiplier_biweek = (self.data.iloc[-1]['Close']-min(self.data['Low'][-10:]) - (max(self.data['High'][-10:])-self.data.iloc[-1]['Close'] ))/(max(self.data['High'][-10:])-min(self.data['Low'][-10:]))
money_flow_multiplier_quarter = (self.data.iloc[-1]['Close']-min(self.data['Low']) - (max(self.data['High'])-self.data.iloc[-1]['Close'] ))/(max(self.data['High'])-min(self.data['Low']))
money_flow_vol = None
ADL = None
prev_ADL = 0
return money_flow_multiplier_day, money_flow_multiplier_week, money_flow_multiplier_biweek, money_flow_multiplier_quarter
def avg_true_range(self):
'''
Typically, the Average True Range (ATR) is based on 14 periods and
can be calculated on an intraday, daily, weekly or monthly basis.
For this example, the ATR will be based on daily data.
Because there must be a beginning, the first TR value is simply the High minus the Low,
and the first 14-day ATR is the average of the daily TR values for the last 14 days.
After that, Wilder sought to smooth the data by incorporating the previous period's ATR value.
'''
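# Illustrative example (made-up numbers): with High=50, Low=47 and a previous
# Close of 45, TR = max(50-47, |50-45|, |47-45|) = 5.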
data_len = self.data.shape[0]
TRs = []
pos_DMs = []
neg_DMs = []
DXs = []
ADX = None
for i in xrange(1,data_len):
high = self.data.iloc[i]['High']
low = self.data.iloc[i]['Low']
prev_high = self.data.iloc[i-1]['High']
prev_close = self.data.iloc[i-1]['Close']
prev_low = self.data.iloc[i-1]['Low']
pos_DM1 = max(high-prev_high, 0) if (high-prev_high) > (prev_low - low) else 0
neg_DM1 = max(prev_low - low, 0) if (prev_low - low) > (high - prev_high) else 0
TR = max(high-low, abs(high - prev_close), abs(low - prev_close))
TRs.append(TR)
pos_DMs.append(pos_DM1)
neg_DMs.append(neg_DM1)
if i > 13:
TR14 = sum(TRs[i-14:])
pos_DM14 = sum(pos_DMs[i-14:])
neg_DM14 = sum(neg_DMs[i-14:])
pos_DI14 = 100*pos_DM14/TR14
neg_DI14 = 100*neg_DM14/TR14
DI14_diff = abs(pos_DI14 - neg_DI14)
DI14_sum = (pos_DI14 + neg_DI14)
DX = 100*DI14_diff/DI14_sum
DXs.append(DX)
if i > 26:
# ADX is the mean of the most recent 14 DX values (a single scalar)
ADX = np.mean(DXs[-14:])
return ADX
def aroon_indicator(self, days_high = 25):
'''
days_high = 25
The Aroon oscillator is a technical indicator used to measure whether a security is in a trend,
and the magnitude of that trend. The indicator can also be used to identify when a new trend is set to begin.
The indicator is comprised of two lines: an Aroon-up line and an Aroon-down line.
A security is considered to be in an uptrend when the Aroon-up line is above 70, along with being above the Aroon-down line.
The security is in a downtrend when the Aroon-down line is above 70 and also above the Aroon-up line.
'''
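# Illustrative example (made-up numbers): if the 25-day high occurred 5 days
# ago, Aroon-up = ((25 - 5)/25) * 100 = 80.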
data_len = self.data.shape[0]
prev_high_ix = np.argmax(self.data['High'][:days_high+1])
prev_high = max(self.data['High'][:days_high])
prev_low_ix = np.argmin(self.data['Low'][:days_high+1])
prev_low = min(self.data['Low'][:days_high])
aroon_ups = []
aroon_downs = []
for i in xrange(days_high, data_len):
if (self.data['High'][i] > prev_high) :
prev_high_ix = i
prev_high = self.data['High'][i]
elif i - prev_high_ix > days_high:
# the old high has dropped out of the window; rescan the last days_high+1 bars
prev_high_ix = i - days_high + np.argmax(self.data['High'][i-days_high:i+1])
prev_high = max(self.data['High'][i-days_high:i+1])
if (self.data['Low'][i] < prev_low):
prev_low_ix = i
prev_low = self.data['Low'][i]
elif i - prev_low_ix > days_high:
# the old low has dropped out of the window; rescan the last days_high+1 bars
prev_low_ix = i - days_high + np.argmin(self.data['Low'][i-days_high:i+1])
prev_low = min(self.data['Low'][i-days_high:i+1])
aroon_up = ((days_high - (i-prev_high_ix))/float(days_high))*100
aroon_down = ((days_high - (i-prev_low_ix))/float(days_high))*100
aroon_ups.append(aroon_up)
aroon_downs.append(aroon_down)
return aroon_ups, aroon_downs
def MACD(self, EMA1_ = 12, EMA2_ = 26):
'''
Moving average convergence divergence (MACD) is a trend-following momentum indicator that shows the relationship between two moving averages of prices.
The MACD is calculated by subtracting the 26-day exponential moving average (EMA) from the 12-day EMA.
A nine-day EMA of the MACD, called the "signal line", is then plotted on top of the MACD, functioning as a trigger for buy and sell signals.
'''
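# Illustrative example (made-up numbers): if the 12-day EMA is 101.5 and the
# 26-day EMA is 100.0, MACD = 1.5; the signal line is a 9-period EMA of the
# MACD series.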
EMA1 = self.EMA_(period = EMA1_)
EMA2 = self.EMA_(period = EMA2_)
MACDs = []
for i in xrange(len(EMA2)):
MACD = EMA1[EMA2_ - EMA1_ + i] - EMA2[i]
MACDs.append(MACD)
signals = self.EMA_(period = 9, data = MACDs)
return MACDs, signals
def EMA_(self, period=10, data=None):
# default arguments cannot reference self, so resolve the default here
if data is None:
data = self.data['Close']
SMA = sum(data[:period])/float(period)
mult = (2 / float(period + 1))
EMA = SMA
EMAs = [EMA]
for i in xrange(period+1, len(data)+1):
EMA = (data[i-1] - EMA) * mult + EMA
EMAs.append(EMA)
return EMAs
def SMA_(self, period=10, data=None):
# default arguments cannot reference self, so resolve the default here
if data is None:
data = self.data['Close']
SMAs = []
for i in xrange(period, len(data)+1):
SMA = sum(data[i-period:i])/float(period)
SMAs.append(SMA)
return SMAs
def RSI(self,period = 14):
'''
Relative Strength Index (RSI) is an extremely popular momentum indicator that has been featured in a number of articles,
interviews and books over the years. In particular, Constance Brown's book,
Technical Analysis for the Trading Professional, features the concept of bull market and bear market ranges for RSI.
Andrew Cardwell, Brown's RSI mentor, introduced positive and negative reversals for RSI.
In addition, Cardwell turned the notion of divergence, literally and figuratively, on its head.
'''
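# Illustrative example (made-up numbers): with an average gain of 1.0 and an
# average loss of 0.5, RS = 2 and RSI = 100 - 100/(1+2) = 66.7.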
gains = []
losses = []
avg_gains = []
avg_losses = []
RSs = []
RSIs = []
for i in xrange(1,self.data.shape[0]):
change = self.data['Close'][i] - self.data['Close'][i-1]
if change < 0:
losses.append(abs(change))
gains.append(0)
else:
gains.append(change)
losses.append(0)
if i >= period:
avg_gain = np.mean(gains[i-period:])
avg_loss = np.mean(losses[i-period:])
RS = avg_gain / avg_loss if avg_loss != 0 else 99999
RSI = 0 if avg_loss == 0 else 100 - (100/(1+RS))
RSs.append(RS)
RSIs.append(RSI)
avg_gains.append(avg_gain)
avg_losses.append(avg_loss)
return RSs,RSIs
def stochastic_oscillator(self,period = 14):
'''
K = (Current Close - Lowest Low)/(Highest High - Lowest Low) * 100
D = 3-day SMA of K
Lowest Low = lowest low for the look-back period
Highest High = highest high for the look-back period
K is multiplied by 100 to move the decimal point two places
'''
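# Illustrative example (made-up numbers): with Close=108, Lowest Low=100 and
# Highest High=110, K = (108-100)/(110-100)*100 = 80; D is a 3-period SMA of K.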
stochastic_oscillators = []
for i in xrange(period,self.data.shape[0]+1):
high = max(self.data['High'][i-period:i])
low = min(self.data['Low'][i-period:i])
current_close = self.data['Close'][i-1]
sc = (current_close-low)/(high-low)*100
stochastic_oscillators.append(sc)
D = self.SMA_(period = 3, data = stochastic_oscillators)
return stochastic_oscillators, D
def chaikin_money_flow(self, period = 20):
'''
1. Money Flow Multiplier = [(Close - Low) - (High - Close)] /(High - Low)
2. Money Flow Volume = Money Flow Multiplier x Volume for the Period
3. 20-period CMF = 20-period Sum of Money Flow Volume / 20 period Sum of Volume
'''
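# Illustrative example (made-up numbers): if the 20-day sum of Money Flow
# Volume is 3M and the 20-day sum of Volume is 10M, CMF = 3M/10M = 0.3.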
mf_vols =[]
CMFs = []
vols = []
for i in xrange(self.data.shape[0]):
mf_mult = ((self.data['Close'][i] - self.data['Low'][i]) - (self.data['High'][i] - self.data['Close'][i]))/(self.data['High'][i] - self.data['Low'][i])
mf_vol = mf_mult * self.data['Volume'][i]
vols.append(self.data['Volume'][i])
mf_vols.append(mf_vol)
if i >= period - 1:
cmf = sum(mf_vols[i-period+1:i+1])/sum(vols[i-period+1:i+1])
CMFs.append(cmf)
return CMFs
def price_relative(self,symbol = 'SPY'):
'''
Price Relative = Base Security / Comparative Security
Ratio Symbol Close = Close of First Symbol / Close of Second Symbol
Ratio Symbol Open = Open of First Symbol / Close of Second Symbol
Ratio Symbol High = High of First Symbol / Close of Second Symbol
Ratio Symbol Low = Low of First Symbol / Close of Second Symbol
'''
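# Illustrative example (made-up numbers): if the stock closes at 50 and SPY
# closes at 200, the price relative is 50/200 = 0.25; the loop below tracks its
# day-over-day percentage change and the outperformance versus the benchmark.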
second_data = web.DataReader(symbol, 'yahoo', self.prev_er_date, self.current_er_date)
changes = []
diffs = []
for i in xrange(1, len(self.data['Close'])):
prev_price_rel = self.data['Close'][i-1] / second_data['Close'][i-1]
price_rel = self.data['Close'][i] / second_data['Close'][i]
change_price_rel = (price_rel - prev_price_rel)/prev_price_rel
change_data = (self.data['Close'][i] - self.data['Close'][i-1]) / self.data['Close'][i-1]
change_second_data = (second_data['Close'][i] - second_data['Close'][i-1]) / second_data['Close'][i-1]
diff = change_data - change_second_data
changes.append(change_price_rel)
diffs.append(diff)
return changes, diffs
a = tech_analysis(symbol,prev_er_date, current_er_date)
# print a.on_balance_volume()
print a.accumulation_distribution()
|
|
#!/usr/bin/env python
import sys
import time
import math
import numpy
import random
import scipy.linalg
#=================
def ellipsePoints(angleinc, center, a, b, alpha):
'''
Generate a sequence of x,y points given the parameters of an
ellipse, and an angular increment.
convention note: ellipse points are created as x,y coordinates, so alpha
is measured as positive values towards the y-axis
note: generate_ellipse() below is a faster version of this
'''
cosa = numpy.cos(alpha)
sina = numpy.sin(alpha)
points = []
for angle in numpy.arange(0, 2*numpy.pi, angleinc):
acosangle = a * numpy.cos(angle)
bsinangle = b * numpy.sin(angle)
row = center[0] + acosangle * cosa - bsinangle * sina
col = center[1] + acosangle * sina + bsinangle * cosa
points.append((row,col))
return points
#=================
def ellipseKeyPoints(center, a, b, alpha):
'''
Calculate the points at each end of the ellipse axes.
convention note: ellipse points are created as x,y coordinates, so alpha
is measured as positive values towards the y-axis
'''
points = ellipsePoints(numpy.pi/2.0, center, a, b, alpha)
keypoints = {}
center = tuple(center)
keypoints[center] = {'axis': 'center', 'angle': None}
axes = ['a','b']
for i in range(4):
axis = axes[i%2]
angle = alpha+i*numpy.pi/2.0
while angle < 0:
angle += 2*numpy.pi
keypoints[points[i]] = {'axis': axis, 'angle': angle}
return keypoints
#=================
def drawEllipse(shape, angleinc, center, a, b, alpha):
'''
Generate a zero initialized image array with an ellipse drawn
by setting pixels to 1.
convention note: ellipse points are x,y coordinates, so alpha
is measured as positive values towards the y-axis
'''
result = numpy.zeros(shape, numpy.int)
points = ellipsePoints(angleinc, center, a, b, alpha)
for point in points:
try:
result[int(point[0]), int(point[1])] = 1
except IndexError:
# skip points that fall outside the image bounds
continue
return result
#=================
def algebraic2parametric(coeff):
'''
Based on matlab function "ellipse_param.m" which accompanies
"Least-Squares Fitting of Circles and Ellipses", W. Gander, G. H. Golub, R. Strebel,
BIT Numerical Mathematics, Springer 1994
convert the coefficients (a,b,c,d,e,f) of the algebraic equation:
ax^2 + bxy + cy^2 + dx + ey + f = 0
to the parameters of the parametric equation. The parameters are
returned as a dictionary containing:
center - center of the ellipse
a - major axis
b - minor axis
alpha - angle of major axis
convention note: alpha is measured as positive values towards the y-axis
'''
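# Illustrative example: the unit circle x^2 + y^2 - 1 = 0 corresponds to
# coeff = (1, 0, 1, 0, 0, -1) and yields center (0, 0), a = b = 1, alpha = 0.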
#print coeff
#print ("A=%.3f B=%.3f C=%.3f D=%.3f E=%.3f F=%.3f"
# %(coeff[0], coeff[1], coeff[2], coeff[3], coeff[4], coeff[5],))
if numpy.any(numpy.isnan(coeff)) or numpy.any(numpy.isinf(coeff)):
return None
A = numpy.array((coeff[0], coeff[1]/2, coeff[1]/2, coeff[2]))
A.shape = 2,2
bb = numpy.asarray(coeff[3:5])
c = coeff[5]
D,Q = scipy.linalg.eig(A)
D = D.real
det = D[0]*D[1]
if det <= 0:
return None
else:
bs = numpy.dot(Q.transpose(), bb)
alpha = numpy.arctan2(Q[1,0], Q[0,0])
zs = scipy.linalg.solve(-2*numpy.diagflat(D), bs)
z = numpy.dot(Q, zs)
h = numpy.dot(-bs.transpose(), zs) / 2 - c
a = numpy.sqrt(h/D[0])
b = numpy.sqrt(h/D[1])
## correct backwards major/minor axes
## 'major axis as a, minor axis as b'
if b > a:
a, b = b, a
alpha = math.pi/2 + alpha
#print "alpha", alpha
if alpha <= -math.pi/2:
alpha += math.pi
elif alpha > math.pi/2:
alpha -= math.pi
return {'center':z, 'a':a, 'b':b, 'alpha':alpha}
#=================
def solveEllipseB2AC(points):
'''
Based on Matlab code from: "Direct Least Square Fitting of Ellipses"
Andrew Fitzgibbon, Maurizio Pilu, Robert B. Fisher. Tern Analysis
and Machine Intelligence, Vol 21, No 5, May 1999.
This method has a tendency to crash, but is very fast
probably should use QR decomposition to avoid crashing on singularities
convention note: ellipse points are x,y coordinates, so alpha
is measured as positive values towards the y-axis
'''
X = numpy.array(points, numpy.float)
D = numpy.column_stack((X[:,0]**2, X[:,0]*X[:,1], X[:,1]**2, X[:,0], X[:,1], numpy.ones(X.shape[0])))
S = numpy.dot(D.transpose(), D)
C = numpy.zeros((6,6), numpy.float)
C[0,2] = -2
C[1,1] = 1
C[2,0] = -2
### replace eig with QR decomp
geval,gevec = scipy.linalg.eig(a=S, b=C)
geval = geval.real
gevec = gevec.real
Neg = numpy.nonzero(numpy.logical_and(geval<0, numpy.logical_not(numpy.isinf(geval))))
a = gevec[:,Neg]
a = numpy.ravel(a)
if len(a) == 0:
return None
return algebraic2parametric(a)
##========================
##========================
def solveEllipseByQRdecomp(points, center=(0,0)):
"""
QR decomposition is not the fastest method
but it is by far the most stable
"""
t0 = time.time()
xy = numpy.array(points, dtype=numpy.float64) - numpy.array(center, dtype=numpy.float64)
X = numpy.column_stack((
xy[:,0]**2,
xy[:,0]*xy[:,1],
xy[:,1]**2,
))
Y = numpy.ones(xy.shape[0])
### solve it by QR decomposition
Q, R = numpy.linalg.qr(X)
if numpy.linalg.det(R) == 0:
print "Singular matrix in calculation"
return None
QT = numpy.transpose(Q)
Rinv = numpy.linalg.inv(R)
beta = numpy.dot(numpy.dot(Rinv, QT), Y)
algebraic = (beta[0], beta[1], beta[2], 0, 0, -1)
params = algebraic2parametric(algebraic)
if params is None:
return None
params['center'] = center
return params
##========================
def weightedLeastSquares(X, Y, W):
"""
solve using the normal equations with no manipulation
"""
### solve it
XTW = numpy.transpose(X)*W
XTWX = numpy.dot(XTW, X)
if numpy.linalg.det(XTWX) == 0:
print "Singular matrix in calculation"
return None
XTWXinv = numpy.linalg.inv(XTWX)
beta = numpy.dot(numpy.dot(XTWXinv, XTW), Y)
return beta
#=================
def totalLeastSquareEllipse(points, center=(0,0), weights=None, epsilon=1e-5, maxiter=10):
'''
This was implemented by Neil Voss
uses simple linear least squares to solve the general equation for
an ellipse, but implements the total least squares algorithm.
http://en.wikipedia.org/wiki/Total_least_squares
total least squares assumes some points are better than others
so it uses an iterative approach to
down weight points with higher fit error
and points with smaller error are weighed higher
takes a (N,2) numpy array containing ellipse points and
return the best least square fit for an ellipse
values A,B,C
where
Ax^2 + Bxy +Cy^2 + Dx + Ey + F = 0
D = E = 0 to center the ellipse on the origin
F = -1 to force the general conic equation to be an ellipse
convention note: ellipse points are x,y coordinates, so alpha
is measured as positive values towards the y-axis
'''
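# Sketch of the reweighting used below: each iteration fits with the current
# weights, normalises the absolute residuals, then sets w_i = exp(-normerr_i**2),
# so the best-fitting points keep weights near 1 while outliers are pushed
# towards 0; iteration stops once the mean error changes by less than epsilon.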
t0 = time.time()
xy = numpy.array(points, dtype=numpy.float64) - numpy.array(center, dtype=numpy.float64)
X = numpy.column_stack((
xy[:,0]**2,
xy[:,0]*xy[:,1],
xy[:,1]**2,
))
Y = numpy.ones(xy.shape[0])
### setup weights if necessary
W = weights
if W is None:
W = numpy.ones(Y.shape) # even weights for the observations
### solve it
sys.stderr.write("total least squares")
err0 = None
for i in range(maxiter):
sys.stderr.write(".")
beta = weightedLeastSquares(X, Y, W)
if beta is None:
return None
## calculate the absolute mean error
err = numpy.absolute(numpy.dot(X, beta) - Y).ravel()
#print "totalLeastSquares iter %d error: %.4f"%(i, err.mean())
## fit to a normal distribution
normerr = ( err - err.min() )/err.std()
## calculate new weights based on
W = numpy.exp( -1 * normerr**2 )
if W.sum() == 0:
# apDisplay is not imported in this module; report the problem via stderr instead
sys.stderr.write("Failed to set weights\n")
return beta
## see if we can stop
if err0 is not None:
change = numpy.absolute(err-err0).mean()
if change < epsilon:
break
err0 = err
algebraic = (beta[0], beta[1], beta[2], 0, 0, -1)
params = algebraic2parametric(algebraic)
if params is None:
return None
params['center'] = center
return params
#=================
def solveEllipseGander(points):
'''
Solve the ellipse that best fits the given points.
Based on the matlab function "algellipse.m" in the files that
accompany: "Least-Squares Fitting of Circles and Ellipses", W. Gander, G. H. Golub, R. Strebel,
BIT Numerical Mathematics, Springer 1994
This method seems to go O(n^2), so can be slow with lots of points
convention note: ellipse points are x,y coordinates, so alpha
is measured as positive values towards the y-axis
'''
X = numpy.array(points)
a = numpy.column_stack((X[:,0]**2, X[:,0]*X[:,1], X[:,1]**2, X[:,0], X[:,1], numpy.ones(X.shape[0])))
U, S, Vh = scipy.linalg.svd(a)
V = Vh.transpose()
u = numpy.ravel(V[:,5:6])
return algebraic2parametric(u)
#=================
def solveEllipseOLS(points, center=(0,0)):
"""
Solve Ellipse using oridinary least squares (OLS) closed form equation
Note: this method is designed to have the center to be 0,0
because the CTF is always centered
This was implemented by Neil Voss for use in the ACE2 program in 2010
* Algebra was performed using the maxima program
* Designed to have a fixed center point
takes a (N,2) numpy array containing ellipse points and
return the best least square fit for an ellipse
values A,B,C
where
Ax^2 + Bxy +Cy^2 + Dx + Ey + F = 0
D = E = 0 to center the ellipse on the origin
F = -1 to force the general conic equation to be an ellipse
convention note: ellipse points are x,y coordinates, so alpha
is measured as positive values towards the y-axis
"""
### power twos
X = numpy.array(points, numpy.float) - numpy.array(center, numpy.float)
### power twos
p2 = numpy.power(X, 2.0)
Sx2 = p2[:,0].sum()
Sy2 = p2[:,1].sum()
Sxy = (X[:,0]*X[:,1]).sum()
### power fours
p4 = numpy.power(X, 4.0)
Sx4 = p4[:,0].sum()
Sy4 = p4[:,1].sum()
Sx2y2 = (p2[:,0]*p2[:,1]).sum()
Sx3y = (numpy.power(X[:,0], 3.0)*X[:,1]).sum()
Sxy3 = (X[:,0]*numpy.power(X[:,1], 3.0)).sum()
### Calculate ellipse parameters
A = (Sx3y*(Sxy3*Sy2-Sxy*Sy4)+Sx2y2*(Sx2*Sy4+Sxy*Sxy3)
-numpy.power(Sx2y2,2.0)*Sy2-Sx2*numpy.power(Sxy3,2.0))/(Sx4*(Sx2y2*Sy4-numpy.power(Sxy3,2.0))
-numpy.power(Sx3y,2.0)*Sy4+2.0*Sx2y2*Sx3y*Sxy3-numpy.power(Sx2y2,3.0));
B = -(Sx4*(Sxy3*Sy2-Sxy*Sy4)+Sx3y*(Sx2*Sy4-Sx2y2*Sy2)-Sx2*Sx2y2*Sxy3
+numpy.power(Sx2y2,2.0)*Sxy)/(Sx4*(Sx2y2*Sy4-numpy.power(Sxy3,2.0))
-numpy.power(Sx3y,2.0)*Sy4+2.0*Sx2y2*Sx3y*Sxy3-numpy.power(Sx2y2,3.0));
C = (Sx4*(Sx2y2*Sy2-Sxy*Sxy3)-numpy.power(Sx3y,2.0)*Sy2+Sx3y*(Sx2*Sxy3+Sx2y2*Sxy)
-Sx2*numpy.power(Sx2y2,2.0))/(Sx4*(Sx2y2*Sy4-numpy.power(Sxy3,2.0))
-numpy.power(Sx3y,2.0)*Sy4+2.0*Sx2y2*Sx3y*Sxy3-numpy.power(Sx2y2,3.0));
algebraic = (A, B, C, 0, 0, -1)
params = algebraic2parametric(algebraic)
if params is None:
return None
params['center'] = center
return params
#=================
def generate_ellipse(a, b, alpha, center=(0,0), numpoints=3, noise=None,
method="step", integers=False):
"""
a - major axis radius
b - minor axis radius
alpha - angle (in radians)
center = x0,y0 - position of center of ellipse
numpoints - # of points that make an ellipse
noise - float of the amount of noise to add
this is a faster version of ellipsePoints() function above
without the "for" loop and with extra features
convention note: ellipse points are created as x,y coordinates, so alpha
is measured as positive values towards the y-axis
"""
cosa = numpy.cos(alpha)
sina = numpy.sin(alpha)
if method == "step":
thetas = numpy.linspace(0, 2*math.pi, numpoints)
elif method == "random":
thetas = numpy.random.random(numpoints) * 2*math.pi
else:
print "unknown method", method
return None
rows = center[0] + a* numpy.cos(thetas) * cosa - b* numpy.sin(thetas) * sina
cols = center[1] + a* numpy.cos(thetas) * sina + b* numpy.sin(thetas) * cosa
points = numpy.vstack((rows,cols)).T
#print points[:5,:]
if noise is not None:
rand = numpy.random.standard_normal(points.shape)
points += rand * noise
#print points[0]
## use only integers
if integers is True:
points = numpy.array(numpy.around(points, 0), dtype=numpy.int)
#print points[0]
#print points[:5,:]
return points
#=================
def printParamsDict(params):
printParams(params['center'], params['a'], params['b'], params['alpha'])
return
#=================
def printParams(center, a, b, alpha):
print ("%.3f %.3f < %.2f (%.1f, %.1f)"%
(a, b, alpha*180/math.pi, center[0], center[1]))
return
"""
Speed and accuracy notes:
NumPoints = 3777 ; Noise = 0.1 pixels
orig 5.829 1.737 < -76.84 (4.0, 16.0)
b2ac 5.813 1.747 < -76.83 (4.0, 16.0)
gander 5.834 1.740 < -76.60 (4.0, 16.0)
ols 5.833 1.753 < -76.83 (4.0, 16.0)
b2ac complete in 8.585 millisec ** crashes sometimes
gander complete in 924.305 millisec ** way too slow for more than 500 points
ols complete in 5.268 millisec ** has fixed center
"""
### test code
if __name__ == '__main__':
## randomly generate a noisy ellipse
# note: center is (col,row) i.e. (x,y) while shape is (row,col)
xdim = numcol = 32
ydim = numrow = 16
shape = (numrow,numcol)
alpha = random.random()*math.pi - math.pi/2
center = numpy.array((numrow, numcol), dtype=numpy.float)/2.0
majormax = min( abs(numrow/math.cos(alpha)) , abs(numcol/math.sin(alpha)) )/3.0 - 1
minormax = min( abs(numrow/math.sin(alpha)) , abs(numcol/math.cos(alpha)) )/3.0 - 1
print alpha, majormax, minormax
major = (majormax-2) * random.random() + 2
minor = (min(minormax,major)-1) * random.random() + 1
numpoints = 8 + int(100000*random.random()*random.random()*random.random())
noise = 0.2 #random.random()
print "NumPoints = %d ; Noise = %.1f"%(numpoints, noise)
printParams(center, major, minor, alpha)
### draw real ellipse
points = generate_ellipse(major, minor, alpha, center, numpoints,
noise, method="step", integers=False)
params = {'center':center, 'a':major, 'b':minor, 'alpha':alpha}
grid = numpy.zeros(shape, dtype=numpy.int)
intpoints = numpy.array(points, dtype=numpy.int)
print intpoints
grid[intpoints[:,0], intpoints[:,1]] = 1
#for point in points:
# p = numpy.floor(point)
# grid[p[0],p[1]] = 1
print grid
print ""
print drawEllipse(shape, 4*numpy.pi/180.0, **params)
### draw b2ac ellipse
t0 = time.time()
params1 = solveEllipseB2AC(points)
print '\nB2AC', params1
if params1 is not None:
print drawEllipse(shape, 4*numpy.pi/180.0, **params1)
b2actime = time.time() - t0
### draw gander ellipse
t0 = time.time()
if numpoints < 10000:
params2 = solveEllipseGander(points)
print '\nGANDER', params2
if params2 is not None:
print drawEllipse(shape, 4*numpy.pi/180.0, **params2)
else:
print "skipping GANDER"
params2 = None
gandertime = time.time() - t0
### draw ols ellipse
t0 = time.time()
params3 = solveEllipseOLS(points, center)
print '\nORDINARY LEAST SQUARES', params3
print drawEllipse(shape, 4*numpy.pi/180.0, **params3)
olstime = time.time() - t0
### draw QR ellipse
t0 = time.time()
params4 = solveEllipseByQRdecomp(points, center)
print '\nQR DECOMP', params4
if params4 is not None:
print drawEllipse(shape, 4*numpy.pi/180.0, **params4)
qrdecomp = time.time() - t0
### draw Total Least Squares ellipse
t0 = time.time()
params5 = totalLeastSquareEllipse(points, center)
print '\nTotal Least Squares', params5
if params5 is not None:
print drawEllipse(shape, 4*numpy.pi/180.0, **params5)
totallsq = time.time() - t0
print majormax, minormax
print "NumPoints = %d ; Noise = %.1f"%(numpoints, noise)
print "Actual values"
printParams(center, major, minor, alpha)
print "Fit values"
if params1 is not None:
printParams(**params1)
else:
print "b2ac failed"
if params2 is not None:
printParams(**params2)
else:
print "gander skipped"
printParams(**params3)
if params4 is not None:
printParams(**params4)
else:
print "qr decomp failed"
if params5 is not None:
printParams(**params5)
else:
print "total lsq failed"
print "b2ac complete in %.3f millisec"%(b2actime*1000)
print "gander complete in %.3f millisec"%(gandertime*1000)
print "ols complete in %.3f millisec"%(olstime*1000)
print "qr complete in %.3f millisec"%(qrdecomp*1000)
print "total complete in %.3f millisec"%(totallsq*1000)
|
|
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
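# Illustrative usage sketch (the annotation file path is an assumption):
#   coco = COCO('annotations/instances_val2017.json')
#   cat_ids = coco.getCatIds(catNms=['person'])
#   img_ids = coco.getImgIds(catIds=cat_ids)
#   anns = coco.loadAnns(coco.getAnnIds(imgIds=img_ids[:1]))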
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
from pycocotools import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
def _isArrayLike(obj):
return hasattr(obj, '__iter__') and hasattr(obj, '__len__')
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if annotation_file is not None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if _isArrayLike(catNms) else [catNms]
supNms = supNms if _isArrayLike(supNms) else [supNms]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if _isArrayLike(imgIds) else [imgIds]
catIds = catIds if _isArrayLike(catIds) else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if _isArrayLike(ids):
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if _isArrayLike(ids):
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load imgs with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if _isArrayLike(ids):
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = maskUtils.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print('Loading and preparing results...')
tic = time.time()
if type(resFile) == str: #or type(resFile) == unicode:
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if not 'bbox' in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
def annToRLE(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: rle : run-length encoded segmentation (RLE)
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, h, w)
rle = maskUtils.merge(rles)
elif type(segm['counts']) == list:
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, h, w)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
m = maskUtils.decode(rle)
return m
|
|
from celery.task import Task
from hellomama_registration import utils
from registrations.models import Registration, SubscriptionRequest
from .models import Change
class ImplementAction(Task):
""" Task to apply a Change action.
"""
name = "hellomama_registration.changes.tasks.implement_action"
def change_baby(self, change):
# Get mother's current subscriptions
subscriptions = utils.get_subscriptions(change.mother_id)
# Deactivate subscriptions
for subscription in subscriptions:
utils.deactivate_subscription(subscription)
# Get mother's identity
mother = utils.get_identity(change.mother_id)
# Get mother's registration
registrations = Registration.objects.\
filter(mother_id=change.mother_id, stage='prebirth').\
order_by('-created_at')
stage = 'postbirth'
weeks = 0
voice_days = mother["details"].get("preferred_msg_days")
voice_times = mother["details"].get("preferred_msg_times")
mother_short_name = utils.get_messageset_short_name(
stage, 'mother', mother["details"]["preferred_msg_type"],
weeks, voice_days, voice_times)
mother_msgset_id, mother_msgset_schedule, next_sequence_number =\
utils.get_messageset_schedule_sequence(mother_short_name, weeks)
# Make new subscription request object
mother_sub = {
"identity": change.mother_id,
"messageset": mother_msgset_id,
"next_sequence_number": next_sequence_number,
"lang": mother["details"]["preferred_language"],
"schedule": mother_msgset_schedule
}
SubscriptionRequest.objects.create(**mother_sub)
# Make household subscription if required
for registration in registrations:
if registration.data["msg_receiver"] != 'mother_only':
household_short_name = utils.get_messageset_short_name(
stage, 'household', mother["details"]
["preferred_msg_type"], weeks, "fri", "9_11")
household_msgset_id, household_msgset_schedule, seq_number =\
utils.get_messageset_schedule_sequence(
household_short_name, weeks)
household_sub = {
"identity": mother["details"]["linked_to"],
"messageset": household_msgset_id,
"next_sequence_number": seq_number,
"lang": mother["details"]["preferred_language"],
"schedule": household_msgset_schedule
}
SubscriptionRequest.objects.create(**household_sub)
break
return "Change baby completed"
def change_loss(self, change):
# Get mother's current subscriptions
subscriptions = utils.get_subscriptions(change.mother_id)
# Deactivate subscriptions
for subscription in subscriptions:
utils.deactivate_subscription(subscription)
# Get mother's identity
mother = utils.get_identity(change.mother_id)
stage = 'miscarriage'
weeks = 0
voice_days = mother["details"].get("preferred_msg_days")
voice_times = mother["details"].get("preferred_msg_times")
mother_short_name = utils.get_messageset_short_name(
stage, 'mother', mother["details"]["preferred_msg_type"],
weeks, voice_days, voice_times)
mother_msgset_id, mother_msgset_schedule, next_sequence_number =\
utils.get_messageset_schedule_sequence(mother_short_name, weeks)
# Make new subscription request object
mother_sub = {
"identity": change.mother_id,
"messageset": mother_msgset_id,
"next_sequence_number": next_sequence_number,
"lang": mother["details"]["preferred_language"],
"schedule": mother_msgset_schedule
}
SubscriptionRequest.objects.create(**mother_sub)
# Get mother's registration
registrations = Registration.objects.\
filter(mother_id=change.mother_id, stage='prebirth').\
order_by('-created_at')
for registration in registrations:
if registration.data["msg_receiver"] != 'mother_only':
# Get household's current subscriptions
subscriptions = utils.get_subscriptions(
mother["details"]["linked_to"])
# Deactivate subscriptions
for subscription in subscriptions:
utils.deactivate_subscription(subscription)
break
return "Change loss completed"
def change_messaging(self, change):
# Get mother's current subscriptions
subscriptions = utils.get_subscriptions(change.mother_id)
current_sub = next(subscriptions) # necessary assumption
current_nsn = current_sub["next_sequence_number"]
# get current subscription's messageset
current_msgset = utils.get_messageset(current_sub["messageset"])
# get current subscription's schedule
current_sched = utils.get_schedule(current_sub["schedule"])
current_days = current_sched["day_of_week"]
current_rate = len(current_days.split(',')) # msgs per week
# Deactivate subscriptions
utils.deactivate_subscription(current_sub)
for subscription in subscriptions:
utils.deactivate_subscription(subscription)
if 'audio' in current_msgset["short_name"]:
from_type = 'audio'
else:
from_type = 'text'
if 'miscarriage' in current_msgset["short_name"]:
stage = 'miscarriage'
weeks = 1 # just a placeholder to get the messageset_short_name
elif 'postbirth' in current_msgset["short_name"]:
stage = 'postbirth'
# set placeholder weeks for getting the messageset_short_name
if '0_12' in current_msgset["short_name"]:
weeks = 1
else:
weeks = 13
else:
stage = 'prebirth'
weeks = 11 # just a placeholder to get the messageset_short_name
if change.data.get("new_short_name"):
new_short_name = change.data.get("new_short_name")
to_type = 'text'
if 'audio' in new_short_name:
to_type = 'audio'
else:
# Determine voice_days & voice_times
if change.data["msg_type"] == 'audio':
to_type = 'audio'
voice_days = change.data["voice_days"]
voice_times = change.data["voice_times"]
else:
to_type = 'text'
voice_days = None
voice_times = None
new_short_name = utils.get_messageset_short_name(
stage, 'mother', to_type,
weeks, voice_days, voice_times)
new_msgset_id, new_msgset_schedule, next_sequence_number =\
utils.get_messageset_schedule_sequence(new_short_name, weeks)
# calc new_nsn rather than using next_sequence_number
if from_type == to_type:
new_nsn = current_nsn
else:
new_sched = utils.get_schedule(new_msgset_schedule)
new_days = new_sched["day_of_week"]
new_rate = len(new_days.split(',')) # msgs per week
new_nsn = int(current_nsn * new_rate / float(current_rate))
# prevent rounding nsn to 0
if new_nsn == 0:
new_nsn = 1
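# Illustrative example (made-up numbers): switching from 3 messages per week at
# next_sequence_number 12 to 2 messages per week gives
# new_nsn = int(12 * 2 / 3.0) = 8, keeping the subscriber at roughly the same
# point in the schedule.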
# Make new subscription request object
mother_sub = {
"identity": change.mother_id,
"messageset": new_msgset_id,
"next_sequence_number": new_nsn,
# use first subscription's lang if the change doesn't include a new
# language
"lang": change.data.get("new_language", current_sub["lang"]),
"schedule": new_msgset_schedule
}
SubscriptionRequest.objects.create(**mother_sub)
return "Change messaging completed"
def change_language(self, change):
# Get mother's current subscriptions
subscriptions = utils.get_subscriptions(change.mother_id)
# Patch subscriptions languages
for subscription in subscriptions:
utils.patch_subscription(
subscription, {"lang": change.data["new_language"]})
if change.data["household_id"]:
# Get household's current subscriptions
subscriptions = utils.get_subscriptions(
change.data["household_id"])
# Patch subscriptions languages
for subscription in subscriptions:
utils.patch_subscription(
subscription, {"lang": change.data["new_language"]})
return "Change language completed"
def unsubscribe_household_only(self, change):
# Get household's current subscriptions
subscriptions = utils.get_subscriptions(
change.data["household_id"])
# Deactivate subscriptions
for subscription in subscriptions:
utils.deactivate_subscription(subscription)
return "Unsubscribe household completed"
def unsubscribe_mother_only(self, change):
# Get mother's current subscriptions
subscriptions = utils.get_subscriptions(
change.mother_id)
# Deactivate subscriptions
for subscription in subscriptions:
utils.deactivate_subscription(subscription)
return "Unsubscribe mother completed"
def run(self, change_id, **kwargs):
""" Implements the appropriate action
"""
change = Change.objects.get(id=change_id)
result = {
'change_baby': self.change_baby,
'change_loss': self.change_loss,
'change_messaging': self.change_messaging,
'change_language': self.change_language,
'unsubscribe_household_only': self.unsubscribe_household_only,
'unsubscribe_mother_only': self.unsubscribe_mother_only,
}.get(change.action, None)(change)
return result
implement_action = ImplementAction()
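# Illustrative sketch (not part of the original task code): the proportional
# next_sequence_number conversion used in change_messaging above, pulled out
# into a standalone function with hypothetical numbers so the rounding and the
# minimum-of-1 guard are easy to see.
def _example_convert_nsn(current_nsn, current_rate, new_rate):
    """Scale a next_sequence_number from one messages-per-week rate to another."""
    new_nsn = int(current_nsn * new_rate / float(current_rate))
    # prevent rounding nsn to 0, matching the guard in change_messaging
    return max(new_nsn, 1)


if __name__ == '__main__':
    # e.g. message 10 of a 3-per-week set maps to message 6 of a 2-per-week
    # set; message 1 never drops below 1.
    assert _example_convert_nsn(10, current_rate=3, new_rate=2) == 6
    assert _example_convert_nsn(1, current_rate=3, new_rate=2) == 1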
|
|
"""payu.manifest
===============
Provides a manifest class to store manifest data, which uses a
subclassed yamanifest PayuManifest class
:copyright: Copyright 2019 Aidan Heerdegen, see AUTHORS for details.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
from __future__ import print_function, absolute_import
# External
import fnmatch
import os
import sys
import shutil
import stat
from yamanifest.manifest import Manifest as YaManifest
from payu.fsops import make_symlink, mkdir_p
# fast_hashes = ['nchash','binhash']
fast_hashes = ['binhash']
full_hashes = ['md5']
all_hashes = fast_hashes + full_hashes
class PayuManifest(YaManifest):
"""
A manifest object sub-classed from yamanifest object with some payu
specific additions and enhancements.
"""
def __init__(self, path,
ignore=None,
fast_hashes=fast_hashes,
full_hashes=full_hashes,
**kwargs):
super(PayuManifest, self).__init__(path=path,
hashes=fast_hashes+full_hashes,
**kwargs)
self.fast_hashes = fast_hashes
self.full_hashes = full_hashes
if ignore is not None:
self.ignore = ignore
self.needsync = False
self.existing_filepaths = set()
def check_fast(self, reproduce=False, **args):
"""
Check hash value for all filepaths using a fast hash function and fall
back to slower full hash functions if fast hashes fail to agree.
"""
hashvals = {}
fast_check = self.check_file(
filepaths=self.data.keys(),
hashvals=hashvals,
hashfn=self.fast_hashes,
shortcircuit=True,
**args
)
if not fast_check:
# Save all the fast hashes for failed files that we've already
# calculated
for filepath in hashvals:
for hash, val in hashvals[filepath].items():
self.data[filepath]['hashes'][hash] = val
if reproduce:
for filepath in hashvals:
print('Check failed for {0} {1}'
''.format(filepath, hashvals[filepath]))
tmphash = {}
full_check = self.check_file(
filepaths=filepath,
hashfn=self.full_hashes,
hashvals=tmphash,
shortcircuit=False,
**args
)
if full_check:
# File is still ok, so replace fast hashes
print('Full hashes ({0}) checked ok'
''.format(self.full_hashes))
print('Updating fast hashes for {0} in {1}'
''.format(filepath, self.path))
self.add_fast(filepath, force=True)
print('Saving updated manifest')
self.needsync = True
else:
sys.stderr.write(
'Run cannot reproduce: manifest {0} is not '
'correct\n'.format(self.path)
)
for path, hashdict in tmphash.items():
print(' {0}:'.format(path))
for hash, val in hashdict.items():
hash_table = self.data[path]['hashes']
hash_table_val = hash_table.get(hash, None)
print(' {0}: {1} != {2}'
''.format(hash, val, hash_table_val))
sys.exit(1)
else:
# Not relevant if full hashes are correct. Regenerate full
# hashes for all filepaths that failed fast check.
print('Updating full hashes for {0} files in {1}'
''.format(len(hashvals), self.path))
# Add all full hashes at once -- much faster. Definitely want
# to force the full hash to be updated. In the specific case of
# an empty hash the value will be None, without force it will
# be written as null.
self.add(
filepaths=list(hashvals.keys()),
hashfn=self.full_hashes,
force=True,
fullpaths=[self.fullpath(fpath) for fpath
in list(hashvals.keys())]
)
# Flag need to update version on disk
self.needsync = True
def add_filepath(self, filepath, fullpath, hashes, copy=False):
"""
Bespoke function to add filepath & fullpath to manifest
object without hashing. Can defer hashing until all files are
added. Hashing all at once is much faster as overhead for
threading is spread over all files
"""
# Ignore directories
if os.path.isdir(fullpath):
return False
# Ignore anything matching the ignore patterns
for pattern in self.ignore:
if fnmatch.fnmatch(os.path.basename(fullpath), pattern):
return False
if filepath not in self.data:
self.data[filepath] = {}
self.data[filepath]['fullpath'] = fullpath
if 'hashes' not in self.data[filepath]:
self.data[filepath]['hashes'] = {hash: None for hash in hashes}
if copy:
self.data[filepath]['copy'] = copy
if filepath in self.existing_filepaths:
self.existing_filepaths.remove(filepath)
return True
def add_fast(self, filepath, hashfn=None, force=False):
"""
Bespoke function to add filepaths but set shortcircuit to True, which
means only the first calculable hash will be stored. In this way only
one "fast" hashing function need be called for each filepath.
"""
if hashfn is None:
hashfn = self.fast_hashes
self.add(filepath, hashfn, force, shortcircuit=True)
def copy_file(self, filepath):
"""
Returns flag which says to copy rather than link a file.
"""
copy_file = False
try:
copy_file = self.data[filepath]['copy']
except KeyError:
return False
return copy_file
def make_link(self, filepath):
"""
Payu integration function for creating symlinks in work directories
which point back to the original file.
"""
# Check file exists. It may have been deleted but still in manifest
if not os.path.exists(self.fullpath(filepath)):
print('File not found: {filepath}'.format(
filepath=self.fullpath(filepath)))
if self.contains(filepath):
print('removing from manifest')
self.delete(filepath)
self.needsync = True
self.existing_filepaths.discard(filepath)
else:
try:
destdir = os.path.dirname(filepath)
# Make destination directory if it does not already exist
# Necessary because sometimes this is called before
# individual model setup
if not os.path.exists(destdir):
os.makedirs(destdir)
if self.copy_file(filepath):
shutil.copy(self.fullpath(filepath), filepath)
perm = (stat.S_IRUSR | stat.S_IRGRP
| stat.S_IROTH | stat.S_IWUSR)
os.chmod(filepath, perm)
else:
make_symlink(self.fullpath(filepath), filepath)
except Exception:
action = 'copying' if self.copy_file(filepath) else 'linking'
print('payu: error: {action} orig: {orig} '
'local: {local}'.format(action=action,
orig=self.fullpath(filepath),
local=filepath))
raise
finally:
self.existing_filepaths.discard(filepath)
def make_links(self):
"""
Used to make all links at once for reproduce runs or scaninputs=False
"""
for filepath in list(self):
self.make_link(filepath)
def copy(self, path):
"""
Copy myself to another location
"""
shutil.copy(self.path, path)
def get_fullpaths(self):
files = []
for filepath in list(self):
files.append(self.fullpath(filepath))
return files
def get_hashes(self, hashfn):
hashes = []
for filepath in self:
hashes.append(self.get(filepath, hashfn))
return hashes
class Manifest(object):
"""
A Manifest class which stores all manifests for file tracking and
methods to operate on them
"""
def __init__(self, config, reproduce):
# Manifest control configuration
self.manifest_config = config
# Not currently supporting specifying hash functions
self.fast_hashes = self.manifest_config.get('fasthash', fast_hashes)
self.full_hashes = self.manifest_config.get('fullhash', full_hashes)
if type(self.fast_hashes) is str:
self.fast_hashes = [self.fast_hashes, ]
if type(self.full_hashes) is str:
self.full_hashes = [self.full_hashes, ]
self.ignore = self.manifest_config.get('ignore', ['.*'])
if isinstance(self.ignore, str):
self.ignore = [self.ignore]
# Initialise manifests and reproduce flags
self.manifests = {}
self.have_manifest = {}
reproduce_config = self.manifest_config.get('reproduce', {})
self.reproduce = {}
for mf in ['input', 'restart', 'exe']:
self.init_mf(mf)
self.reproduce[mf] = reproduce_config.get(mf, reproduce)
# Make sure the manifests directory exists
mkdir_p(os.path.dirname(self.manifests['exe'].path))
# Set flag to auto-scan input directories
self.scaninputs = self.manifest_config.get('scaninputs', True)
if self.reproduce['input'] and self.scaninputs:
print("scaninputs set to False when reproduce input is True")
self.scaninputs = False
def init_mf(self, mf):
# Initialise a sub-manifest object
self.manifests[mf] = PayuManifest(
os.path.join('manifests', '{}.yaml'.format(mf)),
ignore=self.ignore,
fast_hashes=self.fast_hashes,
full_hashes=self.full_hashes
)
self.have_manifest[mf] = False
def __iter__(self):
"""
Iterator method
"""
for mf in self.manifests:
yield self.manifests[mf]
def __len__(self):
"""Return the number of manifests in the manifest class."""
return len(self.manifests)
def load(self):
"""
Load manifests
"""
for mf in self.manifests:
self.have_manifest[mf] = False
if (os.path.exists(self.manifests[mf].path)):
try:
print('Loading {mf} manifest: {path}'
''.format(mf=mf, path=self.manifests[mf].path))
self.manifests[mf].load()
except Exception as e:
print('Error loading {mf} manifest: '
'{error}'.format(mf=mf, error=e))
finally:
if len(self.manifests[mf]) > 0:
self.have_manifest[mf] = True
def setup(self):
# Load all available manifests
self.load()
if self.have_manifest['input']:
if self.scaninputs: # Must be False for reproduce=True
# Save existing filepath information
self.manifests['input'].existing_filepaths = \
set(self.manifests['input'].data.keys())
if self.have_manifest['exe']:
if not self.reproduce['exe']:
# Re-initialise exe manifest. Trivial to recreate
# and means no check required for changed executable
# paths
self.init_mf('exe')
if self.have_manifest['restart']:
if not self.reproduce['restart']:
# Re-initialise restart manifest. Only keep restart manifest
# if reproduce. Normally want to scan for new restarts
self.init_mf('restart')
# Check that all manifests that should be populated are, and
# make links in the work directory for existing manifests
for mf in self.manifests.keys():
if self.have_manifest[mf]:
# Don't make links for inputs when scaninputs is True
if mf == 'input' and self.scaninputs:
break
print('Making {} links'.format(mf))
self.manifests[mf].make_links()
else:
if self.reproduce[mf]:
print('{} manifest must exist if reproduce is True'
''.format(mf.capitalize()))
exit(1)
def check_manifests(self):
print("Checking exe and input manifests")
self.manifests['exe'].check_fast(reproduce=self.reproduce['exe'])
if not self.reproduce['input']:
if len(self.manifests['input'].existing_filepaths) > 0:
# Delete missing filepaths from input manifest
for filepath in self.manifests['input'].existing_filepaths:
print('File no longer in input directory: {file} '
'removing from manifest'.format(file=filepath))
self.manifests['input'].delete(filepath)
self.manifests['input'].needsync = True
self.manifests['input'].check_fast(reproduce=self.reproduce['input'])
if self.reproduce['restart']:
print("Checking restart manifest")
else:
print("Creating restart manifest")
self.manifests['restart'].needsync = True
self.manifests['restart'].check_fast(
reproduce=self.reproduce['restart'])
# Write updates to version on disk
for mf in self.manifests:
if self.manifests[mf].needsync:
print("Writing {}".format(self.manifests[mf].path))
self.manifests[mf].dump()
def copy_manifests(self, path):
mkdir_p(path)
try:
for mf in self.manifests:
self.manifests[mf].copy(path)
except IOError:
pass
def add_filepath(self, manifest, filepath, fullpath, copy=False):
"""
Wrapper to the add_filepath function in PayuManifest. Prevents outside
code from directly calling anything in PayuManifest.
"""
filepath = os.path.normpath(filepath)
if self.manifests[manifest].add_filepath(
filepath=filepath,
fullpath=fullpath,
hashes=self.fast_hashes + self.full_hashes,
copy=copy):
# Only link if filepath was added
self.manifests[manifest].make_link(filepath)
def get_all_fullpaths(self):
"""
Return a list of all fullpaths in manifest files
"""
files = []
for mf in self.manifests:
files.extend(self.manifests[mf].get_fullpaths())
return files
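# Illustrative usage sketch (not part of payu itself): how the Manifest wrapper
# above is typically driven. The config keys ('ignore', 'scaninputs',
# 'reproduce') and the 'exe' manifest name come from the code above; the
# filepaths are hypothetical, and running this writes manifests/*.yaml in the
# current working directory.
if __name__ == '__main__':
    example_config = {
        'ignore': ['.*', '*.log'],
        'scaninputs': True,
        'reproduce': {'exe': False, 'input': False, 'restart': False},
    }
    manifest = Manifest(example_config, reproduce=False)
    # Load any existing manifests and make links for them
    manifest.setup()
    # Register a (hypothetical) executable; hashing happens in check_manifests
    manifest.add_filepath('exe', 'work/model.exe', '/short/project/bin/model.exe')
    manifest.check_manifests()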
|
|
# Copyright (c) 2018 Usabilla.com
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import datetime
import hashlib
import hmac
import requests
import urllib3.request as urllib
from collections import OrderedDict
class Credentials(object):
"""An object that holds information about client and secret key."""
def __init__(self, client_key, secret_key):
"""Initialize a Credentials instance."""
if client_key == '' or secret_key == '':
raise GeneralError('Empty credentials.', 'The credentials you have entered are invalid.')
self.client_key = client_key
self.secret_key = secret_key
def get_credentials(self):
"""Return the client and secret key."""
return {'client_key': self.client_key, 'secret_key': self.secret_key}
class GeneralError(Exception):
"""GeneralError API exception."""
def __init__(self, type, message):
"""Initialize a GeneralError exception."""
self.type = type
self.message = message
def __str__(self):
"""String representation of the exception."""
return "%s (%s)" % (self.type, self.message)
def __repr__(self):
"""Representation of the exception."""
return "%s(type=%s)" % (self.__class__.__name__, self.type)
class APIClient(object):
"""APIClient object.
For the key derivation functions see:
http://docs.aws.amazon.com/general/latest/gr/signature-v4-examples.html#signature-v4-examples-python
"""
resources = {
'scopes': {
'live': {
'products': {
'websites': {
'resources': {
'button': '/button',
'feedback': '/button/:id/feedback',
'campaign': '/campaign',
'campaign_result': '/campaign/:id/results',
'campaign_stats': '/campaign/:id/stats',
'inpage': '/inpage',
'inpage_result': '/inpage/:id/feedback'
}
},
'email': {
'resources': {
'button': '/button',
'feedback': '/button/:id/feedback'
}
},
'apps': {
'resources': {
'app': '',
'feedback': '/:id/feedback',
'campaign': '/campaign',
'campaign_result': '/campaign/:id/results'
}
}
}
}
}
}
""" Scope constants """
SCOPE_LIVE = 'live'
""" Product contants """
PRODUCT_WEBSITES = 'websites'
PRODUCT_EMAIL = 'email'
PRODUCT_APPS = 'apps'
""" Resource contants """
RESOURCE_FEEDBACK = 'feedback'
RESOURCE_APP = 'app'
RESOURCE_BUTTON = 'button'
RESOURCE_CAMPAIGN = 'campaign'
RESOURCE_CAMPAIGN_RESULT = 'campaign_result'
RESOURCE_CAMPAIGN_STATS = 'campaign_stats'
RESOURCE_INPAGE = 'inpage'
RESOURCE_INPAGE_RESULT = 'inpage_result'
method = 'GET'
host = 'data.usabilla.com'
host_protocol = 'https://'
session = requests.Session()
def __init__(self, client_key, secret_key):
"""Initialize an APIClient object."""
self.query_parameters = ''
self.credentials = Credentials(client_key=client_key, secret_key=secret_key)
def sign(self, key, msg):
"""Get the digest of the message using the specified key."""
return hmac.new(key, msg, hashlib.sha256).digest()
def get_signature_key(self, key, long_date):
"""Get the signature key."""
k_date = self.sign(('USBL1' + key).encode('utf-8'), long_date.encode('utf-8'))
k_signing = self.sign(k_date, 'usbl1_request'.encode('utf-8'))
return k_signing
def set_query_parameters(self, parameters):
"""Set the query parameters.
:param parameters: A `dict` representing the query parameters to be used for the request.
:type parameters: dict
"""
self.query_parameters = urllib.urlencode(OrderedDict(sorted(parameters.items())))
def get_query_parameters(self):
"""Get the query parameters."""
return self.query_parameters
def send_signed_request(self, scope):
"""Send the signed request to the API.
The process is the following:
1) Create a canonical request
2) Create a string to sign
3) Calculate the signature
4) Sign the request
5) Send the request
:param scope: The resource relative url to query for data.
:type scope: str
:returns: A `dict` of the data.
:rtype: dict
"""
if self.credentials.client_key is None or self.credentials.secret_key is None:
raise GeneralError('Invalid Access Key.', 'The Access Key supplied is invalid.')
# Create a date for headers and the credential string.
t = datetime.datetime.utcnow()
usbldate = t.strftime('%a, %d %b %Y %H:%M:%S GMT')
datestamp = t.strftime('%Y%m%d') # Date w/o time, used in credential scope
long_date = t.strftime('%Y%m%dT%H%M%SZ')
# Create canonical URI--the part of the URI from domain to query.
canonical_uri = scope
# Create the canonical query string.
canonical_querystring = self.get_query_parameters()
# Create the canonical headers and signed headers.
canonical_headers = 'date:' + usbldate + '\n' + 'host:' + self.host + '\n'
# Create the list of signed headers.
signed_headers = 'date;host'
# Create payload hash (hash of the request body content).
payload_hash = hashlib.sha256(''.encode('utf-8')).hexdigest()
# Combine elements to create canonical request.
canonical_request = '{method}\n{uri}\n{query}\n{can_headers}\n{signed_headers}\n{hash}'.format(
method=self.method,
uri=canonical_uri,
query=canonical_querystring,
can_headers=canonical_headers,
signed_headers=signed_headers,
hash=payload_hash
)
# Match the algorithm to the hashing algorithm you use
algorithm = 'USBL1-HMAC-SHA256'
credential_scope = datestamp + '/' + 'usbl1_request'
string_to_sign = '{algorithm}\n{long_date}\n{credential_scope}\n{digest}'.format(
algorithm=algorithm,
long_date=long_date,
credential_scope=credential_scope,
digest=hashlib.sha256(canonical_request.encode('utf-8')).hexdigest(),
)
# Create the signing key.
signing_key = self.get_signature_key(self.credentials.secret_key, datestamp)
# Sign the string_to_sign using the signing_key.
signature = hmac.new(signing_key, (string_to_sign).encode('utf-8'), hashlib.sha256).hexdigest()
# Construct the authorization header.
authorization_header = (
'{algorithm} Credential={cred}/{cred_scope}, SignedHeaders={signed_headers}, Signature={signature}'
).format(
algorithm=algorithm,
cred=self.credentials.client_key,
cred_scope=credential_scope,
signed_headers=signed_headers,
signature=signature,
)
headers = {'date': usbldate, 'Authorization': authorization_header}
# Send the request.
request_url = self.host + scope + '?' + canonical_querystring
r = self.session.get(self.host_protocol + request_url, headers=headers)
r.raise_for_status()
return r.json()
def check_resource_validity(self, scope, product, resource):
"""Checks whether the resource exists
:param scope: A `string` that specifies the resource scope
:param product: A `string` that specifies the product type
:param resource: A `string` that specifies the resource type
:type scope: str
:type product: str
:type resource: str
:returns: A `string` that represents the resource request url
:rtype: string
"""
if scope not in self.resources['scopes'].keys():
raise GeneralError('invalid scope', 'Invalid scope name')
found_scope = self.resources['scopes'][scope]
if product not in found_scope['products'].keys():
raise GeneralError('invalid product', 'Invalid product name')
found_product = found_scope['products'][product]
if resource not in found_product['resources'].keys():
raise GeneralError('invalid resource', 'Invalid resource name')
found_resource = found_product['resources'][resource]
return '/%s/%s%s' % (scope, product, found_resource)
def handle_id(self, url, resource_id):
"""Replaces the :id pattern in the url
:param url: A `string` that specifies the resource request url
:param resource_id: A `string` that specifies the resource id
:type url: str
:type resource_id: str
:returns: A `string` that represents the resource request url
:rtype: string
"""
if resource_id is not None:
if resource_id == '':
raise GeneralError('invalid id', 'Invalid resource ID')
if resource_id == '*':
resource_id = '%2A'
url = url.replace(':id', str(resource_id))
return url
def item_iterator(self, url):
"""Get items using an iterator.
:param url: A `string` that specifies the resource request url
:type url: str
:returns: A `generator` that yields the requested data.
:rtype: generator
:raises requests.exceptions.HTTPError: if an HTTP error occurred
"""
has_more = True
while has_more:
results = self.send_signed_request(url)
has_more = results['hasMore']
for item in results['items']:
yield item
self.set_query_parameters({'since': results['lastTimestamp']})
def get_resource(self, scope, product, resource, resource_id=None, iterate=False):
"""Retrieves resources of the specified type
:param scope: A `string` that specifies the resource scope
:param product: A `string` that specifies the product type
:param resource: A `string` that specifies the resource type
:param resource_id: A `string` that specifies the resource id
:param iterate: A `boolean` that specifies whether you want to use an iterator
:type scope: str
:type product: str
:type resource: str
:type resource_id: str
:type iterate: bool
:returns: A `generator` that yields the requested data or a single resource
:rtype: generator or single resource
"""
url = self.handle_id(self.check_resource_validity(scope, product, resource), resource_id)
if iterate:
return self.item_iterator(url)
else:
return self.send_signed_request(url)
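# Illustrative usage sketch (not part of the client): iterating over feedback
# for all buttons of the websites product. The scope/product/resource constants
# and the '*' wildcard id come from the class above; the credentials are
# placeholders and the call hits the live API.
if __name__ == '__main__':
    client = APIClient('YOUR_CLIENT_KEY', 'YOUR_SECRET_KEY')
    # Query parameters are sorted and signed as part of each request.
    client.set_query_parameters({'limit': 10})
    feedback_items = client.get_resource(
        APIClient.SCOPE_LIVE,
        APIClient.PRODUCT_WEBSITES,
        APIClient.RESOURCE_FEEDBACK,
        resource_id='*',
        iterate=True)
    for item in feedback_items:
        print(item)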
|
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Layout tests module that is necessary for the layout analyzer.
Layout tests are stored in Webkit SVN and LayoutTestCaseManager collects these
layout test cases (including description).
"""
import copy
import csv
import locale
import re
import sys
import urllib2
import pysvn
# Webkit SVN root location.
DEFAULT_LAYOUTTEST_LOCATION = (
'http://svn.webkit.org/repository/webkit/trunk/LayoutTests/')
# When parsing the test HTML file and finding the test description,
# this script tries to find the test description using sentences
# starting with these keywords. This is adhoc but it is the only way
# since there is no standard for writing test description.
KEYWORDS_FOR_TEST_DESCRIPTION = ['This test', 'Tests that', 'Test ']
# If cannot find the keywords, this script tries to find test case
# description by the following tags.
TAGS_FOR_TEST_DESCRIPTION = ['title', 'p', 'div']
# If cannot find the tags, this script tries to find the test case
# description in the sentence containing following words.
KEYWORD_FOR_TEST_DESCRIPTION_FAIL_SAFE = ['PASSED ', 'PASS:']
class LayoutTests(object):
"""A class to store test names in layout tests.
The test names (including regular expression patterns) are read from a CSV
file and used for getting layout test names from Webkit SVN.
"""
def __init__(self, layouttest_root_path=DEFAULT_LAYOUTTEST_LOCATION,
parent_location_list=None, filter_names=None,
recursion=False):
"""Initialize LayoutTests using root and CSV file.
Args:
layouttest_root_path: A location string where Webkit layout tests are
stored.
parent_location_list: A list of parent directories that are needed for
getting layout tests.
filter_names: A list of test name patterns that are used for filtering
test names (e.g., media/*.html).
recursion: a boolean indicating whether the test names are sought
recursively.
"""
if layouttest_root_path.startswith('http://'):
name_map = self.GetLayoutTestNamesFromSVN(parent_location_list,
layouttest_root_path,
recursion)
else:
# TODO(imasaki): support other forms such as CSV for reading test names.
pass
self.name_map = copy.copy(name_map)
if filter_names:
# Filter names.
for lt_name in name_map.iterkeys():
match = False
for filter_name in filter_names:
if re.search(filter_name, lt_name):
match = True
break
if not match:
del self.name_map[lt_name]
# We get description only for the filtered names.
for lt_name in self.name_map.iterkeys():
self.name_map[lt_name] = LayoutTests.GetTestDescriptionFromSVN(lt_name)
@staticmethod
def ExtractTestDescription(txt):
"""Extract the description description from test code in HTML.
Currently, we have 4 rules described in the code below.
(This example falls into rule 1):
<p>
This tests the intrinsic size of a video element is the default
300,150 before metadata is loaded, and 0,0 after
metadata is loaded for an audio-only file.
</p>
The strategy is ad hoc since the original test case files
(in HTML format) do not have a standard way to store the test description.
Args:
txt: An HTML text which may or may not contain a test description.
Returns:
A string that contains the test description. Returns 'UNKNOWN' if the
test description is not found.
"""
# (1) Try to find test description that contains keywords such as
# 'test that' and surrounded by p tag.
# This is the most common case.
for keyword in KEYWORDS_FOR_TEST_DESCRIPTION:
# Try to find <p> and </p>.
pattern = r'<p>(.*' + keyword + '.*)</p>'
matches = re.search(pattern, txt)
if matches is not None:
return matches.group(1).strip()
# (2) Try to find it by using more generic keywords such as 'PASS' etc.
for keyword in KEYWORD_FOR_TEST_DESCRIPTION_FAIL_SAFE:
# Try to find new lines.
pattern = r'\n(.*' + keyword + '.*)\n'
matches = re.search(pattern, txt)
if matches is not None:
# Remove 'p' tag.
text = matches.group(1).strip()
return text.replace('<p>', '').replace('</p>', '')
# (3) Try to find it by using HTML tag such as title.
for tag in TAGS_FOR_TEST_DESCRIPTION:
pattern = r'<' + tag + '>(.*)</' + tag + '>'
matches = re.search(pattern, txt)
if matches is not None:
return matches.group(1).strip()
# (4) Try to find it by using test description and remove 'p' tag.
for keyword in KEYWORDS_FOR_TEST_DESCRIPTION:
# Try to find <p> and </p>.
pattern = r'\n(.*' + keyword + '.*)\n'
matches = re.search(pattern, txt)
if matches is not None:
# Remove 'p' tag.
text = matches.group(1).strip()
return text.replace('<p>', '').replace('</p>', '')
# (5) cannot find test description using existing rules.
return 'UNKNOWN'
@staticmethod
def GetLayoutTestNamesFromSVN(parent_location_list,
layouttest_root_path, recursion):
"""Get LayoutTest names from Webkit SVN.
Args:
parent_location_list: a list of locations of parent directories. This is
used when getting layout tests using PySVN.list().
layouttest_root_path: the root path of the Webkit SVN directory.
recursion: a boolean indicating whether the test names are sought
recursively.
Returns:
a map containing test names as keys for de-dupe.
"""
client = pysvn.Client()
# Get directory structure in the Webkit SVN.
name_map = {}
for parent_location in parent_location_list:
if parent_location.endswith('/'):
file_list = client.list(layouttest_root_path + parent_location,
recurse=recursion)
for file_name in file_list:
if sys.stdout.isatty():
default_encoding = sys.stdout.encoding
else:
default_encoding = locale.getpreferredencoding()
file_name = file_name[0].repos_path.encode(default_encoding)
# Remove the prefix '/trunk/LayoutTests/'.
file_name = file_name.replace('/trunk/LayoutTests/', '')
if file_name.endswith('.html') or file_name.endswith('.svg'):
name_map[file_name] = True
return name_map
@staticmethod
def GetLayoutTestNamesFromCSV(csv_file_path):
"""Get layout test names from CSV file.
Args:
csv_file_path: the path for the CSV file containing test names (including
regular expression patterns). The CSV file content has one column and
each row contains a test name.
Returns:
a list of test names in string.
"""
file_object = file(csv_file_path, 'r')
reader = csv.reader(file_object)
names = [row[0] for row in reader]
file_object.close()
return names
@staticmethod
def GetParentDirectoryList(names):
"""Get parent directory list from test names.
Args:
names: a list of test names. The test names also have path information as
well (e.g., media/video-zoom.html).
Returns:
a list of parent directories for the given test names.
"""
pd_map = {}
for name in names:
p_dir = name[0:name.rfind('/') + 1]
pd_map[p_dir] = True
return list(pd_map.iterkeys())
def JoinWithTestExpectation(self, test_expectations):
"""Join layout tests with the test expectation file using test name as key.
Args:
test_expectations: a test expectations object.
Returns:
test_info_map contains test name as key and another map as value. The
other map contains test description and the test expectation
information which contains keyword (e.g., 'GPU') as key (we do
not care about values). The map data structure is used since we
have to look up these keywords several times.
"""
test_info_map = {}
for (lt_name, desc) in self.name_map.items():
test_info_map[lt_name] = {}
test_info_map[lt_name]['desc'] = desc
for (te_name, te_info) in (
test_expectations.all_test_expectation_info.items()):
if te_name == lt_name or (
te_name in lt_name and te_name.endswith('/')):
# Only keep the first match when found.
test_info_map[lt_name]['te_info'] = te_info
break
return test_info_map
@staticmethod
def GetTestDescriptionFromSVN(test_location,
root_path=DEFAULT_LAYOUTTEST_LOCATION):
"""Get test description of a layout test from SVN.
Using urllib2.urlopen(), this method gets the entire HTML and extracts its
test description using |ExtractTestDescription()|.
Args:
test_location: the location of the layout test.
root_path: the root path of the Webkit SVN directory.
Returns:
A test description string. An empty string is returned if the
test description cannot be extracted.
Raises:
A URLError when the layout test is not available.
"""
if test_location.endswith('.html'):
url = root_path + test_location
try:
resp = urllib2.urlopen(url)
except urllib2.HTTPError:
# Some files with different languages cause this exception.
# Return an empty description in this case.
return ''
if resp.code == 200:
return LayoutTests.ExtractTestDescription(resp.read())
raise urllib2.URLError(
'Fail to get layout test HTML file from %s.' % url)
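# Illustrative sketch (not part of the original module): ExtractTestDescription
# applied to a minimal, made-up HTML snippet. This exercises rule (1) above,
# where the description sits in a <p> tag and contains one of the description
# keywords.
if __name__ == '__main__':
  example_html = ('<html><body>'
                  '<p>This tests that the video element reports its '
                  'intrinsic size.</p>'
                  '</body></html>')
  print(LayoutTests.ExtractTestDescription(example_html))
  # -> 'This tests that the video element reports its intrinsic size.'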
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import sys
import tempfile
import unittest
try:
from importlib import reload # Python 3.4+ only.
except ImportError:
# Otherwise, we will stick to Python 2's built-in reload.
pass
import py4j
from pyspark import SparkContext, SQLContext
from pyspark.sql import Row, SparkSession
from pyspark.sql.types import *
from pyspark.sql.window import Window
from pyspark.testing.utils import ReusedPySparkTestCase
class HiveContextSQLTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
cls.hive_available = True
cls.spark = None
try:
cls.sc._jvm.org.apache.hadoop.hive.conf.HiveConf()
except py4j.protocol.Py4JError:
cls.tearDownClass()
cls.hive_available = False
except TypeError:
cls.tearDownClass()
cls.hive_available = False
if cls.hive_available:
cls.spark = SparkSession.builder.enableHiveSupport().getOrCreate()
os.unlink(cls.tempdir.name)
if cls.hive_available:
cls.testData = [Row(key=i, value=str(i)) for i in range(100)]
cls.df = cls.sc.parallelize(cls.testData).toDF()
def setUp(self):
if not self.hive_available:
self.skipTest("Hive is not available.")
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name, ignore_errors=True)
if cls.spark is not None:
cls.spark.stop()
cls.spark = None
def test_save_and_load_table(self):
df = self.df
tmpPath = tempfile.mkdtemp()
shutil.rmtree(tmpPath)
df.write.saveAsTable("savedJsonTable", "json", "append", path=tmpPath)
actual = self.spark.catalog.createTable("externalJsonTable", tmpPath, "json")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE externalJsonTable")
df.write.saveAsTable("savedJsonTable", "json", "overwrite", path=tmpPath)
schema = StructType([StructField("value", StringType(), True)])
actual = self.spark.catalog.createTable("externalJsonTable", source="json",
schema=schema, path=tmpPath,
noUse="this options will not be used")
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.select("value").collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
defaultDataSourceName = self.spark.conf.get("spark.sql.sources.default",
"org.apache.spark.sql.parquet")
self.spark.sql("SET spark.sql.sources.default=org.apache.spark.sql.json")
df.write.saveAsTable("savedJsonTable", path=tmpPath, mode="overwrite")
actual = self.spark.catalog.createTable("externalJsonTable", path=tmpPath)
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM savedJsonTable").collect()))
self.assertEqual(sorted(df.collect()),
sorted(self.spark.sql("SELECT * FROM externalJsonTable").collect()))
self.assertEqual(sorted(df.collect()), sorted(actual.collect()))
self.spark.sql("DROP TABLE savedJsonTable")
self.spark.sql("DROP TABLE externalJsonTable")
self.spark.sql("SET spark.sql.sources.default=" + defaultDataSourceName)
shutil.rmtree(tmpPath)
def test_window_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.partitionBy("value").orderBy("key")
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 1, 1, 1, 1, 1),
("2", 1, 1, 1, 3, 1, 1, 1, 1),
("2", 1, 2, 1, 3, 2, 1, 1, 1),
("2", 2, 2, 2, 3, 3, 3, 2, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_without_partitionBy(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
w = Window.orderBy("key", df.value)
from pyspark.sql import functions as F
sel = df.select(df.value, df.key,
F.max("key").over(w.rowsBetween(0, 1)),
F.min("key").over(w.rowsBetween(0, 1)),
F.count("key").over(w.rowsBetween(float('-inf'), float('inf'))),
F.row_number().over(w),
F.rank().over(w),
F.dense_rank().over(w),
F.ntile(2).over(w))
rs = sorted(sel.collect())
expected = [
("1", 1, 1, 1, 4, 1, 1, 1, 1),
("2", 1, 1, 1, 4, 2, 2, 2, 1),
("2", 1, 2, 1, 4, 3, 2, 2, 2),
("2", 2, 2, 2, 4, 4, 4, 3, 2)
]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_window_functions_cumulative_sum(self):
df = self.spark.createDataFrame([("one", 1), ("two", 2)], ["key", "value"])
from pyspark.sql import functions as F
# Test cumulative sum
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values less than JVM's Long.MinValue and make sure we don't overflow
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.unboundedPreceding - 1, 0)))
rs = sorted(sel.collect())
expected = [("one", 1), ("two", 3)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
# Test boundary values greater than JVM's Long.MaxValue and make sure we don't overflow
frame_end = Window.unboundedFollowing + 1
sel = df.select(
df.key,
F.sum(df.value).over(Window.rowsBetween(Window.currentRow, frame_end)))
rs = sorted(sel.collect())
expected = [("one", 3), ("two", 2)]
for r, ex in zip(rs, expected):
self.assertEqual(tuple(r), ex[:len(r)])
def test_collect_functions(self):
df = self.spark.createDataFrame([(1, "1"), (2, "2"), (1, "2"), (1, "2")], ["key", "value"])
from pyspark.sql import functions
self.assertEqual(
sorted(df.select(functions.collect_set(df.key).alias('r')).collect()[0].r),
[1, 2])
self.assertEqual(
sorted(df.select(functions.collect_list(df.key).alias('r')).collect()[0].r),
[1, 1, 1, 2])
self.assertEqual(
sorted(df.select(functions.collect_set(df.value).alias('r')).collect()[0].r),
["1", "2"])
self.assertEqual(
sorted(df.select(functions.collect_list(df.value).alias('r')).collect()[0].r),
["1", "2", "2", "2"])
def test_limit_and_take(self):
df = self.spark.range(1, 1000, numPartitions=10)
def assert_runs_only_one_job_stage_and_task(job_group_name, f):
tracker = self.sc.statusTracker()
self.sc.setJobGroup(job_group_name, description="")
f()
jobs = tracker.getJobIdsForGroup(job_group_name)
self.assertEqual(1, len(jobs))
stages = tracker.getJobInfo(jobs[0]).stageIds
self.assertEqual(1, len(stages))
self.assertEqual(1, tracker.getStageInfo(stages[0]).numTasks)
# Regression test for SPARK-10731: take should delegate to Scala implementation
assert_runs_only_one_job_stage_and_task("take", lambda: df.take(1))
# Regression test for SPARK-17514: limit(n).collect() should perform the same as take(n)
assert_runs_only_one_job_stage_and_task("collect_limit", lambda: df.limit(1).collect())
def test_datetime_functions(self):
from pyspark.sql import functions
from datetime import date
df = self.spark.range(1).selectExpr("'2017-01-22' as dateCol")
parse_result = df.select(functions.to_date(functions.col("dateCol"))).first()
self.assertEquals(date(2017, 1, 22), parse_result['to_date(`dateCol`)'])
def test_unbounded_frames(self):
from pyspark.sql import functions as F
from pyspark.sql import window
df = self.spark.range(0, 3)
def rows_frame_match():
return "ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rowsBetween(-sys.maxsize, sys.maxsize))
).columns[0]
def range_frame_match():
return "RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING" in df.select(
F.count("*").over(window.Window.rangeBetween(-sys.maxsize, sys.maxsize))
).columns[0]
for new_maxsize in [2 ** 31 - 1, 2 ** 63 - 1, 2 ** 127 - 1]:
old_maxsize = sys.maxsize
sys.maxsize = new_maxsize
try:
# Manually reload window module to use monkey-patched sys.maxsize.
reload(window)
self.assertTrue(rows_frame_match())
self.assertTrue(range_frame_match())
finally:
sys.maxsize = old_maxsize
reload(window)
class SQLContextTests(unittest.TestCase):
def test_get_or_create(self):
sc = None
sql_context = None
try:
sc = SparkContext('local[4]', "SQLContextTests")
sql_context = SQLContext.getOrCreate(sc)
assert(isinstance(sql_context, SQLContext))
finally:
if sql_context is not None:
sql_context.sparkSession.stop()
if sc is not None:
sc.stop()
if __name__ == "__main__":
from pyspark.sql.tests.test_context import *
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
|
#!/usr/bin/env python
# Copyright 2019 The ANGLE project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
# This is a modified copy of the script in
# https://webrtc.googlesource.com/src/+/master/tools_webrtc/autoroller/roll_deps.py
# customized for ANGLE.
"""Script to automatically roll Chromium dependencies in the ANGLE DEPS file."""
import argparse
import base64
import collections
import logging
import os
import platform
import re
import subprocess
import sys
import urllib2
def FindSrcDirPath():
"""Returns the abs path to the root dir of the project."""
# Special cased for ANGLE.
return os.path.dirname(os.path.abspath(os.path.join(__file__, '..')))
ANGLE_CHROMIUM_DEPS = [
'build',
'buildtools',
'testing',
'third_party/googletest',
'third_party/libjpeg_turbo',
'third_party/jsoncpp',
'third_party/Python-Markdown',
'third_party/qemu-linux-x64',
'third_party/qemu-mac-x64',
'third_party/yasm',
'third_party/zlib',
'tools/clang',
'tools/md_browser',
'tools/memory',
]
ANGLE_URL = 'https://chromium.googlesource.com/angle/angle'
CHROMIUM_SRC_URL = 'https://chromium.googlesource.com/chromium/src'
CHROMIUM_COMMIT_TEMPLATE = CHROMIUM_SRC_URL + '/+/%s'
CHROMIUM_LOG_TEMPLATE = CHROMIUM_SRC_URL + '/+log/%s'
CHROMIUM_FILE_TEMPLATE = CHROMIUM_SRC_URL + '/+/%s/%s'
COMMIT_POSITION_RE = re.compile('^Cr-Commit-Position: .*#([0-9]+).*$')
CLANG_REVISION_RE = re.compile(r'^CLANG_REVISION = \'([0-9a-z]+)\'')
ROLL_BRANCH_NAME = 'roll_chromium_revision'
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
CHECKOUT_SRC_DIR = FindSrcDirPath()
CHECKOUT_ROOT_DIR = CHECKOUT_SRC_DIR
# Copied from tools/android/roll/android_deps/.../BuildConfigGenerator.groovy.
ANDROID_DEPS_START = r'=== ANDROID_DEPS Generated Code Start ==='
ANDROID_DEPS_END = r'=== ANDROID_DEPS Generated Code End ==='
# Location of automatically gathered android deps.
ANDROID_DEPS_PATH = 'src/third_party/android_deps/'
# TODO(jmadill): Update this with ANGLE wrangler. http://anglebug.com/4059
NOTIFY_EMAIL = '[email protected]'
sys.path.append(os.path.join(CHECKOUT_SRC_DIR, 'build'))
import find_depot_tools
find_depot_tools.add_depot_tools_to_path()
CLANG_UPDATE_SCRIPT_URL_PATH = 'tools/clang/scripts/update.py'
CLANG_UPDATE_SCRIPT_LOCAL_PATH = os.path.join(CHECKOUT_SRC_DIR, 'tools', 'clang', 'scripts',
'update.py')
DepsEntry = collections.namedtuple('DepsEntry', 'path url revision')
ChangedDep = collections.namedtuple('ChangedDep', 'path url current_rev new_rev')
CipdDepsEntry = collections.namedtuple('CipdDepsEntry', 'path packages')
ChangedCipdPackage = collections.namedtuple('ChangedCipdPackage',
'path package current_version new_version')
ChromiumRevisionUpdate = collections.namedtuple('ChromiumRevisionUpdate', ('current_chromium_rev '
'new_chromium_rev '))
class RollError(Exception):
pass
def VarLookup(local_scope):
return lambda var_name: local_scope['vars'][var_name]
def ParseDepsDict(deps_content):
local_scope = {}
global_scope = {
'Var': VarLookup(local_scope),
'deps_os': {},
}
exec (deps_content, global_scope, local_scope)
return local_scope
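# Illustrative sketch (hypothetical helper, not in the original script):
# ParseDepsDict run on a tiny, made-up DEPS snippet. Var() lookups resolve
# against the vars dict, as gclient would, and the parsed vars and deps land in
# the returned local scope.
def _ExampleParseDepsDict():
    example_deps = """
vars = {
  'chromium_git': 'https://chromium.googlesource.com',
  'zlib_revision': 'deadbeef',
}
deps = {
  'third_party/zlib':
    Var('chromium_git') + '/chromium/src/third_party/zlib.git@' + Var('zlib_revision'),
}
"""
    parsed = ParseDepsDict(example_deps)
    assert parsed['vars']['zlib_revision'] == 'deadbeef'
    assert parsed['deps']['third_party/zlib'].endswith('@deadbeef')
    return parsed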
def ParseLocalDepsFile(filename):
with open(filename, 'rb') as f:
deps_content = f.read()
return ParseDepsDict(deps_content)
def ParseCommitPosition(commit_message):
for line in reversed(commit_message.splitlines()):
m = COMMIT_POSITION_RE.match(line.strip())
if m:
return int(m.group(1))
logging.error('Failed to parse commit position id from:\n%s\n', commit_message)
sys.exit(-1)
def _RunCommand(command, working_dir=None, ignore_exit_code=False, extra_env=None,
input_data=None):
"""Runs a command and returns the output from that command.
If the command fails (exit code != 0), the function will exit the process.
Returns:
A tuple containing the stdout and stderr outputs as strings.
"""
working_dir = working_dir or CHECKOUT_SRC_DIR
logging.debug('CMD: %s CWD: %s', ' '.join(command), working_dir)
env = os.environ.copy()
if extra_env:
assert all(isinstance(value, str) for value in extra_env.values())
logging.debug('extra env: %s', extra_env)
env.update(extra_env)
p = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=working_dir,
universal_newlines=True)
std_output, err_output = p.communicate(input_data)
p.stdout.close()
p.stderr.close()
if not ignore_exit_code and p.returncode != 0:
logging.error('Command failed: %s\n'
'stdout:\n%s\n'
'stderr:\n%s\n', ' '.join(command), std_output, err_output)
sys.exit(p.returncode)
return std_output, err_output
def _GetBranches():
"""Returns a tuple of active,branches.
The 'active' is the name of the currently active branch and 'branches' is a
list of all branches.
"""
lines = _RunCommand(['git', 'branch'])[0].split('\n')
branches = []
active = ''
for line in lines:
if '*' in line:
# The assumption is that the first char will always be the '*'.
active = line[1:].strip()
branches.append(active)
else:
branch = line.strip()
if branch:
branches.append(branch)
return active, branches
def _ReadGitilesContent(url):
# Download and decode BASE64 content until
# https://code.google.com/p/gitiles/issues/detail?id=7 is fixed.
base64_content = ReadUrlContent(url + '?format=TEXT')
return base64.b64decode(base64_content[0])
def ReadRemoteCrFile(path_below_src, revision):
"""Reads a remote Chromium file of a specific revision. Returns a string."""
return _ReadGitilesContent(CHROMIUM_FILE_TEMPLATE % (revision, path_below_src))
def ReadRemoteCrCommit(revision):
"""Reads a remote Chromium commit message. Returns a string."""
return _ReadGitilesContent(CHROMIUM_COMMIT_TEMPLATE % revision)
def ReadUrlContent(url):
"""Connect to a remote host and read the contents. Returns a list of lines."""
conn = urllib2.urlopen(url)
try:
return conn.readlines()
except IOError as e:
logging.exception('Error connecting to %s. Error: %s', url, e)
raise
finally:
conn.close()
def GetMatchingDepsEntries(depsentry_dict, dir_path):
"""Gets all deps entries matching the provided path.
This list may contain more than one DepsEntry object.
Example: dir_path='src/testing' would give results containing both
'src/testing/gtest' and 'src/testing/gmock' deps entries for Chromium's DEPS.
Example 2: dir_path='src/build' should return 'src/build' but not
'src/buildtools'.
Returns:
A list of DepsEntry objects.
"""
result = []
for path, depsentry in depsentry_dict.iteritems():
if path == dir_path:
result.append(depsentry)
else:
parts = path.split('/')
if all(part == parts[i] for i, part in enumerate(dir_path.split('/'))):
result.append(depsentry)
return result
def BuildDepsentryDict(deps_dict):
"""Builds a dict of paths to DepsEntry objects from a raw parsed deps dict."""
result = {}
def AddDepsEntries(deps_subdict):
for path, dep in deps_subdict.iteritems():
if path in result:
continue
if not isinstance(dep, dict):
dep = {'url': dep}
if dep.get('dep_type') == 'cipd':
result[path] = CipdDepsEntry(path, dep['packages'])
else:
if '@' not in dep['url']:
continue
url, revision = dep['url'].split('@')
result[path] = DepsEntry(path, url, revision)
AddDepsEntries(deps_dict['deps'])
for deps_os in ['win', 'mac', 'unix', 'android', 'ios']:
AddDepsEntries(deps_dict.get('deps_os', {}).get(deps_os, {}))
return result
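# Illustrative sketch (hypothetical helper, not in the original script; Python 2
# like the rest of this file): BuildDepsentryDict splits '<url>@<revision>'
# strings into DepsEntry tuples and keeps CIPD packages as CipdDepsEntry, which
# is the shape CalculateChangedDeps below works with. The paths and revisions
# here are made up.
def _ExampleBuildDepsentryDict():
    example_deps_dict = {
        'deps': {
            'src/third_party/zlib':
                'https://chromium.googlesource.com/chromium/src/third_party/zlib.git@deadbeef',
            'src/third_party/android_deps/libs/foo': {
                'dep_type': 'cipd',
                'packages': [{'package': 'chromium/foo', 'version': '1.0'}],
            },
        },
    }
    entries = BuildDepsentryDict(example_deps_dict)
    assert entries['src/third_party/zlib'].revision == 'deadbeef'
    assert isinstance(entries['src/third_party/android_deps/libs/foo'], CipdDepsEntry)
    return entries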
def _FindChangedCipdPackages(path, old_pkgs, new_pkgs):
pkgs_equal = ({p['package'] for p in old_pkgs} == {p['package'] for p in new_pkgs})
assert pkgs_equal, 'Old: %s\n New: %s' % (old_pkgs, new_pkgs)
for old_pkg in old_pkgs:
for new_pkg in new_pkgs:
old_version = old_pkg['version']
new_version = new_pkg['version']
if (old_pkg['package'] == new_pkg['package'] and old_version != new_version):
logging.debug('Roll dependency %s to %s', path, new_version)
yield ChangedCipdPackage(path, old_pkg['package'], old_version, new_version)
def _FindNewDeps(old, new):
""" Gather dependencies only in |new| and return corresponding paths. """
old_entries = set(BuildDepsentryDict(old))
new_entries = set(BuildDepsentryDict(new))
return [path for path in new_entries - old_entries if path in ANGLE_CHROMIUM_DEPS]
def CalculateChangedDeps(angle_deps, new_cr_deps):
"""
Calculate changed deps entries based on entries defined in the ANGLE DEPS
file:
- If a shared dependency with the Chromium DEPS file: roll it to the same
revision as Chromium (i.e. entry in the new_cr_deps dict)
- If it's a Chromium sub-directory, roll it to the HEAD revision (notice
this means it may be ahead of the chromium_revision, but generally these
should be close).
- If it's another DEPS entry (not shared with Chromium), roll it to HEAD
unless it's configured to be skipped.
Returns:
A list of ChangedDep objects representing the changed deps.
"""
def ChromeURL(angle_deps_entry):
# Perform variable substitutions.
# This is a hack to get around the unsupported way this script parses DEPS.
# A better fix would be to use the gclient APIs to query and update DEPS.
# However this is complicated by how this script downloads DEPS remotely.
return angle_deps_entry.url.replace('{chromium_git}', 'https://chromium.googlesource.com')
result = []
angle_entries = BuildDepsentryDict(angle_deps)
new_cr_entries = BuildDepsentryDict(new_cr_deps)
for path, angle_deps_entry in angle_entries.iteritems():
if path not in ANGLE_CHROMIUM_DEPS:
continue
# All ANGLE Chromium dependencies are located in src/.
chrome_path = 'src/%s' % path
cr_deps_entry = new_cr_entries.get(chrome_path)
if cr_deps_entry:
assert type(cr_deps_entry) is type(angle_deps_entry)
if isinstance(cr_deps_entry, CipdDepsEntry):
result.extend(
_FindChangedCipdPackages(chrome_path, angle_deps_entry.packages,
cr_deps_entry.packages))
continue
# Use the revision from Chromium's DEPS file.
new_rev = cr_deps_entry.revision
assert ChromeURL(angle_deps_entry) == cr_deps_entry.url, (
'ANGLE DEPS entry %s has a different URL (%s) than Chromium (%s).' %
(path, ChromeURL(angle_deps_entry), cr_deps_entry.url))
else:
if isinstance(angle_deps_entry, DepsEntry):
# Use the HEAD of the deps repo.
stdout, _ = _RunCommand(['git', 'ls-remote', ChromeURL(angle_deps_entry), 'HEAD'])
new_rev = stdout.strip().split('\t')[0]
else:
# The dependency has been removed from chromium.
# This is handled by FindRemovedDeps.
continue
# Check if an update is necessary.
if angle_deps_entry.revision != new_rev:
logging.debug('Roll dependency %s to %s', path, new_rev)
result.append(
ChangedDep(path, ChromeURL(angle_deps_entry), angle_deps_entry.revision, new_rev))
return sorted(result)
def CalculateChangedClang(new_cr_rev):
def GetClangRev(lines):
for line in lines:
match = CLANG_REVISION_RE.match(line)
if match:
return match.group(1)
raise RollError('Could not parse Clang revision!')
with open(CLANG_UPDATE_SCRIPT_LOCAL_PATH, 'rb') as f:
current_lines = f.readlines()
current_rev = GetClangRev(current_lines)
new_clang_update_py = ReadRemoteCrFile(CLANG_UPDATE_SCRIPT_URL_PATH, new_cr_rev).splitlines()
new_rev = GetClangRev(new_clang_update_py)
return ChangedDep(CLANG_UPDATE_SCRIPT_LOCAL_PATH, None, current_rev, new_rev)
def GenerateCommitMessage(
rev_update,
current_commit_pos,
new_commit_pos,
changed_deps_list,
clang_change=None,
):
current_cr_rev = rev_update.current_chromium_rev[0:10]
new_cr_rev = rev_update.new_chromium_rev[0:10]
rev_interval = '%s..%s' % (current_cr_rev, new_cr_rev)
git_number_interval = '%s:%s' % (current_commit_pos, new_commit_pos)
commit_msg = [
'Roll chromium_revision %s (%s)\n' % (rev_interval, git_number_interval),
'Change log: %s' % (CHROMIUM_LOG_TEMPLATE % rev_interval),
'Full diff: %s\n' % (CHROMIUM_COMMIT_TEMPLATE % rev_interval)
]
def Section(adjective, deps):
noun = 'dependency' if len(deps) == 1 else 'dependencies'
commit_msg.append('%s %s' % (adjective, noun))
tbr_authors = ''
if changed_deps_list:
Section('Changed', changed_deps_list)
for c in changed_deps_list:
if isinstance(c, ChangedCipdPackage):
commit_msg.append('* %s: %s..%s' % (c.path, c.current_version, c.new_version))
else:
commit_msg.append(
'* %s: %s/+log/%s..%s' % (c.path, c.url, c.current_rev[0:10], c.new_rev[0:10]))
if changed_deps_list:
change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval, 'DEPS')
commit_msg.append('DEPS diff: %s\n' % change_url)
else:
commit_msg.append('No dependencies changed.')
if clang_change and clang_change.current_rev != clang_change.new_rev:
commit_msg.append(
'Clang version changed %s:%s' % (clang_change.current_rev, clang_change.new_rev))
change_url = CHROMIUM_FILE_TEMPLATE % (rev_interval, CLANG_UPDATE_SCRIPT_URL_PATH)
commit_msg.append('Details: %s\n' % change_url)
else:
commit_msg.append('No update to Clang.\n')
# TBR needs to be non-empty for Gerrit to process it.
git_author = _RunCommand(['git', 'config', 'user.email'],
working_dir=CHECKOUT_SRC_DIR)[0].splitlines()[0]
tbr_authors = git_author + ',' + tbr_authors
commit_msg.append('TBR=%s' % tbr_authors)
commit_msg.append('BUG=None')
return '\n'.join(commit_msg)
def UpdateDepsFile(deps_filename, rev_update, changed_deps, new_cr_content):
"""Update the DEPS file with the new revision."""
with open(deps_filename, 'rb') as deps_file:
deps_content = deps_file.read()
# Update the chromium_revision variable.
deps_content = deps_content.replace(rev_update.current_chromium_rev,
rev_update.new_chromium_rev)
with open(deps_filename, 'wb') as deps_file:
deps_file.write(deps_content)
# Update each individual DEPS entry.
for dep in changed_deps:
local_dep_dir = os.path.join(CHECKOUT_ROOT_DIR, dep.path)
if not os.path.isdir(local_dep_dir):
raise RollError('Cannot find local directory %s. Either run\n'
'gclient sync --deps=all\n'
'or make sure the .gclient file for your solution contains all '
'platforms in the target_os list, i.e.\n'
'target_os = ["android", "unix", "mac", "ios", "win"];\n'
'Then run "gclient sync" again.' % local_dep_dir)
if isinstance(dep, ChangedCipdPackage):
package = dep.package.format() # Eliminate double curly brackets
update = '%s:%s@%s' % (dep.path, package, dep.new_version)
else:
update = '%s@%s' % (dep.path, dep.new_rev)
gclient_cmd = 'gclient'
if platform.system() == 'Windows':
gclient_cmd += '.bat'
_RunCommand([gclient_cmd, 'setdep', '--revision', update], working_dir=CHECKOUT_SRC_DIR)
def _IsTreeClean():
stdout, _ = _RunCommand(['git', 'status', '--porcelain'])
if len(stdout) == 0:
return True
logging.error('Dirty/unversioned files:\n%s', stdout)
return False
def _EnsureUpdatedMasterBranch(dry_run):
current_branch = _RunCommand(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])[0].splitlines()[0]
if current_branch != 'master':
logging.error('Please checkout the master branch and re-run this script.')
if not dry_run:
sys.exit(-1)
logging.info('Updating master branch...')
_RunCommand(['git', 'pull'])
def _CreateRollBranch(dry_run):
logging.info('Creating roll branch: %s', ROLL_BRANCH_NAME)
if not dry_run:
_RunCommand(['git', 'checkout', '-b', ROLL_BRANCH_NAME])
def _RemovePreviousRollBranch(dry_run):
active_branch, branches = _GetBranches()
if active_branch == ROLL_BRANCH_NAME:
active_branch = 'master'
if ROLL_BRANCH_NAME in branches:
logging.info('Removing previous roll branch (%s)', ROLL_BRANCH_NAME)
if not dry_run:
_RunCommand(['git', 'checkout', active_branch])
_RunCommand(['git', 'branch', '-D', ROLL_BRANCH_NAME])
def _LocalCommit(commit_msg, dry_run):
logging.info('Committing changes locally.')
if not dry_run:
_RunCommand(['git', 'add', '--update', '.'])
_RunCommand(['git', 'commit', '-m', commit_msg])
def ChooseCQMode(skip_cq, cq_over, current_commit_pos, new_commit_pos):
if skip_cq:
return 0
if (new_commit_pos - current_commit_pos) < cq_over:
return 1
return 2
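# Illustrative sketch (hypothetical helper, not in the original script): the
# three commit-queue modes returned above map onto _UploadCL below, where
# 0 skips the CQ, 1 triggers a CQ dry run, and 2 submits to the CQ. With the
# default --cq-over of 1, any real revision delta goes straight to mode 2.
def _ExampleChooseCQMode():
    assert ChooseCQMode(skip_cq=True, cq_over=1, current_commit_pos=100, new_commit_pos=200) == 0
    assert ChooseCQMode(skip_cq=False, cq_over=1000, current_commit_pos=100, new_commit_pos=200) == 1
    assert ChooseCQMode(skip_cq=False, cq_over=1, current_commit_pos=100, new_commit_pos=200) == 2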
def _UploadCL(commit_queue_mode):
"""Upload the committed changes as a changelist to Gerrit.
commit_queue_mode:
- 2: Submit to commit queue.
- 1: Run trybots but do not submit to CQ.
- 0: Skip CQ, upload only.
"""
cmd = ['git', 'cl', 'upload', '--force', '--bypass-hooks', '--send-mail']
cmd.extend(['--cc', NOTIFY_EMAIL])
if commit_queue_mode >= 2:
logging.info('Sending the CL to the CQ...')
cmd.extend(['--use-commit-queue'])
elif commit_queue_mode >= 1:
logging.info('Starting CQ dry run...')
cmd.extend(['--cq-dry-run'])
extra_env = {
'EDITOR': 'true',
'SKIP_GCE_AUTH_FOR_GIT': '1',
}
stdout, stderr = _RunCommand(cmd, extra_env=extra_env)
logging.debug('Output from "git cl upload":\nstdout:\n%s\n\nstderr:\n%s', stdout, stderr)
def GetRollRevisionRanges(opts, angle_deps):
current_cr_rev = angle_deps['vars']['chromium_revision']
new_cr_rev = opts.revision
if not new_cr_rev:
stdout, _ = _RunCommand(['git', 'ls-remote', CHROMIUM_SRC_URL, 'HEAD'])
head_rev = stdout.strip().split('\t')[0]
logging.info('No revision specified. Using HEAD: %s', head_rev)
new_cr_rev = head_rev
return ChromiumRevisionUpdate(current_cr_rev, new_cr_rev)
def main():
p = argparse.ArgumentParser()
p.add_argument(
'--clean',
action='store_true',
default=False,
help='Removes any previous local roll branch.')
p.add_argument(
'-r',
'--revision',
help=('Chromium Git revision to roll to. Defaults to the '
'Chromium HEAD revision if omitted.'))
p.add_argument(
'--dry-run',
action='store_true',
default=False,
help=('Calculate changes and modify DEPS, but don\'t create '
'any local branch, commit, upload CL or send any '
'tryjobs.'))
p.add_argument(
'-i',
'--ignore-unclean-workdir',
action='store_true',
default=False,
help=('Ignore if the current branch is not master or if there '
'are uncommitted changes (default: %(default)s).'))
grp = p.add_mutually_exclusive_group()
grp.add_argument(
'--skip-cq',
action='store_true',
default=False,
help='Skip sending the CL to the CQ (default: %(default)s)')
grp.add_argument(
'--cq-over',
type=int,
default=1,
help=('Commit queue dry run if the revision difference '
'is below this number (default: %(default)s)'))
p.add_argument(
'-v',
'--verbose',
action='store_true',
default=False,
help='Be extra verbose in printing of log messages.')
opts = p.parse_args()
if opts.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
if not opts.ignore_unclean_workdir and not _IsTreeClean():
logging.error('Please clean your local checkout first.')
return 1
if opts.clean:
_RemovePreviousRollBranch(opts.dry_run)
if not opts.ignore_unclean_workdir:
_EnsureUpdatedMasterBranch(opts.dry_run)
deps_filename = os.path.join(CHECKOUT_SRC_DIR, 'DEPS')
angle_deps = ParseLocalDepsFile(deps_filename)
rev_update = GetRollRevisionRanges(opts, angle_deps)
current_commit_pos = ParseCommitPosition(ReadRemoteCrCommit(rev_update.current_chromium_rev))
new_commit_pos = ParseCommitPosition(ReadRemoteCrCommit(rev_update.new_chromium_rev))
new_cr_content = ReadRemoteCrFile('DEPS', rev_update.new_chromium_rev)
new_cr_deps = ParseDepsDict(new_cr_content)
changed_deps = CalculateChangedDeps(angle_deps, new_cr_deps)
clang_change = CalculateChangedClang(rev_update.new_chromium_rev)
commit_msg = GenerateCommitMessage(
rev_update, current_commit_pos, new_commit_pos, changed_deps, clang_change=clang_change)
logging.debug('Commit message:\n%s', commit_msg)
_CreateRollBranch(opts.dry_run)
if not opts.dry_run:
UpdateDepsFile(deps_filename, rev_update, changed_deps, new_cr_content)
if _IsTreeClean():
logging.info("No DEPS changes detected, skipping CL creation.")
else:
_LocalCommit(commit_msg, opts.dry_run)
commit_queue_mode = ChooseCQMode(opts.skip_cq, opts.cq_over, current_commit_pos,
new_commit_pos)
logging.info('Uploading CL...')
if not opts.dry_run:
_UploadCL(commit_queue_mode)
return 0
if __name__ == '__main__':
sys.exit(main())
|
|
# Copyright (c) 2013 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.api.v2 import attributes as attr
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import db_base_plugin_v2
from neutron.db import portsecurity_db
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import portsecurity as psec
from neutron import manager
from neutron.tests.unit import test_db_plugin
from oslo.config import cfg
DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_allowedaddresspairs.'
'AllowedAddressPairTestPlugin')
class AllowedAddressPairTestCase(test_db_plugin.NeutronDbPluginV2TestCase):
def setUp(self, plugin=None, ext_mgr=None):
super(AllowedAddressPairTestCase, self).setUp(plugin)
        # Check whether the plugin supports the port-security extension
plugin_obj = manager.NeutronManager.get_plugin()
self._skip_port_security = ('port-security' not in
plugin_obj.supported_extension_aliases)
class AllowedAddressPairTestPlugin(portsecurity_db.PortSecurityDbMixin,
db_base_plugin_v2.NeutronDbPluginV2,
addr_pair_db.AllowedAddressPairsMixin):
"""Test plugin that implements necessary calls on create/delete port for
associating ports with port security and allowed address pairs.
"""
supported_extension_aliases = ["allowed-address-pairs"]
def create_port(self, context, port):
p = port['port']
with context.session.begin(subtransactions=True):
neutron_db = super(AllowedAddressPairTestPlugin, self).create_port(
context, port)
p.update(neutron_db)
if attr.is_attr_set(p.get(addr_pair.ADDRESS_PAIRS)):
self._process_create_allowed_address_pairs(
context, p,
p[addr_pair.ADDRESS_PAIRS])
else:
p[addr_pair.ADDRESS_PAIRS] = None
return port['port']
def update_port(self, context, id, port):
delete_addr_pairs = self._check_update_deletes_allowed_address_pairs(
port)
has_addr_pairs = self._check_update_has_allowed_address_pairs(port)
with context.session.begin(subtransactions=True):
ret_port = super(AllowedAddressPairTestPlugin, self).update_port(
context, id, port)
# copy values over - but not fixed_ips
port['port'].pop('fixed_ips', None)
ret_port.update(port['port'])
if (delete_addr_pairs or has_addr_pairs):
                # delete address pairs and re-add them
self._delete_allowed_address_pairs(context, id)
self._process_create_allowed_address_pairs(
context, ret_port,
ret_port[addr_pair.ADDRESS_PAIRS])
return ret_port
class AllowedAddressPairDBTestCase(AllowedAddressPairTestCase):
def setUp(self, plugin=None, ext_mgr=None):
plugin = plugin or DB_PLUGIN_KLASS
super(AllowedAddressPairDBTestCase,
self).setUp(plugin=plugin, ext_mgr=ext_mgr)
class TestAllowedAddressPairs(AllowedAddressPairDBTestCase):
def test_create_port_allowed_address_pairs(self):
with self.network() as net:
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=(addr_pair.ADDRESS_PAIRS,),
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)
self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
address_pairs)
self._delete('ports', port['port']['id'])
def test_create_port_security_true_allowed_address_pairs(self):
if self._skip_port_security:
self.skipTest("Plugin does not implement port-security extension")
with self.network() as net:
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',
addr_pair.ADDRESS_PAIRS,),
port_security_enabled=True,
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
address_pairs)
self._delete('ports', port['port']['id'])
def test_create_port_security_false_allowed_address_pairs(self):
if self._skip_port_security:
self.skipTest("Plugin does not implement port-security extension")
with self.network() as net:
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',
addr_pair.ADDRESS_PAIRS,),
port_security_enabled=False,
allowed_address_pairs=address_pairs)
self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, 409)
def test_create_port_zero_prefix_ip(self):
address_pairs = [{'mac_address': 'invalid_mac',
'ip_address': '0.0.0.0/0'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_create_port_bad_mac(self):
address_pairs = [{'mac_address': 'invalid_mac',
'ip_address': '10.0.0.1'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_create_port_bad_ip(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1222'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_create_missing_ip_field(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_create_duplicate_mac_ip(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'},
{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_more_than_max_allowed_address_pair(self):
cfg.CONF.set_default('max_allowed_address_pair', 3)
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'},
{'mac_address': '00:00:00:00:00:02',
'ip_address': '10.0.0.2'},
{'mac_address': '00:00:00:00:00:03',
'ip_address': '10.0.0.3'},
{'mac_address': '00:00:00:00:00:04',
'ip_address': '10.0.0.4'}]
self._create_port_with_address_pairs(address_pairs, 400)
def test_equal_to_max_allowed_address_pair(self):
cfg.CONF.set_default('max_allowed_address_pair', 3)
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'},
{'mac_address': '00:00:00:00:00:02',
'ip_address': '10.0.0.2'},
{'mac_address': '00:00:00:00:00:03',
'ip_address': '10.0.0.3'}]
self._create_port_with_address_pairs(address_pairs, 201)
def test_create_overlap_with_fixed_ip(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.2'}]
with self.network() as network:
with self.subnet(network=network, cidr='10.0.0.0/24') as subnet:
fixed_ips = [{'subnet_id': subnet['subnet']['id'],
'ip_address': '10.0.0.2'}]
res = self._create_port(self.fmt, network['network']['id'],
arg_list=(addr_pair.ADDRESS_PAIRS,
'fixed_ips'),
allowed_address_pairs=address_pairs,
fixed_ips=fixed_ips)
self.assertEqual(res.status_int, 201)
port = self.deserialize(self.fmt, res)
self._delete('ports', port['port']['id'])
def test_create_port_extra_args(self):
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1',
'icbb': 'agreed'}]
self._create_port_with_address_pairs(address_pairs, 400)
def _create_port_with_address_pairs(self, address_pairs, ret_code):
with self.network() as net:
res = self._create_port(self.fmt, net['network']['id'],
arg_list=(addr_pair.ADDRESS_PAIRS,),
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)
self.assertEqual(res.status_int, ret_code)
if ret_code == 201:
self._delete('ports', port['port']['id'])
def test_update_add_address_pairs(self):
with self.network() as net:
res = self._create_port(self.fmt, net['network']['id'])
port = self.deserialize(self.fmt, res)
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
update_port = {'port': {addr_pair.ADDRESS_PAIRS:
address_pairs}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
port = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS],
address_pairs)
self._delete('ports', port['port']['id'])
def test_create_address_gets_port_mac(self):
with self.network() as net:
address_pairs = [{'ip_address': '23.23.23.23'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',
addr_pair.ADDRESS_PAIRS,),
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)['port']
port_addr_mac = port[addr_pair.ADDRESS_PAIRS][0]['mac_address']
self.assertEqual(port_addr_mac,
port['mac_address'])
self._delete('ports', port['id'])
def test_update_port_security_off_address_pairs(self):
if self._skip_port_security:
self.skipTest("Plugin does not implement port-security extension")
with self.network() as net:
with self.subnet(network=net):
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=('port_security_enabled',
addr_pair.ADDRESS_PAIRS,),
port_security_enabled=True,
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)
update_port = {'port': {psec.PORTSECURITY: False}}
# If plugin implements security groups we also need to remove
# the security group on port.
plugin_obj = manager.NeutronManager.get_plugin()
if 'security-groups' in plugin_obj.supported_extension_aliases:
update_port['port']['security_groups'] = []
req = self.new_update_request('ports', update_port,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 409)
self._delete('ports', port['port']['id'])
def test_create_port_remove_allowed_address_pairs(self):
with self.network() as net:
address_pairs = [{'mac_address': '00:00:00:00:00:01',
'ip_address': '10.0.0.1'}]
res = self._create_port(self.fmt, net['network']['id'],
arg_list=(addr_pair.ADDRESS_PAIRS,),
allowed_address_pairs=address_pairs)
port = self.deserialize(self.fmt, res)
update_port = {'port': {addr_pair.ADDRESS_PAIRS: []}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
port = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(port['port'][addr_pair.ADDRESS_PAIRS], [])
self._delete('ports', port['port']['id'])
class TestAllowedAddressPairsXML(TestAllowedAddressPairs):
fmt = 'xml'
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
from measurement_stats import angle
from measurement_stats import ops
from measurement_stats import value
class Point2D(object):
    """A 2D point whose x and y values are value.ValueUncertainty instances."""
def __init__(self, x=None, y=None):
if x is None:
x = value.ValueUncertainty()
if y is None:
y = value.ValueUncertainty()
self.x = x
self.y = y
@property
def length(self):
""" The length of the vector
:return:
:rtype: value.ValueUncertainty
"""
return self.distance_from(self.__class__())
@property
def nonzero(self):
        return self.x.value != 0.0 or self.y.value != 0.0
def copy_from(self, source):
"""
:param source:
:return:
"""
self.x.copy(source.x)
self.y.copy(source.y)
def clone(self):
"""
:return:
"""
return self.__class__(x=self.x.clone(), y=self.y.clone())
def invert(self):
""" Switches the sign of the x and y values so that x = -x and y = -y.
"""
self.x.value = -self.x.value
self.y.value = -self.y.value
    def rotate(self, rotation_angle, origin=None):
        """ Rotates the position value by the specified angle using a standard
        2D rotation matrix formulation. If an origin Point2D instance is not
        specified, the rotation occurs around the coordinate origin (0, 0). If
        an origin is specified, the uncertainty in that origin value is
        propagated through to the uncertainty of the rotated result.
:param rotation_angle:
:param origin: (optional)
:return:
"""
if origin is None:
origin = self.__class__(
value.ValueUncertainty(0, 0),
value.ValueUncertainty(0, 0)
)
a = rotation_angle.radians
x = self.x.raw - origin.x.raw
y = self.y.raw - origin.y.raw
self.x.update(
x * math.cos(a) - y*math.sin(a) + origin.x.raw,
ops.sqrt_sum_of_squares(self.x.uncertainty, origin.x.uncertainty)
)
self.y.update(
y * math.cos(a) + x*math.sin(a) + origin.y.raw,
ops.sqrt_sum_of_squares(self.y.uncertainty, origin.y.uncertainty)
)
return self
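    # Illustrative usage sketch (hypothetical values; kept in comments so it
    # does not execute at import time): rotate a point 90 degrees about
    # another point, folding that origin's uncertainty into the result.
    #
    #   p = create_point(1.0, 0.0)                   # create_point is defined below
    #   center = create_point(0.0, 0.0)
    #   p.rotate(angle.Angle(degrees=90.0), center)  # p becomes roughly (0, 1)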
def distance_from(self, point):
"""
:param point:
:return:
"""
x_delta = self.x - point.x
y_delta = self.y - point.y
return (x_delta * x_delta + y_delta * y_delta) ** 0.5
    def serialize(self):
        """Return a dictionary representation of the point."""
return dict(
x=self.x.serialize(),
y=self.y.serialize()
)
    def normalize(self):
        """Scale the point to unit length; return False if the length is zero."""
length = self.length
if length == 0.0:
return False
self.x /= length
self.y /= length
return True
def angle_between(self, point):
"""
:param point:
:return:
"""
my_length = self.length
pos_length = point.length
numerator = self.x*point.x + self.y*point.y
denominator = my_length * pos_length
if value.equivalent(denominator.value, 0.0, 1e-6):
return angle.Angle(radians=0.0, uncertainty=0.5*math.pi)
result = numerator/denominator
if value.equivalent(result.value, 1.0, 1e-5):
return angle.Angle()
try:
if value.equivalent(result.value, -1.0, 1e-5):
a = math.pi
else:
a = math.acos(result.raw)
except Exception:
print('[ERROR]: Unable to calculate angle between', result)
return angle.Angle()
if value.equivalent(a, math.pi, 1e-5):
return angle.Angle(radians=a, uncertainty_degrees=180.0)
try:
aUnc = abs(1.0 / (1.0 - result.raw * result.raw) ** 0.5) * \
result.raw_uncertainty
except Exception as err:
print('[ERROR]: Unable to calculate angle between uncertainty',
result, a)
return angle.Angle()
return angle.Angle(radians=a, uncertainty=aUnc)
def __pow__(self, power, modulo=None):
try:
return self.__class__(
x=self.x ** power.x,
y=self.y ** power.y
)
except Exception:
return self.__class__(
x=self.x ** power,
y=self.y ** power
)
def __add__(self, other):
try:
return self.__class__(
x=self.x + other.x,
y=self.y + other.y
)
except Exception:
return self.__class__(
x=self.x + other,
y=self.y + other
)
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
try:
return self.__class__(
x=self.x - other.x,
y=self.y - other.y
)
except Exception:
return self.__class__(
x=self.x - other,
y=self.y - other
)
def __rsub__(self, other):
try:
return self.__class__(
x=other.x - self.x,
y=other.y - self.y
)
except Exception:
return self.__class__(
x=other - self.x,
y=other - self.y
)
def __mul__(self, other):
try:
return self.__class__(x=self.x*other.x, y=self.y*other.y)
except Exception:
return self.__class__(x=self.x*other, y=self.y*other)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
try:
return self.__class__(x=self.x/other.x, y=self.y/other.y)
except Exception:
return self.__class__(x=self.x/other, y=self.y/other)
def __rtruediv__(self, other):
return self.__truediv__(other)
def __div__(self, other):
return self.__truediv__(other)
def __rdiv__(self, other):
return self.__rtruediv__(other)
def __repr__(self):
return self.__str__()
def __unicode__(self):
return '<{} {} {}>'.format(
self.__class__.__name__,
self.x.label,
self.y.label
)
def __str__(self):
return '{}'.format(self.__unicode__())
def create_point(x=0.0, y=0.0, x_unc=0.001, y_unc=0.001):
"""
:param x:
:param y:
:param x_unc:
:param y_unc:
:return:
"""
return Point2D(
x=value.ValueUncertainty(x, x_unc),
y=value.ValueUncertainty(y, y_unc)
)
def closest_point_on_line(point, line_start, line_end, contained=True):
"""
Finds the closest point on a line to the specified point using the formulae
discussed in the "another formula" section of:
wikipedia.org/wiki/Distance_from_a_point_to_a_line#Another_formula
"""
length = line_start.distance_from(line_end)
if not length:
raise ValueError('Cannot calculate point. Invalid line segment.')
s = line_start
e = line_end
delta_x = e.x.raw - s.x.raw
delta_y = e.y.raw - s.y.raw
rotate = False
slope = 0.0
slope_unc = 0.0
try:
slope = delta_y / delta_x
slope_unc = (
abs(1.0 / delta_x) * (
s.y.raw_uncertainty + e.y.raw_uncertainty
) + abs(
slope / delta_x
) * (
s.x.raw_uncertainty + e.x.raw_uncertainty
)
)
    except Exception:
        # A vertical segment (delta_x == 0) raises ZeroDivisionError here;
        # fall back to the rotated computation below instead of re-raising.
        rotate = True
if rotate or (abs(slope) > 1.0 and abs(slope_unc / slope) > 0.5):
a = angle.Angle(degrees=20.0)
e2 = e.clone().rotate(a, s)
p2 = point.clone().rotate(a, s)
print(point, p2)
print(e, e2)
result = closest_point_on_line(p2, s, e2, contained)
if result is None:
return result
a.degrees = -20.0
result.rotate(a, s)
return result
intercept = s.y.raw - slope * s.x.raw
denom = slope * slope + 1.0
numer = point.x.raw + slope * (point.y.raw - intercept)
x = numer / denom
y = (slope * numer) / denom + intercept
if contained:
# Check to see if point is between start and end values
x_range = sorted([s.x.raw, e.x.raw])
y_range = sorted([s.y.raw, e.y.raw])
eps = 1e-8
x_min = x - eps
x_max = x + eps
y_min = y - eps
y_max = y + eps
out_of_bounds = (
x_range[1] < x_min or
x_max < x_range[0] or
y_range[1] < y_min or
y_max < y_range[0]
)
if out_of_bounds:
return None
start_dist = ops.sqrt_sum_of_squares(s.x.raw - x, s.y.raw - y)
end_dist = ops.sqrt_sum_of_squares(e.x.raw - x, e.y.raw - y)
x_unc = (
start_dist / length.raw * s.x.raw_uncertainty +
end_dist / length.raw * e.x.raw_uncertainty
)
x_unc = math.sqrt(x_unc ** 2 + point.x.raw_uncertainty ** 2)
y_unc = (
start_dist / length.raw * s.y.raw_uncertainty +
end_dist / length.raw * e.y.raw_uncertainty
)
y_unc = math.sqrt(y_unc ** 2 + point.y.raw_uncertainty ** 2)
return create_point(x=x, y=y, x_unc=x_unc, y_unc=y_unc)
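
# Minimal usage sketch, guarded so it only runs when this module is executed
# directly. The resulting uncertainties depend on value.ValueUncertainty, so
# no exact values are asserted here.
if __name__ == '__main__':
    line_start = create_point(0.0, 0.0)
    line_end = create_point(10.0, 0.0)
    p = create_point(4.0, 3.0)
    projected = closest_point_on_line(p, line_start, line_end)
    # The projection should land near (4, 0); None would mean it fell outside
    # the segment (contained=True is the default).
    print(projected)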
|
|
# -*- coding: utf-8 -*-
# Pykov is a Python package for the creation, manipulation and study of Markov
# Chains.
# Copyright (C) 2014 Riccardo Scalco
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Email: [email protected]
"""Pykov documentation.
.. module:: A Python module for finite Markov chains.
:platform: Unix, Windows, Mac
.. moduleauthor::
Riccardo Scalco <[email protected]>
"""
import random
import math
import six
import numpy
import scipy.sparse as ss
import scipy.sparse.linalg as ssl
__date__ = 'Nov 2014'
__version__ = 1.1
__license__ = 'GNU General Public License Version 3'
__authors__ = 'Riccardo Scalco'
__many_thanks_to__ = 'Sandra Steiner, Nicky Van Foreest, Adel Qalieh'
def _del_cache(fn):
"""
Delete cache.
"""
def wrapper(*args, **kwargs):
self = args[0]
try:
del(self._states)
except AttributeError:
pass
try:
del(self._succ)
except AttributeError:
pass
try:
del(self._pred)
except AttributeError:
pass
try:
del(self._steady)
except AttributeError:
pass
try:
del(self._guess)
except AttributeError:
pass
try:
del(self._fundamental_matrix)
except AttributeError:
pass
return fn(*args, **kwargs)
return wrapper
class PykovError(Exception):
"""
    Exception class for Pykov errors.
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class Vector(dict):
"""
"""
def __init__(self, data=None, **kwargs):
"""
>>> pykov.Vector({'A':.3, 'B':.7})
{'A':.3, 'B':.7}
>>> pykov.Vector(A=.3, B=.7)
{'A':.3, 'B':.7}
"""
if data:
self.update([item for item in six.iteritems(data)
if abs(item[1]) > numpy.finfo(numpy.float).eps])
if len(kwargs):
self.update([item for item in six.iteritems(kwargs)
if abs(item[1]) > numpy.finfo(numpy.float).eps])
def __getitem__(self, key):
"""
>>> q = pykov.Vector(C=.4, B=.6)
>>> q['C']
0.4
>>> q['Z']
0.0
"""
try:
return dict.__getitem__(self, key)
except KeyError:
return 0.0
def __setitem__(self, key, value):
"""
>>> q = pykov.Vector(C=.4, B=.6)
>>> q['Z']=.2
>>> q
{'C': 0.4, 'B': 0.6, 'Z': 0.2}
>>> q['Z']=0
>>> q
{'C': 0.4, 'B': 0.6}
"""
if abs(value) > numpy.finfo(numpy.float).eps:
dict.__setitem__(self, key, value)
elif key in self:
del(self[key])
def __mul__(self, M):
"""
>>> p = pykov.Vector(A=.3, B=.7)
>>> p * 3
{'A': 0.9, 'B': 2.1}
>>> q = pykov.Vector(C=.5, B=.5)
>>> p * q
0.35
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> p * T
{'A': 0.91, 'B': 0.09}
>>> T * p
{'A': 0.42, 'B': 0.3}
"""
if isinstance(M, int) or isinstance(M, float):
return self.__rmul__(M)
if isinstance(M, Matrix):
e2p, p2e = M._el2pos_()
x = self._toarray(e2p)
A = M._dok_(e2p).tocsr().transpose()
y = A.dot(x)
result = Vector()
result._fromarray(y, e2p)
return result
elif isinstance(M, Vector):
result = 0
for state, value in six.iteritems(self):
result += value * M[state]
return result
else:
raise TypeError('unsupported operand type(s) for *:' +
' \'Vector\' and ' + repr(type(M))[7:-1])
def __rmul__(self, M):
"""
>>> p = pykov.Vector(A=.3, B=.7)
>>> 3 * p
{'A': 0.9, 'B': 2.1}
"""
if isinstance(M, int) or isinstance(M, float):
result = Vector()
            for state, value in six.iteritems(self):
result[state] = value * M
return result
else:
raise TypeError('unsupported operand type(s) for *: ' +
repr(type(M))[7:-1] + ' and \'Vector\'')
def __add__(self, v):
"""
>>> p = pykov.Vector(A=.3, B=.7)
>>> q = pykov.Vector(C=.5, B=.5)
>>> p + q
{'A': 0.3, 'C': 0.5, 'B': 1.2}
"""
if isinstance(v, Vector):
result = Vector()
for state in set(six.iterkeys(self)) | set(v.keys()):
result[state] = self[state] + v[state]
return result
else:
raise TypeError('unsupported operand type(s) for +:' +
' \'Vector\' and ' + repr(type(v))[7:-1])
def __sub__(self, v):
"""
>>> p = pykov.Vector(A=.3, B=.7)
>>> q = pykov.Vector(C=.5, B=.5)
>>> p - q
{'A': 0.3, 'C': -0.5, 'B': 0.2}
>>> q - p
{'A': -0.3, 'C': 0.5, 'B': -0.2}
"""
if isinstance(v, Vector):
result = Vector()
for state in set(six.iterkeys(self)) | set(v.keys()):
result[state] = self[state] - v[state]
return result
else:
raise TypeError('unsupported operand type(s) for -:' +
' \'Vector\' and ' + repr(type(v))[7:-1])
def _toarray(self, el2pos):
"""
>>> p = pykov.Vector(A=.3, B=.7)
>>> el2pos = {'A': 1, 'B': 0}
>>> v = p._toarray(el2pos)
>>> v
array([ 0.7, 0.3])
"""
p = numpy.zeros(len(el2pos))
for key, value in six.iteritems(self):
p[el2pos[key]] = value
return p
def _fromarray(self, arr, el2pos):
"""
>>> p = pykov.Vector()
>>> el2pos = {'A': 1, 'B': 0}
>>> v = numpy.array([ 0.7, 0.3])
>>> p._fromarray(v,el2pos)
>>> p
{'A': 0.3, 'B': 0.7}
"""
for elem, pos in el2pos.items():
self[elem] = arr[pos]
return None
def sort(self, reverse=False):
"""
        Return the list of (state, probability) pairs sorted by probability.
>>> p = pykov.Vector({'A':.3, 'B':.1, 'C':.6})
>>> p.sort()
[('B', 0.1), ('A', 0.3), ('C', 0.6)]
>>> p.sort(reverse=True)
[('C', 0.6), ('A', 0.3), ('B', 0.1)]
"""
res = list(six.iteritems(self))
res.sort(key=lambda lst: lst[1], reverse=reverse)
return res
def normalize(self):
"""
        Normalize the vector so that its entries sum to 1.
>>> p = pykov.Vector({'A':3, 'B':1, 'C':6})
>>> p.normalize()
>>> p
{'A': 0.3, 'C': 0.6, 'B': 0.1}
"""
s = self.sum()
for k in six.iterkeys(self):
self[k] = self[k] / s
def choose(self):
"""
Choose a state according to its probability.
>>> p = pykov.Vector(A=.3, B=.7)
>>> p.choose()
'B'
.. seealso::
`Kevin Parks recipe <http://code.activestate.com/recipes/117241/>`_
"""
n = random.uniform(0, 1)
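        # Walk the cumulative distribution: subtract each state's probability
        # from the uniform draw until the draw falls inside a state's slice.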
for state, prob in six.iteritems(self):
if n < prob:
break
n = n - prob
return state
def entropy(self):
"""
Return the entropy.
.. math::
            H(p) = -\sum_i p_i \ln p_i
.. seealso::
Khinchin, A. I.
Mathematical Foundations of Information Theory
Dover, 1957.
>>> p = pykov.Vector(A=.3, B=.7)
>>> p.entropy()
0.6108643020548935
"""
return -sum([v * math.log(v) for v in self.values()])
def relative_entropy(self, p):
"""
Return the Kullback-Leibler distance.
.. math::
d(q,p) = \sum_i q_i \ln (q_i/p_i)
.. note::
The Kullback-Leibler distance is not symmetric.
>>> p = pykov.Vector(A=.3, B=.7)
>>> q = pykov.Vector(A=.4, B=.6)
>>> p.relative_entropy(q)
0.02160085414354654
>>> q.relative_entropy(p)
0.022582421084357485
"""
states = set(six.iterkeys(self)) & set(p.keys())
return sum([self[s] * math.log(self[s] / p[s]) for s in states])
def copy(self):
"""
Return a shallow copy.
>>> p = pykov.Vector(A=.3, B=.7)
>>> q = p.copy()
>>> p['C'] = 1.
>>> q
{'A': 0.3, 'B': 0.7}
"""
return Vector(self)
def sum(self):
"""
Sum the values.
>>> p = pykov.Vector(A=.3, B=.7)
>>> p.sum()
1.0
"""
return float(sum(self.values()))
def dist(self, v):
"""
Return the distance between the two probability vectors.
.. math::
d(q,p) = \sum_i |q_i - p_i|
>>> p = pykov.Vector(A=.3, B=.7)
>>> q = pykov.Vector(C=.5, B=.5)
>>> q.dist(p)
1.0
"""
if isinstance(v, Vector):
result = 0
for state in set(six.iterkeys(self)) | set(v.keys()):
result += abs(v[state] - self[state])
return result
class Matrix(dict):
"""
"""
def __init__(self, data=None):
"""
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
"""
if data:
self.update([item for item in six.iteritems(data)
if abs(item[1]) > numpy.finfo(numpy.float).eps])
def __getitem__(self, *args):
"""
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T[('A','B')]
0.3
>>> T['A','B']
0.3
        >>> T['A','C']
0.0
"""
try:
return dict.__getitem__(self, args[0])
except KeyError:
return 0.0
@_del_cache
def __setitem__(self, key, value):
"""
>>> T = pykov.Matrix()
>>> T[('A','B')] = .3
>>> T
{('A', 'B'): 0.3}
>>> T['A','A'] = .7
>>> T
{('A', 'B'): 0.3, ('A', 'A'): 0.7}
>>> T['B','B'] = 0
>>> T
{('A', 'B'): 0.3, ('A', 'A'): 0.7}
>>> T['A','A'] = 0
>>> T
{('A', 'B'): 0.3}
>>> T = pykov.Matrix({('A','B'): 3, ('A','A'): 7, ('B','A'): .1})
>>> T.states()
{'A', 'B'}
>>> T['A','C']=1
>>> T.states()
{'A', 'B', 'C'}
>>> T['A','C']=0
>>> T.states()
{'A', 'B'}
"""
if abs(value) > numpy.finfo(numpy.float).eps:
dict.__setitem__(self, key, value)
elif key in self:
del(self[key])
@_del_cache
def __delitem__(self, key):
"""
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> del(T['B', 'A'])
>>> T
{('A', 'B'): 0.3, ('A', 'A'): 0.7}
"""
dict.__delitem__(self, key)
@_del_cache
def pop(self, key):
"""
Remove specified key and return the corresponding value.
See: help(dict.pop)
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.pop(('A','B'))
0.3
>>> T
{('B', 'A'): 1.0, ('A', 'A'): 0.7}
"""
return dict.pop(self, key)
@_del_cache
def popitem(self):
"""
Remove and return some (key, value) pair as a 2-tuple.
See: help(dict.popitem)
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.popitem()
(('B', 'A'), 1.0)
>>> T
{('A', 'B'): 0.3, ('A', 'A'): 0.7}
"""
return dict.popitem(self)
@_del_cache
def clear(self):
"""
Remove all keys.
See: help(dict.clear)
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.clear()
>>> T
{}
"""
dict.clear(self)
@_del_cache
def update(self, other):
"""
Update with keys and their values present in other.
See: help(dict.update)
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> d = {('B', 'C'):2}
>>> T.update(d)
>>> T
{('B', 'A'): 1.0, ('B', 'C'): 2, ('A', 'B'): 0.3, ('A', 'A'): 0.7}
"""
dict.update(self, other)
@_del_cache
def setdefault(self, k, *args):
"""
See: help(dict.setdefault)
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.setdefault(('A','A'),1)
0.7
>>> T
{('B', 'A'): 1.0, ('A', 'B'): 0.3, ('A', 'A'): 0.7}
>>> T.setdefault(('A','C'),1)
1
>>> T
{('B', 'A'): 1.0, ('A', 'B'): 0.3, ('A', 'A'): 0.7, ('A', 'C'): 1}
"""
return dict.setdefault(self, k, *args)
def copy(self):
"""
Return a shallow copy.
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> W = T.copy()
>>> T[('B','B')] = 1.
>>> W
{('B', 'A'): 1.0, ('A', 'B'): 0.3, ('A', 'A'): 0.7}
"""
return Matrix(self)
def _dok_(self, el2pos, method=''):
"""
"""
m = len(el2pos)
S = ss.dok_matrix((m, m))
if method == '':
for k, v in six.iteritems(self):
i = el2pos[k[0]]
j = el2pos[k[1]]
S[i, j] = float(v)
elif method == 'transpose':
for k, v in six.iteritems(self):
i = el2pos[k[0]]
j = el2pos[k[1]]
S[j, i] = float(v)
return S
def _from_dok_(self, mat, pos2el):
"""
"""
for ii, val in mat.items():
self[pos2el[ii[0]], pos2el[ii[1]]] = val
return None
def _numpy_mat(self, el2pos):
"""
Return a numpy.matrix object from a dictionary.
-- Parameters --
t_ij : the dict, values must be real numbers, keys should be tuples of
two strings.
el2pos : see _map()
"""
m = len(el2pos)
T = numpy.matrix(numpy.zeros((m, m)))
for k, v in six.iteritems(self):
T[el2pos[k[0]], el2pos[k[1]]] = v
return T
def _from_numpy_mat(self, T, pos2el):
"""
Return a dictionary from a numpy.matrix object.
-- Parameters --
T : the numpy.matrix.
pos2el : see _map()
"""
for i in range(len(T)):
for j in range(len(T)):
if T[i, j]:
self[(pos2el[i], pos2el[j])] = T[i, j]
return None
def _el2pos_(self):
"""
"""
el2pos = {}
pos2el = {}
for pos, element in enumerate(list(self.states())):
el2pos[element] = pos
pos2el[pos] = element
return el2pos, pos2el
def stochastic(self):
"""
Make a right stochastic matrix.
Set the sum of every row equal to one,
raise ``PykovError`` if it is not possible.
>>> T = pykov.Matrix({('A','B'): 3, ('A','A'): 7, ('B','A'): .2})
>>> T.stochastic()
>>> T
{('B', 'A'): 1.0, ('A', 'B'): 0.3, ('A', 'A'): 0.7}
>>> T[('A','C')]=1
>>> T.stochastic()
        pykov.PykovError: 'Zero links from state C'
"""
s = {}
for k, v in self.succ().items():
summ = float(sum(v.values()))
if summ:
s[k] = summ
else:
raise PykovError('Zero links from state ' + k)
for k in six.iterkeys(self):
self[k] = self[k] / s[k[0]]
def pred(self, key=None):
"""
        Return the predecessors of a state (if not indicated, of all states).
        In matrix notation: return the column of the indicated state.
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.pred()
{'A': {'A': 0.7, 'B': 1.0}, 'B': {'A': 0.3}}
>>> T.pred('A')
{'A': 0.7, 'B': 1.0}
"""
try:
if key is not None:
return self._pred[key]
else:
return self._pred
except AttributeError:
self._pred = dict([(state, Vector()) for state in self.states()])
for link, probability in six.iteritems(self):
self._pred[link[1]][link[0]] = probability
if key is not None:
return self._pred[key]
else:
return self._pred
def succ(self, key=None):
"""
Return the successors of a state (if not indicated, of all states).
In Matrix notation: return the row of the indicated state.
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.succ()
{'A': {'A': 0.7, 'B': 0.3}, 'B': {'A': 1.0}}
>>> T.succ('A')
{'A': 0.7, 'B': 0.3}
"""
try:
if key is not None:
return self._succ[key]
else:
return self._succ
except AttributeError:
self._succ = dict([(state, Vector()) for state in self.states()])
for link, probability in six.iteritems(self):
self._succ[link[0]][link[1]] = probability
if key is not None:
return self._succ[key]
else:
return self._succ
def remove(self, states):
"""
Return a copy of the Chain, without the indicated states.
.. warning::
            All the links where the states appear are deleted, so that in
            general the result will not be a stochastic matrix.
..
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.remove(['B'])
{('A', 'A'): 0.7}
>>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.,
('C','D'): .5, ('D','C'): 1., ('C','B'): .5})
>>> T.remove(['A','B'])
{('C', 'D'): 0.5, ('D', 'C'): 1.0}
"""
return Matrix(dict([(key, value) for key, value in six.iteritems(self) if
key[0] not in states and key[1] not in states]))
def states(self):
"""
Return the set of states.
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.states()
{'A', 'B'}
"""
try:
return self._states
except AttributeError:
self._states = set()
for link in six.iterkeys(self):
self._states.add(link[0])
self._states.add(link[1])
return self._states
def __mul__(self, v):
"""
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T * 3
{('B', 'A'): 3.0, ('A', 'B'): 0.9, ('A', 'A'): 2.1}
>>> p = pykov.Vector(A=.3, B=.7)
>>> T * p
{'A': 0.42, 'B': 0.3}
>>> W = pykov.Matrix({('N', 'M'): 0.5, ('M', 'N'): 0.7,
('M', 'M'): 0.3, ('O', 'N'): 0.5,
('O', 'O'): 0.5, ('N', 'O'): 0.5})
>>> W * W
{('N', 'M'): 0.15, ('M', 'N'): 0.21, ('M', 'O'): 0.35,
('M', 'M'): 0.44, ('O', 'M'): 0.25, ('O', 'N'): 0.25,
('O', 'O'): 0.5, ('N', 'O'): 0.25, ('N', 'N'): 0.6}
"""
if isinstance(v, Vector):
e2p, p2e = self._el2pos_()
x = v._toarray(e2p)
M = self._dok_(e2p).tocsr()
y = M.dot(x)
result = Vector()
result._fromarray(y, e2p)
return result
elif isinstance(v, Matrix):
e2p, p2e = self._el2pos_()
M = self._dok_(e2p).tocsr()
N = v._dok_(e2p).tocsr()
C = M.dot(N).todok()
if 'Chain' in repr(self.__class__):
res = Chain()
elif 'Matrix' in repr(self.__class__):
res = Matrix()
res._from_dok_(C, p2e)
return res
elif isinstance(v, int) or isinstance(v, float):
return Matrix(dict([(key, value * v) for key, value in
six.iteritems(self)]))
else:
raise TypeError('unsupported operand type(s) for *:' +
' \'Matrix\' and ' + repr(type(v))[7:-1])
def __rmul__(self, v):
"""
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> 3 * T
{('B', 'A'): 3.0, ('A', 'B'): 0.9, ('A', 'A'): 2.1}
"""
if isinstance(v, int) or isinstance(v, float):
return Matrix(dict([(key, value * v) for key, value in
six.iteritems(self)]))
else:
raise TypeError('unsupported operand type(s) for *:' +
' \'Matrix\' and ' + repr(type(v))[7:-1])
def __add__(self, M):
"""
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> I = pykov.Matrix({('A','A'):1, ('B','B'):1})
>>> T + I
{('B', 'A'): 1.0, ('A', 'B'): 0.3, ('A', 'A'): 1.7, ('B', 'B'): 1.0}
"""
if isinstance(M, Matrix):
result = Matrix()
for link in set(six.iterkeys(self)) | set(M.keys()):
result[link] = self[link] + M[link]
return result
else:
raise TypeError('unsupported operand type(s) for +:' +
' \'Matrix\' and ' + repr(type(M))[7:-1])
def __sub__(self, M):
"""
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> I = pykov.Matrix({('A','A'):1, ('B','B'):1})
>>> T - I
{('B', 'A'): 1.0, ('A', 'B'): 0.3, ('A', 'A'): -0.3, ('B', 'B'): -1}
"""
if isinstance(M, Matrix):
result = Matrix()
for link in set(six.iterkeys(self)) | set(M.keys()):
result[link] = self[link] - M[link]
return result
else:
raise TypeError('unsupported operand type(s) for -:' +
' \'Matrix\' and ' + repr(type(M))[7:-1])
def trace(self):
"""
Return the Matrix trace.
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.trace()
0.7
"""
return sum([self[k, k] for k in self.states()])
def eye(self):
"""
Return the Identity Matrix.
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.eye()
{('A', 'A'): 1., ('B', 'B'): 1.}
"""
return Matrix(dict([((state, state), 1.) for state in self.states()]))
def ones(self):
"""
Return a ``Vector`` instance with entries equal to one.
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.ones()
{'A': 1.0, 'B': 1.0}
"""
return Vector(dict([(state, 1.) for state in self.states()]))
def transpose(self):
"""
Return the transpose Matrix.
>>> T = pykov.Matrix({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.transpose()
{('B', 'A'): 0.3, ('A', 'B'): 1.0, ('A', 'A'): 0.7}
"""
return Matrix(dict([((key[1], key[0]), value) for key, value in
six.iteritems(self)]))
def _UMPFPACKSolve(self, b, x=None, method='UMFPACK_A'):
"""
        UMFPACK (Unsymmetric MultiFrontal PACKage)
Parameters
----------
method:
"UMFPACK_A" : \mathbf{A} x = b (default)
"UMFPACK_At" : \mathbf{A}^T x = b
References
----------
A column pre-ordering strategy for the unsymmetric-pattern multifrontal
method, T. A. Davis, ACM Transactions on Mathematical Software, vol 30,
no. 2, June 2004, pp. 165-195.
"""
e2p, p2e = self._el2pos_()
if method == "UMFPACK_At":
A = self._dok_(e2p).tocsr().transpose()
else:
A = self._dok_(e2p).tocsr()
bb = b._toarray(e2p)
x = ssl.spsolve(A, bb, use_umfpack=True)
res = Vector()
res._fromarray(x, e2p)
return res
class Chain(Matrix):
"""
"""
def move(self, state):
"""
Do one step from the indicated state, and return the final state.
>>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.move('A')
'B'
"""
return self.succ(state).choose()
def pow(self, p, n):
"""
Find the probability distribution after n steps, starting from an
initial ``Vector``.
>>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> p = pykov.Vector(A=1)
>>> T.pow(p,3)
{'A': 0.7629999999999999, 'B': 0.23699999999999996}
>>> p * T * T * T
{'A': 0.7629999999999999, 'B': 0.23699999999999996}
"""
e2p, p2e = self._el2pos_()
A = self._dok_(e2p, 'transpose').tocsr()
x = p._toarray(e2p)
for i in range(n):
y = A.dot(x)
x = y.copy()
res = Vector()
res._fromarray(y, e2p)
return res
def steady(self):
"""
With the assumption of ergodicity, return the steady state.
.. note::
Inverse iteration method (P is the Markov chain)
.. math::
Q = \mathbf{I} - P
Q^T x = e
e = (0,0,\dots,0,1)
..
..
.. seealso::
W. Stewart: Introduction to the Numerical Solution of Markov Chains,
Princeton University Press, Chichester, West Sussex, 1994.
>>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.steady()
{'A': 0.7692307692307676, 'B': 0.23076923076923028}
"""
try:
return self._steady
except AttributeError:
e2p, p2e = self._el2pos_()
m = len(e2p)
P = self._dok_(e2p).tocsr()
Q = ss.eye(m, format='csr') - P
e = numpy.zeros(m)
e[-1] = 1.
Q = Q.transpose()
            # Perturb Q[0, 0] slightly to avoid a singular-matrix error (not elegant).
Q[0, 0] = Q[0, 0] + _machineEpsilon()
x = ssl.spsolve(Q, e, use_umfpack=True)
x = x/sum(x)
res = Vector()
res._fromarray(x, e2p)
self._steady = res
return res
def entropy(self, p=None, norm=False):
"""
Return the ``Chain`` entropy, calculated with the indicated probability
Vector (the steady state by default).
.. math::
            H_i = -\sum_j P_{ij} \ln P_{ij}
            H = \sum_i \pi_i H_i
.. seealso::
Khinchin, A. I.
Mathematical Foundations of Information Theory
Dover, 1957.
>>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.entropy()
0.46989561696530169
        With normalization, the entropy belongs to [0, 1]:
>>> T.entropy(norm=True)
0.33895603665233132
"""
if not p:
p = self.steady()
H = 0.
for state in self.states():
H += p[state] * sum([v * math.log(v) for v in
self.succ(state).values()])
if norm:
n = len(self.states())
return -H / (n * math.log(n))
return -H
def mfpt_to(self, state):
"""
Return the Mean First Passage Times of every state to the indicated
state.
.. seealso::
Kemeny J. G.; Snell, J. L.
Finite Markov Chains.
Springer-Verlag: New York, 1976.
>>> d = {('R', 'N'): 0.25, ('R', 'S'): 0.25, ('S', 'R'): 0.25,
('R', 'R'): 0.5, ('N', 'S'): 0.5, ('S', 'S'): 0.5,
('S', 'N'): 0.25, ('N', 'R'): 0.5, ('N', 'N'): 0.0}
>>> T = pykov.Chain(d)
>>> T.mfpt_to('R')
{'S': 3.333333333333333, 'N': 2.666666666666667}
"""
if len(self.states()) == 2:
self.states().remove(state)
other = self.states().pop()
self.states().add(state)
self.states().add(other)
return Vector({other: 1. / self[other, state]})
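        # General case (Kemeny & Snell): solve (I - Q) m = 1, where Q is the
        # chain restricted to all states except the target state.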
T = self.remove([state])
T = T.eye() - T
return T._UMPFPACKSolve(T.ones())
def adjacency(self):
"""
Return the adjacency matrix.
>>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.adjacency()
{('B', 'A'): 1, ('A', 'B'): 1, ('A', 'A'): 1}
"""
return Matrix(dict.fromkeys(self, 1))
def walk(self, steps, start=None, stop=None):
"""
Return a random walk of n steps, starting and stopping at the
indicated states.
.. note::
If not indicated, then the starting state is chosen according
to its steady probability.
            If the stopping state is reached before n steps are taken, then
            the walk stops early.
>>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.walk(10)
['B', 'A', 'B', 'A', 'A', 'B', 'A', 'A', 'A', 'B', 'A']
>>> T.walk(10,'B','B')
['B', 'A', 'A', 'A', 'A', 'A', 'B']
"""
if not start:
start = self.steady().choose()
if not stop:
result = [start]
for i in range(steps):
result.append(self.move(result[-1]))
return result
if stop:
result = [start]
for i in range(steps):
result.append(self.move(result[-1]))
if result[-1] == stop:
return result
return result
def walk_probability(self, walk):
"""
Given a walk, return the log of its probability.
>>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.walk_probability(['A','A','B','A','A'])
-1.917322692203401
        >>> math.exp(-1.917322692203401)
0.147
>>> p = T.walk_probability(['A','B','B','B','A'])
>>> math.exp(p)
0.0
"""
res = 0
for step in zip(walk[:-1], walk[1:]):
if not self[step]:
return -float('Inf')
res += math.log(self[step])
return res
def mixing_time(self, cutoff=.25, jump=1, p=None):
"""
Return the mixing time.
If the initial distribution (p) is not indicated,
        then it is set to p={'least probable state': 1}.
.. note::
The mixing time is calculated here as the number of steps (n) needed to
have
.. math::
|p(n)-\pi| < 0.25
p(n)=p P^n
\pi=\pi P
..
The parameter ``jump`` controls the iteration step, for example with
``jump=2`` n has values 2,4,6,8,..
..
>>> d = {('R','R'):1./2, ('R','N'):1./4, ('R','S'):1./4,
('N','R'):1./2, ('N','N'):0., ('N','S'):1./2,
('S','R'):1./4, ('S','N'):1./4, ('S','S'):1./2}
>>> T = pykov.Chain(d)
>>> T.mixing_time()
2
"""
res = []
d = 1
n = 0
if not p:
p = Vector({self.steady().sort()[0][0]: 1})
res.append(p.dist(self.steady()))
while d > cutoff:
n = n + jump
p = self.pow(p, jump)
d = p.dist(self.steady())
res.append(d)
return n
def absorbing_time(self, transient_set):
"""
Mean number of steps needed to leave the transient set.
        Return the ``Vector tau``, where ``tau[i]`` is the mean number of steps needed
to leave the transient set starting from state ``i``. The parameter
``transient_set`` is a subset of nodes.
.. note::
If the starting point is a ``Vector p``, then it is sufficient to
            calculate ``p * tau`` in order to weight the mean times according to
            the initial conditions.
        .. seealso::
Kemeny J. G.; Snell, J. L.
Finite Markov Chains.
Springer-Verlag: New York, 1976.
>>> d = {('R','R'):1./2, ('R','N'):1./4, ('R','S'):1./4,
('N','R'):1./2, ('N','N'):0., ('N','S'):1./2,
('S','R'):1./4, ('S','N'):1./4, ('S','S'):1./2}
>>> T = pykov.Chain(d)
>>> p = pykov.Vector({'N':.3, 'S':.7})
>>> tau = T.absorbing_time(p.keys())
>>> p * tau
3.1333333333333329
"""
Q = self.remove(self.states() - set(transient_set))
K = Q.eye() - Q
# means
tau = K._UMPFPACKSolve(K.ones())
return tau
def absorbing_tour(self, p, transient_set=None):
"""
        Return a ``Vector v``, where ``v[i]`` is the mean of the total number of
        times the process is in a given transient state ``i`` before leaving the
        transient set.
.. note::
``v.sum()`` is equal to ``p * tau`` (see :meth:`absorbing_time` method).
            If not specified, the ``transient set`` is defined
            by means of the keys of the ``Vector p``.
.. seealso::
Kemeny J. G.; Snell, J. L.
Finite Markov Chains.
Springer-Verlag: New York, 1976.
>>> d = {('R','R'):1./2, ('R','N'):1./4, ('R','S'):1./4,
('N','R'):1./2, ('N','N'):0., ('N','S'):1./2,
('S','R'):1./4, ('S','N'):1./4, ('S','S'):1./2}
>>> T = pykov.Chain(d)
>>> p = pykov.Vector({'N':.3, 'S':.7})
>>> T.absorbing_tour(p)
{'S': 2.2666666666666666, 'N': 0.8666666666666669}
"""
if transient_set:
Q = self.remove(self.states() - transient_set)
else:
Q = self.remove(self.states() - set(p.keys()))
K = Q.eye() - Q
return K._UMPFPACKSolve(p, method='UMFPACK_At')
def fundamental_matrix(self):
"""
Return the fundamental matrix.
.. seealso::
Kemeny J. G.; Snell, J. L.
Finite Markov Chains.
Springer-Verlag: New York, 1976.
>>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.fundamental_matrix()
{('B', 'A'): 0.17751479289940991, ('A', 'B'): 0.053254437869822958,
('A', 'A'): 0.94674556213017902, ('B', 'B'): 0.82248520710059214}
"""
try:
return self._fundamental_matrix
except AttributeError:
el2pos, pos2el = self._el2pos_()
p = self.steady()._toarray(el2pos)
P = self._numpy_mat(el2pos)
d = len(p)
A = numpy.matrix([p for i in range(d)])
I = numpy.matrix(numpy.identity(d))
E = numpy.matrix(numpy.ones((d, d)))
D = numpy.zeros((d, d))
diag = 1. / p
for pos, val in enumerate(diag):
D[pos, pos] = val
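            # Kemeny & Snell fundamental matrix: Z = (I - P + A)^-1, where
            # every row of A is the steady-state distribution.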
Z = numpy.linalg.inv(I - P + A)
res = Matrix()
res._from_numpy_mat(Z, pos2el)
self._fundamental_matrix = res
return res
def kemeny_constant(self):
"""
Return the Kemeny constant of the transition matrix.
>>> T = pykov.Chain({('A','B'): .3, ('A','A'): .7, ('B','A'): 1.})
>>> T.kemeny_constant()
1.7692307692307712
"""
Z = self.fundamental_matrix()
return Z.trace()
def readmat(filename):
"""
Read an external file and return a Chain.
The file must be of the form:
A A .7
A B .3
B A 1
Example
-------
>>> P = pykov.readmat('/mypath/mat')
>>> P
{('B', 'A'): 1.0, ('A', 'B'): 0.3, ('A', 'A'): 0.7}
"""
with open(filename) as f:
P = Chain()
for line in f:
tmp = line.split()
P[(tmp[0], tmp[1])] = float(tmp[2])
return P
def readtrj(filename):
"""
    When the :class:`Chain` instance must be created from a finite chain
of states, the transition matrix is not fully defined.
The function defines the transition probabilities as the maximum likelihood
probabilities calculated along the chain. Having the file ``/mypath/trj``
with the following format::
1
1
1
2
1
3
the :class:`Chain` instance defined from that chain is:
>>> t = pykov.readtrj('/mypath/trj')
>>> t
(1, 1, 1, 2, 1, 3)
>>> p, P = maximum_likelihood_probabilities(t,lag_time=1, separator='0')
>>> p
{1: 0.6666666666666666, 2: 0.16666666666666666, 3: 0.16666666666666666}
>>> P
{(1, 2): 0.25, (1, 3): 0.25, (1, 1): 0.5, (2, 1): 1.0, (3, 3): 1.0}
>>> type(P)
<class 'pykov.Chain'>
>>> type(p)
<class 'pykov.Vector'>
"""
with open(filename) as f:
return tuple(line.strip() for line in f)
def _writefile(mylist, filename):
"""
    Export the list to a file.
    ``mylist`` may be a list of lists.
Example
-------
>>> L = [[2,3],[4,5]]
    >>> pykov._writefile(L,'tmp')
    >>> l = [1,2]
    >>> pykov._writefile(l,'tmp')
"""
try:
L = [[str(i) for i in line] for line in mylist]
except TypeError:
L = [str(i) for i in mylist]
with open(filename, mode='w') as f:
tmp = '\n'.join('\t'.join(x) for x in L)
f.write(tmp)
return None
def transitions(trj, nsteps=1, lag_time=1, separator='0'):
"""
Return the temporal list of transitions observed.
Parameters
----------
trj : the symbolic trajectory.
nsteps : number of steps.
lag_time : step length.
separator: the special symbol indicating the presence of sub-trajectories.
Example
-------
>>> trj = [1,2,1,0,2,3,1,0,2,3,2,3,1,2,3]
>>> pykov.transitions(trj,1,1,0)
[(1, 2), (2, 1), (2, 3), (3, 1), (2, 3), (3, 2), (2, 3), (3, 1), (1, 2),
(2, 3)]
>>> pykov.transitions(trj,1,2,0)
[(1, 1), (2, 1), (2, 2), (3, 3), (2, 1), (3, 2), (1, 3)]
>>> pykov.transitions(trj,2,2,0)
[(2, 2, 1), (3, 3, 2), (2, 1, 3)]
"""
result = []
for pos in range(len(trj) - nsteps * lag_time):
if separator not in trj[pos:(pos + nsteps * lag_time + 1)]:
tmp = trj[pos:(pos + nsteps * lag_time + 1):lag_time]
result.append(tuple(tmp))
return result
def maximum_likelihood_probabilities(trj, lag_time=1, separator='0'):
"""
Return a Chain calculated by means of maximum likelihood probabilities.
Return two objects:
p : a Vector object, the probability distribution over the nodes.
T : a Chain object, the Markov chain.
Parameters
----------
trj : the symbolic trajectory.
lag_time : number of steps defining a transition.
separator: the special symbol indicating the presence of sub-trajectories.
Example
-------
>>> t = [1,2,3,2,3,2,1,2,2,3,3,2]
>>> p, T = pykov.maximum_likelihood_probabilities(t)
>>> p
{1: 0.18181818181818182, 2: 0.4545454545454546, 3: 0.36363636363636365}
>>> T
{(1, 2): 1.0, (3, 2): 0.7499999999999999, (2, 3): 0.5999999999999999, (3,
3): 0.25, (2, 2): 0.19999999999999998, (2, 1): 0.19999999999999998}
"""
q_ij = {}
tr = transitions(trj, nsteps=1, lag_time=lag_time, separator=separator)
_remove_dead_branch(tr)
tot = len(tr)
for step in tr:
q_ij[step] = q_ij.get(step, 0.) + 1
for key in q_ij.keys():
q_ij[key] = q_ij[key] / tot
p_i = {}
for k, v in q_ij.items():
p_i[k[0]] = p_i.get(k[0], 0) + v
t_ij = {}
for k, v in q_ij.items():
t_ij[k] = v / p_i[k[0]]
T = Chain(t_ij)
p = Vector(p_i)
T._guess = Vector(p_i)
return p, T
def _remove_dead_branch(transitions_list):
"""
    Remove dead branches by inserting a self-loop in every node that has no
    outgoing links.
Example
-------
>>> trj = [1,2,3,1,2,3,2,2,4,3,5]
>>> tr = pykov.transitions(trj, nsteps=1)
>>> tr
[(1, 2), (2, 3), (3, 1), (1, 2), (2, 3), (3, 2), (2, 2), (2, 4), (4, 3),
(3, 5)]
>>> pykov._remove_dead_branch(tr)
>>> tr
[(1, 2), (2, 3), (3, 1), (1, 2), (2, 3), (3, 2), (2, 2), (2, 4), (4, 3),
(3, 5), (5, 5)]
"""
head_set = set()
tail_set = set()
for step in transitions_list:
head_set.add(step[1])
tail_set.add(step[0])
for head in head_set:
if head not in tail_set:
transitions_list.append((head, head))
return None
def _machineEpsilon(func=float):
"""
    Should give the same result as numpy.finfo(numpy.float).eps.
"""
machine_epsilon = func(1)
while func(1) + func(machine_epsilon) != func(1):
machine_epsilon_last = machine_epsilon
machine_epsilon = func(machine_epsilon) / func(2)
return machine_epsilon_last
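
# Minimal sanity check, guarded so it only runs when this module is executed
# directly: the computed epsilon should agree with numpy's double-precision
# epsilon (about 2.22e-16).
if __name__ == '__main__':
    print(_machineEpsilon(float))
    print(numpy.finfo(float).eps)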
|
|
"""Test functions for matrix module
"""
from __future__ import division, absolute_import, print_function
from numpy.testing import (
TestCase, run_module_suite, assert_equal, assert_array_equal,
assert_array_max_ulp, assert_array_almost_equal, assert_raises, rand,
)
from numpy import (
arange, rot90, add, fliplr, flipud, zeros, ones, eye, array, diag,
histogram2d, tri, mask_indices, triu_indices, triu_indices_from,
tril_indices, tril_indices_from, vander,
)
import numpy as np
from numpy.compat import asbytes_nested
def get_mat(n):
data = arange(n)
data = add.outer(data, data)
return data
class TestEye(TestCase):
def test_basic(self):
assert_equal(eye(4),
array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]))
assert_equal(eye(4, dtype='f'),
array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]], 'f'))
assert_equal(eye(3) == 1,
eye(3, dtype=bool))
def test_diag(self):
assert_equal(eye(4, k=1),
array([[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]]))
assert_equal(eye(4, k=-1),
array([[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]]))
def test_2d(self):
assert_equal(eye(4, 3),
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]]))
assert_equal(eye(3, 4),
array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0]]))
def test_diag2d(self):
assert_equal(eye(3, 4, k=2),
array([[0, 0, 1, 0],
[0, 0, 0, 1],
[0, 0, 0, 0]]))
assert_equal(eye(4, 3, k=-2),
array([[0, 0, 0],
[0, 0, 0],
[1, 0, 0],
[0, 1, 0]]))
def test_eye_bounds(self):
assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]])
assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]])
assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]])
assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]])
assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]])
assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]])
assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]])
assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]])
assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]])
def test_strings(self):
assert_equal(eye(2, 2, dtype='S3'),
asbytes_nested([['1', ''], ['', '1']]))
def test_bool(self):
assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])
class TestDiag(TestCase):
def test_vector(self):
vals = (100 * arange(5)).astype('l')
b = zeros((5, 5))
for k in range(5):
b[k, k] = vals[k]
assert_equal(diag(vals), b)
b = zeros((7, 7))
c = b.copy()
for k in range(5):
b[k, k + 2] = vals[k]
c[k + 2, k] = vals[k]
assert_equal(diag(vals, k=2), b)
assert_equal(diag(vals, k=-2), c)
def test_matrix(self, vals=None):
if vals is None:
vals = (100 * get_mat(5) + 1).astype('l')
b = zeros((5,))
for k in range(5):
b[k] = vals[k, k]
assert_equal(diag(vals), b)
b = b * 0
for k in range(3):
b[k] = vals[k, k + 2]
assert_equal(diag(vals, 2), b[:3])
for k in range(3):
b[k] = vals[k + 2, k]
assert_equal(diag(vals, -2), b[:3])
def test_fortran_order(self):
vals = array((100 * get_mat(5) + 1), order='F', dtype='l')
self.test_matrix(vals)
def test_diag_bounds(self):
A = [[1, 2], [3, 4], [5, 6]]
assert_equal(diag(A, k=2), [])
assert_equal(diag(A, k=1), [2])
assert_equal(diag(A, k=0), [1, 4])
assert_equal(diag(A, k=-1), [3, 6])
assert_equal(diag(A, k=-2), [5])
assert_equal(diag(A, k=-3), [])
def test_failure(self):
self.assertRaises(ValueError, diag, [[[1]]])
class TestFliplr(TestCase):
def test_basic(self):
self.assertRaises(ValueError, fliplr, ones(4))
a = get_mat(4)
b = a[:, ::-1]
assert_equal(fliplr(a), b)
a = [[0, 1, 2],
[3, 4, 5]]
b = [[2, 1, 0],
[5, 4, 3]]
assert_equal(fliplr(a), b)
class TestFlipud(TestCase):
def test_basic(self):
a = get_mat(4)
b = a[::-1, :]
assert_equal(flipud(a), b)
a = [[0, 1, 2],
[3, 4, 5]]
b = [[3, 4, 5],
[0, 1, 2]]
assert_equal(flipud(a), b)
class TestRot90(TestCase):
def test_basic(self):
self.assertRaises(ValueError, rot90, ones(4))
a = [[0, 1, 2],
[3, 4, 5]]
b1 = [[2, 5],
[1, 4],
[0, 3]]
b2 = [[5, 4, 3],
[2, 1, 0]]
b3 = [[3, 0],
[4, 1],
[5, 2]]
b4 = [[0, 1, 2],
[3, 4, 5]]
for k in range(-3, 13, 4):
assert_equal(rot90(a, k=k), b1)
for k in range(-2, 13, 4):
assert_equal(rot90(a, k=k), b2)
for k in range(-1, 13, 4):
assert_equal(rot90(a, k=k), b3)
for k in range(0, 13, 4):
assert_equal(rot90(a, k=k), b4)
def test_axes(self):
a = ones((50, 40, 3))
assert_equal(rot90(a).shape, (40, 50, 3))
class TestHistogram2d(TestCase):
def test_simple(self):
x = array(
[0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891])
y = array(
[0.09233859, 0.18626021, 0.34556073, 0.39676747, 0.53881673])
xedges = np.linspace(0, 1, 10)
yedges = np.linspace(0, 1, 10)
H = histogram2d(x, y, (xedges, yedges))[0]
answer = array(
[[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
assert_array_equal(H.T, answer)
H = histogram2d(x, y, xedges)[0]
assert_array_equal(H.T, answer)
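        # With identical integer samples and default binning (10 bins spanning
        # 0..9 on each axis), each (i, i) pair lands in its own diagonal bin,
        # so the histogram is the 10x10 identity matrix.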
H, xedges, yedges = histogram2d(list(range(10)), list(range(10)))
assert_array_equal(H, eye(10, 10))
assert_array_equal(xedges, np.linspace(0, 9, 11))
assert_array_equal(yedges, np.linspace(0, 9, 11))
def test_asym(self):
x = array([1, 1, 2, 3, 4, 4, 4, 5])
y = array([1, 3, 2, 0, 1, 2, 3, 4])
H, xed, yed = histogram2d(
x, y, (6, 5), range=[[0, 6], [0, 5]], normed=True)
answer = array(
[[0., 0, 0, 0, 0],
[0, 1, 0, 1, 0],
[0, 0, 1, 0, 0],
[1, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 1]])
assert_array_almost_equal(H, answer/8., 3)
assert_array_equal(xed, np.linspace(0, 6, 7))
assert_array_equal(yed, np.linspace(0, 5, 6))
def test_norm(self):
x = array([1, 2, 3, 1, 2, 3, 1, 2, 3])
y = array([1, 1, 1, 2, 2, 2, 3, 3, 3])
H, xed, yed = histogram2d(
x, y, [[1, 2, 3, 5], [1, 2, 3, 5]], normed=True)
answer = array([[1, 1, .5],
[1, 1, .5],
[.5, .5, .25]])/9.
assert_array_almost_equal(H, answer, 3)
def test_all_outliers(self):
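        # All samples sit near 1e6, far outside the requested [0, 1] range,
        # so every bin count must be zero.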
r = rand(100) + 1. + 1e6 # histogramdd rounds by decimal=6
H, xed, yed = histogram2d(r, r, (4, 5), range=([0, 1], [0, 1]))
assert_array_equal(H, 0)
def test_empty(self):
a, edge1, edge2 = histogram2d([], [], bins=([0, 1], [0, 1]))
assert_array_max_ulp(a, array([[0.]]))
a, edge1, edge2 = histogram2d([], [], bins=4)
assert_array_max_ulp(a, np.zeros((4, 4)))
def test_binparameter_combination(self):
x = array(
[0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
0.59944483, 1])
y = array(
[0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
0.15886423, 1])
edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
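        # Mix explicit bin edges on one axis with a plain bin count on the
        # other, in both orders.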
H, xe, ye = histogram2d(x, y, (edges, 4))
answer = array(
[[ 2., 0., 0., 0.],
[ 0., 1., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 1., 0., 0.],
[ 1., 0., 0., 0.],
[ 0., 1., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 0.],
[ 0., 0., 0., 1.]])
assert_array_equal(H, answer)
assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1]))
H, xe, ye = histogram2d(x, y, (4, edges))
answer = array(
[[ 1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
assert_array_equal(H, answer)
assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
class TestTri(TestCase):
def test_dtype(self):
out = array([[1, 0, 0],
[1, 1, 0],
[1, 1, 1]])
assert_array_equal(tri(3), out)
assert_array_equal(tri(3, dtype=bool), out.astype(bool))
def test_tril_triu_ndim2():
for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
a = np.ones((2, 2), dtype=dtype)
b = np.tril(a)
c = np.triu(a)
yield assert_array_equal, b, [[1, 0], [1, 1]]
yield assert_array_equal, c, b.T
# should return the same dtype as the original array
yield assert_equal, b.dtype, a.dtype
yield assert_equal, c.dtype, a.dtype
def test_tril_triu_ndim3():
for dtype in np.typecodes['AllFloat'] + np.typecodes['AllInteger']:
a = np.array([
[[1, 1], [1, 1]],
[[1, 1], [1, 0]],
[[1, 1], [0, 0]],
], dtype=dtype)
a_tril_desired = np.array([
[[1, 0], [1, 1]],
[[1, 0], [1, 0]],
[[1, 0], [0, 0]],
], dtype=dtype)
a_triu_desired = np.array([
[[1, 1], [0, 1]],
[[1, 1], [0, 0]],
[[1, 1], [0, 0]],
], dtype=dtype)
a_triu_observed = np.triu(a)
a_tril_observed = np.tril(a)
yield assert_array_equal, a_triu_observed, a_triu_desired
yield assert_array_equal, a_tril_observed, a_tril_desired
yield assert_equal, a_triu_observed.dtype, a.dtype
yield assert_equal, a_tril_observed.dtype, a.dtype
def test_tril_triu_with_inf():
# Issue 4859
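    # The zeroed-out triangle must not corrupt the inf values kept in the
    # other triangle (a naive 0/1-mask multiplication would give 0 * inf = nan).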
arr = np.array([[1, 1, np.inf],
[1, 1, 1],
[np.inf, 1, 1]])
out_tril = np.array([[1, 0, 0],
[1, 1, 0],
[np.inf, 1, 1]])
out_triu = out_tril.T
assert_array_equal(np.triu(arr), out_triu)
assert_array_equal(np.tril(arr), out_tril)
def test_tril_triu_dtype():
# Issue 4916
# tril and triu should return the same dtype as input
for c in np.typecodes['All']:
if c == 'V':
continue
arr = np.zeros((3, 3), dtype=c)
assert_equal(np.triu(arr).dtype, arr.dtype)
assert_equal(np.tril(arr).dtype, arr.dtype)
# check special cases
arr = np.array([['2001-01-01T12:00', '2002-02-03T13:56'],
['2004-01-01T12:00', '2003-01-03T13:45']],
dtype='datetime64')
assert_equal(np.triu(arr).dtype, arr.dtype)
assert_equal(np.tril(arr).dtype, arr.dtype)
    arr = np.zeros((3, 3), dtype='f4,f4')
assert_equal(np.triu(arr).dtype, arr.dtype)
assert_equal(np.tril(arr).dtype, arr.dtype)
def test_mask_indices():
# simple test without offset
iu = mask_indices(3, np.triu)
a = np.arange(9).reshape(3, 3)
yield (assert_array_equal, a[iu], array([0, 1, 2, 4, 5, 8]))
# Now with an offset
iu1 = mask_indices(3, np.triu, 1)
yield (assert_array_equal, a[iu1], array([1, 2, 5]))
def test_tril_indices():
# indices without and with offset
il1 = tril_indices(4)
il2 = tril_indices(4, k=2)
il3 = tril_indices(4, m=5)
il4 = tril_indices(4, k=2, m=5)
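    # il3 and il4 pass m=5 to exercise the rectangular (4 x 5) case, which is
    # indexed into b below.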
a = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
b = np.arange(1, 21).reshape(4, 5)
# indexing:
yield (assert_array_equal, a[il1],
array([1, 5, 6, 9, 10, 11, 13, 14, 15, 16]))
yield (assert_array_equal, b[il3],
array([1, 6, 7, 11, 12, 13, 16, 17, 18, 19]))
# And for assigning values:
a[il1] = -1
yield (assert_array_equal, a,
array([[-1, 2, 3, 4],
[-1, -1, 7, 8],
[-1, -1, -1, 12],
[-1, -1, -1, -1]]))
b[il3] = -1
yield (assert_array_equal, b,
array([[-1, 2, 3, 4, 5],
[-1, -1, 8, 9, 10],
[-1, -1, -1, 14, 15],
[-1, -1, -1, -1, 20]]))
# These cover almost the whole array (two diagonals right of the main one):
a[il2] = -10
yield (assert_array_equal, a,
array([[-10, -10, -10, 4],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]]))
b[il4] = -10
yield (assert_array_equal, b,
array([[-10, -10, -10, 4, 5],
[-10, -10, -10, -10, 10],
[-10, -10, -10, -10, -10],
[-10, -10, -10, -10, -10]]))
class TestTriuIndices(object):
def test_triu_indices(self):
iu1 = triu_indices(4)
iu2 = triu_indices(4, k=2)
iu3 = triu_indices(4, m=5)
iu4 = triu_indices(4, k=2, m=5)
a = np.array([[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16]])
b = np.arange(1, 21).reshape(4, 5)
# Both for indexing:
yield (assert_array_equal, a[iu1],
array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
yield (assert_array_equal, b[iu3],
array([1, 2, 3, 4, 5, 7, 8, 9, 10, 13, 14, 15, 19, 20]))
# And for assigning values:
a[iu1] = -1
yield (assert_array_equal, a,
array([[-1, -1, -1, -1],
[5, -1, -1, -1],
[9, 10, -1, -1],
[13, 14, 15, -1]]))
b[iu3] = -1
yield (assert_array_equal, b,
array([[-1, -1, -1, -1, -1],
[6, -1, -1, -1, -1],
[11, 12, -1, -1, -1],
[16, 17, 18, -1, -1]]))
# These cover almost the whole array (two diagonals right of the
# main one):
a[iu2] = -10
yield (assert_array_equal, a,
array([[-1, -1, -10, -10],
[5, -1, -1, -10],
[9, 10, -1, -1],
[13, 14, 15, -1]]))
b[iu4] = -10
yield (assert_array_equal, b,
array([[-1, -1, -10, -10, -10],
[6, -1, -1, -10, -10],
[11, 12, -1, -1, -10],
[16, 17, 18, -1, -1]]))
class TestTrilIndicesFrom(object):
def test_exceptions(self):
assert_raises(ValueError, tril_indices_from, np.ones((2,)))
assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2)))
# assert_raises(ValueError, tril_indices_from, np.ones((2, 3)))
class TestTriuIndicesFrom(object):
def test_exceptions(self):
assert_raises(ValueError, triu_indices_from, np.ones((2,)))
assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2)))
# assert_raises(ValueError, triu_indices_from, np.ones((2, 3)))
class TestVander(object):
def test_basic(self):
c = np.array([0, 1, -2, 3])
v = vander(c)
powers = np.array([[0, 0, 0, 0, 1],
[1, 1, 1, 1, 1],
[16, -8, 4, -2, 1],
[81, 27, 9, 3, 1]])
# Check default value of N:
yield (assert_array_equal, v, powers[:, 1:])
# Check a range of N values, including 0 and 5 (greater than default)
m = powers.shape[1]
for n in range(6):
v = vander(c, N=n)
yield (assert_array_equal, v, powers[:, m-n:m])
def test_dtypes(self):
c = array([11, -12, 13], dtype=np.int8)
v = vander(c)
expected = np.array([[121, 11, 1],
[144, -12, 1],
[169, 13, 1]])
yield (assert_array_equal, v, expected)
c = array([1.0+1j, 1.0-1j])
v = vander(c, N=3)
expected = np.array([[2j, 1+1j, 1],
[-2j, 1-1j, 1]])
# The data is floating point, but the values are small integers,
# so assert_array_equal *should* be safe here (rather than, say,
# assert_array_almost_equal).
yield (assert_array_equal, v, expected)
if __name__ == "__main__":
run_module_suite()
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for iSCSI deploy mechanism."""
import os
import tempfile
from ironic_lib import disk_utils
from ironic_lib import utils as ironic_utils
import mock
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils import uuidutils
from ironic.common import dhcp_factory
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import keystone
from ironic.common import pxe_utils
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.conductor import utils as manager_utils
from ironic.drivers.modules import agent_base_vendor
from ironic.drivers.modules import agent_client
from ironic.drivers.modules import deploy_utils
from ironic.drivers.modules import fake
from ironic.drivers.modules import iscsi_deploy
from ironic.drivers.modules import pxe
from ironic.tests.unit.conductor import mgr_utils
from ironic.tests.unit.db import base as db_base
from ironic.tests.unit.db import utils as db_utils
from ironic.tests.unit.objects import utils as obj_utils
CONF = cfg.CONF
INST_INFO_DICT = db_utils.get_test_pxe_instance_info()
DRV_INFO_DICT = db_utils.get_test_pxe_driver_info()
DRV_INTERNAL_INFO_DICT = db_utils.get_test_pxe_driver_internal_info()
class IscsiDeployValidateParametersTestCase(db_base.DbTestCase):
def test_parse_instance_info_good(self):
# make sure we get back the expected things
node = obj_utils.create_test_node(
self.context, driver='fake_pxe',
instance_info=INST_INFO_DICT,
driver_internal_info=DRV_INTERNAL_INFO_DICT
)
info = iscsi_deploy.parse_instance_info(node)
self.assertIsNotNone(info.get('image_source'))
self.assertIsNotNone(info.get('root_gb'))
self.assertEqual(0, info.get('ephemeral_gb'))
self.assertIsNone(info.get('configdrive'))
def test_parse_instance_info_missing_instance_source(self):
# make sure error is raised when info is missing
info = dict(INST_INFO_DICT)
del info['image_source']
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
self.assertRaises(exception.MissingParameterValue,
iscsi_deploy.parse_instance_info,
node)
def test_parse_instance_info_missing_root_gb(self):
# make sure error is raised when info is missing
info = dict(INST_INFO_DICT)
del info['root_gb']
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
self.assertRaises(exception.MissingParameterValue,
iscsi_deploy.parse_instance_info,
node)
def test_parse_instance_info_invalid_root_gb(self):
info = dict(INST_INFO_DICT)
info['root_gb'] = 'foobar'
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
self.assertRaises(exception.InvalidParameterValue,
iscsi_deploy.parse_instance_info,
node)
def test_parse_instance_info_valid_ephemeral_gb(self):
ephemeral_gb = 10
ephemeral_fmt = 'test-fmt'
info = dict(INST_INFO_DICT)
info['ephemeral_gb'] = ephemeral_gb
info['ephemeral_format'] = ephemeral_fmt
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
data = iscsi_deploy.parse_instance_info(node)
self.assertEqual(ephemeral_gb, data.get('ephemeral_gb'))
self.assertEqual(ephemeral_fmt, data.get('ephemeral_format'))
def test_parse_instance_info_invalid_ephemeral_gb(self):
info = dict(INST_INFO_DICT)
info['ephemeral_gb'] = 'foobar'
info['ephemeral_format'] = 'exttest'
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
self.assertRaises(exception.InvalidParameterValue,
iscsi_deploy.parse_instance_info,
node)
def test_parse_instance_info_valid_ephemeral_missing_format(self):
ephemeral_gb = 10
ephemeral_fmt = 'test-fmt'
info = dict(INST_INFO_DICT)
info['ephemeral_gb'] = ephemeral_gb
info['ephemeral_format'] = None
self.config(default_ephemeral_format=ephemeral_fmt, group='pxe')
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
instance_info = iscsi_deploy.parse_instance_info(node)
self.assertEqual(ephemeral_fmt, instance_info['ephemeral_format'])
def test_parse_instance_info_valid_preserve_ephemeral_true(self):
info = dict(INST_INFO_DICT)
for opt in ['true', 'TRUE', 'True', 't',
'on', 'yes', 'y', '1']:
info['preserve_ephemeral'] = opt
node = obj_utils.create_test_node(
self.context, uuid=uuidutils.generate_uuid(),
instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
data = iscsi_deploy.parse_instance_info(node)
self.assertTrue(data.get('preserve_ephemeral'))
def test_parse_instance_info_valid_preserve_ephemeral_false(self):
info = dict(INST_INFO_DICT)
for opt in ['false', 'FALSE', 'False', 'f',
'off', 'no', 'n', '0']:
info['preserve_ephemeral'] = opt
node = obj_utils.create_test_node(
self.context, uuid=uuidutils.generate_uuid(),
instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
data = iscsi_deploy.parse_instance_info(node)
self.assertFalse(data.get('preserve_ephemeral'))
def test_parse_instance_info_invalid_preserve_ephemeral(self):
info = dict(INST_INFO_DICT)
info['preserve_ephemeral'] = 'foobar'
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
self.assertRaises(exception.InvalidParameterValue,
iscsi_deploy.parse_instance_info,
node)
def test_parse_instance_info_invalid_ephemeral_disk(self):
info = dict(INST_INFO_DICT)
info['ephemeral_gb'] = 10
info['swap_mb'] = 0
info['root_gb'] = 20
info['preserve_ephemeral'] = True
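        # The disk layout recorded at deploy time (below) differs from the one
        # now requested (9 GB vs 10 GB ephemeral), which must be rejected
        # while preserve_ephemeral is set.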
drv_internal_dict = {'instance': {'ephemeral_gb': 9,
'swap_mb': 0,
'root_gb': 20}}
drv_internal_dict.update(DRV_INTERNAL_INFO_DICT)
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=drv_internal_dict,
)
self.assertRaises(exception.InvalidParameterValue,
iscsi_deploy.parse_instance_info,
node)
def test__check_disk_layout_unchanged_fails(self):
info = dict(INST_INFO_DICT)
info['ephemeral_gb'] = 10
info['swap_mb'] = 0
info['root_gb'] = 20
info['preserve_ephemeral'] = True
drv_internal_dict = {'instance': {'ephemeral_gb': 20,
'swap_mb': 0,
'root_gb': 20}}
drv_internal_dict.update(DRV_INTERNAL_INFO_DICT)
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=drv_internal_dict,
)
self.assertRaises(exception.InvalidParameterValue,
iscsi_deploy._check_disk_layout_unchanged,
node, info)
def test__check_disk_layout_unchanged(self):
info = dict(INST_INFO_DICT)
info['ephemeral_gb'] = 10
info['swap_mb'] = 0
info['root_gb'] = 20
info['preserve_ephemeral'] = True
drv_internal_dict = {'instance': {'ephemeral_gb': 10,
'swap_mb': 0,
'root_gb': 20}}
drv_internal_dict.update(DRV_INTERNAL_INFO_DICT)
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=drv_internal_dict,
)
self.assertIsNone(iscsi_deploy._check_disk_layout_unchanged(node,
info))
def test__save_disk_layout(self):
info = dict(INST_INFO_DICT)
info['ephemeral_gb'] = 10
info['swap_mb'] = 0
info['root_gb'] = 10
info['preserve_ephemeral'] = False
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
iscsi_deploy._save_disk_layout(node, info)
node.refresh()
for param in ('ephemeral_gb', 'swap_mb', 'root_gb'):
self.assertEqual(
info[param], node.driver_internal_info['instance'][param]
)
def test_parse_instance_info_configdrive(self):
info = dict(INST_INFO_DICT)
info['configdrive'] = 'http://1.2.3.4/cd'
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
instance_info = iscsi_deploy.parse_instance_info(node)
self.assertEqual('http://1.2.3.4/cd', instance_info['configdrive'])
def test_parse_instance_info_nonglance_image(self):
info = INST_INFO_DICT.copy()
info['image_source'] = 'file:///image.qcow2'
info['kernel'] = 'file:///image.vmlinuz'
info['ramdisk'] = 'file:///image.initrd'
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
iscsi_deploy.parse_instance_info(node)
def test_parse_instance_info_nonglance_image_no_kernel(self):
info = INST_INFO_DICT.copy()
info['image_source'] = 'file:///image.qcow2'
info['ramdisk'] = 'file:///image.initrd'
node = obj_utils.create_test_node(
self.context, instance_info=info,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
self.assertRaises(exception.MissingParameterValue,
iscsi_deploy.parse_instance_info, node)
def test_parse_instance_info_whole_disk_image(self):
driver_internal_info = dict(DRV_INTERNAL_INFO_DICT)
driver_internal_info['is_whole_disk_image'] = True
node = obj_utils.create_test_node(
self.context, instance_info=INST_INFO_DICT,
driver_internal_info=driver_internal_info,
)
instance_info = iscsi_deploy.parse_instance_info(node)
self.assertIsNotNone(instance_info.get('image_source'))
self.assertIsNotNone(instance_info.get('root_gb'))
self.assertEqual(0, instance_info.get('swap_mb'))
self.assertEqual(0, instance_info.get('ephemeral_gb'))
self.assertIsNone(instance_info.get('configdrive'))
def test_parse_instance_info_whole_disk_image_missing_root(self):
info = dict(INST_INFO_DICT)
del info['root_gb']
node = obj_utils.create_test_node(self.context, instance_info=info)
self.assertRaises(exception.InvalidParameterValue,
iscsi_deploy.parse_instance_info, node)
class IscsiDeployPrivateMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(IscsiDeployPrivateMethodsTestCase, self).setUp()
n = {
'driver': 'fake_pxe',
'instance_info': INST_INFO_DICT,
'driver_info': DRV_INFO_DICT,
'driver_internal_info': DRV_INTERNAL_INFO_DICT,
}
mgr_utils.mock_the_extension_manager(driver="fake_pxe")
self.node = obj_utils.create_test_node(self.context, **n)
def test__get_image_dir_path(self):
self.assertEqual(os.path.join(CONF.pxe.images_path,
self.node.uuid),
iscsi_deploy._get_image_dir_path(self.node.uuid))
def test__get_image_file_path(self):
self.assertEqual(os.path.join(CONF.pxe.images_path,
self.node.uuid,
'disk'),
iscsi_deploy._get_image_file_path(self.node.uuid))
class IscsiDeployMethodsTestCase(db_base.DbTestCase):
def setUp(self):
super(IscsiDeployMethodsTestCase, self).setUp()
instance_info = dict(INST_INFO_DICT)
instance_info['deploy_key'] = 'fake-56789'
n = {
'driver': 'fake_pxe',
'instance_info': instance_info,
'driver_info': DRV_INFO_DICT,
'driver_internal_info': DRV_INTERNAL_INFO_DICT,
}
mgr_utils.mock_the_extension_manager(driver="fake_pxe")
self.node = obj_utils.create_test_node(self.context, **n)
@mock.patch.object(disk_utils, 'get_image_mb', autospec=True)
def test_check_image_size(self, get_image_mb_mock):
get_image_mb_mock.return_value = 1000
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.instance_info['root_gb'] = 1
iscsi_deploy.check_image_size(task)
get_image_mb_mock.assert_called_once_with(
iscsi_deploy._get_image_file_path(task.node.uuid))
@mock.patch.object(disk_utils, 'get_image_mb', autospec=True)
def test_check_image_size_fails(self, get_image_mb_mock):
get_image_mb_mock.return_value = 1025
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.instance_info['root_gb'] = 1
self.assertRaises(exception.InstanceDeployFailure,
iscsi_deploy.check_image_size,
task)
get_image_mb_mock.assert_called_once_with(
iscsi_deploy._get_image_file_path(task.node.uuid))
@mock.patch.object(deploy_utils, 'fetch_images', autospec=True)
def test_cache_instance_images_master_path(self, mock_fetch_image):
temp_dir = tempfile.mkdtemp()
self.config(images_path=temp_dir, group='pxe')
self.config(instance_master_path=os.path.join(temp_dir,
'instance_master_path'),
group='pxe')
fileutils.ensure_tree(CONF.pxe.instance_master_path)
(uuid, image_path) = iscsi_deploy.cache_instance_image(None, self.node)
mock_fetch_image.assert_called_once_with(None,
mock.ANY,
[(uuid, image_path)], True)
self.assertEqual('glance://image_uuid', uuid)
self.assertEqual(os.path.join(temp_dir,
self.node.uuid,
'disk'),
image_path)
@mock.patch.object(ironic_utils, 'unlink_without_raise', autospec=True)
@mock.patch.object(utils, 'rmtree_without_raise', autospec=True)
@mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
def test_destroy_images(self, mock_cache, mock_rmtree, mock_unlink):
self.config(images_path='/path', group='pxe')
iscsi_deploy.destroy_images('uuid')
mock_cache.return_value.clean_up.assert_called_once_with()
mock_unlink.assert_called_once_with('/path/uuid/disk')
mock_rmtree.assert_called_once_with('/path/uuid')
def _test_build_deploy_ramdisk_options(self, mock_alnum, api_url,
expected_root_device=None,
expected_boot_option='netboot',
expected_boot_mode='bios'):
fake_key = '0123456789ABCDEFGHIJKLMNOPQRSTUV'
fake_disk = 'fake-disk'
self.config(disk_devices=fake_disk, group='pxe')
mock_alnum.return_value = fake_key
expected_iqn = 'iqn.2008-10.org.openstack:%s' % self.node.uuid
expected_opts = {
'iscsi_target_iqn': expected_iqn,
'deployment_id': self.node.uuid,
'deployment_key': fake_key,
'disk': fake_disk,
'ironic_api_url': api_url,
'boot_option': expected_boot_option,
'boot_mode': expected_boot_mode,
'coreos.configdrive': 0,
}
if expected_root_device:
expected_opts['root_device'] = expected_root_device
opts = iscsi_deploy.build_deploy_ramdisk_options(self.node)
self.assertEqual(expected_opts, opts)
mock_alnum.assert_called_once_with(32)
        # assert deploy_key was injected into the node's instance_info
self.assertIn('deploy_key', self.node.instance_info)
@mock.patch.object(keystone, 'get_service_url', autospec=True)
@mock.patch.object(utils, 'random_alnum', autospec=True)
def test_build_deploy_ramdisk_options(self, mock_alnum, mock_get_url):
fake_api_url = 'http://127.0.0.1:6385'
self.config(api_url=fake_api_url, group='conductor')
self._test_build_deploy_ramdisk_options(mock_alnum, fake_api_url)
        # Since the Ironic API URL comes from the config file,
        # keystone should not have been called.
self.assertFalse(mock_get_url.called)
@mock.patch.object(keystone, 'get_service_url', autospec=True)
@mock.patch.object(utils, 'random_alnum', autospec=True)
def test_build_deploy_ramdisk_options_keystone(self, mock_alnum,
mock_get_url):
fake_api_url = 'http://127.0.0.1:6385'
mock_get_url.return_value = fake_api_url
self._test_build_deploy_ramdisk_options(mock_alnum, fake_api_url)
        # The Ironic API URL is not set in the config file,
        # so it should be fetched from keystone.
mock_get_url.assert_called_once_with()
@mock.patch.object(keystone, 'get_service_url', autospec=True)
@mock.patch.object(utils, 'random_alnum', autospec=True)
def test_build_deploy_ramdisk_options_root_device(self, mock_alnum,
mock_get_url):
self.node.properties['root_device'] = {'wwn': 123456}
expected = 'wwn=123456'
fake_api_url = 'http://127.0.0.1:6385'
self.config(api_url=fake_api_url, group='conductor')
self._test_build_deploy_ramdisk_options(mock_alnum, fake_api_url,
expected_root_device=expected)
@mock.patch.object(keystone, 'get_service_url', autospec=True)
@mock.patch.object(utils, 'random_alnum', autospec=True)
def test_build_deploy_ramdisk_options_boot_option(self, mock_alnum,
mock_get_url):
self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
expected = 'local'
fake_api_url = 'http://127.0.0.1:6385'
self.config(api_url=fake_api_url, group='conductor')
self._test_build_deploy_ramdisk_options(mock_alnum, fake_api_url,
expected_boot_option=expected)
@mock.patch.object(keystone, 'get_service_url', autospec=True)
@mock.patch.object(utils, 'random_alnum', autospec=True)
def test_build_deploy_ramdisk_options_whole_disk_image(self, mock_alnum,
mock_get_url):
"""Tests a hack to boot_option for whole disk images.
This hack is in place to fix bug #1441556.
"""
self.node.instance_info = {'capabilities': '{"boot_option": "local"}'}
dii = self.node.driver_internal_info
dii['is_whole_disk_image'] = True
self.node.driver_internal_info = dii
self.node.save()
expected = 'netboot'
fake_api_url = 'http://127.0.0.1:6385'
self.config(api_url=fake_api_url, group='conductor')
self._test_build_deploy_ramdisk_options(mock_alnum, fake_api_url,
expected_boot_option=expected)
@mock.patch.object(iscsi_deploy, '_save_disk_layout', autospec=True)
@mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
def test_continue_deploy_fail(self, deploy_mock, power_mock,
mock_image_cache, mock_disk_layout):
kwargs = {'address': '123456', 'iqn': 'aaa-bbb', 'key': 'fake-56789'}
deploy_mock.side_effect = iter([
exception.InstanceDeployFailure("test deploy error")])
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
params = iscsi_deploy.get_deploy_info(task.node, **kwargs)
self.assertRaises(exception.InstanceDeployFailure,
iscsi_deploy.continue_deploy,
task, **kwargs)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertIsNotNone(task.node.last_error)
deploy_mock.assert_called_once_with(**params)
power_mock.assert_called_once_with(task, states.POWER_OFF)
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
self.assertFalse(mock_disk_layout.called)
@mock.patch.object(iscsi_deploy, '_save_disk_layout', autospec=True)
@mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
def test_continue_deploy_ramdisk_fails(self, deploy_mock, power_mock,
mock_image_cache, mock_disk_layout):
kwargs = {'address': '123456', 'iqn': 'aaa-bbb', 'key': 'fake-56789',
'error': 'test ramdisk error'}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.InstanceDeployFailure,
iscsi_deploy.continue_deploy,
task, **kwargs)
self.assertIsNotNone(task.node.last_error)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
power_mock.assert_called_once_with(task, states.POWER_OFF)
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
self.assertFalse(deploy_mock.called)
self.assertFalse(mock_disk_layout.called)
@mock.patch.object(iscsi_deploy, '_save_disk_layout', autospec=True)
@mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
def test_continue_deploy_fail_no_root_uuid_or_disk_id(
self, deploy_mock, power_mock, mock_image_cache, mock_disk_layout):
kwargs = {'address': '123456', 'iqn': 'aaa-bbb', 'key': 'fake-56789'}
deploy_mock.return_value = {}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
params = iscsi_deploy.get_deploy_info(task.node, **kwargs)
self.assertRaises(exception.InstanceDeployFailure,
iscsi_deploy.continue_deploy,
task, **kwargs)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertIsNotNone(task.node.last_error)
deploy_mock.assert_called_once_with(**params)
power_mock.assert_called_once_with(task, states.POWER_OFF)
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
self.assertFalse(mock_disk_layout.called)
@mock.patch.object(iscsi_deploy, '_save_disk_layout', autospec=True)
@mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
def test_continue_deploy_fail_empty_root_uuid(
self, deploy_mock, power_mock, mock_image_cache, mock_disk_layout):
kwargs = {'address': '123456', 'iqn': 'aaa-bbb', 'key': 'fake-56789'}
deploy_mock.return_value = {'root uuid': ''}
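        # An empty root UUID reported by the deploy is treated as a failure,
        # just like a missing one.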
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
params = iscsi_deploy.get_deploy_info(task.node, **kwargs)
self.assertRaises(exception.InstanceDeployFailure,
iscsi_deploy.continue_deploy,
task, **kwargs)
self.assertEqual(states.DEPLOYFAIL, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertIsNotNone(task.node.last_error)
deploy_mock.assert_called_once_with(**params)
power_mock.assert_called_once_with(task, states.POWER_OFF)
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
self.assertFalse(mock_disk_layout.called)
@mock.patch.object(iscsi_deploy, '_save_disk_layout', autospec=True)
@mock.patch.object(iscsi_deploy, 'LOG', autospec=True)
@mock.patch.object(iscsi_deploy, 'get_deploy_info', autospec=True)
@mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
def test_continue_deploy(self, deploy_mock, power_mock, mock_image_cache,
mock_deploy_info, mock_log, mock_disk_layout):
kwargs = {'address': '123456', 'iqn': 'aaa-bbb', 'key': 'fake-56789'}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
mock_deploy_info.return_value = {
'address': '123456',
'boot_option': 'netboot',
'configdrive': "I've got the power",
'ephemeral_format': None,
'ephemeral_mb': 0,
'image_path': (u'/var/lib/ironic/images/1be26c0b-03f2-4d2e-ae87-'
u'c02d7f33c123/disk'),
'iqn': 'aaa-bbb',
'lun': '1',
'node_uuid': u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
'port': '3260',
'preserve_ephemeral': True,
'root_mb': 102400,
'swap_mb': 0,
}
log_params = mock_deploy_info.return_value.copy()
# Make sure we don't log the full content of the configdrive
log_params['configdrive'] = '***'
expected_dict = {
'node': self.node.uuid,
'params': log_params,
}
uuid_dict_returned = {'root uuid': '12345678-87654321'}
deploy_mock.return_value = uuid_dict_returned
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
mock_log.isEnabledFor.return_value = True
retval = iscsi_deploy.continue_deploy(task, **kwargs)
mock_log.debug.assert_called_once_with(
mock.ANY, expected_dict)
self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertIsNone(task.node.last_error)
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
self.assertEqual(uuid_dict_returned, retval)
mock_disk_layout.assert_called_once_with(task.node, mock.ANY)
@mock.patch.object(iscsi_deploy, 'LOG', autospec=True)
@mock.patch.object(iscsi_deploy, 'get_deploy_info', autospec=True)
@mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(deploy_utils, 'deploy_disk_image', autospec=True)
def test_continue_deploy_whole_disk_image(
self, deploy_mock, power_mock, mock_image_cache, mock_deploy_info,
mock_log):
kwargs = {'address': '123456', 'iqn': 'aaa-bbb', 'key': 'fake-56789'}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
mock_deploy_info.return_value = {
'address': '123456',
'image_path': (u'/var/lib/ironic/images/1be26c0b-03f2-4d2e-ae87-'
u'c02d7f33c123/disk'),
'iqn': 'aaa-bbb',
'lun': '1',
'node_uuid': u'1be26c0b-03f2-4d2e-ae87-c02d7f33c123',
'port': '3260',
}
log_params = mock_deploy_info.return_value.copy()
expected_dict = {
'node': self.node.uuid,
'params': log_params,
}
uuid_dict_returned = {'disk identifier': '87654321'}
deploy_mock.return_value = uuid_dict_returned
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.driver_internal_info['is_whole_disk_image'] = True
mock_log.isEnabledFor.return_value = True
retval = iscsi_deploy.continue_deploy(task, **kwargs)
mock_log.debug.assert_called_once_with(
mock.ANY, expected_dict)
self.assertEqual(states.DEPLOYWAIT, task.node.provision_state)
self.assertEqual(states.ACTIVE, task.node.target_provision_state)
self.assertIsNone(task.node.last_error)
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
self.assertEqual(uuid_dict_returned, retval)
def test_get_deploy_info_boot_option_default(self):
instance_info = self.node.instance_info
instance_info['deploy_key'] = 'key'
self.node.instance_info = instance_info
kwargs = {'address': '1.1.1.1', 'iqn': 'target-iqn', 'key': 'key'}
ret_val = iscsi_deploy.get_deploy_info(self.node, **kwargs)
self.assertEqual('1.1.1.1', ret_val['address'])
self.assertEqual('target-iqn', ret_val['iqn'])
self.assertEqual('netboot', ret_val['boot_option'])
def test_get_deploy_info_netboot_specified(self):
instance_info = self.node.instance_info
instance_info['deploy_key'] = 'key'
instance_info['capabilities'] = {'boot_option': 'netboot'}
self.node.instance_info = instance_info
kwargs = {'address': '1.1.1.1', 'iqn': 'target-iqn', 'key': 'key'}
ret_val = iscsi_deploy.get_deploy_info(self.node, **kwargs)
self.assertEqual('1.1.1.1', ret_val['address'])
self.assertEqual('target-iqn', ret_val['iqn'])
self.assertEqual('netboot', ret_val['boot_option'])
def test_get_deploy_info_localboot(self):
instance_info = self.node.instance_info
instance_info['deploy_key'] = 'key'
instance_info['capabilities'] = {'boot_option': 'local'}
self.node.instance_info = instance_info
kwargs = {'address': '1.1.1.1', 'iqn': 'target-iqn', 'key': 'key'}
ret_val = iscsi_deploy.get_deploy_info(self.node, **kwargs)
self.assertEqual('1.1.1.1', ret_val['address'])
self.assertEqual('target-iqn', ret_val['iqn'])
self.assertEqual('local', ret_val['boot_option'])
@mock.patch.object(iscsi_deploy, 'continue_deploy', autospec=True)
@mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options',
autospec=True)
def test_do_agent_iscsi_deploy_okay(self, build_options_mock,
continue_deploy_mock):
build_options_mock.return_value = {'deployment_key': 'abcdef',
'iscsi_target_iqn': 'iqn-qweqwe'}
agent_client_mock = mock.MagicMock(spec_set=agent_client.AgentClient)
agent_client_mock.start_iscsi_target.return_value = {
'command_status': 'SUCCESS', 'command_error': None}
driver_internal_info = {'agent_url': 'http://1.2.3.4:1234'}
self.node.driver_internal_info = driver_internal_info
self.node.save()
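        # continue_deploy is expected to be called with the address taken
        # from the agent_url above ('1.2.3.4').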
uuid_dict_returned = {'root uuid': 'some-root-uuid'}
continue_deploy_mock.return_value = uuid_dict_returned
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
ret_val = iscsi_deploy.do_agent_iscsi_deploy(
task, agent_client_mock)
build_options_mock.assert_called_once_with(task.node)
agent_client_mock.start_iscsi_target.assert_called_once_with(
task.node, 'iqn-qweqwe')
continue_deploy_mock.assert_called_once_with(
task, error=None, iqn='iqn-qweqwe', key='abcdef',
address='1.2.3.4')
self.assertEqual(
'some-root-uuid',
task.node.driver_internal_info['root_uuid_or_disk_id'])
self.assertEqual(ret_val, uuid_dict_returned)
@mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options',
autospec=True)
def test_do_agent_iscsi_deploy_start_iscsi_failure(self,
build_options_mock):
build_options_mock.return_value = {'deployment_key': 'abcdef',
'iscsi_target_iqn': 'iqn-qweqwe'}
agent_client_mock = mock.MagicMock(spec_set=agent_client.AgentClient)
agent_client_mock.start_iscsi_target.return_value = {
'command_status': 'FAILED', 'command_error': 'booom'}
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.InstanceDeployFailure,
iscsi_deploy.do_agent_iscsi_deploy,
task, agent_client_mock)
build_options_mock.assert_called_once_with(task.node)
agent_client_mock.start_iscsi_target.assert_called_once_with(
task.node, 'iqn-qweqwe')
self.node.refresh()
self.assertEqual(states.DEPLOYFAIL, self.node.provision_state)
self.assertEqual(states.ACTIVE, self.node.target_provision_state)
self.assertIsNotNone(self.node.last_error)
def test_validate_pass_bootloader_info_input(self):
params = {'key': 'some-random-key', 'address': '1.2.3.4',
'error': '', 'status': 'SUCCEEDED'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.instance_info['deploy_key'] = 'some-random-key'
# Assert that the method doesn't raise
iscsi_deploy.validate_pass_bootloader_info_input(task, params)
def test_validate_pass_bootloader_info_missing_status(self):
params = {'key': 'some-random-key', 'address': '1.2.3.4'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.MissingParameterValue,
iscsi_deploy.validate_pass_bootloader_info_input,
task, params)
def test_validate_pass_bootloader_info_missing_key(self):
params = {'status': 'SUCCEEDED', 'address': '1.2.3.4'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.MissingParameterValue,
iscsi_deploy.validate_pass_bootloader_info_input,
task, params)
def test_validate_pass_bootloader_info_missing_address(self):
params = {'status': 'SUCCEEDED', 'key': 'some-random-key'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.MissingParameterValue,
iscsi_deploy.validate_pass_bootloader_info_input,
task, params)
def test_validate_pass_bootloader_info_input_invalid_key(self):
params = {'key': 'some-other-key', 'address': '1.2.3.4',
'status': 'SUCCEEDED'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.instance_info['deploy_key'] = 'some-random-key'
self.assertRaises(exception.InvalidParameterValue,
iscsi_deploy.validate_pass_bootloader_info_input,
task, params)
def test_validate_bootloader_install_status(self):
kwargs = {'key': 'abcdef', 'status': 'SUCCEEDED', 'error': ''}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.instance_info['deploy_key'] = 'abcdef'
# Nothing much to assert except that it shouldn't raise.
iscsi_deploy.validate_bootloader_install_status(task, kwargs)
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
def test_validate_bootloader_install_status_install_failed(
self, set_fail_state_mock):
kwargs = {'key': 'abcdef', 'status': 'FAILED', 'error': 'some-error'}
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.node.provision_state = states.DEPLOYING
task.node.target_provision_state = states.ACTIVE
task.node.instance_info['deploy_key'] = 'abcdef'
self.assertRaises(exception.InstanceDeployFailure,
iscsi_deploy.validate_bootloader_install_status,
task, kwargs)
set_fail_state_mock.assert_called_once_with(task, mock.ANY)
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
autospec=True)
def test_finish_deploy(self, notify_mock):
self.node.provision_state = states.DEPLOYING
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
iscsi_deploy.finish_deploy(task, '1.2.3.4')
notify_mock.assert_called_once_with('1.2.3.4')
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
@mock.patch.object(deploy_utils, 'set_failed_state', autospec=True)
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
autospec=True)
def test_finish_deploy_notify_fails(self, notify_mock,
set_fail_state_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
notify_mock.side_effect = RuntimeError()
self.assertRaises(exception.InstanceDeployFailure,
iscsi_deploy.finish_deploy, task, '1.2.3.4')
set_fail_state_mock.assert_called_once_with(task, mock.ANY)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
autospec=True)
def test_finish_deploy_ssh_with_local_boot(self, notify_mock,
node_power_mock):
instance_info = dict(INST_INFO_DICT)
instance_info['capabilities'] = {'boot_option': 'local'}
n = {
'uuid': uuidutils.generate_uuid(),
'driver': 'fake_ssh',
'instance_info': instance_info,
'provision_state': states.DEPLOYING,
'target_provision_state': states.ACTIVE,
}
mgr_utils.mock_the_extension_manager(driver="fake_ssh")
node = obj_utils.create_test_node(self.context, **n)
with task_manager.acquire(self.context, node.uuid,
shared=False) as task:
iscsi_deploy.finish_deploy(task, '1.2.3.4')
notify_mock.assert_called_once_with('1.2.3.4')
self.assertEqual(states.ACTIVE, task.node.provision_state)
self.assertEqual(states.NOSTATE, task.node.target_provision_state)
node_power_mock.assert_called_once_with(task, states.REBOOT)
@mock.patch.object(keystone, 'get_service_url', autospec=True)
def test_validate_good_api_url_from_config_file(self, mock_ks):
# not present in the keystone catalog
mock_ks.side_effect = exception.KeystoneFailure
self.config(group='conductor', api_url='http://foo')
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
iscsi_deploy.validate(task)
self.assertFalse(mock_ks.called)
@mock.patch.object(keystone, 'get_service_url', autospec=True)
def test_validate_good_api_url_from_keystone(self, mock_ks):
# present in the keystone catalog
mock_ks.return_value = 'http://127.0.0.1:1234'
# not present in the config file
self.config(group='conductor', api_url=None)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
iscsi_deploy.validate(task)
mock_ks.assert_called_once_with()
@mock.patch.object(keystone, 'get_service_url', autospec=True)
def test_validate_fail_no_api_url(self, mock_ks):
# not present in the keystone catalog
mock_ks.side_effect = exception.KeystoneFailure
# not present in the config file
self.config(group='conductor', api_url=None)
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
iscsi_deploy.validate, task)
mock_ks.assert_called_once_with()
def test_validate_invalid_root_device_hints(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.properties['root_device'] = {'size': 'not-int'}
self.assertRaises(exception.InvalidParameterValue,
iscsi_deploy.validate, task)
class ISCSIDeployTestCase(db_base.DbTestCase):
def setUp(self):
super(ISCSIDeployTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_pxe")
self.driver = driver_factory.get_driver("fake_pxe")
self.driver.vendor = iscsi_deploy.VendorPassthru()
self.node = obj_utils.create_test_node(
self.context, driver='fake_pxe',
instance_info=INST_INFO_DICT,
driver_info=DRV_INFO_DICT,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
self.node.driver_internal_info['agent_url'] = 'http://1.2.3.4:1234'
self.task = mock.MagicMock(spec=task_manager.TaskManager)
self.task.shared = False
self.task.node = self.node
self.task.driver = self.driver
self.task.context = self.context
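        # Reset the class-level cached DHCP provider so these tests start
        # with a clean factory.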
dhcp_factory.DHCPFactory._dhcp_provider = None
def test_get_properties(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertEqual({}, task.driver.deploy.get_properties())
@mock.patch.object(iscsi_deploy, 'validate', autospec=True)
@mock.patch.object(deploy_utils, 'validate_capabilities', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'validate', autospec=True)
def test_validate(self, pxe_validate_mock,
validate_capabilities_mock, validate_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.deploy.validate(task)
pxe_validate_mock.assert_called_once_with(task.driver.boot, task)
validate_capabilities_mock.assert_called_once_with(task.node)
validate_mock.assert_called_once_with(task)
@mock.patch.object(pxe.PXEBoot, 'prepare_instance', autospec=True)
def test_prepare_node_active(self, prepare_instance_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.provision_state = states.ACTIVE
task.driver.deploy.prepare(task)
prepare_instance_mock.assert_called_once_with(
task.driver.boot, task)
@mock.patch.object(deploy_utils, 'build_agent_options', autospec=True)
@mock.patch.object(iscsi_deploy, 'build_deploy_ramdisk_options',
autospec=True)
@mock.patch.object(pxe.PXEBoot, 'prepare_ramdisk', autospec=True)
def test_prepare_node_deploying(self, mock_prepare_ramdisk,
mock_iscsi_options, mock_agent_options):
mock_iscsi_options.return_value = {'a': 'b'}
mock_agent_options.return_value = {'c': 'd'}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.provision_state = states.DEPLOYWAIT
task.driver.deploy.prepare(task)
mock_iscsi_options.assert_called_once_with(task.node)
mock_agent_options.assert_called_once_with(task.node)
mock_prepare_ramdisk.assert_called_once_with(
task.driver.boot, task, {'a': 'b', 'c': 'd'})
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
@mock.patch.object(iscsi_deploy, 'check_image_size', autospec=True)
@mock.patch.object(iscsi_deploy, 'cache_instance_image', autospec=True)
def test_deploy(self, mock_cache_instance_image,
mock_check_image_size, mock_node_power_action):
with task_manager.acquire(self.context,
self.node.uuid, shared=False) as task:
state = task.driver.deploy.deploy(task)
self.assertEqual(state, states.DEPLOYWAIT)
mock_cache_instance_image.assert_called_once_with(
self.context, task.node)
mock_check_image_size.assert_called_once_with(task)
mock_node_power_action.assert_called_once_with(task, states.REBOOT)
@mock.patch.object(manager_utils, 'node_power_action', autospec=True)
def test_tear_down(self, node_power_action_mock):
with task_manager.acquire(self.context,
self.node.uuid, shared=False) as task:
state = task.driver.deploy.tear_down(task)
self.assertEqual(state, states.DELETED)
node_power_action_mock.assert_called_once_with(task,
states.POWER_OFF)
@mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider')
@mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp')
@mock.patch.object(pxe.PXEBoot, 'clean_up_instance', autospec=True)
@mock.patch.object(pxe.PXEBoot, 'clean_up_ramdisk', autospec=True)
@mock.patch.object(iscsi_deploy, 'destroy_images', autospec=True)
def test_clean_up(self, destroy_images_mock, clean_up_ramdisk_mock,
clean_up_instance_mock, clean_dhcp_mock,
set_dhcp_provider_mock):
with task_manager.acquire(self.context,
self.node.uuid, shared=False) as task:
task.driver.deploy.clean_up(task)
destroy_images_mock.assert_called_once_with(task.node.uuid)
clean_up_ramdisk_mock.assert_called_once_with(
task.driver.boot, task)
clean_up_instance_mock.assert_called_once_with(
task.driver.boot, task)
set_dhcp_provider_mock.assert_called_once_with()
clean_dhcp_mock.assert_called_once_with(task)
@mock.patch.object(deploy_utils, 'prepare_inband_cleaning', autospec=True)
def test_prepare_cleaning(self, prepare_inband_cleaning_mock):
prepare_inband_cleaning_mock.return_value = states.CLEANWAIT
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertEqual(
states.CLEANWAIT, task.driver.deploy.prepare_cleaning(task))
prepare_inband_cleaning_mock.assert_called_once_with(
task, manage_boot=True)
@mock.patch.object(deploy_utils, 'tear_down_inband_cleaning',
autospec=True)
def test_tear_down_cleaning(self, tear_down_cleaning_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.deploy.tear_down_cleaning(task)
tear_down_cleaning_mock.assert_called_once_with(
task, manage_boot=True)
@mock.patch('ironic.drivers.modules.deploy_utils.agent_get_clean_steps',
autospec=True)
def test_get_clean_steps(self, mock_get_clean_steps):
# Test getting clean steps
self.config(group='deploy', erase_devices_priority=10)
mock_steps = [{'priority': 10, 'interface': 'deploy',
'step': 'erase_devices'}]
self.node.driver_internal_info = {'agent_url': 'foo'}
self.node.save()
mock_get_clean_steps.return_value = mock_steps
with task_manager.acquire(self.context, self.node.uuid) as task:
steps = task.driver.deploy.get_clean_steps(task)
mock_get_clean_steps.assert_called_once_with(
task, interface='deploy',
override_priorities={
'erase_devices': 10})
self.assertEqual(mock_steps, steps)
@mock.patch('ironic.drivers.modules.deploy_utils.agent_get_clean_steps',
autospec=True)
def test_get_clean_steps_no_agent_url(self, mock_get_clean_steps):
# Test getting clean steps
self.node.driver_internal_info = {}
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
steps = task.driver.deploy.get_clean_steps(task)
self.assertEqual([], steps)
self.assertFalse(mock_get_clean_steps.called)
@mock.patch.object(deploy_utils, 'agent_execute_clean_step', autospec=True)
def test_execute_clean_step(self, agent_execute_clean_step_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.deploy.execute_clean_step(
task, {'some-step': 'step-info'})
agent_execute_clean_step_mock.assert_called_once_with(
task, {'some-step': 'step-info'})
class TestVendorPassthru(db_base.DbTestCase):
def setUp(self):
super(TestVendorPassthru, self).setUp()
mgr_utils.mock_the_extension_manager()
self.driver = driver_factory.get_driver("fake")
self.driver.vendor = iscsi_deploy.VendorPassthru()
self.node = obj_utils.create_test_node(
self.context, driver='fake',
instance_info=INST_INFO_DICT,
driver_info=DRV_INFO_DICT,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
self.node.driver_internal_info['agent_url'] = 'http://1.2.3.4:1234'
self.task = mock.MagicMock(spec=task_manager.TaskManager)
self.task.shared = False
self.task.node = self.node
self.task.driver = self.driver
self.task.context = self.context
def test_validate_good(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.instance_info['deploy_key'] = 'fake-56789'
task.driver.vendor.validate(task, method='pass_deploy_info',
address='123456', iqn='aaa-bbb',
key='fake-56789')
def test_validate_pass_deploy_info_during_cleaning(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.node.provision_state = states.CLEANWAIT
# Assert that it doesn't raise.
self.assertIsNone(
task.driver.vendor.validate(task, method='pass_deploy_info',
address='123456', iqn='aaa-bbb',
key='fake-56789'))
def test_validate_fail(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.validate,
task, method='pass_deploy_info',
key='fake-56789')
def test_validate_key_notmatch(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.vendor.validate,
task, method='pass_deploy_info',
address='123456', iqn='aaa-bbb',
key='fake-12345')
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean',
autospec=True)
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(iscsi_deploy, 'LOG', spec=['warning'])
def test__initiate_cleaning(self, log_mock, set_node_cleaning_steps_mock,
notify_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.vendor._initiate_cleaning(task)
log_mock.warning.assert_called_once_with(mock.ANY, mock.ANY)
set_node_cleaning_steps_mock.assert_called_once_with(task)
notify_mock.assert_called_once_with(self.driver.vendor, task)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'notify_conductor_resume_clean',
autospec=True)
@mock.patch.object(manager_utils, 'cleaning_error_handler', autospec=True)
@mock.patch.object(manager_utils, 'set_node_cleaning_steps', autospec=True)
@mock.patch.object(iscsi_deploy, 'LOG', spec=['warning'])
def test__initiate_cleaning_exception(
self, log_mock, set_node_cleaning_steps_mock,
cleaning_error_handler_mock, notify_mock):
set_node_cleaning_steps_mock.side_effect = RuntimeError()
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.vendor._initiate_cleaning(task)
log_mock.warning.assert_called_once_with(mock.ANY, mock.ANY)
set_node_cleaning_steps_mock.assert_called_once_with(task)
cleaning_error_handler_mock.assert_called_once_with(task, mock.ANY)
self.assertFalse(notify_mock.called)
@mock.patch.object(fake.FakeBoot, 'prepare_instance', autospec=True)
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
autospec=True)
@mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
@mock.patch.object(deploy_utils, 'deploy_partition_image', autospec=True)
def _test_pass_deploy_info_deploy(self, is_localboot, mock_deploy,
mock_image_cache,
notify_mock,
fakeboot_prepare_instance_mock):
# set local boot
i_info = self.node.instance_info
if is_localboot:
i_info['capabilities'] = '{"boot_option": "local"}'
i_info['deploy_key'] = 'fake-56789'
self.node.instance_info = i_info
self.node.power_state = states.POWER_ON
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
root_uuid = "12345678-1234-1234-1234-1234567890abcxyz"
mock_deploy.return_value = {'root uuid': root_uuid}
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.vendor.pass_deploy_info(
task, address='123456', iqn='aaa-bbb', key='fake-56789')
self.node.refresh()
self.assertEqual(states.POWER_ON, self.node.power_state)
self.assertIn('root_uuid_or_disk_id', self.node.driver_internal_info)
self.assertIsNone(self.node.last_error)
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
notify_mock.assert_called_once_with('123456')
fakeboot_prepare_instance_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(fake.FakeBoot, 'prepare_instance', autospec=True)
@mock.patch.object(deploy_utils, 'notify_ramdisk_to_proceed',
autospec=True)
@mock.patch.object(iscsi_deploy, 'InstanceImageCache', autospec=True)
@mock.patch.object(deploy_utils, 'deploy_disk_image', autospec=True)
def _test_pass_deploy_info_whole_disk_image(self, is_localboot,
mock_deploy,
mock_image_cache,
notify_mock,
fakeboot_prep_inst_mock):
i_info = self.node.instance_info
# set local boot
if is_localboot:
i_info['capabilities'] = '{"boot_option": "local"}'
i_info['deploy_key'] = 'fake-56789'
self.node.instance_info = i_info
self.node.power_state = states.POWER_ON
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
disk_id = '0x12345678'
mock_deploy.return_value = {'disk identifier': disk_id}
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.driver_internal_info['is_whole_disk_image'] = True
task.driver.vendor.pass_deploy_info(task, address='123456',
iqn='aaa-bbb',
key='fake-56789')
self.node.refresh()
self.assertEqual(states.POWER_ON, self.node.power_state)
self.assertIsNone(self.node.last_error)
mock_image_cache.assert_called_once_with()
mock_image_cache.return_value.clean_up.assert_called_once_with()
notify_mock.assert_called_once_with('123456')
fakeboot_prep_inst_mock.assert_called_once_with(mock.ANY, task)

    def test_pass_deploy_info_deploy(self):
self._test_pass_deploy_info_deploy(False)
self.assertEqual(states.ACTIVE, self.node.provision_state)
self.assertEqual(states.NOSTATE, self.node.target_provision_state)

    def test_pass_deploy_info_localboot(self):
self._test_pass_deploy_info_deploy(True)
self.assertEqual(states.DEPLOYWAIT, self.node.provision_state)
self.assertEqual(states.ACTIVE, self.node.target_provision_state)

    def test_pass_deploy_info_whole_disk_image(self):
self._test_pass_deploy_info_whole_disk_image(False)
self.assertEqual(states.ACTIVE, self.node.provision_state)
self.assertEqual(states.NOSTATE, self.node.target_provision_state)

    def test_pass_deploy_info_whole_disk_image_localboot(self):
self._test_pass_deploy_info_whole_disk_image(True)
self.assertEqual(states.ACTIVE, self.node.provision_state)
self.assertEqual(states.NOSTATE, self.node.target_provision_state)
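
    # pass_deploy_info must raise InvalidState for a node that is not waiting
    # for a deploy callback (here AVAILABLE) and leave its states untouched.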
def test_pass_deploy_info_invalid(self):
self.node.power_state = states.POWER_ON
self.node.provision_state = states.AVAILABLE
self.node.target_provision_state = states.NOSTATE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidState,
task.driver.vendor.pass_deploy_info,
task, address='123456', iqn='aaa-bbb',
key='fake-56789', error='test ramdisk error')
self.node.refresh()
self.assertEqual(states.AVAILABLE, self.node.provision_state)
self.assertEqual(states.NOSTATE, self.node.target_provision_state)
self.assertEqual(states.POWER_ON, self.node.power_state)

    @mock.patch.object(iscsi_deploy.VendorPassthru, 'pass_deploy_info')
def test_pass_deploy_info_lock_elevated(self, mock_deploy_info):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.vendor.pass_deploy_info(
task, address='123456', iqn='aaa-bbb', key='fake-56789')
# lock elevated w/o exception
self.assertEqual(1, mock_deploy_info.call_count,
"pass_deploy_info was not called once.")

    @mock.patch.object(iscsi_deploy.VendorPassthru,
'_initiate_cleaning', autospec=True)
def test_pass_deploy_info_cleaning(self, initiate_cleaning_mock):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.node.provision_state = states.CLEANWAIT
task.driver.vendor.pass_deploy_info(
task, address='123456', iqn='aaa-bbb', key='fake-56789')
initiate_cleaning_mock.assert_called_once_with(
task.driver.vendor, task)
            # Asserting that the node is still in the CLEANWAIT state confirms
            # that pass_deploy_info returns right after initiating cleaning.
self.assertEqual(states.CLEANWAIT, task.node.provision_state)
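
    # The vendor passthru routes should expose exactly heartbeat,
    # pass_deploy_info and pass_bootloader_install_info.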
def test_vendor_routes(self):
expected = ['heartbeat', 'pass_deploy_info',
'pass_bootloader_install_info']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
vendor_routes = task.driver.vendor.vendor_routes
self.assertIsInstance(vendor_routes, dict)
self.assertEqual(sorted(expected), sorted(list(vendor_routes)))

    def test_driver_routes(self):
expected = ['lookup']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
driver_routes = task.driver.vendor.driver_routes
self.assertIsInstance(driver_routes, dict)
self.assertEqual(sorted(expected), sorted(list(driver_routes)))
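
    # The bootloader install callback should validate the ramdisk input and
    # then finish the deployment for the reported address.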
@mock.patch.object(iscsi_deploy, 'validate_bootloader_install_status',
autospec=True)
@mock.patch.object(iscsi_deploy, 'finish_deploy', autospec=True)
def test_pass_bootloader_install_info(self, finish_deploy_mock,
validate_input_mock):
kwargs = {'method': 'pass_deploy_info', 'address': '123456'}
self.node.provision_state = states.DEPLOYWAIT
self.node.target_provision_state = states.ACTIVE
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.vendor.pass_bootloader_install_info(task, **kwargs)
finish_deploy_mock.assert_called_once_with(task, '123456')
validate_input_mock.assert_called_once_with(task, kwargs)
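
    # continue_deploy without local boot: run the iSCSI deploy and reboot
    # straight into the deployed instance.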
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'reboot_and_finish_deploy', autospec=True)
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
def test_continue_deploy_netboot(self, do_agent_iscsi_deploy_mock,
reboot_and_finish_deploy_mock):
uuid_dict_returned = {'root uuid': 'some-root-uuid'}
do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned
self.driver.vendor.continue_deploy(self.task)
do_agent_iscsi_deploy_mock.assert_called_once_with(
self.task, self.driver.vendor._client)
reboot_and_finish_deploy_mock.assert_called_once_with(
mock.ANY, self.task)
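
    # continue_deploy with the "local" boot option: the local bootloader is
    # configured from the returned root UUID before the final reboot.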
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'reboot_and_finish_deploy', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'configure_local_boot', autospec=True)
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
def test_continue_deploy_localboot(self, do_agent_iscsi_deploy_mock,
configure_local_boot_mock,
reboot_and_finish_deploy_mock):
self.node.instance_info = {
'capabilities': {'boot_option': 'local'}}
self.node.save()
uuid_dict_returned = {'root uuid': 'some-root-uuid'}
do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned
self.driver.vendor.continue_deploy(self.task)
do_agent_iscsi_deploy_mock.assert_called_once_with(
self.task, self.driver.vendor._client)
configure_local_boot_mock.assert_called_once_with(
self.task.driver.vendor, self.task, root_uuid='some-root-uuid',
efi_system_part_uuid=None)
reboot_and_finish_deploy_mock.assert_called_once_with(
self.task.driver.vendor, self.task)
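
    # Same as the local boot case, but with UEFI the EFI system partition UUID
    # returned by the deploy is passed on to configure_local_boot.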
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'reboot_and_finish_deploy', autospec=True)
@mock.patch.object(agent_base_vendor.BaseAgentVendor,
'configure_local_boot', autospec=True)
@mock.patch.object(iscsi_deploy, 'do_agent_iscsi_deploy', autospec=True)
def test_continue_deploy_localboot_uefi(self, do_agent_iscsi_deploy_mock,
configure_local_boot_mock,
reboot_and_finish_deploy_mock):
self.node.instance_info = {
'capabilities': {'boot_option': 'local'}}
self.node.save()
uuid_dict_returned = {'root uuid': 'some-root-uuid',
'efi system partition uuid': 'efi-part-uuid'}
do_agent_iscsi_deploy_mock.return_value = uuid_dict_returned
self.driver.vendor.continue_deploy(self.task)
do_agent_iscsi_deploy_mock.assert_called_once_with(
self.task, self.driver.vendor._client)
configure_local_boot_mock.assert_called_once_with(
self.task.driver.vendor, self.task, root_uuid='some-root-uuid',
efi_system_part_uuid='efi-part-uuid')
reboot_and_finish_deploy_mock.assert_called_once_with(
self.task.driver.vendor, self.task)


# Cleanup of iscsi_deploy with pxe boot interface
class CleanUpFullFlowTestCase(db_base.DbTestCase):
def setUp(self):
super(CleanUpFullFlowTestCase, self).setUp()
self.config(image_cache_size=0, group='pxe')
# Configure node
mgr_utils.mock_the_extension_manager(driver="fake_pxe")
instance_info = INST_INFO_DICT
instance_info['deploy_key'] = 'fake-56789'
self.node = obj_utils.create_test_node(
self.context, driver='fake_pxe',
instance_info=instance_info,
driver_info=DRV_INFO_DICT,
driver_internal_info=DRV_INTERNAL_INFO_DICT,
)
self.port = obj_utils.create_test_port(self.context,
node_id=self.node.id)
# Configure temporary directories
pxe_temp_dir = tempfile.mkdtemp()
self.config(tftp_root=pxe_temp_dir, group='pxe')
tftp_master_dir = os.path.join(CONF.pxe.tftp_root,
'tftp_master')
self.config(tftp_master_path=tftp_master_dir, group='pxe')
os.makedirs(tftp_master_dir)
instance_temp_dir = tempfile.mkdtemp()
self.config(images_path=instance_temp_dir,
group='pxe')
instance_master_dir = os.path.join(CONF.pxe.images_path,
'instance_master')
self.config(instance_master_path=instance_master_dir,
group='pxe')
os.makedirs(instance_master_dir)
self.pxe_config_dir = os.path.join(CONF.pxe.tftp_root, 'pxelinux.cfg')
os.makedirs(self.pxe_config_dir)
# Populate some file names
self.master_kernel_path = os.path.join(CONF.pxe.tftp_master_path,
'kernel')
self.master_instance_path = os.path.join(CONF.pxe.instance_master_path,
'image_uuid')
self.node_tftp_dir = os.path.join(CONF.pxe.tftp_root,
self.node.uuid)
os.makedirs(self.node_tftp_dir)
self.kernel_path = os.path.join(self.node_tftp_dir,
'kernel')
self.node_image_dir = iscsi_deploy._get_image_dir_path(self.node.uuid)
os.makedirs(self.node_image_dir)
self.image_path = iscsi_deploy._get_image_file_path(self.node.uuid)
self.config_path = pxe_utils.get_pxe_config_file_path(self.node.uuid)
self.mac_path = pxe_utils._get_pxe_mac_path(self.port.address)
# Create files
self.files = [self.config_path, self.master_kernel_path,
self.master_instance_path]
for fname in self.files:
# NOTE(dtantsur): files with 0 size won't be cleaned up
with open(fname, 'w') as fp:
fp.write('test')
os.link(self.config_path, self.mac_path)
os.link(self.master_kernel_path, self.kernel_path)
os.link(self.master_instance_path, self.image_path)
dhcp_factory.DHCPFactory._dhcp_provider = None
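
    # clean_up should remove the per-node TFTP and instance image files, the
    # PXE configs and the master copies created in setUp (image_cache_size=0
    # means master images are not kept around).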
@mock.patch('ironic.common.dhcp_factory.DHCPFactory._set_dhcp_provider')
@mock.patch('ironic.common.dhcp_factory.DHCPFactory.clean_dhcp')
@mock.patch.object(pxe, '_get_instance_image_info', autospec=True)
@mock.patch.object(pxe, '_get_deploy_image_info', autospec=True)
def test_clean_up_with_master(self, mock_get_deploy_image_info,
mock_get_instance_image_info,
clean_dhcp_mock, set_dhcp_provider_mock):
image_info = {'kernel': ('kernel_uuid',
self.kernel_path)}
mock_get_instance_image_info.return_value = image_info
mock_get_deploy_image_info.return_value = {}
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
task.driver.deploy.clean_up(task)
mock_get_instance_image_info.assert_called_with(task.node,
task.context)
mock_get_deploy_image_info.assert_called_with(task.node)
set_dhcp_provider_mock.assert_called_once_with()
clean_dhcp_mock.assert_called_once_with(task)
for path in ([self.kernel_path, self.image_path, self.config_path]
+ self.files):
self.assertFalse(os.path.exists(path),
'%s is not expected to exist' % path)