"""
Shared Folder Rules:
* All Permissions are handled at the folder level
* Listing Bookmarks and Folder Bookmarks inherit Permissions from the folder they are part of.
* Folder Bookmarks are the only Bookmark Entries that have Bookmark Permissions
* Root Folders can never be public; users will be able to share any folder
* Every Record has a BookmarkPermission
Shared Folder Actions:
* Displaying all nested bookmarks for a user
* Adding a new listing bookmark under a user's root folder (level 1)
* Adding a new listing bookmark under a user's existing folder (level 2)
* Deleting a Listing Bookmark from a user's bookmarks
* Moving a bookmark between two different folder bookmarks
Shared Folder Permission Actions:
* Getting a list of owners and viewers of a shared folder bookmark (as an owner)
* Getting denied access to see owners and viewers of a shared folder bookmark (as a viewer)
* An Owner adding a new listing bookmark to a shared folder bookmark
* A Viewer adding a new listing bookmark to a shared folder bookmark and getting denied
https://aml-development.github.io/ozp-backend/features/shared_folders_2018/
---------
query = models.BookmarkEntry.objects.filter(
id=id,
        bookmark_permission__profile=request_profile  # Validate to make sure user can see folder
).first()
SELECT *
FROM "ozpcenter_bookmarkentry"
INNER JOIN "ozpcenter_bookmarkpermission" ON ("ozpcenter_bookmarkentry"."id" = "ozpcenter_bookmarkpermission"."bookmark_id")
WHERE ("ozpcenter_bookmarkentry"."id" = 21 AND "ozpcenter_bookmarkpermission"."profile_id" = 1)
ORDER BY "ozpcenter_bookmarkentry"."id" ASC LIMIT 1
request_profile = Profile.objects.first()
import ozpcenter.api.bookmark.model_access as model_access
root_folder = model_access.create_get_user_root_bookmark_folder(request_profile)
[b for b in BookmarkEntry.objects.filter(bookmark_parent__bookmark_permission__profile=request_profile, bookmark_parent=root_folder)]
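Illustrative shell sketch of the folder actions above, using functions from this module
(the Profile/Listing lookups are placeholders, not project fixtures):
request_profile = Profile.objects.first()
listing = Listing.objects.first()
import ozpcenter.api.bookmark.model_access as model_access
root_folder = model_access.create_get_user_root_bookmark_folder(request_profile)
animals_folder = model_access.create_folder_bookmark_for_profile(request_profile, 'Animals')
model_access.create_listing_bookmark_for_profile(request_profile, listing, bookmark_entry_folder=animals_folder)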
"""
import copy
import ozpcenter.models as models
from ozpcenter.pubsub import dispatcher
from ozpcenter import errors
from ozpcenter.pipe import pipes
from ozpcenter.pipe import pipeline
from ozpcenter.recommend import recommend_utils
FOLDER_TYPE = models.BookmarkEntry.FOLDER
LISTING_TYPE = models.BookmarkEntry.LISTING
BOOKMARK_TYPE_CHOICES = [choice[0] for choice in models.BookmarkEntry.TYPE_CHOICES]
FOLDER_QUERY_ORDER_MAPPING = {
'created_date': 'created_date',
'-created_date': '-created_date',
'title': 'title',
'-title': '-title'
}
LISTING_QUERY_ORDER_MAPPING = copy.deepcopy(FOLDER_QUERY_ORDER_MAPPING)
LISTING_QUERY_ORDER_MAPPING['title'] = 'listing__title'
LISTING_QUERY_ORDER_MAPPING['-title'] = '-listing__title'
def check_permission_for_bookmark_entry(request_profile, bookmark_entry, allow_owner=True, allow_viewer=False):
"""
    Check Permission for bookmark entry
    OWNERs can view/add/edit/delete BookmarkEntry and BookmarkPermission models
    VIEWERs may only view, and only when allow_viewer is True
"""
bookmark_entry_type = bookmark_entry.type
profile_bookmark_permission = None
if bookmark_entry_type == FOLDER_TYPE:
profile_bookmark_permission = models.BookmarkPermission.objects.filter(
profile=request_profile,
bookmark=bookmark_entry)
elif bookmark_entry_type == LISTING_TYPE:
profile_bookmark_permission = models.BookmarkPermission.objects.filter(
profile=request_profile,
bookmark__bookmarkentry=bookmark_entry)
# Convert query to BookmarkPermission object
if profile_bookmark_permission:
profile_bookmark_permission = profile_bookmark_permission.first()
if not profile_bookmark_permission:
raise errors.PermissionDenied('request profile does not have permission to view permissions')
elif profile_bookmark_permission.user_type == models.BookmarkPermission.VIEWER:
if allow_viewer is False:
raise errors.PermissionDenied('request profile does not have permission to view permissions because of user_type')
elif profile_bookmark_permission.user_type == models.BookmarkPermission.OWNER:
if allow_owner is False:
raise errors.PermissionDenied('request profile does not have permission to view permissions because of user_type')
else:
raise errors.PermissionDenied('request profile does not have permission')
return profile_bookmark_permission
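# Minimal usage sketch of the check above (profile/bookmark objects are placeholders):
# owners pass with the defaults, viewers only pass when allow_viewer=True, and a profile
# with no BookmarkPermission record raises errors.PermissionDenied.
#
#   check_permission_for_bookmark_entry(request_profile, folder_entry)                     # owner only
#   check_permission_for_bookmark_entry(request_profile, folder_entry, allow_viewer=True)  # owner or viewer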
def _validate_create_bookmark_entry(request_profile=None, entry_type=None, folder_title=None, listing=None, is_root=None):
"""
Validate values for creating bookmark entries
"""
is_folder_type = (entry_type == FOLDER_TYPE)
is_listing_type = (entry_type == LISTING_TYPE)
is_root = is_root if is_root is True else False
try:
assert request_profile is not None, 'To create bookmark entry, profile is required'
assert entry_type in BOOKMARK_TYPE_CHOICES, 'Entry Type needs to be one of the following: {}'.format(BOOKMARK_TYPE_CHOICES)
if is_folder_type:
            if is_root is False and folder_title is None:
                raise AssertionError('Bookmark {} Entry requires folder_title and is_root kwargs'.format(FOLDER_TYPE))
        elif is_listing_type and listing is None:
            raise AssertionError('Bookmark {} Entry requires a listing object'.format(LISTING_TYPE))
except AssertionError as err:
raise errors.PermissionDenied(err)
def create_bookmark_entry(request_profile=None, entry_type=None, folder_title=None, listing=None, is_root=None):
"""
Create BookmarkEntry
Args:
request_profile
entry_type
folder_title
listing
is_root
"""
is_folder_type = (entry_type == FOLDER_TYPE)
is_listing_type = (entry_type == LISTING_TYPE)
is_root = is_root if is_root is True else False
_validate_create_bookmark_entry(request_profile, entry_type, folder_title, listing, is_root)
# Add Bookmark Entry
bookmark_entry = models.BookmarkEntry()
bookmark_entry.type = entry_type
bookmark_entry.is_root = False
if is_folder_type:
if is_root:
bookmark_entry.is_root = True
bookmark_entry.title = 'ROOT'
else:
bookmark_entry.title = folder_title
elif is_listing_type:
bookmark_entry.listing = listing
bookmark_entry.creator_profile = request_profile
bookmark_entry.save()
return bookmark_entry
def get_bookmark_entry_by_id(request_profile, id):
"""
Get bookmark entry by id and filter based on permissions
Only owner or viewer of folder should be able to see it
"""
# Validate to make sure user can see folder - bookmark_permission__profile
query = models.BookmarkEntry.objects.filter(
id=id,
bookmark_parent__bookmark_permission__profile=request_profile
).first()
if query is None:
raise errors.PermissionDenied('Can not view bookmarks')
return query
def get_user_permissions_for_bookmark_entry(request_profile, bookmark_entry):
"""
    Get permissions for bookmark_entry
    Access Control
    Only Owners should be able to view all the permissions of a bookmark_entry
    Only Owners should be able to edit permissions of a bookmark_entry
    Access control is enforced by check_permission_for_bookmark_entry
"""
check_permission_for_bookmark_entry(request_profile, bookmark_entry)
if bookmark_entry.type != FOLDER_TYPE:
raise errors.PermissionDenied('Can only check permissions for folder bookmarks')
query = models.BookmarkPermission.objects.filter(bookmark=bookmark_entry)
return query
def get_bookmark_permission_by_id(request_profile, bookmark_entry, id):
"""
    Get a bookmark permission record by id, filtered based on permissions
    Only owners of the folder should be able to see it
"""
query = get_user_permissions_for_bookmark_entry(request_profile, bookmark_entry)
return query.get(id=id)
def add_profile_permission_for_bookmark_entry(request_profile, bookmark_entry_folder_to_share, target_profile, target_bookmark, target_user_type=None, share=False):
"""
    Add Profile Permission to Bookmark Entry
    Args:
        request_profile:
            Profile performing the action (view, add, edit, delete)
        bookmark_entry_folder_to_share:
            The folder `request_profile` is trying to share/add permission to
        target_profile:
            The profile the `bookmark_entry_folder_to_share` should go to
        target_bookmark:
            The bookmark entry being added under `bookmark_entry_folder_to_share`
        target_user_type:
            Permission level to grant; defaults to BookmarkPermission.VIEWER
        share:
            If True, grant permission on `bookmark_entry_folder_to_share` itself rather than on `target_bookmark`
    Steps:
        Check that `request_profile` has owner permission on the folder `bookmark_entry_folder_to_share` being shared
"""
check_permission_for_bookmark_entry(request_profile, bookmark_entry_folder_to_share)
target_user_type = target_user_type if target_user_type else models.BookmarkPermission.VIEWER
# Add Bookmark entry to bookmark_entry_folder_to_share folder, add behaves like a set
target_bookmark.bookmark_parent.add(bookmark_entry_folder_to_share)
if target_bookmark.type == FOLDER_TYPE:
bookmark_permission_folder = models.BookmarkPermission()
if share:
# If sharing `bookmark_entry_folder_to_share` to a different user
# it should add permission to `bookmark_entry_folder_to_share` so that `target_profile`
# has access to it
bookmark_permission_folder.bookmark = bookmark_entry_folder_to_share
else:
bookmark_permission_folder.bookmark = target_bookmark
bookmark_permission_folder.profile = target_profile
bookmark_permission_folder.user_type = target_user_type
bookmark_permission_folder.save()
    elif target_bookmark.type == LISTING_TYPE:
# LISTINGs inherit folder permissions
pass
def share_bookmark_entry(request_profile, bookmark_entry_folder_to_share, target_profile, target_profile_bookmark_entry=None, target_user_type=None):
"""
    Share a folder Bookmark Entry with another profile
    Args:
        request_profile:
            Profile performing the action (view, add, edit, delete)
        bookmark_entry_folder_to_share:
            The folder `request_profile` is trying to share
        target_profile:
            The profile the `bookmark_entry_folder_to_share` should go to
        target_profile_bookmark_entry:
            The folder under `target_profile`'s bookmarks where the shared folder is mounted;
            defaults to the target profile's root folder
        target_user_type:
            Permission level to grant; defaults to BookmarkPermission.VIEWER
    Steps:
        Check that `request_profile` has owner permission on the folder `bookmark_entry_folder_to_share` being shared
"""
check_permission_for_bookmark_entry(request_profile, bookmark_entry_folder_to_share)
target_user_type = target_user_type if target_user_type else models.BookmarkPermission.VIEWER
if bookmark_entry_folder_to_share.type != FOLDER_TYPE:
raise errors.PermissionDenied('bookmark_entry needs to be a folder type')
# Check if target profile already has permissions for BookmarkEntry
existing_target_profile_permission = models.BookmarkPermission.objects.filter(
bookmark=bookmark_entry_folder_to_share,
profile=target_profile
).first()
if existing_target_profile_permission:
raise errors.PermissionDenied('target user already has access to folder')
# Add Bookmark entry to bookmark_entry_folder_to_share folder, add behaves like a set
# target_profile_bookmark_entry.bookmark_parent.add(bookmark_entry_folder_to_share)
if not target_profile_bookmark_entry:
target_profile_bookmark_entry = create_get_user_root_bookmark_folder(target_profile)
bookmark_entry_folder_to_share.bookmark_parent.add(target_profile_bookmark_entry)
if bookmark_entry_folder_to_share.type == FOLDER_TYPE:
bookmark_permission_folder = models.BookmarkPermission()
bookmark_permission_folder.bookmark = bookmark_entry_folder_to_share
bookmark_permission_folder.profile = target_profile
bookmark_permission_folder.user_type = target_user_type
bookmark_permission_folder.save()
return bookmark_permission_folder
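# Sketch of sharing a folder with another profile (placeholder objects): the owner shares
# the 'Animals' folder, which is mounted under the target profile's root folder by default
# and granted VIEWER access unless target_user_type says otherwise.
#
#   animals_folder = create_folder_bookmark_for_profile(owner_profile, 'Animals')
#   share_bookmark_entry(owner_profile, animals_folder, other_profile,
#                        target_user_type=models.BookmarkPermission.VIEWER)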
def update_profile_permission_for_bookmark_entry(request_profile, bookmark_permission_entry, user_type):
"""
Update Profile Permission for Bookmark Entry
Assumes all the permission checks happen before this is called for bookmark_permission_entry
"""
bookmark_permission_entry_profile = bookmark_permission_entry.profile
if request_profile == bookmark_permission_entry_profile:
raise errors.PermissionDenied('can only update permissions for other users')
bookmark_permission_entry.user_type = user_type
bookmark_permission_entry.save()
return bookmark_permission_entry
def remove_profile_permission_for_bookmark_entry(request_profile, target_profile, target_bookmark):
"""
Remove profile from bookmark
"""
check_permission_for_bookmark_entry(request_profile, target_bookmark)
    profile_bookmark_permission = models.BookmarkPermission.objects.filter(profile=target_profile, bookmark=target_bookmark).first()
    # Remove the target profile's permission record for this bookmark
    if profile_bookmark_permission:
        profile_bookmark_permission.delete()
# if only owner:
# target_bookmark.delete()
# # TODO: Does this delete all re
def create_get_user_root_bookmark_folder(request_profile):
"""
Create or Get user's root folder
profile = Profile.objects.first()
"""
# Get Bookmark Entry where it is root folder and joins bookmark_permission using Bookmark Entry's id and user_type is OWNER
bookmark_entry_query = models.BookmarkEntry.objects.filter(
bookmark_permission__profile=request_profile,
bookmark_permission__user_type='OWNER', # TODO: should index by user_type?
is_root=True)
bookmark_entry_query_count = bookmark_entry_query.count()
if bookmark_entry_query_count:
if bookmark_entry_query_count >= 2:
print('A USER SHOULD NOT HAVE MORE THAN ONE ROOT FOLDER')
return bookmark_entry_query.first()
else:
bookmark_entry = create_bookmark_entry(request_profile, FOLDER_TYPE, folder_title='ROOT', is_root=True)
bookmark_permission = models.BookmarkPermission()
bookmark_permission.bookmark = bookmark_entry
bookmark_permission.profile = request_profile
bookmark_permission.user_type = models.BookmarkPermission.OWNER
bookmark_permission.save()
return bookmark_entry
def create_folder_bookmark_for_profile(request_profile, folder_name, bookmark_entry_folder=None, bookmark_children=None, listing_object=None):
"""
Create Folder Bookmark for profile
Args:
        request_profile (models.Profile): Profile that will own the new folder
        folder_name (String): Folder name
        bookmark_entry_folder (models.BookmarkEntry): Folder to create the new folder under; defaults to the profile's root folder
        bookmark_children (List of Integers): ids of existing bookmarks to move into the new folder
        listing_object (models.Listing): optional listing to bookmark inside the new folder
"""
bookmark_entry_folder = bookmark_entry_folder if bookmark_entry_folder else create_get_user_root_bookmark_folder(request_profile)
bookmark_children = bookmark_children if bookmark_children else []
if bookmark_entry_folder.type != FOLDER_TYPE:
raise errors.PermissionDenied('bookmark_entry needs to be a folder type')
# Only owners of bookmark_entry_folder should be able to add to folder
check_permission_for_bookmark_entry(request_profile, bookmark_entry_folder)
bookmark_folder_entry = create_bookmark_entry(request_profile, FOLDER_TYPE, folder_title=folder_name, is_root=False)
# Add Permission so that user can see folder and make them owner
add_profile_permission_for_bookmark_entry(
request_profile,
bookmark_entry_folder,
request_profile,
bookmark_folder_entry,
target_user_type=models.BookmarkPermission.OWNER
)
    # Move each existing bookmark referenced in bookmark_children (by id) into the new folder
for bookmark_child in bookmark_children:
update_bookmark_entry_for_profile(
request_profile,
get_bookmark_entry_by_id(request_profile, bookmark_child),
bookmark_folder_entry,
None)
if listing_object:
create_listing_bookmark_for_profile(request_profile, listing_object, bookmark_folder_entry)
return bookmark_folder_entry
def create_listing_bookmark_for_profile(request_profile, listing, bookmark_entry_folder=None):
"""
Create Listing Bookmark for profile
"""
bookmark_entry_folder = bookmark_entry_folder if bookmark_entry_folder else create_get_user_root_bookmark_folder(request_profile)
if bookmark_entry_folder.type != FOLDER_TYPE:
raise errors.PermissionDenied('bookmark_entry needs to be a folder type')
# Only owners of bookmark_entry_folder should be able to add to folder
check_permission_for_bookmark_entry(request_profile, bookmark_entry_folder)
bookmark_entry = create_bookmark_entry(request_profile, LISTING_TYPE, listing=listing)
# Add Permission so that user can see folder and make them owner
add_profile_permission_for_bookmark_entry(
request_profile,
bookmark_entry_folder,
request_profile,
bookmark_entry,
target_user_type=models.BookmarkPermission.OWNER
)
return bookmark_entry
def update_bookmark_entry_for_profile(request_profile, bookmark_entry_instance, bookmark_parent_object, title):
"""
Update Bookmark Entries
Method Responsibilities:
* Rename folder bookmark titles
* Moving folder/listing bookmarks under different folders
Args:
request_profile
bookmark_entry_instance
bookmark_parent_object:
            `BookmarkEntry` folder instance that request_profile wants to move bookmark_entry_instance into
title
"""
if bookmark_parent_object is None and title is None:
raise errors.PermissionDenied('Need at least the bookmark_parent or title field')
# Check to see if request profile has access to bookmark_entry_instance
# (bookmark_entry_instance can be folder or listing bookmark)
check_permission_for_bookmark_entry(request_profile, bookmark_entry_instance)
folder_title_changed = False
bookmark_entry_moved = False
if bookmark_parent_object:
if bookmark_parent_object.type != FOLDER_TYPE:
raise errors.PermissionDenied('bookmark_parent_object needs to be a folder type')
# make sure user has owner access on bookmark_parent_object
check_permission_for_bookmark_entry(request_profile, bookmark_parent_object)
# get bookmark entries to folder relationships for request_profile
bookmark_entry_folder_relationships = bookmark_entry_instance.bookmark_parent.filter(
bookmark_permission__profile=request_profile)
bookmark_entry_instance.bookmark_parent.remove(*bookmark_entry_folder_relationships)
bookmark_entry_instance.bookmark_parent.add(bookmark_parent_object)
bookmark_entry_moved = True
if bookmark_entry_instance.type == FOLDER_TYPE:
if title:
bookmark_entry_instance.title = title
folder_title_changed = True
bookmark_entry_instance.save()
if bookmark_entry_moved or folder_title_changed:
dispatcher.publish('update_bookmark_entry',
bookmark_entry_instance=bookmark_entry_instance,
bookmark_parent_object=bookmark_parent_object,
folder_title_changed=folder_title_changed,
bookmark_entry_moved=bookmark_entry_moved)
return bookmark_entry_instance
def build_copy_tree_recursive(request_profile, bookmark_entry_instance):
"""
    Recursively build a tree of folder titles and listings to copy
Args:
request_profile
bookmark_entry_instance
Returns:
        {'bookmark_folder_name': 'Animals',
'bookmark_listing': None,
'folders': [],
'listings': [{'bookmark_listing': (killer_whale-['bigbrother'])},
{'bookmark_listing': (lion_finder-['bigbrother'])},
{'bookmark_listing': (monkey_finder-['bigbrother'])},
{'bookmark_listing': (parrotlet-['jones'])},
{'bookmark_listing': (white_horse-['bigbrother'])},
{'bookmark_listing': (wolf_finder-['bigbrother'])}]}
"""
output = {
'bookmark_folder_name': None,
'bookmark_listing': None,
'folders': [],
'listings': [],
}
if bookmark_entry_instance.type == FOLDER_TYPE:
folder_name = bookmark_entry_instance.title
if not bookmark_entry_instance.is_root:
folder_name = '{}(COPY)'.format(folder_name)
output['bookmark_folder_name'] = folder_name
folder_query = models.BookmarkEntry.manager.filter(
type=FOLDER_TYPE,
bookmark_parent__bookmark_permission__profile=request_profile,
bookmark_parent=bookmark_entry_instance
)
listing_query = models.BookmarkEntry.manager.filter(
type=LISTING_TYPE,
bookmark_parent__bookmark_permission__profile=request_profile,
bookmark_parent=bookmark_entry_instance
)
for current_bookmark_folder in folder_query:
output['folders'].append(build_copy_tree_recursive(request_profile, current_bookmark_folder))
for current_bookmark_listing in listing_query:
current_bookmark_listing = {
'bookmark_listing': current_bookmark_listing.listing,
}
output['listings'].append(current_bookmark_listing)
elif bookmark_entry_instance.type == LISTING_TYPE:
output['bookmark_listing'] = bookmark_entry_instance.listing
return output
def copy_tree_recursive(request_profile, copy_tree_dict, bookmark_folder_instance):
"""
Create Copy recursively
Args:
request_profile:
copy_tree_dict: takes in dictionary structure created by build_copy_tree_recursive method
bookmark_folder_instance
Returns:
{'bookmark_folder_instance': BookmarkEntry(79, bookmark_parent:['ROOT'],title:Animals,type:FOLDER,is_root:False,listing:None),
'bookmark_listing_instance': None,
'folder_instances': [],
'listing_instances': [
BookmarkEntry(80, bookmark_parent:['Animals'],title:,type:LISTING,is_root:False,listing:(killer_whale-['bigbrother'])),
BookmarkEntry(81, bookmark_parent:['Animals'],title:,type:LISTING,is_root:False,listing:(lion_finder-['bigbrother'])),
BookmarkEntry(82, bookmark_parent:['Animals'],title:,type:LISTING,is_root:False,listing:(monkey_finder-['bigbrother'])),
BookmarkEntry(83, bookmark_parent:['Animals'],title:,type:LISTING,is_root:False,listing:(parrotlet-['jones'])),
BookmarkEntry(84, bookmark_parent:['Animals'],title:,type:LISTING,is_root:False,listing:(white_horse-['bigbrother'])),
BookmarkEntry(85, bookmark_parent:['Animals'],title:,type:LISTING,is_root:False,listing:(wolf_finder-['bigbrother']))]}
"""
bookmark_folder_instance = bookmark_folder_instance if bookmark_folder_instance else create_get_user_root_bookmark_folder(request_profile)
output = {
'bookmark_folder_instance': None,
'bookmark_listing_instance': None,
'folder_instances': [],
'listing_instances': [],
}
bookmark_listing = copy_tree_dict['bookmark_listing']
if bookmark_listing:
output['bookmark_listing_instance'] = create_listing_bookmark_for_profile(
request_profile,
bookmark_listing,
bookmark_entry_folder=bookmark_folder_instance
)
bookmark_folder_copied_instance = bookmark_folder_instance
bookmark_folder_name = copy_tree_dict['bookmark_folder_name']
if bookmark_folder_name:
bookmark_folder_copied_instance = create_folder_bookmark_for_profile(
request_profile,
bookmark_folder_name,
bookmark_entry_folder=bookmark_folder_instance
)
output['bookmark_folder_instance'] = bookmark_folder_copied_instance
folders = copy_tree_dict['folders']
for current_copy_tree_dict in folders:
internal_copy_tree = copy_tree_recursive(request_profile, current_copy_tree_dict, bookmark_folder_copied_instance)
output['folder_instances'].append(internal_copy_tree)
listings = copy_tree_dict['listings']
for current_listing_record in listings:
current_bookmark_listing = current_listing_record['bookmark_listing']
output['listing_instances'].append(create_listing_bookmark_for_profile(
request_profile,
current_bookmark_listing,
bookmark_entry_folder=bookmark_folder_copied_instance
))
return output
def duplicate_bookmark_entry_for_profile(request_profile, bookmark_entry_to_copy, dest_bookmark_folder=None):
"""
Duplicate Bookmark Entry for profile
"""
check_permission_for_bookmark_entry(request_profile, bookmark_entry_to_copy, allow_viewer=True)
copy_tree_dict = build_copy_tree_recursive(request_profile, bookmark_entry_to_copy)
dest_bookmark_folder = dest_bookmark_folder if dest_bookmark_folder else create_get_user_root_bookmark_folder(request_profile)
copy_tree = copy_tree_recursive(request_profile, copy_tree_dict, dest_bookmark_folder)
bookmark_folder_instance = copy_tree.get('bookmark_folder_instance')
bookmark_listing_instance = copy_tree.get('bookmark_listing_instance')
if bookmark_folder_instance:
return bookmark_folder_instance
elif bookmark_listing_instance:
return bookmark_listing_instance
else:
return dest_bookmark_folder
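# Sketch of copying a shared folder into your own bookmarks (placeholder objects): viewers
# are allowed here (allow_viewer=True above), and the copy lands under the requesting
# profile's root folder unless dest_bookmark_folder is given.
#
#   copied_folder = duplicate_bookmark_entry_for_profile(viewer_profile, shared_folder)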
def build_folder_queries_recursive_flatten(request_profile, bookmark_entry_instance, is_start=True, expand=False):
"""
    Recursively build queries for all folders and listings under a folder bookmark_entry
shell usage:
from ozpcenter.api.bookmark.model_access import build_folder_queries_recursive_flatten
b = BookmarkEntry.objects.first()
p = BookmarkEntry.objects.first().bookmark_permission.first().profile
build_folder_queries_recursive_flatten(p, b)
Args:
request_profile
bookmark_entry_instance
is_start
Returns:
[
listing_query(Root),
listing_query(Folder2),
BookmarkEntry(Folder2),
listing_query(Folder1),
BookmarkEntry(Folder1),
BookmarkEntry(Root)
]
"""
output = []
folder_query = models.BookmarkEntry.manager.filter(type=FOLDER_TYPE)
listing_query = models.BookmarkEntry.manager.filter(type=LISTING_TYPE)
if is_start:
folder_query = folder_query.filter(bookmark_parent__bookmark_permission__profile=request_profile) # Validate to make sure user can see folder
listing_query = listing_query.filter(bookmark_parent__bookmark_permission__profile=request_profile) # Validate to make sure user can see folder,
folder_query = folder_query.filter(bookmark_parent=bookmark_entry_instance)
listing_query = listing_query.filter(bookmark_parent=bookmark_entry_instance)
if expand is True:
for current_listing in listing_query:
output.append(current_listing)
else:
output.append(listing_query)
for folder_entry in folder_query:
output.extend(build_folder_queries_recursive_flatten(request_profile, folder_entry, is_start=False))
output.append(bookmark_entry_instance)
return output
def delete_bookmark_entry_for_profile(request_profile, bookmark_entry_instance):
"""
Delete Bookmark Entry for profile
    Validates that the requesting user has access
Deleting a BookmarkEntry that is a folder will have this behavior
BookmarkEntry.manager.get(id=5).delete()
DELETE FROM "bookmark_parents" WHERE "bookmark_parents"."from_bookmarkentry_id" IN (5)
DELETE FROM "bookmark_parents" WHERE "bookmark_parents"."to_bookmarkentry_id" IN (5)
DELETE FROM "ozpcenter_bookmarkpermission" WHERE "ozpcenter_bookmarkpermission"."bookmark_id" IN (5)
DELETE FROM "ozpcenter_bookmarkentry" WHERE "ozpcenter_bookmarkentry"."id" IN (5)
(5,
{'ozpcenter.BookmarkEntry': 1,
'ozpcenter.BookmarkEntry_bookmark_parent': 3,
'ozpcenter.BookmarkPermission': 1})
"""
check_permission_for_bookmark_entry(request_profile, bookmark_entry_instance)
if bookmark_entry_instance.type == FOLDER_TYPE:
# root_folder = create_get_user_root_bookmark_folder(request_profile)
folder_queries = build_folder_queries_recursive_flatten(request_profile, bookmark_entry_instance, is_start=True)
dispatcher.publish('remove_bookmark_folder', bookmark_entry=bookmark_entry_instance, folder_queries=folder_queries)
for current_query in folder_queries:
current_query.delete()
elif bookmark_entry_instance.type == LISTING_TYPE:
# If bookmark_entry_instance type is LISTING_TYPE then just delete
bookmark_entry_instance.delete()
def _create_folder_listing_queries_and_serialized(request_profile, folder_bookmark_entry, is_parent=None, ordering_fields=None, serializer_class=None, request=None):
"""
Create queries to get folder and listing data
Args:
request_profile:
used to filter profile bookmarks
request:
used for serializer to create urls
folder_bookmark_entry:
is_parent:
ordering_fields:
"""
is_parent = is_parent if is_parent else False
ordering_fields = ordering_fields if ordering_fields else ['created_date']
folder_query = models.BookmarkEntry.objects.filter(
type=FOLDER_TYPE,
bookmark_parent__bookmark_permission__profile=request_profile # Validate to make sure user can see folder
)
# for_profile should do `private listing` check for listing
listing_query = models.BookmarkEntry.objects.for_profile_minus_security_marking(request_profile).filter(
type=LISTING_TYPE,
bookmark_parent__bookmark_permission__profile=request_profile # Validate to make sure user can see folder,
)
folder_order_fields = []
listing_order_fields = []
for ordering_field in ordering_fields:
if ordering_field in FOLDER_QUERY_ORDER_MAPPING:
folder_order_fields.append(FOLDER_QUERY_ORDER_MAPPING[ordering_field])
if ordering_field in LISTING_QUERY_ORDER_MAPPING:
listing_order_fields.append(LISTING_QUERY_ORDER_MAPPING[ordering_field])
folder_query = folder_query.order_by(*folder_order_fields)
listing_query = listing_query.order_by(*listing_order_fields)
if is_parent:
folder_query = folder_query.filter(id=folder_bookmark_entry.id)
listing_query = listing_query.filter(id=folder_bookmark_entry.id)
else:
folder_query = folder_query.filter(bookmark_parent=folder_bookmark_entry)
listing_query = listing_query.filter(bookmark_parent=folder_bookmark_entry)
folder_data = serializer_class(folder_query, many=True, context={'request': request}).data
listing_data = serializer_class(listing_query, many=True, context={'request': request}).data
# Filter out all listings that request_profile does not have access to
listing_data_after_filter = pipeline.Pipeline(recommend_utils.ListIterator(listing_data),
[pipes.BookmarkListingDictPostSecurityMarkingCheckPipe(request_profile.user.username)]).to_list()
return {"folder_data": folder_data,
"listing_data_after_filter": listing_data_after_filter}
def get_bookmark_tree(request_profile, folder_bookmark_entry=None, is_parent=None, serializer_class=None, request=None, ordering_fields=None, is_shared=None):
"""
    Helper function to get all nested bookmarks
Args:
request_profile
request
folder_bookmark_entry
is_parent
"""
folder_bookmark_entry = folder_bookmark_entry if folder_bookmark_entry else create_get_user_root_bookmark_folder(request_profile)
is_parent = is_parent if is_parent is not None else False
bookmarks_dict = _create_folder_listing_queries_and_serialized(
request_profile,
folder_bookmark_entry,
is_parent,
serializer_class=serializer_class,
request=request,
ordering_fields=ordering_fields,
)
nested_folder_tree = _get_nested_bookmarks_tree(
request_profile,
bookmarks_dict['folder_data'],
serializer_class=serializer_class,
request=request,
ordering_fields=ordering_fields,
is_shared=is_shared
)
return {
"folders": nested_folder_tree,
# "folders": bookmarks_dict['folder_data'],
"listings": bookmarks_dict['listing_data_after_filter'] if (is_shared is False or is_shared is None) else []
}
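# Sketch of how a view might render the nested tree (the serializer import below is an
# assumption about this project's layout, not a confirmed module path):
#
#   from ozpcenter.api.bookmark import serializers as bookmark_serializers  # assumed path
#   tree = get_bookmark_tree(request_profile,
#                            serializer_class=bookmark_serializers.BookmarkSerializer,  # assumed name
#                            request=request)
#   tree['folders']   # nested folder dicts, each carrying a 'children' key
#   tree['listings']  # listing bookmarks directly under the root folder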
def _get_nested_bookmarks_tree(request_profile, data, serializer_class=None, request=None, ordering_fields=None, is_shared=None):
"""
    Recursive function to get all the nested folders and listings
Args:
data: a list of serialized data in python objects using serializer_class
"""
output = []
for current_record in data:
append_record = False
if current_record.get('type') == 'FOLDER':
current_record_is_shared = current_record.get('is_shared')
if is_shared is current_record_is_shared and is_shared is True:
append_record = True
elif is_shared is current_record_is_shared and is_shared is False:
append_record = True
elif is_shared is None:
append_record = True
if append_record is True:
bookmarks_dict = _create_folder_listing_queries_and_serialized(
request_profile,
current_record['id'],
serializer_class=serializer_class,
request=request,
ordering_fields=ordering_fields,
)
nested_folder_tree = _get_nested_bookmarks_tree(
request_profile,
bookmarks_dict['folder_data'],
serializer_class=serializer_class,
request=request,
ordering_fields=ordering_fields,
is_shared=is_shared
)
current_record['children'] = {
"folders": nested_folder_tree,
"listings": bookmarks_dict['listing_data_after_filter']
}
else:
append_record = True
if append_record is True:
output.append(current_record)
return output
"""Support for Sensibo wifi-enabled home thermostats."""
import asyncio
import logging
import aiohttp
import async_timeout
import pysensibo
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
ATTR_TEMPERATURE,
CONF_API_KEY,
CONF_ID,
STATE_ON,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util.temperature import convert as convert_temperature
from .const import DOMAIN as SENSIBO_DOMAIN
_LOGGER = logging.getLogger(__name__)
ALL = ["all"]
TIMEOUT = 8
SERVICE_ASSUME_STATE = "assume_state"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_ID, default=ALL): vol.All(cv.ensure_list, [cv.string]),
}
)
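# Example configuration.yaml entry accepted by the schema above (a sketch; the api_key is a
# placeholder and the optional id list limits setup to specific Sensibo device ids):
#
#   climate:
#     - platform: sensibo
#       api_key: YOUR_SENSIBO_API_KEY
#       id:
#         - abc12345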
ASSUME_STATE_SCHEMA = vol.Schema(
{vol.Optional(ATTR_ENTITY_ID): cv.entity_ids, vol.Required(ATTR_STATE): cv.string}
)
_FETCH_FIELDS = ",".join(
[
"room{name}",
"measurements",
"remoteCapabilities",
"acState",
"connectionStatus{isAlive}",
"temperatureUnit",
]
)
_INITIAL_FETCH_FIELDS = f"id,{_FETCH_FIELDS}"
FIELD_TO_FLAG = {
"fanLevel": SUPPORT_FAN_MODE,
"swing": SUPPORT_SWING_MODE,
"targetTemperature": SUPPORT_TARGET_TEMPERATURE,
}
SENSIBO_TO_HA = {
"cool": HVAC_MODE_COOL,
"heat": HVAC_MODE_HEAT,
"fan": HVAC_MODE_FAN_ONLY,
"auto": HVAC_MODE_HEAT_COOL,
"dry": HVAC_MODE_DRY,
}
HA_TO_SENSIBO = {value: key for key, value in SENSIBO_TO_HA.items()}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up Sensibo devices."""
client = pysensibo.SensiboClient(
config[CONF_API_KEY], session=async_get_clientsession(hass), timeout=TIMEOUT
)
devices = []
try:
with async_timeout.timeout(TIMEOUT):
for dev in await client.async_get_devices(_INITIAL_FETCH_FIELDS):
if config[CONF_ID] == ALL or dev["id"] in config[CONF_ID]:
devices.append(
SensiboClimate(client, dev, hass.config.units.temperature_unit)
)
except (
aiohttp.client_exceptions.ClientConnectorError,
asyncio.TimeoutError,
pysensibo.SensiboError,
) as err:
_LOGGER.error("Failed to get devices from Sensibo servers")
raise PlatformNotReady from err
if not devices:
return
async_add_entities(devices)
async def async_assume_state(service):
"""Set state according to external service call.."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
target_climate = [
device for device in devices if device.entity_id in entity_ids
]
else:
target_climate = devices
update_tasks = []
for climate in target_climate:
await climate.async_assume_state(service.data.get(ATTR_STATE))
update_tasks.append(climate.async_update_ha_state(True))
if update_tasks:
await asyncio.wait(update_tasks)
hass.services.async_register(
SENSIBO_DOMAIN,
SERVICE_ASSUME_STATE,
async_assume_state,
schema=ASSUME_STATE_SCHEMA,
)
class SensiboClimate(ClimateEntity):
"""Representation of a Sensibo device."""
def __init__(self, client, data, units):
"""Build SensiboClimate.
        client: SensiboClient instance.
data: initially-fetched data.
"""
self._client = client
self._id = data["id"]
self._external_state = None
self._units = units
self._available = False
self._do_update(data)
self._failed_update = False
@property
def supported_features(self):
"""Return the list of supported features."""
return self._supported_features
def _do_update(self, data):
self._name = data["room"]["name"]
self._measurements = data["measurements"]
self._ac_states = data["acState"]
self._available = data["connectionStatus"]["isAlive"]
capabilities = data["remoteCapabilities"]
self._operations = [SENSIBO_TO_HA[mode] for mode in capabilities["modes"]]
self._operations.append(HVAC_MODE_OFF)
self._current_capabilities = capabilities["modes"][self._ac_states["mode"]]
temperature_unit_key = data.get("temperatureUnit") or self._ac_states.get(
"temperatureUnit"
)
if temperature_unit_key:
self._temperature_unit = (
TEMP_CELSIUS if temperature_unit_key == "C" else TEMP_FAHRENHEIT
)
self._temperatures_list = (
self._current_capabilities["temperatures"]
.get(temperature_unit_key, {})
.get("values", [])
)
else:
self._temperature_unit = self._units
self._temperatures_list = []
self._supported_features = 0
for key in self._ac_states:
if key in FIELD_TO_FLAG:
self._supported_features |= FIELD_TO_FLAG[key]
@property
def state(self):
"""Return the current state."""
return self._external_state or super().state
@property
def extra_state_attributes(self):
"""Return the state attributes."""
return {"battery": self.current_battery}
@property
def temperature_unit(self):
"""Return the unit of measurement which this thermostat uses."""
return self._temperature_unit
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._ac_states.get("targetTemperature")
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
if self.temperature_unit == self.hass.config.units.temperature_unit:
# We are working in same units as the a/c unit. Use whole degrees
# like the API supports.
return 1
# Unit conversion is going on. No point to stick to specific steps.
return None
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
if not self._ac_states["on"]:
return HVAC_MODE_OFF
return SENSIBO_TO_HA.get(self._ac_states["mode"])
@property
def current_humidity(self):
"""Return the current humidity."""
return self._measurements["humidity"]
@property
def current_battery(self):
"""Return the current battery voltage."""
return self._measurements.get("batteryVoltage")
@property
def current_temperature(self):
"""Return the current temperature."""
# This field is not affected by temperatureUnit.
# It is always in C
return convert_temperature(
self._measurements["temperature"], TEMP_CELSIUS, self.temperature_unit
)
@property
def hvac_modes(self):
"""List of available operation modes."""
return self._operations
@property
def fan_mode(self):
"""Return the fan setting."""
return self._ac_states.get("fanLevel")
@property
def fan_modes(self):
"""List of available fan modes."""
return self._current_capabilities.get("fanLevels")
@property
def swing_mode(self):
"""Return the fan setting."""
return self._ac_states.get("swing")
@property
def swing_modes(self):
"""List of available swing modes."""
return self._current_capabilities.get("swing")
@property
def name(self):
"""Return the name of the entity."""
return self._name
@property
def min_temp(self):
"""Return the minimum temperature."""
return (
self._temperatures_list[0] if self._temperatures_list else super().min_temp
)
@property
def max_temp(self):
"""Return the maximum temperature."""
return (
self._temperatures_list[-1] if self._temperatures_list else super().max_temp
)
@property
def unique_id(self):
"""Return unique ID based on Sensibo ID."""
return self._id
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
temperature = int(temperature)
if temperature not in self._temperatures_list:
# Requested temperature is not supported.
if temperature == self.target_temperature:
return
index = self._temperatures_list.index(self.target_temperature)
if (
temperature > self.target_temperature
and index < len(self._temperatures_list) - 1
):
temperature = self._temperatures_list[index + 1]
elif temperature < self.target_temperature and index > 0:
temperature = self._temperatures_list[index - 1]
else:
return
await self._async_set_ac_state_property("targetTemperature", temperature)
async def async_set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
await self._async_set_ac_state_property("fanLevel", fan_mode)
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target operation mode."""
if hvac_mode == HVAC_MODE_OFF:
await self._async_set_ac_state_property("on", False)
return
# Turn on if not currently on.
if not self._ac_states["on"]:
await self._async_set_ac_state_property("on", True)
await self._async_set_ac_state_property("mode", HA_TO_SENSIBO[hvac_mode])
async def async_set_swing_mode(self, swing_mode):
"""Set new target swing operation."""
await self._async_set_ac_state_property("swing", swing_mode)
async def async_turn_on(self):
"""Turn Sensibo unit on."""
await self._async_set_ac_state_property("on", True)
async def async_turn_off(self):
"""Turn Sensibo unit on."""
await self._async_set_ac_state_property("on", False)
async def async_assume_state(self, state):
"""Set external state."""
change_needed = (state != HVAC_MODE_OFF and not self._ac_states["on"]) or (
state == HVAC_MODE_OFF and self._ac_states["on"]
)
if change_needed:
await self._async_set_ac_state_property("on", state != HVAC_MODE_OFF, True)
if state in (STATE_ON, HVAC_MODE_OFF):
self._external_state = None
else:
self._external_state = state
async def async_update(self):
"""Retrieve latest state."""
try:
with async_timeout.timeout(TIMEOUT):
data = await self._client.async_get_device(self._id, _FETCH_FIELDS)
except (
aiohttp.client_exceptions.ClientError,
asyncio.TimeoutError,
pysensibo.SensiboError,
):
if self._failed_update:
_LOGGER.warning(
"Failed to update data for device '%s' from Sensibo servers",
self.name,
)
self._available = False
self.async_write_ha_state()
return
_LOGGER.debug("First failed update data for device '%s'", self.name)
self._failed_update = True
return
self._failed_update = False
self._do_update(data)
async def _async_set_ac_state_property(self, name, value, assumed_state=False):
"""Set AC state."""
try:
with async_timeout.timeout(TIMEOUT):
await self._client.async_set_ac_state_property(
self._id, name, value, self._ac_states, assumed_state
)
except (
aiohttp.client_exceptions.ClientError,
asyncio.TimeoutError,
pysensibo.SensiboError,
) as err:
self._available = False
self.async_write_ha_state()
raise Exception(
f"Failed to set AC state for device {self.name} to Sensibo servers"
) from err
#!/usr/bin/env python
# Written by Bram Cohen
# modified for multitracker by John Hoffman
# see LICENSE.txt for license information
from BitTornado import PSYCO
if PSYCO.psyco:
try:
import psyco
assert psyco.__version__ >= 0x010100f0
psyco.full()
except:
pass
from sys import argv, platform, version
assert version >= '2', "Install Python 2.0 or greater"
from BitTornado.BT1.makemetafile import make_meta_file
from threading import Event, Thread, Lock
from BitTornado.bencode import bencode,bdecode
import sys, os, shutil
from os import getcwd, listdir
from os.path import join, isdir
from traceback import print_exc
try:
import wxversion
wxversion.select("2.6")
except Exception, e:
print >> sys.stderr, "%s: wxPython 2.6 not installed." %e
sys.exit(1)
from wxPython.wx import *
try:
True
except:
True = 1
False = 0
basepath=os.path.abspath(os.path.dirname(sys.argv[0]))
if platform == 'win32':
DROP_HERE = '(drop here)'
else:
DROP_HERE = ''
wxEVT_INVOKE = wxNewEventType()
def EVT_INVOKE(win, func):
win.Connect(-1, -1, wxEVT_INVOKE, func)
class InvokeEvent(wxPyEvent):
def __init__(self, func, args, kwargs):
wxPyEvent.__init__(self)
self.SetEventType(wxEVT_INVOKE)
self.func = func
self.args = args
self.kwargs = kwargs
class BasicDownloadInfo:
def __init__(self, config, calls):
self.config = config
self.calls = calls
self.uiflag = Event()
self.cancelflag = Event()
self.switchlock = Lock()
self.working = False
self.queue = []
wxInitAllImageHandlers()
self.thostselection = self.calls['getCurrentTHost']()
self.thostselectnum = 0
self.choices = None
self.choices1 = None
self.announce = ''
self.announce_list = None
self.windowStyle = wxSYSTEM_MENU|wxCAPTION|wxMINIMIZE_BOX
if self.config['stayontop']:
self.windowStyle |= wxSTAY_ON_TOP
frame = wxFrame(None, -1, 'T-Make',
size = wxSize(-1, -1),
style = self.windowStyle)
self.frame = frame
panel = wxPanel(frame, -1)
mainSizer = wxBoxSizer(wxVERTICAL)
groupSizer = wxFlexGridSizer(cols = 1, vgap = 0, hgap = 0)
# self.dropTarget = self.calls['newDropTarget']((200,200))
self.dropTarget = self.calls['newDropTarget']()
self.dropTargetPtr = wxStaticBitmap(panel, -1, self.dropTarget)
self.calls['setDropTargetRefresh'](self.dropTargetPtr.Refresh)
self.dropTargetWidth = self.dropTarget.GetWidth()
EVT_LEFT_DOWN(self.dropTargetPtr,self.dropTargetClick)
EVT_ENTER_WINDOW(self.dropTargetPtr,self.calls['dropTargetHovered'])
EVT_LEAVE_WINDOW(self.dropTargetPtr,self.calls['dropTargetUnhovered'])
groupSizer.Add(self.dropTargetPtr,0,wxALIGN_CENTER)
lowerSizer1 = wxGridSizer(cols = 6)
dirlink = wxStaticText(panel, -1, 'dir')
dirlink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, True))
dirlink.SetForegroundColour('blue')
EVT_LEFT_UP(dirlink,self.selectdir)
lowerSizer1.Add(dirlink, -1, wxALIGN_LEFT)
lowerSizer1.Add(wxStaticText(panel, -1, ''), -1, wxALIGN_CENTER)
lowerSizer1.Add(wxStaticText(panel, -1, ''), -1, wxALIGN_CENTER)
lowerSizer1.Add(wxStaticText(panel, -1, ''), -1, wxALIGN_CENTER)
lowerSizer1.Add(wxStaticText(panel, -1, ''), -1, wxALIGN_CENTER)
filelink = wxStaticText(panel, -1, 'file')
filelink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, True))
filelink.SetForegroundColour('blue')
EVT_LEFT_UP(filelink,self.selectfile)
lowerSizer1.Add(filelink, -1, wxALIGN_RIGHT)
groupSizer.Add(lowerSizer1, -1, wxALIGN_CENTER)
self.gauge = wxGauge(panel, -1, range = 1000,
style = wxGA_HORIZONTAL, size = (-1,15))
groupSizer.Add(self.gauge, 0, wxEXPAND)
self.statustext = wxStaticText(panel, -1, 'ready',
style = wxALIGN_CENTER|wxST_NO_AUTORESIZE)
self.statustext.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxBOLD, False))
groupSizer.Add(self.statustext, -1, wxEXPAND)
self.choices = wxChoice(panel, -1, (-1, -1), (self.dropTargetWidth, -1),
choices = [])
self.choices.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, False))
EVT_CHOICE(self.choices, -1, self.set_thost)
groupSizer.Add(self.choices, 0, wxEXPAND)
cancellink = wxStaticText(panel, -1, 'cancel')
cancellink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, True))
cancellink.SetForegroundColour('red')
EVT_LEFT_UP(cancellink,self.cancel)
groupSizer.Add(cancellink, -1, wxALIGN_CENTER)
advlink = wxStaticText(panel, -1, 'advanced')
advlink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, True))
advlink.SetForegroundColour('blue')
EVT_LEFT_UP(advlink,self.calls['switchToAdvanced'])
groupSizer.Add(advlink, -1, wxALIGN_CENTER)
mainSizer.Add(groupSizer, 0, wxALIGN_CENTER)
self.refresh_thostlist()
self._set_thost()
if platform == 'win32':
self.dropTargetPtr.DragAcceptFiles(True)
EVT_DROP_FILES(self.dropTargetPtr, self.selectdrop)
# border = wxBoxSizer(wxHORIZONTAL)
# border.Add(mainSizer, 1, wxEXPAND | wxALL, 0)
panel.SetSizer(mainSizer)
panel.SetAutoLayout(True)
# border.Fit(panel)
mainSizer.Fit(panel)
frame.Fit()
frame.Show(True)
EVT_INVOKE(frame, self.onInvoke)
EVT_CLOSE(frame, self._close)
def selectdir(self, x = None):
self.calls['dropTargetHovered']()
dl = wxDirDialog(self.frame, style = wxDD_DEFAULT_STYLE | wxDD_NEW_DIR_BUTTON)
if dl.ShowModal() == wxID_OK:
self.calls['dropTargetDropped']()
self.complete(dl.GetPath())
else:
self.calls['dropTargetUnhovered']()
def selectfile(self, x = None):
self.calls['dropTargetHovered']()
dl = wxFileDialog (self.frame, 'Choose file to use', '', '', '', wxOPEN)
if dl.ShowModal() == wxID_OK:
self.calls['dropTargetDropped']()
self.complete(dl.GetPath())
else:
self.calls['dropTargetUnhovered']()
def selectdrop(self, dat):
self.calls['dropTargetDropped']()
for f in dat.GetFiles():
self.complete(f)
def _announcecopy(self, f):
try:
h = open(f, 'rb')
metainfo = bdecode(h.read())
h.close()
self.announce = metainfo['announce']
if metainfo.has_key('announce-list'):
self.announce_list = metainfo['announce-list']
else:
self.announce_list = None
except:
return
def complete(self, x):
params = {'piece_size_pow2': 0}
if self.announce_list:
params['real_announce_list'] = self.announce_list
self.queue.append((x, self.announce, params))
self.go_queue()
def go_queue(self):
self.switchlock.acquire()
if self.queue and not self.working:
self.working = True
self.statustext.SetLabel('working')
q = self.queue.pop(0)
MakeMetafile(q[0], q[1], q[2], self)
self.switchlock.release()
def cancel(self, x):
self.switchlock.acquire()
if self.working:
self.working = False
self.cancelflag.set()
self.cancelflag = Event()
self.queue = []
self.statustext.SetLabel('CANCELED')
self.calls['dropTargetError']()
self.switchlock.release()
def dropTargetClick(self, x):
if x.GetPosition()[0] < int(self.dropTargetWidth*0.4):
self.selectdir()
elif x.GetPosition()[0] > int(self.dropTargetWidth*0.6):
self.selectfile()
def refresh_thostlist(self):
l = []
d = 0
for f in listdir(join(basepath,'thosts')):
if f[-6:].lower() == '.thost':
l.append(f)
if f == self.thostselection:
d = len(l)
self.choices.Clear()
if not d:
if l:
self.thostselection = l[0]
d = 1
else:
self.thostselection = ''
d = 1
self.config['thost'] = self.thostselection
self.calls['saveConfig']()
for f in l:
self.choices.Append(f[:-6])
self.thostselectnum = d-1
self.thostlist = l
self.choices.SetSelection(d-1)
return
def set_thost(self, x):
n = self.choices.GetSelection()
if n != self.thostselectnum:
self.thostselectnum = n
if n:
self.thostselection = self.thostlist[n-1]
def _set_thost(self):
self._announcecopy(join(join(basepath,'thosts'),self.thostselection))
self.calls['setCurrentTHost'](self.thostselection)
def onInvoke(self, event):
if not self.uiflag.isSet():
apply(event.func, event.args, event.kwargs)
def invokeLater(self, func, args = [], kwargs = {}):
if not self.uiflag.isSet():
wxPostEvent(self.frame, InvokeEvent(func, args, kwargs))
def build_setgauge(self, x):
self.invokeLater(self.on_setgauge, [x])
def on_setgauge(self, x):
self.gauge.SetValue(int(x*1000))
def build_done(self):
self.invokeLater(self.on_builddone)
def on_builddone(self):
self.gauge.SetValue(0)
self.statustext.SetLabel('done!')
self.calls['dropTargetSuccess']()
self.working = False
self.go_queue()
def build_failed(self, e):
self.invokeLater(self.on_buildfailed, [e])
def on_buildfailed(self, e):
self.gauge.SetValue(0)
self.statustext.SetLabel('ERROR')
self.calls['dropTargetError']()
self.working = False
self.go_queue()
def close(self):
self.cancelflag = None # this is a planned switch, don't cancel
self.uiflag.set()
self.frame.Close()
def _close(self, x = None):
self.uiflag.set()
try:
self.cancelflag.set()
except:
pass
self.frame.Destroy()
class AdvancedDownloadInfo:
def __init__(self, config, calls):
self.config = config
self.calls = calls
self.uiflag = Event()
self.cancelflag = Event()
self.switchlock = Lock()
self.working = False
self.queue = []
wxInitAllImageHandlers()
self.thostselection = self.calls['getCurrentTHost']()
self.thostselectnum = 0
self.choices = None
self.choices1 = None
self.windowStyle = wxSYSTEM_MENU|wxCAPTION|wxMINIMIZE_BOX
if self.config['stayontop']:
self.windowStyle |= wxSTAY_ON_TOP
frame = wxFrame(None, -1, 'T-Make',
size = wxSize(-1, -1),
style = self.windowStyle)
self.frame = frame
panel = wxPanel(frame, -1)
fullSizer = wxFlexGridSizer(cols = 1, vgap = 0, hgap = 8)
colSizer = wxFlexGridSizer(cols = 2, vgap = 0, hgap = 8)
leftSizer = wxFlexGridSizer(cols = 1, vgap = 3)
self.stayontop_checkbox = wxCheckBox(panel, -1, "stay on top")
self.stayontop_checkbox.SetValue(self.config['stayontop'])
EVT_CHECKBOX(frame, self.stayontop_checkbox.GetId(), self.setstayontop)
leftSizer.Add(self.stayontop_checkbox, -1, wxALIGN_CENTER)
leftSizer.Add(wxStaticText(panel, -1, ''))
button = wxButton(panel, -1, 'use image...')
EVT_BUTTON(frame, button.GetId(), self.selectDropTarget)
leftSizer.Add(button, -1, wxALIGN_CENTER)
self.groupSizer1Box = wxStaticBox(panel, -1, '')
groupSizer1 = wxStaticBoxSizer(self.groupSizer1Box, wxHORIZONTAL)
groupSizer = wxFlexGridSizer(cols = 1, vgap = 0)
self.dropTarget = self.calls['newDropTarget']((200,200))
# self.dropTarget = self.calls['newDropTarget']()
self.dropTargetPtr = wxStaticBitmap(panel, -1, self.dropTarget)
self.calls['setDropTargetRefresh'](self.dropTargetPtr.Refresh)
self.dropTargetWidth = self.dropTarget.GetWidth()
EVT_LEFT_DOWN(self.dropTargetPtr,self.dropTargetClick)
EVT_ENTER_WINDOW(self.dropTargetPtr,self.calls['dropTargetHovered'])
EVT_LEAVE_WINDOW(self.dropTargetPtr,self.calls['dropTargetUnhovered'])
groupSizer.Add(self.dropTargetPtr,0,wxALIGN_CENTER)
lowerSizer1 = wxGridSizer(cols = 3)
dirlink = wxStaticText(panel, -1, 'dir')
dirlink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, True))
dirlink.SetForegroundColour('blue')
EVT_LEFT_UP(dirlink,self.selectdir)
lowerSizer1.Add(dirlink, -1, wxALIGN_LEFT)
lowerSizer1.Add(wxStaticText(panel, -1, ''), -1, wxALIGN_CENTER)
filelink = wxStaticText(panel, -1, 'file')
filelink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, True))
filelink.SetForegroundColour('blue')
EVT_LEFT_UP(filelink,self.selectfile)
lowerSizer1.Add(filelink, -1, wxALIGN_RIGHT)
groupSizer.Add(lowerSizer1, -1, wxALIGN_CENTER)
self.gauge = wxGauge(panel, -1, range = 1000,
style = wxGA_HORIZONTAL, size = (-1,15))
groupSizer.Add(self.gauge, 0, wxEXPAND)
self.statustext = wxStaticText(panel, -1, 'ready',
style = wxALIGN_CENTER|wxST_NO_AUTORESIZE)
self.statustext.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxBOLD, False))
groupSizer.Add(self.statustext, -1, wxEXPAND)
self.choices = wxChoice(panel, -1, (-1, -1), (self.dropTargetWidth, -1),
choices = [])
self.choices.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, False))
EVT_CHOICE(self.choices, -1, self.set_thost)
groupSizer.Add(self.choices, 0, wxEXPAND)
cancellink = wxStaticText(panel, -1, 'cancel')
cancellink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, True))
cancellink.SetForegroundColour('red')
EVT_LEFT_UP(cancellink,self.cancel)
groupSizer.Add(cancellink, -1, wxALIGN_CENTER)
dummyadvlink = wxStaticText(panel, -1, 'advanced')
dummyadvlink.SetFont(wxFont(7, wxDEFAULT, wxNORMAL, wxNORMAL, False))
dummyadvlink.SetForegroundColour('blue')
EVT_LEFT_UP(dirlink,self.selectdir)
groupSizer.Add(dummyadvlink, -1, wxALIGN_CENTER)
groupSizer1.Add(groupSizer)
leftSizer.Add(groupSizer1, -1, wxALIGN_CENTER)
leftSizer.Add(wxStaticText(panel, -1, 'make torrent of:'),0,wxALIGN_CENTER)
self.dirCtl = wxTextCtrl(panel, -1, '', size = (250,-1))
leftSizer.Add(self.dirCtl, 1, wxEXPAND)
b = wxBoxSizer(wxHORIZONTAL)
button = wxButton(panel, -1, 'dir')
EVT_BUTTON(frame, button.GetId(), self.selectdir)
b.Add(button, 0)
button2 = wxButton(panel, -1, 'file')
EVT_BUTTON(frame, button2.GetId(), self.selectfile)
b.Add(button2, 0)
leftSizer.Add(b, 0, wxALIGN_CENTER)
leftSizer.Add(wxStaticText(panel, -1, ''))
simple_link = wxStaticText(panel, -1, 'back to basic mode')
simple_link.SetFont(wxFont(-1, wxDEFAULT, wxNORMAL, wxNORMAL, True))
simple_link.SetForegroundColour('blue')
EVT_LEFT_UP(simple_link,self.calls['switchToBasic'])
leftSizer.Add(simple_link, -1, wxALIGN_CENTER)
colSizer.Add(leftSizer, -1, wxALIGN_CENTER_VERTICAL)
gridSizer = wxFlexGridSizer(cols = 2, vgap = 6, hgap = 8)
gridSizer.Add(wxStaticText(panel, -1, 'Torrent host:'), -1,
wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL)
self.choices1 = wxChoice(panel, -1, (-1, -1), (-1, -1),
choices = [])
EVT_CHOICE(self.choices1, -1, self.set_thost1)
gridSizer.Add(self.choices1, 0, wxEXPAND)
b = wxBoxSizer(wxHORIZONTAL)
button1 = wxButton(panel, -1, 'set default')
EVT_BUTTON(frame, button1.GetId(), self.set_default_thost)
b.Add(button1, 0)
b.Add(wxStaticText(panel, -1, ' '))
button2 = wxButton(panel, -1, 'delete')
EVT_BUTTON(frame, button2.GetId(), self.delete_thost)
b.Add(button2, 0)
b.Add(wxStaticText(panel, -1, ' '))
button3 = wxButton(panel, -1, 'save as...')
EVT_BUTTON(frame, button3.GetId(), self.save_thost)
b.Add(button3, 0)
gridSizer.Add(wxStaticText(panel, -1, ''))
gridSizer.Add(b, 0, wxALIGN_CENTER)
gridSizer.Add(wxStaticText(panel, -1, ''))
gridSizer.Add(wxStaticText(panel, -1, ''))
gridSizer.Add(wxStaticText(panel, -1, 'single tracker url:'),0,
wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL)
self.annCtl = wxTextCtrl(panel, -1, 'http://my.tracker:6969/announce')
gridSizer.Add(self.annCtl, 0, wxEXPAND)
a = wxFlexGridSizer(cols = 1, vgap = 3)
a.Add(wxStaticText(panel, -1, 'tracker list:'),0,wxALIGN_RIGHT)
a.Add(wxStaticText(panel, -1, ''))
abutton = wxButton(panel, -1, 'copy\nannounces\nfrom\ntorrent', size = (70,70))
EVT_BUTTON(frame, abutton.GetId(), self.announcecopy)
a.Add(abutton, -1, wxALIGN_CENTER)
a.Add(wxStaticText(panel, -1, DROP_HERE), -1, wxALIGN_CENTER)
gridSizer.Add(a, -1, wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL)
self.annListCtl = wxTextCtrl(panel, -1, '\n\n\n\n\n', wxPoint(-1,-1), (300,120),
wxTE_MULTILINE|wxHSCROLL|wxTE_DONTWRAP)
gridSizer.Add(self.annListCtl, -1, wxEXPAND)
gridSizer.Add(wxStaticText(panel, -1, ''))
exptext = wxStaticText(panel, -1,
"a list of tracker urls separated by commas or whitespace\n" +
"and on several lines -trackers on the same line will be\n" +
"tried randomly, and all the trackers on one line\n" +
"will be tried before the trackers on the next line.")
exptext.SetFont(wxFont(6, wxDEFAULT, wxNORMAL, wxNORMAL, False))
gridSizer.Add(exptext, -1, wxALIGN_CENTER)
self.refresh_thostlist()
self._set_thost()
if platform == 'win32':
self.dropTargetPtr.DragAcceptFiles(True)
EVT_DROP_FILES(self.dropTargetPtr, self.selectdrop)
self.groupSizer1Box.DragAcceptFiles(True)
EVT_DROP_FILES(self.groupSizer1Box, self.selectdrop)
abutton.DragAcceptFiles(True)
EVT_DROP_FILES(abutton, self.announcedrop)
self.annCtl.DragAcceptFiles(True)
EVT_DROP_FILES(self.annCtl, self.announcedrop)
self.annListCtl.DragAcceptFiles(True)
EVT_DROP_FILES(self.annListCtl, self.announcedrop)
gridSizer.Add(wxStaticText(panel, -1, ''))
gridSizer.Add(wxStaticText(panel, -1, ''))
gridSizer.Add(wxStaticText(panel, -1, 'piece size:'),0,
wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL)
self.piece_length = wxChoice(panel, -1,
choices = ['automatic', '2MiB', '1MiB', '512KiB', '256KiB', '128KiB', '64KiB', '32KiB'])
self.piece_length_list = [0, 21, 20, 19, 18, 17, 16, 15]
self.piece_length.SetSelection(0)
gridSizer.Add(self.piece_length)
gridSizer.Add(wxStaticText(panel, -1, 'comment:'),0,
wxALIGN_RIGHT|wxALIGN_CENTER_VERTICAL)
self.commentCtl = wxTextCtrl(panel, -1, '')
gridSizer.Add(self.commentCtl, 0, wxEXPAND)
gridSizer.Add(wxStaticText(panel, -1, ''))
gridSizer.Add(wxStaticText(panel, -1, ''))
b1 = wxButton(panel, -1, 'Cancel', size = (-1, 30))
EVT_BUTTON(frame, b1.GetId(), self.cancel)
gridSizer.Add(b1, 0, wxEXPAND)
b2 = wxButton(panel, -1, 'MAKE TORRENT', size = (-1, 30))
EVT_BUTTON(frame, b2.GetId(), self.complete)
gridSizer.Add(b2, 0, wxEXPAND)
gridSizer.AddGrowableCol(1)
colSizer.Add(gridSizer, -1, wxALIGN_CENTER_VERTICAL)
fullSizer.Add(colSizer)
border = wxBoxSizer(wxHORIZONTAL)
border.Add(fullSizer, 1, wxEXPAND | wxALL, 15)
panel.SetSizer(border)
panel.SetAutoLayout(True)
border.Fit(panel)
frame.Fit()
frame.Show(True)
EVT_INVOKE(frame, self.onInvoke)
EVT_CLOSE(frame, self._close)
def setstayontop(self, x):
if self.stayontop_checkbox.GetValue():
self.windowStyle |= wxSTAY_ON_TOP
else:
self.windowStyle &= ~wxSTAY_ON_TOP
self.frame.SetWindowStyle(self.windowStyle)
self.config['stayontop'] = self.stayontop_checkbox.GetValue()
def selectdir(self, x = None):
self.calls['dropTargetHovered']()
dl = wxDirDialog(self.frame, style = wxDD_DEFAULT_STYLE | wxDD_NEW_DIR_BUTTON)
if dl.ShowModal() == wxID_OK:
self.dirCtl.SetValue(dl.GetPath())
self.calls['dropTargetDropped']()
else:
self.calls['dropTargetUnhovered']()
def selectfile(self, x = None):
self.calls['dropTargetHovered']()
dl = wxFileDialog (self.frame, 'Choose file to use', '', '', '', wxOPEN)
if dl.ShowModal() == wxID_OK:
self.dirCtl.SetValue(dl.GetPath())
self.calls['dropTargetDropped']()
else:
self.calls['dropTargetUnhovered']()
def selectdrop(self, dat):
self.calls['dropTargetDropped']()
for f in dat.GetFiles():
self.complete(f)
def announcecopy(self, x):
dl = wxFileDialog (self.frame, 'Choose .torrent file to use', '', '', '*.torrent', wxOPEN)
if dl.ShowModal() == wxID_OK:
self._announcecopy(dl.GetPath(), True)
def announcedrop(self, dat):
self._announcecopy(dat.GetFiles()[0], True)
def _announcecopy(self, f, external = False):
try:
h = open(f, 'rb')
metainfo = bdecode(h.read())
h.close()
self.annCtl.SetValue(metainfo['announce'])
if metainfo.has_key('announce-list'):
list = []
for tier in metainfo['announce-list']:
for tracker in tier:
list += [tracker, ', ']
del list[-1]
list += ['\n']
liststring = ''
for i in list:
liststring += i
self.annListCtl.SetValue(liststring+'\n\n')
else:
self.annListCtl.SetValue('')
if external:
self.choices.SetSelection(0)
self.choices1.SetSelection(0)
except:
return
def getannouncelist(self):
list = []
for t in self.annListCtl.GetValue().split('\n'):
tier = []
t = t.replace(',',' ')
for tr in t.split(' '):
if tr != '':
tier += [tr]
if len(tier)>0:
list.append(tier)
return list
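# Note (descriptive comment, not in the original): getannouncelist() returns
# the multitracker 'announce-list' structure -- a list of tiers, one tier per
# line of the text box, each tier being a list of tracker URLs.  As the GUI
# help text above explains, trackers on one line are tried in random order
# and a whole line is tried before moving on to the next line.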
def complete(self, x):
if not self.dirCtl.GetValue():
dlg = wxMessageDialog(self.frame, message = 'You must select a\nfile or directory',
caption = 'Error', style = wxOK | wxICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
if not self.annCtl.GetValue():
dlg = wxMessageDialog(self.frame, message = 'You must specify a\nsingle tracker url',
caption = 'Error', style = wxOK | wxICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
params = {'piece_size_pow2': self.piece_length_list[self.piece_length.GetSelection()]}
annlist = self.getannouncelist()
if len(annlist)>0:
warnings = ''
for tier in annlist:
if len(tier) > 1:
warnings += (
'WARNING: You should not specify multiple trackers\n' +
' on the same line of the tracker list unless\n' +
' you are certain they share peer data.\n')
break
if not self.annCtl.GetValue() in annlist[0]:
warnings += (
'WARNING: The single tracker url is not present in\n' +
' the first line of the tracker list. This\n' +
' may produce a dysfunctional torrent.\n')
if warnings:
warnings += ('Are you sure you wish to produce a .torrent\n' +
'with these parameters?')
dlg = wxMessageDialog(self.frame,
message = warnings,
caption = 'Warning', style = wxYES_NO | wxICON_QUESTION)
if dlg.ShowModal() != wxID_YES:
dlg.Destroy()
return
params['real_announce_list'] = annlist
comment = self.commentCtl.GetValue()
if comment != '':
params['comment'] = comment
self.statustext.SetLabel('working')
self.queue.append((self.dirCtl.GetValue(), self.annCtl.GetValue(), params))
self.go_queue()
def go_queue(self):
self.switchlock.acquire()
if self.queue and not self.working:
self.working = True
self.statustext.SetLabel('working')
q = self.queue.pop(0)
MakeMetafile(q[0], q[1], q[2], self)
self.switchlock.release()
def cancel(self, x):
self.switchlock.acquire()
if self.working:
self.working = False
self.cancelflag.set()
self.cancelflag = Event()
self.queue = []
self.statustext.SetLabel('CANCELED')
self.calls['dropTargetError']()
self.switchlock.release()
def selectDropTarget(self, x):
dl = wxFileDialog (self.frame, 'Choose image to use', join(basepath,'targets'),
join(join(basepath,'targets'), self.config['target']),
'Supported images (*.bmp,*.gif)|*.*', wxOPEN|wxHIDE_READONLY)
if dl.ShowModal() == wxID_OK:
try:
self.calls['changeDropTarget'](dl.GetPath())
self.config['target'] = dl.GetPath()
except:
pass
def dropTargetClick(self, x):
if x.GetPosition()[0] < int(self.dropTargetWidth*0.4):
self.selectdir()
elif x.GetPosition()[0] > int(self.dropTargetWidth*0.6):
self.selectfile()
def refresh_thostlist(self):
l = []
d = 0
for f in listdir(join(basepath,'thosts')):
if f[-6:].lower() == '.thost':
l.append(f)
if f == self.thostselection:
d = len(l)
self.choices.Clear()
self.choices.Append(' ')
self.choices1.Clear()
self.choices1.Append('---')
if not d:
if l:
self.thostselection = l[0]
d = 1
else:
self.thostselection = ''
d = 0
self.config['thost'] = self.thostselection
for f in l:
f1 = f[:-6]
self.choices.Append(f1)
if f == self.config['thost']:
f1 += ' (default)'
self.choices1.Append(f1)
self.thostselectnum = d
self.thostlist = l
self.choices.SetSelection(d)
self.choices1.SetSelection(d)
def set_thost(self, x):
n = self.choices.GetSelection()
if n != self.thostselectnum:
self.thostselectnum = n
self.choices1.SetSelection(n)
if n:
self.thostselection = self.thostlist[n-1]
self._set_thost()
def set_thost1(self, x):
n = self.choices1.GetSelection()
if n != self.thostselectnum:
self.thostselectnum = n
self.choices.SetSelection(n)
if n:
self.thostselection = self.thostlist[n-1]
self._set_thost()
def _set_thost(self):
self._announcecopy(join(join(basepath,'thosts'),self.thostselection))
self.calls['setCurrentTHost'](self.thostselection)
def set_default_thost(self, x):
if self.thostlist:
self.config['thost'] = self.thostselection
self.refresh_thostlist()
def save_thost(self, x):
if not self.annCtl.GetValue():
dlg = wxMessageDialog(self.frame, message = 'You must specify a\nsingle tracker url',
caption = 'Error', style = wxOK | wxICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
return
try:
metainfo = {}
metainfo['announce'] = self.annCtl.GetValue()
annlist = self.getannouncelist()
if len(annlist)>0:
warnings = ''
for tier in annlist:
if len(tier) > 1:
warnings += (
'WARNING: You should not specify multiple trackers\n' +
' on the same line of the tracker list unless\n' +
' you are certain they share peer data.\n')
break
if not self.annCtl.GetValue() in annlist[0]:
warnings += (
'WARNING: The single tracker url is not present in\n' +
' the first line of the tracker list. This\n' +
' may produce a dysfunctional torrent.\n')
if warnings:
warnings += ('Are you sure you wish to save a torrent host\n' +
'with these parameters?')
dlg = wxMessageDialog(self.frame,
message = warnings,
caption = 'Warning', style = wxYES_NO | wxICON_QUESTION)
if dlg.ShowModal() != wxID_YES:
dlg.Destroy()
return
metainfo['announce-list'] = annlist
metainfo = bencode(metainfo)
except:
return
if self.thostselectnum:
d = self.thostselection
else:
d = '.thost'
dl = wxFileDialog (self.frame, 'Save tracker data as',
join(basepath,'thosts'), d, '*.thost',
wxSAVE|wxOVERWRITE_PROMPT)
if dl.ShowModal() != wxID_OK:
return
d = dl.GetPath()
try:
f = open(d,'wb')
f.write(metainfo)
f.close()
garbage, self.thostselection = os.path.split(d)
except:
pass
self.refresh_thostlist()
def delete_thost(self, x):
dlg = wxMessageDialog(self.frame,
message = 'Are you sure you want to delete\n'+self.thostselection[:-6]+'?',
caption = 'Warning', style = wxYES_NO | wxICON_EXCLAMATION)
if dlg.ShowModal() != wxID_YES:
dlg.Destroy()
return
dlg.Destroy()
os.remove(join(join(basepath,'thosts'),self.thostselection))
self.thostselection = None
self.refresh_thostlist()
def onInvoke(self, event):
if not self.uiflag.isSet():
apply(event.func, event.args, event.kwargs)
def invokeLater(self, func, args = [], kwargs = {}):
if not self.uiflag.isSet():
wxPostEvent(self.frame, InvokeEvent(func, args, kwargs))
def build_setgauge(self, x):
self.invokeLater(self.on_setgauge, [x])
def on_setgauge(self, x):
self.gauge.SetValue(int(x*1000))
def build_done(self):
self.invokeLater(self.on_builddone)
def on_builddone(self):
self.gauge.SetValue(0)
self.statustext.SetLabel('done!')
self.calls['dropTargetSuccess']()
self.working = False
self.go_queue()
def build_failed(self, e):
self.invokeLater(self.on_buildfailed, [e])
def on_buildfailed(self, e):
self.gauge.SetValue(0)
self.statustext.SetLabel('ERROR')
self.calls['dropTargetError']()
self.working = False
self.go_queue()
def close(self):
self.cancelflag = None # this is a planned switch, don't cancel
self.uiflag.set()
self.frame.Close()
def _close(self, x = None):
self.uiflag.set()
try:
self.cancelflag.set()
except:
pass
self.calls['saveConfig']()
self.frame.Destroy()
class MakeMetafile:
def __init__(self, d, a, params, external = None):
self.d = d
self.a = a
self.params = params
self.call = external
# self.uiflag = external.uiflag
self.uiflag = external.cancelflag
Thread(target = self.complete).start()
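# Note (descriptive comment, not in the original): the metafile is built on a
# worker thread so the wx GUI stays responsive; progress and completion are
# reported back through the 'external' object's build_setgauge / build_done /
# build_failed methods, which hop to the GUI thread via invokeLater and
# wxPostEvent.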
def complete(self):
try:
make_meta_file(self.d, self.a, self.params, self.uiflag,
self.call.build_setgauge, progress_percent = 1)
if not self.uiflag.isSet():
self.call.build_done()
except (OSError, IOError), e:
self.failed(e)
except Exception, e:
print_exc()
self.failed(e)
def failed(self, e):
e = str(e)
self.call.build_failed(e)
dlg = wxMessageDialog(self.call.frame, message = 'Error - ' + e,
caption = 'Error', style = wxOK | wxICON_ERROR)
dlg.ShowModal()
dlg.Destroy()
class T_make:
def __init__(self):
self.configobj = wxConfig('BitTorrent_T-make',style=wxCONFIG_USE_LOCAL_FILE)
self.getConfig()
self.currentTHost = self.config['thost']
# self.d = AdvancedDownloadInfo(self.config, self.getCalls())
self.d = BasicDownloadInfo(self.config, self.getCalls())
def getConfig(self):
config = {}
try:
config['stayontop'] = self.configobj.ReadInt('stayontop',True)
except:
config['stayontop'] = True
self.configobj.WriteInt('stayontop',True)
try:
config['target'] = self.configobj.Read('target','default.gif')
except:
config['target'] = 'default.gif'
self.configobj.Write('target','default.gif')
try:
config['thost'] = self.configobj.Read('thost','')
except:
config['thost'] = ''
self.configobj.Write('thost','')
self.configobj.Flush()
self.config = config
def saveConfig(self):
self.configobj.WriteInt('stayontop',self.config['stayontop'])
self.configobj.Write('target',self.config['target'])
self.configobj.Write('thost',self.config['thost'])
self.configobj.Flush()
def getCalls(self):
calls = {}
calls['saveConfig'] = self.saveConfig
calls['newDropTarget'] = self.newDropTarget
calls['setDropTargetRefresh'] = self.setDropTargetRefresh
calls['changeDropTarget'] = self.changeDropTarget
calls['setCurrentTHost'] = self.setCurrentTHost
calls['getCurrentTHost'] = self.getCurrentTHost
calls['dropTargetHovered'] = self.dropTargetHovered
calls['dropTargetUnhovered'] = self.dropTargetUnhovered
calls['dropTargetDropped'] = self.dropTargetDropped
calls['dropTargetSuccess'] = self.dropTargetSuccess
calls['dropTargetError'] = self.dropTargetError
calls['switchToBasic'] = self.switchToBasic
calls['switchToAdvanced'] = self.switchToAdvanced
return calls
def setCurrentTHost(self, x):
self.currentTHost = x
def getCurrentTHost(self):
return self.currentTHost
def newDropTarget(self, wh = None):
if wh:
self.dropTarget = wxEmptyBitmap(wh[0],wh[1])
try:
self.changeDropTarget(self.config['target'])
except:
pass
else:
try:
self.dropTarget = self._dropTargetRead(self.config['target'])
except:
try:
self.dropTarget = self._dropTargetRead('default.gif')
self.config['target'] = 'default.gif'
self.saveConfig()
except:
self.dropTarget = wxEmptyBitmap(100,100)
return self.dropTarget
def setDropTargetRefresh(self, refreshfunc):
self.dropTargetRefresh = refreshfunc
def changeDropTarget(self, new):
bmp = self._dropTargetRead(new)
w1,h1 = self.dropTarget.GetWidth(),self.dropTarget.GetHeight()
w,h = bmp.GetWidth(),bmp.GetHeight()
x1,y1 = int((w1-w)/2.0),int((h1-h)/2.0)
bbdata = wxMemoryDC()
bbdata.SelectObject(self.dropTarget)
bbdata.SetPen(wxTRANSPARENT_PEN)
bbdata.SetBrush(wxBrush(wx.wxSystemSettings_GetColour(wxSYS_COLOUR_MENU),wxSOLID))
bbdata.DrawRectangle(0,0,w1,h1)
bbdata.SetPen(wxBLACK_PEN)
bbdata.SetBrush(wxTRANSPARENT_BRUSH)
bbdata.DrawRectangle(x1-1,y1-1,w+2,h+2)
bbdata.DrawBitmap(bmp,x1,y1,True)
try:
self.dropTargetRefresh()
except:
pass
def _dropTargetRead(self, new):
a,b = os.path.split(new)
if a and a != join(basepath,'targets'):
if a != join(basepath,'targets'):
b1,b2 = os.path.splitext(b)
z = 0
while os.path.isfile(join(join(basepath,'targets'),b)):
z += 1
b = b1+'('+str(z)+')'+b2
shutil.copyfile(new,join(join(basepath,'targets'),b))
new = b
name = join(join(basepath,'targets'),new)
garbage, e = os.path.splitext(new.lower())
if e == '.gif':
bmp = wxBitmap(name, wxBITMAP_TYPE_GIF)
elif e == '.bmp':
bmp = wxBitmap(name, wxBITMAP_TYPE_BMP)
else:
assert False
return bmp
def dropTargetHovered(self, x = None):
pass
def dropTargetUnhovered(self, x = None):
pass
def dropTargetDropped(self, x = None):
pass
def dropTargetSuccess(self, x = None):
pass
def dropTargetError(self, x = None):
pass
def switchToBasic(self, x = None):
self.d.close()
self.d = BasicDownloadInfo(self.config, self.getCalls())
def switchToAdvanced(self, x = None):
self.d.close()
self.d = AdvancedDownloadInfo(self.config, self.getCalls())
class btWxApp(wxApp):
def OnInit(self):
self.APP = T_make()
return True
if __name__ == '__main__':
btWxApp().MainLoop()
|
|
"""
kombu.entity
================
Exchange and Queue declarations.
"""
from __future__ import absolute_import
from .abstract import MaybeChannelBound
TRANSIENT_DELIVERY_MODE = 1
PERSISTENT_DELIVERY_MODE = 2
DELIVERY_MODES = {'transient': TRANSIENT_DELIVERY_MODE,
'persistent': PERSISTENT_DELIVERY_MODE}
__all__ = ['Exchange', 'Queue']
def pretty_bindings(bindings):
return '[%s]' % (', '.join(map(str, bindings)))
class Exchange(MaybeChannelBound):
"""An Exchange declaration.
:keyword name: See :attr:`name`.
:keyword type: See :attr:`type`.
:keyword channel: See :attr:`channel`.
:keyword durable: See :attr:`durable`.
:keyword auto_delete: See :attr:`auto_delete`.
:keyword delivery_mode: See :attr:`delivery_mode`.
:keyword arguments: See :attr:`arguments`.
.. attribute:: name
Name of the exchange. Default is no name (the default exchange).
.. attribute:: type
AMQP defines four default exchange types (routing algorithms) that
cover most of the common messaging use cases. An AMQP broker can
also define additional exchange types, so see your broker
manual for more information about available exchange types.
* `direct` (*default*)
Direct match between the routing key in the message, and the
routing criteria used when a queue is bound to this exchange.
* `topic`
Wildcard match between the routing key and the routing pattern
specified in the exchange/queue binding. The routing key is
treated as zero or more words delimited by `"."` and
supports special wildcard characters. `"*"` matches a
single word and `"#"` matches zero or more words.
* `fanout`
Queues are bound to this exchange with no arguments. Hence any
message sent to this exchange will be forwarded to all queues
bound to this exchange.
* `headers`
Queues are bound to this exchange with a table of arguments
containing headers and values (optional). A special argument
named "x-match" determines the matching algorithm, where
`"all"` implies an `AND` (all pairs must match) and
`"any"` implies `OR` (at least one pair must match).
:attr:`arguments` is used to specify the arguments.
This description of AMQP exchange types was shamelessly stolen
from the blog post `AMQP in 10 minutes: Part 4`_ by
Rajith Attapattu. This article is recommended reading.
.. _`AMQP in 10 minutes: Part 4`:
http://bit.ly/amqp-exchange-types
.. attribute:: channel
The channel the exchange is bound to (if bound).
.. attribute:: durable
Durable exchanges remain active when a server restarts. Non-durable
exchanges (transient exchanges) are purged when a server restarts.
Default is :const:`True`.
.. attribute:: auto_delete
If set, the exchange is deleted when all queues have finished
using it. Default is :const:`False`.
.. attribute:: delivery_mode
The default delivery mode used for messages. The value is an integer,
or alias string.
* 1 or `"transient"`
The message is transient, which means it is stored in
memory only, and is lost if the server dies or restarts.
* 2 or "persistent" (*default*)
The message is persistent, which means the message is
stored both in-memory and on disk, and is therefore
preserved if the server dies or restarts.
The default value is 2 (persistent).
.. attribute:: arguments
Additional arguments to specify when the exchange is declared.
"""
TRANSIENT_DELIVERY_MODE = TRANSIENT_DELIVERY_MODE
PERSISTENT_DELIVERY_MODE = PERSISTENT_DELIVERY_MODE
name = ''
type = 'direct'
durable = True
auto_delete = False
delivery_mode = PERSISTENT_DELIVERY_MODE
attrs = (
('name', None),
('type', None),
('arguments', None),
('durable', bool),
('auto_delete', bool),
('delivery_mode', lambda m: DELIVERY_MODES.get(m) or m),
)
def __init__(self, name='', type='', channel=None, **kwargs):
super(Exchange, self).__init__(**kwargs)
self.name = name or self.name
self.type = type or self.type
self.maybe_bind(channel)
def __hash__(self):
return hash('E|%s' % (self.name, ))
def declare(self, nowait=False, passive=False):
"""Declare the exchange.
Creates the exchange on the broker.
:keyword nowait: If set the server will not respond, and a
response will not be waited for. Default is :const:`False`.
"""
if self.name:
return self.channel.exchange_declare(
exchange=self.name, type=self.type, durable=self.durable,
auto_delete=self.auto_delete, arguments=self.arguments,
nowait=nowait, passive=passive,
)
def bind_to(self, exchange='', routing_key='',
arguments=None, nowait=False, **kwargs):
"""Binds the exchange to another exchange.
:keyword nowait: If set the server will not respond, and the call
will not block waiting for a response. Default is :const:`False`.
"""
if isinstance(exchange, Exchange):
exchange = exchange.name
return self.channel.exchange_bind(destination=self.name,
source=exchange,
routing_key=routing_key,
nowait=nowait,
arguments=arguments)
def unbind_from(self, source='', routing_key='',
nowait=False, arguments=None):
"""Delete previously created exchange binding from the server."""
if isinstance(source, Exchange):
source = source.name
return self.channel.exchange_unbind(destination=self.name,
source=source,
routing_key=routing_key,
nowait=nowait,
arguments=arguments)
def Message(self, body, delivery_mode=None, priority=None,
content_type=None, content_encoding=None,
properties=None, headers=None):
"""Create message instance to be sent with :meth:`publish`.
:param body: Message body.
:keyword delivery_mode: Set custom delivery mode. Defaults
to :attr:`delivery_mode`.
:keyword priority: Message priority, 0 to 9. (currently not
supported by RabbitMQ).
:keyword content_type: The messages content_type. If content_type
is set, no serialization occurs as it is assumed this is either
a binary object, or you've done your own serialization.
Leave blank if using built-in serialization as our library
properly sets content_type.
:keyword content_encoding: The character set in which this object
is encoded. Use "binary" if sending in raw binary objects.
Leave blank if using built-in serialization as our library
properly sets content_encoding.
:keyword properties: Message properties.
:keyword headers: Message headers.
"""
properties = {} if properties is None else properties
dm = delivery_mode or self.delivery_mode
properties['delivery_mode'] = \
DELIVERY_MODES[dm] if (dm != 2 and dm != 1) else dm
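# (descriptive comment, not in the original) dm may be the alias string
# 'transient'/'persistent' or the raw integer 1/2; the assignment above
# normalizes aliases through DELIVERY_MODES and passes integers through.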
return self.channel.prepare_message(body,
properties=properties,
priority=priority,
content_type=content_type,
content_encoding=content_encoding,
headers=headers)
def publish(self, message, routing_key=None, mandatory=False,
immediate=False, exchange=None):
"""Publish message.
:param message: :meth:`Message` instance to publish.
:param routing_key: Routing key.
:param mandatory: Currently not supported.
:param immediate: Currently not supported.
"""
exchange = exchange or self.name
return self.channel.basic_publish(message,
exchange=exchange,
routing_key=routing_key,
mandatory=mandatory,
immediate=immediate)
def delete(self, if_unused=False, nowait=False):
"""Delete the exchange declaration on server.
:keyword if_unused: Delete only if the exchange has no bindings.
Default is :const:`False`.
:keyword nowait: If set the server will not respond, and a
response will not be waited for. Default is :const:`False`.
"""
return self.channel.exchange_delete(exchange=self.name,
if_unused=if_unused,
nowait=nowait)
def __eq__(self, other):
if isinstance(other, Exchange):
return (self.name == other.name and
self.type == other.type and
self.arguments == other.arguments and
self.durable == other.durable and
self.auto_delete == other.auto_delete and
self.delivery_mode == other.delivery_mode)
return False
def __repr__(self):
return super(Exchange, self).__repr__(str(self))
def __str__(self):
return 'Exchange %s(%s)' % (self.name or repr(''), self.type)
@property
def can_cache_declaration(self):
return self.durable
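def _example_declare_exchange(channel):
    """Minimal usage sketch (not part of kombu itself), assuming ``channel``
    is an open channel obtained from a ``kombu.Connection``: declare a
    durable direct exchange as described in the Exchange docstring above."""
    media_exchange = Exchange('media', type='direct', durable=True)
    bound = media_exchange(channel)   # bind the declaration to this channel
    bound.declare()                   # issues exchange_declare on the broker
    return bound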
class binding(object):
"""Represents a queue or exchange binding.
:keyword exchange: Exchange to bind to.
:keyword routing_key: Routing key used as binding key.
:keyword arguments: Arguments for bind operation.
:keyword unbind_arguments: Arguments for unbind operation.
"""
def __init__(self, exchange=None, routing_key='',
arguments=None, unbind_arguments=None):
self.exchange = exchange
self.routing_key = routing_key
self.arguments = arguments
self.unbind_arguments = unbind_arguments
def declare(self, channel, nowait=False):
"""Declare destination exchange."""
if self.exchange and self.exchange.name:
ex = self.exchange(channel)
ex.declare(nowait=nowait)
def bind(self, entity, nowait=False):
"""Bind entity to this binding."""
entity.bind_to(exchange=self.exchange,
routing_key=self.routing_key,
arguments=self.arguments,
nowait=nowait)
def unbind(self, entity, nowait=False):
"""Unbind entity from this binding."""
entity.unbind_from(self.exchange,
routing_key=self.routing_key,
arguments=self.unbind_arguments,
nowait=nowait)
def __repr__(self):
return '<binding: %s>' % (self, )
def __str__(self):
return '%s->%s' % (self.exchange.name, self.routing_key)
class Queue(MaybeChannelBound):
"""A Queue declaration.
:keyword name: See :attr:`name`.
:keyword exchange: See :attr:`exchange`.
:keyword routing_key: See :attr:`routing_key`.
:keyword channel: See :attr:`channel`.
:keyword durable: See :attr:`durable`.
:keyword exclusive: See :attr:`exclusive`.
:keyword auto_delete: See :attr:`auto_delete`.
:keyword queue_arguments: See :attr:`queue_arguments`.
:keyword binding_arguments: See :attr:`binding_arguments`.
:keyword on_declared: See :attr:`on_declared`
.. attribute:: name
Name of the queue. Default is no name (default queue destination).
.. attribute:: exchange
The :class:`Exchange` the queue binds to.
.. attribute:: routing_key
The routing key (if any), also called *binding key*.
The interpretation of the routing key depends on
the :attr:`Exchange.type`.
* direct exchange
Matches if the routing key property of the message and
the :attr:`routing_key` attribute are identical.
* fanout exchange
Always matches, even if the binding does not have a key.
* topic exchange
Matches the routing key property of the message by a primitive
pattern matching scheme. The message routing key then consists
of words separated by dots (`"."`, like domain names), and
two special characters are available; star (`"*"`) and hash
(`"#"`). The star matches any word, and the hash matches
zero or more words. For example `"*.stock.#"` matches the
routing keys `"usd.stock"` and `"eur.stock.db"` but not
`"stock.nasdaq"`.
.. attribute:: channel
The channel the Queue is bound to (if bound).
.. attribute:: durable
Durable queues remain active when a server restarts.
Non-durable queues (transient queues) are purged if/when
a server restarts.
Note that durable queues do not necessarily hold persistent
messages, although it does not make sense to send
persistent messages to a transient queue.
Default is :const:`True`.
.. attribute:: exclusive
Exclusive queues may only be consumed from by the
current connection. Setting the 'exclusive' flag
always implies 'auto-delete'.
Default is :const:`False`.
.. attribute:: auto_delete
If set, the queue is deleted when all consumers have
finished using it. Last consumer can be cancelled
either explicitly or because its channel is closed. If
there was no consumer ever on the queue, it won't be
deleted.
.. attribute:: queue_arguments
Additional arguments used when declaring the queue.
.. attribute:: binding_arguments
Additional arguments used when binding the queue.
.. attribute:: alias
Unused in Kombu, but applications can take advantage of this.
For example to give alternate names to queues with automatically
generated queue names.
.. attribute:: on_declared
Optional callback to be applied when the queue has been
declared (the ``queue_declare`` method returns).
This must be a function with a signature that accepts at least 3
positional arguments: ``(name, messages, consumers)``.
"""
name = ''
exchange = Exchange('')
routing_key = ''
durable = True
exclusive = False
auto_delete = False
no_ack = False
attrs = (
('name', None),
('exchange', None),
('routing_key', None),
('queue_arguments', None),
('binding_arguments', None),
('durable', bool),
('exclusive', bool),
('auto_delete', bool),
('no_ack', None),
('alias', None),
('bindings', list),
)
def __init__(self, name='', exchange=None, routing_key='',
channel=None, bindings=None, on_declared=None,
**kwargs):
super(Queue, self).__init__(**kwargs)
self.name = name or self.name
self.exchange = exchange or self.exchange
self.routing_key = routing_key or self.routing_key
self.bindings = set(bindings or [])
self.on_declared = on_declared
# allows Queue('name', [binding(...), binding(...), ...])
if isinstance(exchange, (list, tuple, set)):
self.bindings |= set(exchange)
if self.bindings:
self.exchange = None
# exclusive implies auto-delete.
if self.exclusive:
self.auto_delete = True
self.maybe_bind(channel)
def bind(self, channel):
on_declared = self.on_declared
bound = super(Queue, self).bind(channel)
bound.on_declared = on_declared
return bound
def __hash__(self):
return hash('Q|%s' % (self.name, ))
def when_bound(self):
if self.exchange:
self.exchange = self.exchange(self.channel)
def declare(self, nowait=False):
"""Declares the queue, the exchange and binds the queue to
the exchange."""
# - declare main binding.
if self.exchange:
self.exchange.declare(nowait)
self.queue_declare(nowait, passive=False)
if self.exchange is not None:
self.queue_bind(nowait)
# - declare extra/multi-bindings.
for B in self.bindings:
B.declare(self.channel)
B.bind(self, nowait=nowait)
return self.name
def queue_declare(self, nowait=False, passive=False):
"""Declare queue on the server.
:keyword nowait: Do not wait for a reply.
:keyword passive: If set, the server will not create the queue.
The client can use this to check whether a queue exists
without modifying the server state.
"""
ret = self.channel.queue_declare(queue=self.name,
passive=passive,
durable=self.durable,
exclusive=self.exclusive,
auto_delete=self.auto_delete,
arguments=self.queue_arguments,
nowait=nowait)
if not self.name:
self.name = ret[0]
if self.on_declared:
self.on_declared(*ret)
return ret
def queue_bind(self, nowait=False):
"""Create the queue binding on the server."""
return self.bind_to(self.exchange, self.routing_key,
self.binding_arguments, nowait=nowait)
def bind_to(self, exchange='', routing_key='',
arguments=None, nowait=False):
if isinstance(exchange, Exchange):
exchange = exchange.name
return self.channel.queue_bind(queue=self.name,
exchange=exchange,
routing_key=routing_key,
arguments=arguments,
nowait=nowait)
def get(self, no_ack=None):
"""Poll the server for a new message.
Returns the message instance if a message was available,
or :const:`None` otherwise.
:keyword no_ack: If set, messages received do not have to
be acknowledged.
This method provides direct access to the messages in a
queue using a synchronous dialogue, designed for
specific types of applications where synchronous functionality
is more important than performance.
"""
no_ack = self.no_ack if no_ack is None else no_ack
message = self.channel.basic_get(queue=self.name, no_ack=no_ack)
if message is not None:
m2p = getattr(self.channel, 'message_to_python', None)
if m2p:
message = m2p(message)
return message
def purge(self, nowait=False):
"""Remove all ready messages from the queue."""
return self.channel.queue_purge(queue=self.name,
nowait=nowait) or 0
def consume(self, consumer_tag='', callback=None,
no_ack=None, nowait=False):
"""Start a queue consumer.
Consumers last as long as the channel they were created on, or
until the client cancels them.
:keyword consumer_tag: Unique identifier for the consumer. The
consumer tag is local to a connection, so two clients
can use the same consumer tags. If this field is empty
the server will generate a unique tag.
:keyword no_ack: If set, messages received do not have to
be acknowledged.
:keyword nowait: Do not wait for a reply.
:keyword callback: callback called for each delivered message
"""
if no_ack is None:
no_ack = self.no_ack
return self.channel.basic_consume(queue=self.name,
no_ack=no_ack,
consumer_tag=consumer_tag or '',
callback=callback,
nowait=nowait)
def cancel(self, consumer_tag):
"""Cancel a consumer by consumer tag."""
return self.channel.basic_cancel(consumer_tag)
def delete(self, if_unused=False, if_empty=False, nowait=False):
"""Delete the queue.
:keyword if_unused: If set, the server will only delete the queue
if it has no consumers. A channel error will be raised
if the queue has consumers.
:keyword if_empty: If set, the server will only delete the queue
if it is empty. If it is not empty a channel error will be raised.
:keyword nowait: Do not wait for a reply.
"""
return self.channel.queue_delete(queue=self.name,
if_unused=if_unused,
if_empty=if_empty,
nowait=nowait)
def queue_unbind(self, arguments=None, nowait=False):
return self.unbind_from(self.exchange, self.routing_key,
arguments, nowait)
def unbind_from(self, exchange='', routing_key='',
arguments=None, nowait=False):
"""Unbind queue by deleting the binding from the server."""
return self.channel.queue_unbind(queue=self.name,
exchange=exchange.name,
routing_key=routing_key,
arguments=arguments,
nowait=nowait)
def __eq__(self, other):
if isinstance(other, Queue):
return (self.name == other.name and
self.exchange == other.exchange and
self.routing_key == other.routing_key and
self.queue_arguments == other.queue_arguments and
self.binding_arguments == other.binding_arguments and
self.durable == other.durable and
self.exclusive == other.exclusive and
self.auto_delete == other.auto_delete)
return False
def __repr__(self):
s = super(Queue, self).__repr__
if self.bindings:
return s('Queue %r -> %s' % (
self.name,
pretty_bindings(self.bindings),
))
return s('Queue %r -> %s -> %r' % (
self.name,
self.exchange,
self.routing_key or '',
))
@property
def can_cache_declaration(self):
return self.durable
@classmethod
def from_dict(self, queue, **options):
binding_key = options.get('binding_key') or options.get('routing_key')
e_durable = options.get('exchange_durable')
if e_durable is None:
e_durable = options.get('durable')
e_auto_delete = options.get('exchange_auto_delete')
if e_auto_delete is None:
e_auto_delete = options.get('auto_delete')
q_durable = options.get('queue_durable')
if q_durable is None:
q_durable = options.get('durable')
q_auto_delete = options.get('queue_auto_delete')
if q_auto_delete is None:
q_auto_delete = options.get('auto_delete')
e_arguments = options.get('exchange_arguments')
q_arguments = options.get('queue_arguments')
b_arguments = options.get('binding_arguments')
bindings = options.get('bindings')
exchange = Exchange(options.get('exchange'),
type=options.get('exchange_type'),
delivery_mode=options.get('delivery_mode'),
routing_key=options.get('routing_key'),
durable=e_durable,
auto_delete=e_auto_delete,
arguments=e_arguments)
return Queue(queue,
exchange=exchange,
routing_key=binding_key,
durable=q_durable,
exclusive=options.get('exclusive'),
auto_delete=q_auto_delete,
no_ack=options.get('no_ack'),
queue_arguments=q_arguments,
binding_arguments=b_arguments,
bindings=bindings)
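def _example_declare_and_get(channel):
    """Minimal usage sketch (not part of kombu itself), assuming ``channel``
    is an open channel from a ``kombu.Connection``: bind a queue to a direct
    exchange by routing key, declare everything on the broker, and poll once
    for a message (``None`` if the queue is empty)."""
    news_exchange = Exchange('news', type='direct', durable=True)
    video_queue = Queue('video', exchange=news_exchange, routing_key='video')
    bound_queue = video_queue(channel)   # bind the declaration to this channel
    bound_queue.declare()                # declares exchange, queue and binding
    return bound_queue.get(no_ack=True)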
|
|
#!/usr/bin/env python
from numpy import *
from numpy import f2py # not part of import *
from scitools.StringFunction import StringFunction
import time, sys, os
# extend sys.path so we can find Grid2D.py:
sys.path.insert(0, os.path.join(os.environ['scripting'],
'src','py','examples'))
from Grid2D import Grid2D
try:
import ext_gridloop
except ImportError:
print 'You must first build the ext_gridloop module'
sys.exit(1)
class Grid2Deff(Grid2D):
def ext_gridloop1(self, f):
"""Compute a[i,j] = f(xi,yj) in an external routine."""
# a is made here, sent to the routine, and then returned
a = zeros((self.xcoor.size, self.ycoor.size))
# C/C++ or Fortran module?
if ext_gridloop.__doc__ is not None:
if 'f2py' in ext_gridloop.__doc__:
# Fortran extension module
a = asarray(a, order='Fortran')
# could store a as self.a to avoid making Fortran
# arrays in repeated calls
ext_gridloop.gridloop1(a, self.xcoor, self.ycoor, f)
return a
def ext_gridloop2(self, f):
"""Compute a[i,j] = f(xi,yj) in an external routine."""
# a is made in the external routine
a = ext_gridloop.gridloop2(self.xcoor, self.ycoor, f)
return a
def ext_gridloop_exceptions(self, f):
"""Test error handling in the extension module."""
try: #1
ext_gridloop.gridloop1((1,2), self.xcoor, self.ycoor[1:], f)
except:
print sys.exc_type, sys.exc_value
try: #2
ext_gridloop.gridloop1(self.xcoor, self.xcoor, self.ycoor[1:], f)
except:
print sys.exc_type, sys.exc_value
try: #3
ext_gridloop.gridloop2(self.xcoor, self.ycoor, 'abc')
except:
print sys.exc_type, sys.exc_value
try: #4
ext_gridloop.gridloop2(array(self.xcoor,Complex64),
self.ycoor, 'abc')
except:
print sys.exc_type, sys.exc_value
try: #5
ext_gridloop.gridloop2(array([[0,0],[1,2]]), self.ycoor, 'abc')
except:
print sys.exc_type, sys.exc_value
# NOTE: the next three functions are only available in the
# Fortran 77 extension module:
def ext_gridloop_vec1(self, f):
"""As ext_gridloop2, but vectorized callback."""
a = zeros((self.xcoor.size, self.ycoor.size))
a = ext_gridloop.gridloop_vec1(a, self.xcoor, self.ycoor, f)
return a
def ext_gridloop_vec2(self, f):
"""As ext_gridloop_vec1, but callback to func. w/grid arg."""
a = zeros((self.xcoor.size, self.ycoor.size))
a = ext_gridloop.gridloop_vec2(a, f, func1_extra_args=(self,))
return a
def myfuncf3(self, a):
a[:,:] = myfunc(self.xcoorv, self.ycoorv) # in-place mod.
def ext_gridloop_vec3(self, f):
"""As ext_gridloop_vec2, but callback to class method."""
a = zeros((self.xcoor.size, self.ycoor.size))
a = ext_gridloop.gridloop_vec2(a, f)
return a
def ext_gridloop2_str(self, f77_name):
"""
Call an interface to ext_gridloop.gridloop2, which avoids
callbacks to Python and calls the f77_name F77 function
instead.
"""
a = ext_gridloop.gridloop2_str(self.xcoor, self.ycoor,
f77_name)
return a
def ext_gridloop_noalloc(self, f77_name, a):
"""
As ext_gridloop2_str, but a is intent(in,out), i.e., there is
no allocation of a in the wrapper code. If the function
is called a large number of times (as in our efficiency
tests), intent(in,out) increases the performance.
"""
a = ext_gridloop.gridloop_noalloc(a, self.xcoor, self.ycoor,
f77_name)
return a
def ext_gridloop2_fcb(self):
"""As ext_gridloop2, but compiled Fortran callback func."""
import callback
a = callback.gridloop2_fcb(self.xcoor, self.ycoor)
return a
def ext_gridloop2_fcb_compile(self, fstr):
if not isinstance(fstr, str):
raise TypeError, \
'fstr must be string expression, not %s' % type(fstr)
# generate Fortran source
source = """
real*8 function fcb(x, y)
real*8 x, y
fcb = %s
return
end
subroutine gridloop2_fcb(a, xcoor, ycoor, nx, ny)
integer nx, ny
real*8 a(nx,ny), xcoor(nx), ycoor(ny)
Cf2py intent(out) a
Cf2py intent(in) xcoor
Cf2py intent(in) ycoor
Cf2py depend(nx,ny) a
real*8 fcb
external fcb
call gridloop2(a, xcoor, ycoor, nx, ny, fcb)
return
end
""" % fstr
# compile callback code and link with ext_gridloop.so:
f2py_args = "--fcompiler=Gnu --build-dir=tmp2"\
" -DF2PY_REPORT_ON_ARRAY_COPY=1 "\
" ./ext_gridloop.so"
r = f2py.compile(source, modulename='callback',
extra_args=f2py_args, verbose=True,
source_fn='_cb.f')
if r:
print 'unsuccessful compilation'; sys.exit(1)
import callback # see if we can import successfully
def ext_gridloop2_fcb_ptr(self):
"""As ext_gridloop2, but compiled Fortran callback func."""
from callback import fcb
a = ext_gridloop.gridloop2(self.xcoor, self.ycoor,
fcb._cpointer)
return a
def ext_gridloop2_fcb_ptr_compile(self, fstr):
if not isinstance(fstr, StringFunction):
raise TypeError, \
'fstr must be StringFunction, not %s' % type(fstr)
source = fstr.F77_code('fcb')
f2py_args = "--fcompiler=Gnu --build-dir=tmp2"
r = f2py.compile(source, modulename='callback',
extra_args=f2py_args, verbose=True,
source_fn='_cb.f')
if r:
print 'unsuccessful compilation'; sys.exit(1)
import callback # see if we can import successfully
def ext_gridloop2_compile(self, fstr):
if not isinstance(fstr, str):
raise TypeError, \
'fstr must be string expression, not %s' % type(fstr)
# generate Fortran source for gridloop2:
source = """
subroutine gridloop2(a, xcoor, ycoor, nx, ny)
integer nx, ny
real*8 a(nx,ny), xcoor(nx), ycoor(ny)
Cf2py intent(out) a
Cf2py depend(nx,ny) a
integer i,j
real*8 x, y
do j = 1,ny
y = ycoor(j)
do i = 1,nx
x = xcoor(i)
a(i,j) = %s
end do
end do
return
end
""" % fstr
f2py_args = "--fcompiler=Gnu --build-dir tmp1"\
" -DF2PY_REPORT_ON_ARRAY_COPY=1"
r = f2py.compile(source, modulename='ext_gridloop2',
extra_args=f2py_args, verbose=True,
source_fn='_cb.f')
if r:
print 'unsuccessful compilation'; sys.exit(1)
import ext_gridloop2 # see if we can import successfully
def ext_gridloop2_v2(self):
"""
As ext_gridloop2, but the Fortran gridloop2 function was
generated and compiled in Python (in ext_gridloop2_compile).
"""
import ext_gridloop2
return ext_gridloop2.gridloop2(self.xcoor, self.ycoor)
def ext_gridloop2_weave(self, fstr):
"""Migrate loop to C++ with aid of Weave."""
try:
from scipy import weave
except ImportError:
print 'Could not import weave.\nContinue...'
return
if not isinstance(fstr, str):
raise TypeError, \
'fstr must be string expression, not %s' % type(fstr)
# the callback function is now coded in C++
# (fstr must be valid C++ code):
extra_code = r"""
double cppcb(double x, double y) {
return %s;
}
""" % fstr
# the loop in C++ (with Blitz++ array syntax):
code = r"""
int i,j;
for (i=0; i<nx; i++) {
for (j=0; j<ny; j++) {
a(i,j) = cppcb(xcoor(i), ycoor(j));
}
}
"""
nx = self.nx; ny = self.ny
a = zeros((nx, ny))
xcoor = self.xcoor; ycoor = self.ycoor
err = weave.inline(code, ['a', 'nx', 'ny', 'xcoor', 'ycoor'],
type_converters=weave.converters.blitz,
support_code=extra_code, compiler='gcc')
# a is filled
return a
def ext_gridloop1_instant(self, fstr):
if not isinstance(fstr, str):
raise TypeError, \
'fstr must be string expression, not %s' % type(fstr)
# generate C source for gridloop1:
# (no call to C function f(x,y), fstr is inserted in the loop)
source = """
void gridloop1(double *a, int nx, int ny,
double *xcoor, double *ycoor)
{
# define index(a, i, j) a[i*ny + j]
int i, j; double x, y;
for (i=0; i<nx; i++) {
for (j=0; j<ny; j++) {
x = xcoor[i]; y = ycoor[j];
index(a, i, j) = %s
}
}
}
""" % fstr
try:
from instant import inline_with_numpy
a = zeros((self.nx, self.ny))
arrays = [['nx', 'ny', 'a'],
['nx', 'xcoor'],
['ny', 'ycoor']]
self.gridloop1_instant = \
inline_with_numpy(source, arrays=arrays)
except:
self.gridloop1_instant = None
def dump(self, a):
"""Nice printout of a 2D array a."""
for i in xrange(a.shape[0]):
for j in xrange(a.shape[1]):
print 'value at (%g,%g) \t = a[%d,%d] = %g' % \
(self.xcoor[i], self.ycoor[j], i, j, a[i,j])
def gridloop_psyco_init(self, method):
"""Try to accelerate Grid2D.gridloop with psyco."""
# define method self.gridloop_psyco:
try:
import psyco
self.gridloop_psyco = psyco.proxy(method)
except ImportError:
self.gridloop_psyco = method
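def _example_pure_numpy_reference(g, f):
    """Hypothetical helper (not in the original example code): the plain
    vectorized NumPy evaluation that the F77/C++/weave/instant variants in
    Grid2Deff are compared against.  Assumes g.xcoor and g.ycoor are the 1-D
    coordinate arrays set up by the Grid2D base class."""
    x = g.xcoor[:, newaxis]   # shape (nx, 1)
    y = g.ycoor[newaxis, :]   # shape (1, ny)
    return f(x, y)            # broadcasting fills the full (nx, ny) grid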
def f1(x,y):
print 'x+2*y =',x+2*y
return x+2*y
def verify1():
"""Basic test of the extension module."""
g = Grid2Deff(dx=0.5, dy=1)
f_exact = g(f1) # NumPy computation
expression1 = StringFunction('x + 2*y',
independent_variables=('x','y'),
globals=globals())
f = g.ext_gridloop1(f1)
print 'f computed by external gridloop1 function and f1:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
f = g.ext_gridloop2(f1)
print 'f computed by external gridloop2 function and f1:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
f = g.ext_gridloop1(expression1)
print 'f computed by external gridloop1 function and StringFunction:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
f = g.ext_gridloop2(expression1)
print 'f computed by external gridloop2 function and StringFunction:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
fast_func = expression1.__call__
f = g.ext_gridloop2(fast_func)
print 'f computed by external gridloop2 function and StringFunction.__call__:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
f = g(expression1)
print 'f computed by __call__ and StringFunction:\n', f
if allclose(f, f_exact, atol=1.0E-10, rtol=1.0E-12):
print 'f is correct'
# check printing:
print 'array seen from Python:'
g.dump(f)
if 'dump' in dir(ext_gridloop):
print 'array seen from Fortran (transposed, but right values):'
ext_gridloop.dump(f, g.xcoor, g.ycoor)
def myfunc(x, y):
return sin(x*y) + 8*x
def myfuncf1(a, xcoor, ycoor, nx, ny):
"""Vectorized function to be called from extension module."""
#print 'myfuncf1; type of args:',type(a),type(xcoor),type(nx)
x = xcoor[:,newaxis]
y = ycoor[newaxis,:]
a[:,:] = myfunc(x, y) # in-place modification of a
print 'myfuncf1, a=',a
def myfuncf2(a, g):
"""Vectorized function to be called from extension module."""
#print 'myfuncf2; type of args:',type(a),type(g)
a[:,:] = myfunc(g.xcoorv, g.ycoorv) # in-place modification of a
def verify2(n=3):
"""
Test of some methods in class Grid2Deff that call up
some F77 routines for improving the efficiency of callbacks
to Python.
"""
if not 'gridloop_vec2' in dir(ext_gridloop):
raise ImportError, 'verify2 works only for F77 module'
dx = 1.0/n
g = Grid2Deff(dx=dx, dy=dx)
from StringIO import StringIO
from scitools.numpyutils import arr
a_exact = arr(file_=StringIO("""
0. 0. 0. 0.
2.66666667 2.7775493 2.88706441 2.99386136
5.33333333 5.55373108 5.7632897 5.95170314
8. 8.3271947 8.6183698 8.84147098"""))
def _check():
if not allclose(a, a_exact):
print 'ERROR, a is wrong, correct a reads\n', a_exact
else:
print 'correct array'
a = g.ext_gridloop_vec1(myfuncf1)
print "g.ext_gridloop_vec1(myfuncf1): a=\n",a
_check()
a = g.ext_gridloop_vec2(myfuncf2)
print "g.ext_gridloop_vec2(myfuncf2): a=\n",a
_check()
# need f2py version > 2.42 (callback to class method):
a = g.ext_gridloop_vec3(g.myfuncf3)
print "g.ext_gridloop_vec3(g.myfuncf3): a=\n",a
_check()
a = g.ext_gridloop2_str('myfunc')
print "g.ext_gridloop_str('myfunc'): a=\n",a
_check()
a = g.ext_gridloop_noalloc('myfunc', a)
print "g.ext_gridloop_str_noalloc('myfunc'): a=\n",a
_check()
fstr = 'sin(x*y) + 8*x'
g.ext_gridloop2_fcb_compile(fstr)
a = g.ext_gridloop2_fcb()
print "g.gridloop2_fcb: a=\n",a
_check()
import callback
print 'contents of callback module:', dir(callback)
fstr = StringFunction('sin(x*y) + 8*x')
g.ext_gridloop2_fcb_ptr_compile(fstr)
a = g.ext_gridloop2_fcb_ptr()
print "g.gridloop2_fcb_ptr: a=\n",a
_check()
import callback
print 'fcb callback module:', dir(callback), dir(callback.fcb)
g.ext_gridloop2_compile(fstr)
a = g.ext_gridloop2_v2()
print "g.gridloop2_v2: a=\n",a
_check()
a = g.ext_gridloop2_weave(fstr)
print "g.gridloop2_weave: a=\n",a
_check()
g.gridloop_psyco_init(g.gridloop)
a = g.gridloop_psyco(fstr)
print "g.gridloop_psyco(str): a=\n",a
_check()
a = g.gridloop_psyco(myfunc)
print "g.gridloop_psyco(func): a=\n",a
_check()
g.ext_gridloop1_instant(fstr)
g.gridloop1_instant(a, g.nx, g.ny, g.xcoor, g.ycoor)
print "g.gridloop1_instant: a=\n", a
def timing2(n=2000, best_time=1.0):
"""Time different implementations of the extension module."""
print 'Grid2Deff.timing2: reference CPU time = %g' % best_time
dx = 1.0/n
g = Grid2Deff(dx=dx, dy=dx)
# here we use straight NumPy sin in a scalar context:
def myfunc1(x, y):
return sin(x*y) + 8*x
def myfunc2(x, y):
return math.sin(x*y) + 8*x
expression1 = StringFunction('sin(x*y) + 8*x',
independent_variables=('x','y'),
globals=globals())
expression1_f = expression1.__call__ # for efficiency and F77 callback
expression2 = StringFunction('math.sin(x*y) + 8*x',
independent_variables=('x','y'),
globals=globals())
expression2_f = expression2.__call__ # for efficiency and F77 callback
from scitools.misc import timer
from scitools.EfficiencyTable import EfficiencyTable
e = EfficiencyTable('Grid2Deff tests, %dx%d grid' % (n,n), best_time)
t0a = timer(g.gridloop, (myfunc1,), repetitions=1)
e.add('g.gridloop, myfunc1', t0a)
t0b = timer(g.gridloop, (myfunc2,), repetitions=1)
e.add('g.gridloop, myfunc2', t0b)
t0c = timer(g.__call__, (myfunc1,), repetitions=1)
e.add('g.__call__, myfunc1', t0c)
t0d = timer(g.__call__, (expression1_f,), repetitions=1)
e.add('g.__call__, expression1_f', t0d)
t0e = timer(g.gridloop_itemset, (myfunc2,), repetitions=1)
e.add('g.gridloop_itemset, myfunc2', t0e)
t1a = timer(g.ext_gridloop1, (myfunc1,), repetitions=1)
e.add('g.ext_gridloop1, myfunc1', t1a)
t1b = timer(g.ext_gridloop1, (myfunc2,), repetitions=1)
e.add('g.ext_gridloop1, myfunc2', t1b)
t2a = timer(g.ext_gridloop2, (myfunc1,), repetitions=1)
e.add('g.ext_gridloop2, myfunc1', t2a)
t2b = timer(g.ext_gridloop2, (myfunc2,), repetitions=1)
e.add('g.ext_gridloop2, myfunc2', t2b)
t3a = timer(g.ext_gridloop2, (expression1_f,), repetitions=1)
e.add('g.ext_gridloop2, expression1_f', t3a)
t3b = timer(g.ext_gridloop2, (expression2_f,), repetitions=1)
e.add('g.ext_gridloop2, expression2_f', t3b)
nrep = 20
# try the improved functions (works only for the F77 module):
if 'gridloop_vec2' in dir(ext_gridloop):
t4 = timer(g.ext_gridloop_vec2, (myfuncf2,), repetitions=nrep)
e.add('g.ext_gridloop_vec2, myfuncf2', t4)
if 'gridloop2_str' in dir(ext_gridloop):
t5 = timer(g.ext_gridloop2_str, ('myfunc',), repetitions=nrep)
e.add('g.ext_gridloop2_str, myfunc', t5)
# try the version without allocation (first, make an a array):
a = g.ext_gridloop2(myfunc1) # a has now Fortran storage
t5b = timer(g.ext_gridloop_noalloc,
('myfunc', a), repetitions=nrep)
e.add('g.ext_gridloop_noalloc, myfunc', t5b)
# try 'inline' F77 compiled callback too:
# (give F77 source for core of callback function as argument)
g.ext_gridloop2_fcb_compile(str(expression1))
t6 = timer(g.ext_gridloop2_fcb, (), repetitions=nrep)
e.add('g.ext_gridloop2_fcb(%s)' % repr(str(expression1)), t6)
g.ext_gridloop2_fcb_ptr_compile(expression1)
t6b = timer(g.ext_gridloop2_fcb_ptr, (), repetitions=nrep)
e.add('g.ext_gridloop2_fcb_ptr(%s)' % repr(expression1), t6b)
g.ext_gridloop2_compile(str(expression1))
t7 = timer(g.ext_gridloop2_v2, (), repetitions=nrep)
e.add('g.ext_gridloop2_v2(%s)' % repr(str(expression1)), t7)
# weave version:
t8 = timer(g.ext_gridloop2_weave, (str(expression1),), repetitions=nrep)
e.add('g.ext_gridloop2_weave(%s)' % repr(str(expression1)), t8)
# psyco:
g.gridloop_psyco_init(g.gridloop)
if g.gridloop_psyco != g.gridloop: # has psyco
t9a = timer(g.gridloop_psyco, (myfunc2,), repetitions=1)
e.add('g.gridloop_psyco, myfunc2', t9a)
t9b = timer(g.gridloop_psyco, (expression2_f,), repetitions=1)
e.add('g.gridloop_psyco, expression2_f', t9b)
g.gridloop_psyco_init(g.gridloop_itemset)
if g.gridloop_psyco != g.gridloop_itemset: # has psyco
t9a = timer(g.gridloop_psyco, (myfunc2,), repetitions=1)
e.add('g.gridloop_psyco (itemset), myfunc2', t9a)
t9b = timer(g.gridloop_psyco, (expression2_f,), repetitions=1)
e.add('g.gridloop_psyco (itemset), expression2_f', t9b)
# instant:
g.ext_gridloop1_instant(str(expression1))
if g.gridloop1_instant is not None:
a = zeros((g.nx, g.ny))
t10 = timer(g.gridloop1_instant,
(a, g.nx, g.ny, g.xcoor, g.ycoor),
repetitions=nrep)
e.add('g.gridloop1_instant', t10)
print '\n\n\n\nrun from directory', os.getcwd()
print e
#print 'Experiments in table:', e.experiments
def exceptions1():
"""Test exceptions raised by the extension module."""
g = Grid2Deff(dx=0.5, dy=1)
def myfunc(x, y):
return sin(x*y) + 8*x
g.ext_gridloop_exceptions(myfunc)
def run():
# provide function to call (verify1, timing2, exceptions1, etc.)
# as command-line argument
try:
func = sys.argv[1]
except:
# basic test if no command-line argument
func = 'verify1'
if func == 'timing2':
# in case of timing, specify grid size as 2nd argument:
try:
n = int(sys.argv[2])
except:
n = 1100
# specify reference executing time as 3rd argument:
try:
best_time = float(sys.argv[3])
except:
best_time = 1.0
exec 'timing2(%d, %g)' % (n, best_time)
else:
exec func + '()'
if __name__ == '__main__':
# lots of experiments:
# Grid2Deff.py timing2 1100 0.13
# 1100 is grid size, 0.13 is reference time
run()
|
|
"""
Renderer Module
This module defines the PlotlyRenderer class and a single function,
fig_to_plotly, which is intended to be the main way that users will interact
with the matplotlylib package.
"""
from __future__ import absolute_import
import six
import warnings
import plotly.graph_objs as go
from plotly.matplotlylib.mplexporter import Renderer
from plotly.matplotlylib import mpltools
# Warning format
def warning_on_one_line(msg, category, filename, lineno, file=None, line=None):
return "%s:%s: %s:\n\n%s\n\n" % (filename, lineno, category.__name__, msg)
warnings.formatwarning = warning_on_one_line
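def _example_convert_figure(mpl_fig):
    """Hypothetical sketch (not part of this module): drive the
    PlotlyRenderer defined below with the mplexporter Exporter, following
    the "Basic Usage" notes in the class docstring.  The Exporter import
    path is an assumption."""
    from plotly.matplotlylib.mplexporter import Exporter  # assumed location
    renderer = PlotlyRenderer()      # note: takes no arguments
    Exporter(renderer).run(mpl_fig)  # walks the mpl figure, filling renderer.plotly_fig
    return renderer.plotly_fig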
class PlotlyRenderer(Renderer):
"""A renderer class inheriting from base for rendering mpl plots in plotly.
A renderer class to be used with an exporter for rendering matplotlib
plots in Plotly. This module defines the PlotlyRenderer class which handles
the creation of the JSON structures that get sent to plotly.
All class attributes available are defined in __init__().
Basic Usage:
# (mpl code) #
fig = gcf()
renderer = PlotlyRenderer(fig)
exporter = Exporter(renderer)
exporter.run(fig) # ... et voila
"""
def __init__(self):
"""Initialize PlotlyRenderer obj.
PlotlyRenderer obj is called on by an Exporter object to draw
matplotlib objects like figures, axes, text, etc.
All class attributes are listed here in the __init__ method.
"""
self.plotly_fig = go.Figure()
self.mpl_fig = None
self.current_mpl_ax = None
self.bar_containers = None
self.current_bars = []
self.axis_ct = 0
self.x_is_mpl_date = False
self.mpl_x_bounds = (0, 1)
self.mpl_y_bounds = (0, 1)
self.msg = "Initialized PlotlyRenderer\n"
def open_figure(self, fig, props):
"""Creates a new figure by beginning to fill out layout dict.
The 'autosize' key is set to false so that the figure will mirror
sizes set by mpl. The 'hovermode' key controls what shows up when you
mouse around a figure in plotly; it's set to show the 'closest' point.
Positional arguments:
fig -- a matplotlib.figure.Figure object.
props.keys(): [
'figwidth',
'figheight',
'dpi'
]
"""
self.msg += "Opening figure\n"
self.mpl_fig = fig
self.plotly_fig["layout"] = go.Layout(
width=int(props["figwidth"] * props["dpi"]),
height=int(props["figheight"] * props["dpi"]),
autosize=False,
hovermode="closest",
)
self.mpl_x_bounds, self.mpl_y_bounds = mpltools.get_axes_bounds(fig)
margin = go.layout.Margin(
l=int(self.mpl_x_bounds[0] * self.plotly_fig["layout"]["width"]),
r=int((1 - self.mpl_x_bounds[1]) * self.plotly_fig["layout"]["width"]),
t=int((1 - self.mpl_y_bounds[1]) * self.plotly_fig["layout"]["height"]),
b=int(self.mpl_y_bounds[0] * self.plotly_fig["layout"]["height"]),
pad=0,
)
self.plotly_fig["layout"]["margin"] = margin
def close_figure(self, fig):
"""Closes figure by cleaning up data and layout dictionaries.
The PlotlyRenderer's job is to create an appropriate set of data and
layout dictionaries. When the figure is closed, some cleanup and
repair is necessary. This method removes inappropriate dictionary
entries, freeing up Plotly to use defaults and best judgements to
complete the entries. This method is called by an Exporter object.
Positional arguments:
fig -- a matplotlib.figure.Figure object.
"""
self.plotly_fig["layout"]["showlegend"] = False
self.msg += "Closing figure\n"
def open_axes(self, ax, props):
"""Setup a new axes object (subplot in plotly).
Plotly stores information about subplots in different 'xaxis' and
'yaxis' objects which are numbered. These are just dictionaries
included in the layout dictionary. This function takes information
from the Exporter, fills in appropriate dictionary entries,
and updates the layout dictionary. PlotlyRenderer keeps track of the
number of plots by incrementing the axis_ct attribute.
Setting the proper plot domain in plotly is a bit tricky. Refer to
the documentation for mpltools.convert_x_domain and
mpltools.convert_y_domain.
Positional arguments:
ax -- an mpl axes object. This will become a subplot in plotly.
props.keys() -- [
'axesbg', (background color for axes obj)
'axesbgalpha', (alpha, or opacity for background)
'bounds', ((x0, y0, width, height) for axes)
'dynamic', (zoom/pan-able?)
'axes', (list: [xaxis, yaxis])
'xscale', (log, linear, or date)
'yscale',
'xlim', (range limits for x)
'ylim',
'xdomain' (xdomain=xlim, unless it's a date)
'ydomain'
]
"""
self.msg += " Opening axes\n"
self.current_mpl_ax = ax
self.bar_containers = [
c
for c in ax.containers # empty is OK
if c.__class__.__name__ == "BarContainer"
]
self.current_bars = []
self.axis_ct += 1
# set defaults in axes
xaxis = go.layout.XAxis(
anchor="y{0}".format(self.axis_ct), zeroline=False, ticks="inside"
)
yaxis = go.layout.YAxis(
anchor="x{0}".format(self.axis_ct), zeroline=False, ticks="inside"
)
# update defaults with things set in mpl
mpl_xaxis, mpl_yaxis = mpltools.prep_xy_axis(
ax=ax, props=props, x_bounds=self.mpl_x_bounds, y_bounds=self.mpl_y_bounds
)
xaxis.update(mpl_xaxis)
yaxis.update(mpl_yaxis)
bottom_spine = mpltools.get_spine_visible(ax, "bottom")
top_spine = mpltools.get_spine_visible(ax, "top")
left_spine = mpltools.get_spine_visible(ax, "left")
right_spine = mpltools.get_spine_visible(ax, "right")
xaxis["mirror"] = mpltools.get_axis_mirror(bottom_spine, top_spine)
yaxis["mirror"] = mpltools.get_axis_mirror(left_spine, right_spine)
xaxis["showline"] = bottom_spine
yaxis["showline"] = top_spine
# put axes in our figure
self.plotly_fig["layout"]["xaxis{0}".format(self.axis_ct)] = xaxis
self.plotly_fig["layout"]["yaxis{0}".format(self.axis_ct)] = yaxis
# let all subsequent dates be handled properly if required
if "type" in dir(xaxis) and xaxis["type"] == "date":
self.x_is_mpl_date = True
def close_axes(self, ax):
"""Close the axes object and clean up.
Bars from bar charts are given to PlotlyRenderer one-by-one,
thus they need to be taken care of at the close of each axes object.
The self.current_bars variable should be empty unless a bar
chart has been created.
Positional arguments:
ax -- an mpl axes object, not required at this time.
"""
self.draw_bars(self.current_bars)
self.msg += " Closing axes\n"
self.x_is_mpl_date = False
def draw_bars(self, bars):
# sort bars according to bar containers
mpl_traces = []
for container in self.bar_containers:
mpl_traces.append(
[
bar_props
for bar_props in self.current_bars
if bar_props["mplobj"] in container
]
)
for trace in mpl_traces:
self.draw_bar(trace)
def draw_bar(self, coll):
"""Draw a collection of similar patches as a bar chart.
After bars are sorted, an appropriate data dictionary must be created
to tell plotly about this data. Just like draw_line or draw_markers,
draw_bar translates patch/path information into something plotly
understands.
Positional arguments:
patch_coll -- a collection of patches to be drawn as a bar chart.
"""
tol = 1e-10
trace = [mpltools.make_bar(**bar_props) for bar_props in coll]
widths = [bar_props["x1"] - bar_props["x0"] for bar_props in trace]
heights = [bar_props["y1"] - bar_props["y0"] for bar_props in trace]
vertical = abs(sum(widths[0] - widths[iii] for iii in range(len(widths)))) < tol
horizontal = (
abs(sum(heights[0] - heights[iii] for iii in range(len(heights)))) < tol
)
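        # Orientation heuristic (descriptive note): near-uniform bar widths
        # suggest a vertical bar chart and near-uniform heights a horizontal
        # one; when both hold (e.g. a single bar), the monotonic-x check
        # below decides.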
if vertical and horizontal:
# Check for monotonic x. Can't both be true!
x_zeros = [bar_props["x0"] for bar_props in trace]
if all(
(x_zeros[iii + 1] > x_zeros[iii] for iii in range(len(x_zeros[:-1])))
):
orientation = "v"
else:
orientation = "h"
elif vertical:
orientation = "v"
else:
orientation = "h"
if orientation == "v":
self.msg += " Attempting to draw a vertical bar chart\n"
old_heights = [bar_props["y1"] for bar_props in trace]
for bar in trace:
bar["y0"], bar["y1"] = 0, bar["y1"] - bar["y0"]
new_heights = [bar_props["y1"] for bar_props in trace]
# check if we're stacked or not...
for old, new in zip(old_heights, new_heights):
if abs(old - new) > tol:
self.plotly_fig["layout"]["barmode"] = "stack"
self.plotly_fig["layout"]["hovermode"] = "x"
x = [bar["x0"] + (bar["x1"] - bar["x0"]) / 2 for bar in trace]
y = [bar["y1"] for bar in trace]
bar_gap = mpltools.get_bar_gap(
[bar["x0"] for bar in trace], [bar["x1"] for bar in trace]
)
if self.x_is_mpl_date:
x = [bar["x0"] for bar in trace]
formatter = (
self.current_mpl_ax.get_xaxis()
.get_major_formatter()
.__class__.__name__
)
x = mpltools.mpl_dates_to_datestrings(x, formatter)
else:
self.msg += " Attempting to draw a horizontal bar chart\n"
old_rights = [bar_props["x1"] for bar_props in trace]
for bar in trace:
bar["x0"], bar["x1"] = 0, bar["x1"] - bar["x0"]
new_rights = [bar_props["x1"] for bar_props in trace]
# check if we're stacked or not...
for old, new in zip(old_rights, new_rights):
if abs(old - new) > tol:
self.plotly_fig["layout"]["barmode"] = "stack"
self.plotly_fig["layout"]["hovermode"] = "y"
x = [bar["x1"] for bar in trace]
y = [bar["y0"] + (bar["y1"] - bar["y0"]) / 2 for bar in trace]
bar_gap = mpltools.get_bar_gap(
[bar["y0"] for bar in trace], [bar["y1"] for bar in trace]
)
bar = go.Bar(
orientation=orientation,
x=x,
y=y,
xaxis="x{0}".format(self.axis_ct),
yaxis="y{0}".format(self.axis_ct),
opacity=trace[0]["alpha"], # TODO: get all alphas if array?
marker=go.bar.Marker(
color=trace[0]["facecolor"], # TODO: get all
line=dict(width=trace[0]["edgewidth"]),
),
) # TODO ditto
if len(bar["x"]) > 1:
self.msg += " Heck yeah, I drew that bar chart\n"
            self.plotly_fig.add_trace(bar)
if bar_gap is not None:
self.plotly_fig["layout"]["bargap"] = bar_gap
else:
self.msg += " Bar chart not drawn\n"
            warnings.warn(
                "found bar chart data with length <= 1, "
                "assuming data redundancy, not plotting."
            )
def draw_legend_shapes(self, mode, shape, **props):
"""Create a shape that matches lines or markers in legends.
        Main issue is that paths for circles do not render, so we have to use 'circle'
instead of 'path'.
"""
for single_mode in mode.split("+"):
x = props["data"][0][0]
y = props["data"][0][1]
if single_mode == "markers" and props.get("markerstyle"):
size = shape.pop("size", 6)
symbol = shape.pop("symbol")
# aligning to "center"
x0 = 0
y0 = 0
x1 = size
y1 = size
markerpath = props["markerstyle"].get("markerpath")
if markerpath is None and symbol != "circle":
self.msg += (
"not sure how to handle this marker without a valid path\n"
)
return
# marker path to SVG path conversion
path = " ".join(
[f"{a} {t[0]},{t[1]}" for a, t in zip(markerpath[1], markerpath[0])]
)
if symbol == "circle":
# symbols like . and o in matplotlib, use circle
# plotly also maps many other markers to circle, such as 1,8 and p
path = None
shape_type = "circle"
x0 = -size / 2
y0 = size / 2
x1 = size / 2
y1 = size + size / 2
else:
# triangles, star etc
shape_type = "path"
legend_shape = go.layout.Shape(
type=shape_type,
xref="paper",
yref="paper",
x0=x0,
y0=y0,
x1=x1,
y1=y1,
xsizemode="pixel",
ysizemode="pixel",
xanchor=x,
yanchor=y,
path=path,
**shape,
)
elif single_mode == "lines":
mode = "line"
x1 = props["data"][1][0]
y1 = props["data"][1][1]
legend_shape = go.layout.Shape(
type=mode,
xref="paper",
yref="paper",
x0=x,
y0=y + 0.02,
x1=x1,
y1=y1 + 0.02,
**shape,
)
else:
self.msg += "not sure how to handle this element\n"
return
self.plotly_fig.add_shape(legend_shape)
self.msg += " Heck yeah, I drew that shape\n"
def draw_marked_line(self, **props):
"""Create a data dict for a line obj.
This will draw 'lines', 'markers', or 'lines+markers'. For legend elements,
this will use layout.shapes, so they can be positioned with paper refs.
props.keys() -- [
'coordinates', ('data', 'axes', 'figure', or 'display')
'data', (a list of xy pairs)
'mplobj', (the matplotlib.lines.Line2D obj being rendered)
'label', (the name of the Line2D obj being rendered)
'linestyle', (linestyle dict, can be None, see below)
'markerstyle', (markerstyle dict, can be None, see below)
]
props['linestyle'].keys() -- [
'alpha', (opacity of Line2D obj)
'color', (color of the line if it exists, not the marker)
'linewidth',
'dasharray', (code for linestyle, see DASH_MAP in mpltools.py)
'zorder', (viewing precedence when stacked with other objects)
]
props['markerstyle'].keys() -- [
'alpha', (opacity of Line2D obj)
'marker', (the mpl marker symbol, see SYMBOL_MAP in mpltools.py)
'facecolor', (color of the marker face)
'edgecolor', (color of the marker edge)
'edgewidth', (width of marker edge)
'markerpath', (an SVG path for drawing the specified marker)
'zorder', (viewing precedence when stacked with other objects)
]
"""
self.msg += " Attempting to draw a line "
line, marker, shape = {}, {}, {}
if props["linestyle"] and props["markerstyle"]:
self.msg += "... with both lines+markers\n"
mode = "lines+markers"
elif props["linestyle"]:
self.msg += "... with just lines\n"
mode = "lines"
elif props["markerstyle"]:
self.msg += "... with just markers\n"
mode = "markers"
if props["linestyle"]:
color = mpltools.merge_color_and_opacity(
props["linestyle"]["color"], props["linestyle"]["alpha"]
)
if props["coordinates"] == "data":
line = go.scatter.Line(
color=color,
width=props["linestyle"]["linewidth"],
dash=mpltools.convert_dash(props["linestyle"]["dasharray"]),
)
else:
shape = dict(
line=dict(
color=color,
width=props["linestyle"]["linewidth"],
dash=mpltools.convert_dash(props["linestyle"]["dasharray"]),
)
)
if props["markerstyle"]:
if props["coordinates"] == "data":
marker = go.scatter.Marker(
opacity=props["markerstyle"]["alpha"],
color=props["markerstyle"]["facecolor"],
symbol=mpltools.convert_symbol(props["markerstyle"]["marker"]),
size=props["markerstyle"]["markersize"],
line=dict(
color=props["markerstyle"]["edgecolor"],
width=props["markerstyle"]["edgewidth"],
),
)
else:
shape = dict(
opacity=props["markerstyle"]["alpha"],
fillcolor=props["markerstyle"]["facecolor"],
symbol=mpltools.convert_symbol(props["markerstyle"]["marker"]),
size=props["markerstyle"]["markersize"],
line=dict(
color=props["markerstyle"]["edgecolor"],
width=props["markerstyle"]["edgewidth"],
),
)
if props["coordinates"] == "data":
marked_line = go.Scatter(
mode=mode,
name=(
str(props["label"])
if isinstance(props["label"], six.string_types)
else props["label"]
),
x=[xy_pair[0] for xy_pair in props["data"]],
y=[xy_pair[1] for xy_pair in props["data"]],
xaxis="x{0}".format(self.axis_ct),
yaxis="y{0}".format(self.axis_ct),
line=line,
marker=marker,
)
if self.x_is_mpl_date:
formatter = (
self.current_mpl_ax.get_xaxis()
.get_major_formatter()
.__class__.__name__
)
marked_line["x"] = mpltools.mpl_dates_to_datestrings(
marked_line["x"], formatter
)
            self.plotly_fig.add_trace(marked_line)
self.msg += " Heck yeah, I drew that line\n"
elif props["coordinates"] == "axes":
# dealing with legend graphical elements
self.draw_legend_shapes(mode=mode, shape=shape, **props)
else:
self.msg += " Line didn't have 'data' coordinates, " "not drawing\n"
warnings.warn(
"Bummer! Plotly can currently only draw Line2D "
"objects from matplotlib that are in 'data' "
"coordinates!"
)
def draw_image(self, **props):
"""Draw image.
Not implemented yet!
"""
self.msg += " Attempting to draw image\n"
self.msg += " Not drawing image\n"
warnings.warn(
"Aw. Snap! You're gonna have to hold off on "
"the selfies for now. Plotly can't import "
"images from matplotlib yet!"
)
def draw_path_collection(self, **props):
"""Add a path collection to data list as a scatter plot.
Current implementation defaults such collections as scatter plots.
Matplotlib supports collections that have many of the same parameters
in common like color, size, path, etc. However, they needn't all be
the same. Plotly does not currently support such functionality and
therefore, the style for the first object is taken and used to define
the remaining paths in the collection.
props.keys() -- [
'paths', (structure: [vertices, path_code])
'path_coordinates', ('data', 'axes', 'figure', or 'display')
'path_transforms', (mpl transform, including Affine2D matrix)
'offsets', (offset from axes, helpful if in 'data')
'offset_coordinates', ('data', 'axes', 'figure', or 'display')
'offset_order',
'styles', (style dict, see below)
'mplobj' (the collection obj being drawn)
]
props['styles'].keys() -- [
'linewidth', (one or more linewidths)
'facecolor', (one or more facecolors for path)
'edgecolor', (one or more edgecolors for path)
'alpha', (one or more opacites for path)
'zorder', (precedence when stacked)
]
"""
self.msg += " Attempting to draw a path collection\n"
if props["offset_coordinates"] == "data":
markerstyle = mpltools.get_markerstyle_from_collection(props)
scatter_props = {
"coordinates": "data",
"data": props["offsets"],
"label": None,
"markerstyle": markerstyle,
"linestyle": None,
}
self.msg += " Drawing path collection as markers\n"
self.draw_marked_line(**scatter_props)
else:
self.msg += " Path collection not linked to 'data', " "not drawing\n"
warnings.warn(
"Dang! That path collection is out of this "
"world. I totally don't know what to do with "
"it yet! Plotly can only import path "
"collections linked to 'data' coordinates"
)
def draw_path(self, **props):
"""Draw path, currently only attempts to draw bar charts.
This function attempts to sort a given path into a collection of
horizontal or vertical bar charts. Most of the actual code takes
place in functions from mpltools.py.
props.keys() -- [
        'data', (a list of vertices for the path)
'coordinates', ('data', 'axes', 'figure', or 'display')
'pathcodes', (code for the path, structure: ['M', 'L', 'Z', etc.])
'style', (style dict, see below)
'mplobj' (the mpl path object)
]
props['style'].keys() -- [
'alpha', (opacity of path obj)
'edgecolor',
'facecolor',
'edgewidth',
'dasharray', (style for path's enclosing line)
'zorder' (precedence of obj when stacked)
]
"""
self.msg += " Attempting to draw a path\n"
is_bar = mpltools.is_bar(self.current_mpl_ax.containers, **props)
if is_bar:
self.current_bars += [props]
else:
self.msg += " This path isn't a bar, not drawing\n"
warnings.warn(
"I found a path object that I don't think is part "
"of a bar chart. Ignoring."
)
def draw_text(self, **props):
"""Create an annotation dict for a text obj.
Currently, plotly uses either 'page' or 'data' to reference
annotation locations. These refer to 'display' and 'data',
respectively for the 'coordinates' key used in the Exporter.
Appropriate measures are taken to transform text locations to
reference one of these two options.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Attempting to draw an mpl text object\n"
if not mpltools.check_corners(props["mplobj"], self.mpl_fig):
warnings.warn(
"Looks like the annotation(s) you are trying \n"
"to draw lies/lay outside the given figure size.\n\n"
"Therefore, the resulting Plotly figure may not be \n"
"large enough to view the full text. To adjust \n"
"the size of the figure, use the 'width' and \n"
"'height' keys in the Layout object. Alternatively,\n"
"use the Margin object to adjust the figure's margins."
)
align = props["mplobj"]._multialignment
if not align:
align = props["style"]["halign"] # mpl default
if "annotations" not in self.plotly_fig["layout"]:
self.plotly_fig["layout"]["annotations"] = []
if props["text_type"] == "xlabel":
self.msg += " Text object is an xlabel\n"
self.draw_xlabel(**props)
elif props["text_type"] == "ylabel":
self.msg += " Text object is a ylabel\n"
self.draw_ylabel(**props)
elif props["text_type"] == "title":
self.msg += " Text object is a title\n"
self.draw_title(**props)
else: # just a regular text annotation...
self.msg += " Text object is a normal annotation\n"
if props["coordinates"] != "data":
self.msg += (
" Text object isn't linked to 'data' " "coordinates\n"
)
x_px, y_px = (
props["mplobj"].get_transform().transform(props["position"])
)
x, y = mpltools.display_to_paper(x_px, y_px, self.plotly_fig["layout"])
xref = "paper"
yref = "paper"
xanchor = props["style"]["halign"] # no difference here!
yanchor = mpltools.convert_va(props["style"]["valign"])
else:
self.msg += " Text object is linked to 'data' " "coordinates\n"
x, y = props["position"]
axis_ct = self.axis_ct
xaxis = self.plotly_fig["layout"]["xaxis{0}".format(axis_ct)]
yaxis = self.plotly_fig["layout"]["yaxis{0}".format(axis_ct)]
if (
xaxis["range"][0] < x < xaxis["range"][1]
and yaxis["range"][0] < y < yaxis["range"][1]
):
xref = "x{0}".format(self.axis_ct)
yref = "y{0}".format(self.axis_ct)
else:
self.msg += (
" Text object is outside "
"plotting area, making 'paper' reference.\n"
)
x_px, y_px = (
props["mplobj"].get_transform().transform(props["position"])
)
x, y = mpltools.display_to_paper(
x_px, y_px, self.plotly_fig["layout"]
)
xref = "paper"
yref = "paper"
xanchor = props["style"]["halign"] # no difference here!
yanchor = mpltools.convert_va(props["style"]["valign"])
annotation = go.layout.Annotation(
text=(
str(props["text"])
if isinstance(props["text"], six.string_types)
else props["text"]
),
opacity=props["style"]["alpha"],
x=x,
y=y,
xref=xref,
yref=yref,
align=align,
xanchor=xanchor,
yanchor=yanchor,
showarrow=False, # change this later?
font=go.layout.annotation.Font(
color=props["style"]["color"], size=props["style"]["fontsize"]
),
)
self.plotly_fig["layout"]["annotations"] += (annotation,)
self.msg += " Heck, yeah I drew that annotation\n"
def draw_title(self, **props):
"""Add a title to the current subplot in layout dictionary.
If there exists more than a single plot in the figure, titles revert
to 'page'-referenced annotations.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Attempting to draw a title\n"
if len(self.mpl_fig.axes) > 1:
self.msg += (
" More than one subplot, adding title as " "annotation\n"
)
x_px, y_px = props["mplobj"].get_transform().transform(props["position"])
x, y = mpltools.display_to_paper(x_px, y_px, self.plotly_fig["layout"])
annotation = go.layout.Annotation(
text=props["text"],
font=go.layout.annotation.Font(
color=props["style"]["color"], size=props["style"]["fontsize"]
),
xref="paper",
yref="paper",
x=x,
y=y,
xanchor="center",
yanchor="bottom",
showarrow=False, # no arrow for a title!
)
self.plotly_fig["layout"]["annotations"] += (annotation,)
else:
self.msg += (
" Only one subplot found, adding as a " "plotly title\n"
)
self.plotly_fig["layout"]["title"] = props["text"]
titlefont = dict(
size=props["style"]["fontsize"], color=props["style"]["color"]
)
self.plotly_fig["layout"]["titlefont"] = titlefont
def draw_xlabel(self, **props):
"""Add an xaxis label to the current subplot in layout dictionary.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Adding xlabel\n"
axis_key = "xaxis{0}".format(self.axis_ct)
self.plotly_fig["layout"][axis_key]["title"] = str(props["text"])
titlefont = dict(size=props["style"]["fontsize"], color=props["style"]["color"])
self.plotly_fig["layout"][axis_key]["titlefont"] = titlefont
def draw_ylabel(self, **props):
"""Add a yaxis label to the current subplot in layout dictionary.
props.keys() -- [
'text', (actual content string, not the text obj)
'position', (an x, y pair, not an mpl Bbox)
'coordinates', ('data', 'axes', 'figure', 'display')
'text_type', ('title', 'xlabel', or 'ylabel')
'style', (style dict, see below)
'mplobj' (actual mpl text object)
]
props['style'].keys() -- [
'alpha', (opacity of text)
'fontsize', (size in points of text)
'color', (hex color)
'halign', (horizontal alignment, 'left', 'center', or 'right')
'valign', (vertical alignment, 'baseline', 'center', or 'top')
'rotation',
'zorder', (precedence of text when stacked with other objs)
]
"""
self.msg += " Adding ylabel\n"
axis_key = "yaxis{0}".format(self.axis_ct)
self.plotly_fig["layout"][axis_key]["title"] = props["text"]
titlefont = dict(size=props["style"]["fontsize"], color=props["style"]["color"])
self.plotly_fig["layout"][axis_key]["titlefont"] = titlefont
def resize(self):
"""Revert figure layout to allow plotly to resize.
By default, PlotlyRenderer tries its hardest to precisely mimic an
mpl figure. However, plotly is pretty good with aesthetics. By
running PlotlyRenderer.resize(), layout parameters are deleted. This
lets plotly choose them instead of mpl.
"""
self.msg += "Resizing figure, deleting keys from layout\n"
for key in ["width", "height", "autosize", "margin"]:
try:
del self.plotly_fig["layout"][key]
except (KeyError, AttributeError):
pass
def strip_style(self):
self.msg += "Stripping mpl style is no longer supported\n"
|
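# Illustrative usage sketch (standalone; not part of the renderer module
# above). The PlotlyRenderer is normally driven indirectly through plotly's
# matplotlib converter, which walks a matplotlib figure with the Exporter and
# calls the open_axes/draw_* hooks defined above. A minimal round-trip,
# assuming a plotly/matplotlib pairing that still ships the converter:
import matplotlib
matplotlib.use("Agg")  # render off-screen
import matplotlib.pyplot as plt
import plotly.tools as tls
mpl_fig, ax = plt.subplots()
ax.bar([1, 2, 3], [3, 1, 2])            # exercised by draw_bar above
ax.plot([1, 2, 3], [1, 2, 3], "o-")     # exercised by draw_marked_line above
plotly_fig = tls.mpl_to_plotly(mpl_fig)
print(plotly_fig.layout.xaxis)          # axes prepared by open_axes above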
|
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module contains all of the core logic for beets' command-line
interface. To invoke the CLI, just call beets.ui.main(). The actual
CLI commands are implemented in the ui.commands module.
"""
from __future__ import division, absolute_import, print_function
import optparse
import textwrap
import sys
from difflib import SequenceMatcher
import sqlite3
import errno
import re
import struct
import traceback
import os.path
from six.moves import input
from beets import logging
from beets import library
from beets import plugins
from beets import util
from beets.util.functemplate import Template
from beets import config
from beets.util import confit, as_string
from beets.autotag import mb
from beets.dbcore import query as db_query
from beets.dbcore import db
import six
# On Windows platforms, use colorama to support "ANSI" terminal colors.
if sys.platform == 'win32':
try:
import colorama
except ImportError:
pass
else:
colorama.init()
log = logging.getLogger('beets')
if not log.handlers:
log.addHandler(logging.StreamHandler())
log.propagate = False # Don't propagate to root handler.
PF_KEY_QUERIES = {
'comp': u'comp:true',
'singleton': u'singleton:true',
}
class UserError(Exception):
"""UI exception. Commands should throw this in order to display
nonrecoverable errors to the user.
"""
# Encoding utilities.
def _in_encoding():
"""Get the encoding to use for *inputting* strings from the console.
"""
return _stream_encoding(sys.stdin)
def _out_encoding():
"""Get the encoding to use for *outputting* strings to the console.
"""
return _stream_encoding(sys.stdout)
def _stream_encoding(stream, default='utf-8'):
"""A helper for `_in_encoding` and `_out_encoding`: get the stream's
preferred encoding, using a configured override or a default
    fallback if neither is available.
"""
# Configured override?
encoding = config['terminal_encoding'].get()
if encoding:
return encoding
# For testing: When sys.stdout or sys.stdin is a StringIO under the
# test harness, it doesn't have an `encoding` attribute. Just use
# UTF-8.
if not hasattr(stream, 'encoding'):
return default
# Python's guessed output stream encoding, or UTF-8 as a fallback
# (e.g., when piped to a file).
return stream.encoding or default
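# Resolution order, illustrated: the `terminal_encoding` config option wins if
# set; otherwise the stream's own `encoding` attribute is used; otherwise the
# UTF-8 default applies (e.g. for StringIO streams in tests or piped output).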
def decargs(arglist):
"""Given a list of command-line argument bytestrings, attempts to
decode them to Unicode strings when running under Python 2.
"""
if six.PY2:
return [s.decode(util.arg_encoding()) for s in arglist]
else:
return arglist
def print_(*strings, **kwargs):
"""Like print, but rather than raising an error when a character
is not in the terminal's encoding's character set, just silently
replaces it.
The arguments must be Unicode strings: `unicode` on Python 2; `str` on
Python 3.
The `end` keyword argument behaves similarly to the built-in `print`
(it defaults to a newline).
"""
if not strings:
strings = [u'']
assert isinstance(strings[0], six.text_type)
txt = u' '.join(strings)
txt += kwargs.get('end', u'\n')
# Encode the string and write it to stdout.
if six.PY2:
# On Python 2, sys.stdout expects bytes.
out = txt.encode(_out_encoding(), 'replace')
sys.stdout.write(out)
else:
# On Python 3, sys.stdout expects text strings and uses the
# exception-throwing encoding error policy. To avoid throwing
# errors and use our configurable encoding override, we use the
# underlying bytes buffer instead.
if hasattr(sys.stdout, 'buffer'):
out = txt.encode(_out_encoding(), 'replace')
sys.stdout.buffer.write(out)
sys.stdout.buffer.flush()
else:
# In our test harnesses (e.g., DummyOut), sys.stdout.buffer
# does not exist. We instead just record the text string.
sys.stdout.write(txt)
# Configuration wrappers.
def _bool_fallback(a, b):
"""Given a boolean or None, return the original value or a fallback.
"""
if a is None:
assert isinstance(b, bool)
return b
else:
assert isinstance(a, bool)
return a
def should_write(write_opt=None):
"""Decide whether a command that updates metadata should also write
tags, using the importer configuration as the default.
"""
return _bool_fallback(write_opt, config['import']['write'].get(bool))
def should_move(move_opt=None):
"""Decide whether a command that updates metadata should also move
files when they're inside the library, using the importer
configuration as the default.
Specifically, commands should move files after metadata updates only
when the importer is configured *either* to move *or* to copy files.
They should avoid moving files when the importer is configured not
to touch any filenames.
"""
return _bool_fallback(
move_opt,
config['import']['move'].get(bool) or
config['import']['copy'].get(bool)
)
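# In other words: an explicit per-command move flag always wins; otherwise
# files are moved after metadata updates whenever the importer is configured
# to either move or copy, and left untouched when it touches no filenames.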
# Input prompts.
def input_(prompt=None):
"""Like `input`, but decodes the result to a Unicode string.
Raises a UserError if stdin is not available. The prompt is sent to
    stdout rather than stderr. A space is printed between the prompt and
    the input cursor.
"""
# raw_input incorrectly sends prompts to stderr, not stdout, so we
# use print_() explicitly to display prompts.
# http://bugs.python.org/issue1927
if prompt:
print_(prompt, end=u' ')
try:
resp = input()
except EOFError:
raise UserError(u'stdin stream ended while input required')
if six.PY2:
return resp.decode(_in_encoding(), 'ignore')
else:
return resp
def input_options(options, require=False, prompt=None, fallback_prompt=None,
numrange=None, default=None, max_width=72):
"""Prompts a user for input. The sequence of `options` defines the
choices the user has. A single-letter shortcut is inferred for each
option; the user's choice is returned as that single, lower-case
letter. The options should be provided as lower-case strings unless
a particular shortcut is desired; in that case, only that letter
should be capitalized.
By default, the first option is the default. `default` can be provided to
override this. If `require` is provided, then there is no default. The
prompt and fallback prompt are also inferred but can be overridden.
    If numrange is provided, it is a pair of `(low, high)` (both ints)
indicating that, in addition to `options`, the user may enter an
integer in that inclusive range.
`max_width` specifies the maximum number of columns in the
automatically generated prompt string.
"""
# Assign single letters to each option. Also capitalize the options
# to indicate the letter.
letters = {}
display_letters = []
capitalized = []
first = True
for option in options:
# Is a letter already capitalized?
for letter in option:
if letter.isalpha() and letter.upper() == letter:
found_letter = letter
break
else:
# Infer a letter.
for letter in option:
if not letter.isalpha():
continue # Don't use punctuation.
if letter not in letters:
found_letter = letter
break
else:
raise ValueError(u'no unambiguous lettering found')
letters[found_letter.lower()] = option
index = option.index(found_letter)
# Mark the option's shortcut letter for display.
if not require and (
(default is None and not numrange and first) or
(isinstance(default, six.string_types) and
found_letter.lower() == default.lower())):
# The first option is the default; mark it.
show_letter = '[%s]' % found_letter.upper()
is_default = True
else:
show_letter = found_letter.upper()
is_default = False
# Colorize the letter shortcut.
show_letter = colorize('action_default' if is_default else 'action',
show_letter)
# Insert the highlighted letter back into the word.
capitalized.append(
option[:index] + show_letter + option[index + 1:]
)
display_letters.append(found_letter.upper())
first = False
# The default is just the first option if unspecified.
if require:
default = None
elif default is None:
if numrange:
default = numrange[0]
else:
default = display_letters[0].lower()
# Make a prompt if one is not provided.
if not prompt:
prompt_parts = []
prompt_part_lengths = []
if numrange:
if isinstance(default, int):
default_name = six.text_type(default)
default_name = colorize('action_default', default_name)
tmpl = '# selection (default %s)'
prompt_parts.append(tmpl % default_name)
prompt_part_lengths.append(len(tmpl % six.text_type(default)))
else:
prompt_parts.append('# selection')
prompt_part_lengths.append(len(prompt_parts[-1]))
prompt_parts += capitalized
prompt_part_lengths += [len(s) for s in options]
# Wrap the query text.
prompt = ''
line_length = 0
for i, (part, length) in enumerate(zip(prompt_parts,
prompt_part_lengths)):
# Add punctuation.
if i == len(prompt_parts) - 1:
part += '?'
else:
part += ','
length += 1
# Choose either the current line or the beginning of the next.
if line_length + length + 1 > max_width:
prompt += '\n'
line_length = 0
if line_length != 0:
# Not the beginning of the line; need a space.
part = ' ' + part
length += 1
prompt += part
line_length += length
# Make a fallback prompt too. This is displayed if the user enters
# something that is not recognized.
if not fallback_prompt:
fallback_prompt = u'Enter one of '
if numrange:
fallback_prompt += u'%i-%i, ' % numrange
fallback_prompt += ', '.join(display_letters) + ':'
resp = input_(prompt)
while True:
resp = resp.strip().lower()
# Try default option.
if default is not None and not resp:
resp = default
# Try an integer input if available.
if numrange:
try:
resp = int(resp)
except ValueError:
pass
else:
low, high = numrange
if low <= resp <= high:
return resp
else:
resp = None
# Try a normal letter input.
if resp:
resp = resp[0]
if resp in letters:
return resp
# Prompt for new input.
resp = input_(fallback_prompt)
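# Illustrative call (interactive; the option strings are hypothetical):
#
#     choice = input_options([u'apply', u'more candidates', u'skip'])
#
# renders a prompt like "[A]pply, More candidates, Skip?" and returns the
# single lower-case shortcut letter: 'a', 'm', or 's' ('a' on a bare Enter,
# since the first option is the default).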
def input_yn(prompt, require=False):
"""Prompts the user for a "yes" or "no" response. The default is
"yes" unless `require` is `True`, in which case there is no default.
"""
sel = input_options(
('y', 'n'), require, prompt, u'Enter Y or N:'
)
return sel == u'y'
def input_select_objects(prompt, objs, rep):
    """Prompt the user to choose all, none, or some of the given objects.
Return the list of selected objects.
`prompt` is the prompt string to use for each question (it should be
phrased as an imperative verb). `rep` is a function to call on each
object to print it out when confirming objects individually.
"""
choice = input_options(
(u'y', u'n', u's'), False,
u'%s? (Yes/no/select)' % prompt)
print() # Blank line.
if choice == u'y': # Yes.
return objs
elif choice == u's': # Select.
out = []
for obj in objs:
rep(obj)
if input_yn(u'%s? (yes/no)' % prompt, True):
out.append(obj)
print() # go to a new line
return out
else: # No.
return []
# Human output formatting.
def human_bytes(size):
"""Formats size, a number of bytes, in a human-readable way."""
powers = [u'', u'K', u'M', u'G', u'T', u'P', u'E', u'Z', u'Y', u'H']
unit = 'B'
for power in powers:
if size < 1024:
return u"%3.1f %s%s" % (size, power, unit)
size /= 1024.0
unit = u'iB'
return u"big"
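# For example, human_bytes(42) == u'42.0 B' and
# human_bytes(123456) == u'120.6 KiB'.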
def human_seconds(interval):
"""Formats interval, a number of seconds, as a human-readable time
interval using English words.
"""
units = [
(1, u'second'),
(60, u'minute'),
(60, u'hour'),
(24, u'day'),
(7, u'week'),
(52, u'year'),
(10, u'decade'),
]
for i in range(len(units) - 1):
increment, suffix = units[i]
next_increment, _ = units[i + 1]
interval /= float(increment)
if interval < next_increment:
break
else:
# Last unit.
increment, suffix = units[-1]
interval /= float(increment)
return u"%3.1f %ss" % (interval, suffix)
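# For example, human_seconds(30) == u'30.0 seconds' and
# human_seconds(3600) == u'1.0 hours' (the pluralization is deliberately
# naive).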
def human_seconds_short(interval):
"""Formats a number of seconds as a short human-readable M:SS
string.
"""
interval = int(interval)
return u'%i:%02i' % (interval // 60, interval % 60)
# Colorization.
# ANSI terminal colorization code heavily inspired by pygments:
# http://dev.pocoo.org/hg/pygments-main/file/b2deea5b5030/pygments/console.py
# (pygments is by Tim Hatch, Armin Ronacher, et al.)
COLOR_ESCAPE = "\x1b["
DARK_COLORS = {
"black": 0,
"darkred": 1,
"darkgreen": 2,
"brown": 3,
"darkyellow": 3,
"darkblue": 4,
"purple": 5,
"darkmagenta": 5,
"teal": 6,
"darkcyan": 6,
"lightgray": 7
}
LIGHT_COLORS = {
"darkgray": 0,
"red": 1,
"green": 2,
"yellow": 3,
"blue": 4,
"fuchsia": 5,
"magenta": 5,
"turquoise": 6,
"cyan": 6,
"white": 7
}
RESET_COLOR = COLOR_ESCAPE + "39;49;00m"
# These abstract COLOR_NAMES are lazily mapped on to the actual color in COLORS
# as they are defined in the configuration files, see function: colorize
COLOR_NAMES = ['text_success', 'text_warning', 'text_error', 'text_highlight',
'text_highlight_minor', 'action_default', 'action']
COLORS = None
def _colorize(color, text):
"""Returns a string that prints the given text in the given color
in a terminal that is ANSI color-aware. The color must be something
in DARK_COLORS or LIGHT_COLORS.
"""
if color in DARK_COLORS:
escape = COLOR_ESCAPE + "%im" % (DARK_COLORS[color] + 30)
elif color in LIGHT_COLORS:
escape = COLOR_ESCAPE + "%i;01m" % (LIGHT_COLORS[color] + 30)
else:
        raise ValueError(u'no such color %s' % color)
return escape + text + RESET_COLOR
def colorize(color_name, text):
"""Colorize text if colored output is enabled. (Like _colorize but
conditional.)
"""
if config['ui']['color']:
global COLORS
if not COLORS:
COLORS = dict((name,
config['ui']['colors'][name].as_str())
for name in COLOR_NAMES)
# In case a 3rd party plugin is still passing the actual color ('red')
# instead of the abstract color name ('text_error')
color = COLORS.get(color_name)
if not color:
log.debug(u'Invalid color_name: {0}', color_name)
color = color_name
return _colorize(color, text)
else:
return text
def _colordiff(a, b, highlight='text_highlight',
minor_highlight='text_highlight_minor'):
"""Given two values, return the same pair of strings except with
their differences highlighted in the specified color. Strings are
highlighted intelligently to show differences; other values are
stringified and highlighted in their entirety.
"""
if not isinstance(a, six.string_types) \
or not isinstance(b, six.string_types):
# Non-strings: use ordinary equality.
a = six.text_type(a)
b = six.text_type(b)
if a == b:
return a, b
else:
return colorize(highlight, a), colorize(highlight, b)
if isinstance(a, bytes) or isinstance(b, bytes):
# A path field.
a = util.displayable_path(a)
b = util.displayable_path(b)
a_out = []
b_out = []
matcher = SequenceMatcher(lambda x: False, a, b)
for op, a_start, a_end, b_start, b_end in matcher.get_opcodes():
if op == 'equal':
# In both strings.
a_out.append(a[a_start:a_end])
b_out.append(b[b_start:b_end])
elif op == 'insert':
# Right only.
b_out.append(colorize(highlight, b[b_start:b_end]))
elif op == 'delete':
# Left only.
a_out.append(colorize(highlight, a[a_start:a_end]))
elif op == 'replace':
# Right and left differ. Colorise with second highlight if
# it's just a case change.
if a[a_start:a_end].lower() != b[b_start:b_end].lower():
color = highlight
else:
color = minor_highlight
a_out.append(colorize(color, a[a_start:a_end]))
b_out.append(colorize(color, b[b_start:b_end]))
else:
assert(False)
return u''.join(a_out), u''.join(b_out)
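# For example, _colordiff(u'One Love', u'one love') marks only the two case
# changes with the minor highlight color, while a genuinely different
# substring is marked with the full 'text_highlight' color.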
def colordiff(a, b, highlight='text_highlight'):
"""Colorize differences between two values if color is enabled.
(Like _colordiff but conditional.)
"""
if config['ui']['color']:
return _colordiff(a, b, highlight)
else:
return six.text_type(a), six.text_type(b)
def get_path_formats(subview=None):
"""Get the configuration's path formats as a list of query/template
pairs.
"""
path_formats = []
subview = subview or config['paths']
for query, view in subview.items():
query = PF_KEY_QUERIES.get(query, query) # Expand common queries.
path_formats.append((query, Template(view.as_str())))
return path_formats
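# For a configuration block such as
#
#     paths:
#         default: $albumartist/$album/$track $title
#         comp: Compilations/$album/$track $title
#
# this yields [(u'default', Template(...)), (u'comp:true', Template(...))]:
# the 'comp' key is expanded via PF_KEY_QUERIES and 'default' passes through.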
def get_replacements():
"""Confit validation function that reads regex/string pairs.
"""
replacements = []
for pattern, repl in config['replace'].get(dict).items():
repl = repl or ''
try:
replacements.append((re.compile(pattern), repl))
except re.error:
raise UserError(
u'malformed regular expression in replace: {0}'.format(
pattern
)
)
return replacements
def term_width():
"""Get the width (columns) of the terminal."""
fallback = config['ui']['terminal_width'].get(int)
# The fcntl and termios modules are not available on non-Unix
# platforms, so we fall back to a constant.
try:
import fcntl
import termios
except ImportError:
return fallback
try:
buf = fcntl.ioctl(0, termios.TIOCGWINSZ, ' ' * 4)
except IOError:
return fallback
try:
height, width = struct.unpack('hh', buf)
except struct.error:
return fallback
return width
FLOAT_EPSILON = 0.01
def _field_diff(field, old, new):
"""Given two Model objects, format their values for `field` and
highlight changes among them. Return a human-readable string. If the
value has not changed, return None instead.
"""
oldval = old.get(field)
newval = new.get(field)
# If no change, abort.
if isinstance(oldval, float) and isinstance(newval, float) and \
abs(oldval - newval) < FLOAT_EPSILON:
return None
elif oldval == newval:
return None
# Get formatted values for output.
oldstr = old.formatted().get(field, u'')
newstr = new.formatted().get(field, u'')
# For strings, highlight changes. For others, colorize the whole
# thing.
if isinstance(oldval, six.string_types):
oldstr, newstr = colordiff(oldval, newstr)
else:
oldstr = colorize('text_error', oldstr)
newstr = colorize('text_error', newstr)
return u'{0} -> {1}'.format(oldstr, newstr)
def show_model_changes(new, old=None, fields=None, always=False):
"""Given a Model object, print a list of changes from its pristine
version stored in the database. Return a boolean indicating whether
any changes were found.
`old` may be the "original" object to avoid using the pristine
version from the database. `fields` may be a list of fields to
restrict the detection to. `always` indicates whether the object is
always identified, regardless of whether any changes are present.
"""
old = old or new._db._get(type(new), new.id)
# Build up lines showing changed fields.
changes = []
for field in old:
# Subset of the fields. Never show mtime.
if field == 'mtime' or (fields and field not in fields):
continue
# Detect and show difference for this field.
line = _field_diff(field, old, new)
if line:
changes.append(u' {0}: {1}'.format(field, line))
# New fields.
for field in set(new) - set(old):
if fields and field not in fields:
continue
changes.append(u' {0}: {1}'.format(
field,
colorize('text_highlight', new.formatted()[field])
))
# Print changes.
if changes or always:
print_(format(old))
if changes:
print_(u'\n'.join(changes))
return bool(changes)
def show_path_changes(path_changes):
"""Given a list of tuples (source, destination) that indicate the
path changes, log the changes as INFO-level output to the beets log.
The output is guaranteed to be unicode.
Every pair is shown on a single line if the terminal width permits it,
else it is split over two lines. E.g.,
Source -> Destination
vs.
Source
-> Destination
"""
sources, destinations = zip(*path_changes)
# Ensure unicode output
sources = list(map(util.displayable_path, sources))
destinations = list(map(util.displayable_path, destinations))
# Calculate widths for terminal split
col_width = (term_width() - len(' -> ')) // 2
max_width = len(max(sources + destinations, key=len))
if max_width > col_width:
# Print every change over two lines
for source, dest in zip(sources, destinations):
log.info(u'{0} \n -> {1}', source, dest)
else:
# Print every change on a single line, and add a header
title_pad = max_width - len('Source ') + len(' -> ')
log.info(u'Source {0} Destination', ' ' * title_pad)
for source, dest in zip(sources, destinations):
pad = max_width - len(source)
log.info(u'{0} {1} -> {2}', source, ' ' * pad, dest)
class CommonOptionsParser(optparse.OptionParser, object):
"""Offers a simple way to add common formatting options.
Options available include:
- matching albums instead of tracks: add_album_option()
- showing paths instead of items/albums: add_path_option()
- changing the format of displayed items/albums: add_format_option()
The last one can have several behaviors:
- against a special target
- with a certain format
- autodetected target with the album option
Each method is fully documented in the related method.
"""
def __init__(self, *args, **kwargs):
super(CommonOptionsParser, self).__init__(*args, **kwargs)
self._album_flags = False
# this serves both as an indicator that we offer the feature AND allows
# us to check whether it has been specified on the CLI - bypassing the
# fact that arguments may be in any order
def add_album_option(self, flags=('-a', '--album')):
"""Add a -a/--album option to match albums instead of tracks.
If used then the format option can auto-detect whether we're setting
the format for items or albums.
Sets the album property on the options extracted from the CLI.
"""
album = optparse.Option(*flags, action='store_true',
help=u'match albums instead of tracks')
self.add_option(album)
self._album_flags = set(flags)
def _set_format(self, option, opt_str, value, parser, target=None,
fmt=None, store_true=False):
"""Internal callback that sets the correct format while parsing CLI
arguments.
"""
if store_true:
setattr(parser.values, option.dest, True)
# Use the explicitly specified format, or the string from the option.
if fmt:
value = fmt
elif value:
value, = decargs([value])
else:
value = u''
parser.values.format = value
if target:
config[target._format_config_key].set(value)
else:
if self._album_flags:
if parser.values.album:
target = library.Album
else:
                    # the option is either missing or not yet parsed
if self._album_flags & set(parser.rargs):
target = library.Album
else:
target = library.Item
config[target._format_config_key].set(value)
else:
config[library.Item._format_config_key].set(value)
config[library.Album._format_config_key].set(value)
def add_path_option(self, flags=('-p', '--path')):
"""Add a -p/--path option to display the path instead of the default
format.
By default this affects both items and albums. If add_album_option()
is used then the target will be autodetected.
Sets the format property to u'$path' on the options extracted from the
CLI.
"""
path = optparse.Option(*flags, nargs=0, action='callback',
callback=self._set_format,
callback_kwargs={'fmt': u'$path',
'store_true': True},
help=u'print paths for matched items or albums')
self.add_option(path)
def add_format_option(self, flags=('-f', '--format'), target=None):
"""Add -f/--format option to print some LibModel instances with a
custom format.
`target` is optional and can be one of ``library.Item``, 'item',
``library.Album`` and 'album'.
Several behaviors are available:
- if `target` is given then the format is only applied to that
LibModel
- if the album option is used then the target will be autodetected
- otherwise the format is applied to both items and albums.
Sets the format property on the options extracted from the CLI.
"""
kwargs = {}
if target:
if isinstance(target, six.string_types):
target = {'item': library.Item,
'album': library.Album}[target]
kwargs['target'] = target
opt = optparse.Option(*flags, action='callback',
callback=self._set_format,
callback_kwargs=kwargs,
help=u'print with custom format')
self.add_option(opt)
def add_all_common_options(self):
"""Add album, path and format options.
"""
self.add_album_option()
self.add_path_option()
self.add_format_option()
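# Illustrative sketch (the query string is hypothetical):
#
#     parser = CommonOptionsParser()
#     parser.add_all_common_options()
#     opts, args = parser.parse_args(['-a', '-p', 'artist:foo'])
#     # opts.album is True, opts.format == u'$path', args == ['artist:foo']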
# Subcommand parsing infrastructure.
#
# This is a fairly generic subcommand parser for optparse. It is
# maintained externally here:
# http://gist.github.com/462717
# There you will also find a better description of the code and a more
# succinct example program.
class Subcommand(object):
"""A subcommand of a root command-line application that may be
invoked by a SubcommandOptionParser.
"""
def __init__(self, name, parser=None, help='', aliases=(), hide=False):
"""Creates a new subcommand. name is the primary way to invoke
the subcommand; aliases are alternate names. parser is an
OptionParser responsible for parsing the subcommand's options.
help is a short description of the command. If no parser is
given, it defaults to a new, empty CommonOptionsParser.
"""
self.name = name
self.parser = parser or CommonOptionsParser()
self.aliases = aliases
self.help = help
self.hide = hide
self._root_parser = None
def print_help(self):
self.parser.print_help()
def parse_args(self, args):
return self.parser.parse_args(args)
@property
def root_parser(self):
return self._root_parser
@root_parser.setter
def root_parser(self, root_parser):
self._root_parser = root_parser
self.parser.prog = '{0} {1}'.format(
as_string(root_parser.get_prog_name()), self.name)
class SubcommandsOptionParser(CommonOptionsParser):
"""A variant of OptionParser that parses subcommands and their
arguments.
"""
def __init__(self, *args, **kwargs):
"""Create a new subcommand-aware option parser. All of the
options to OptionParser.__init__ are supported in addition
to subcommands, a sequence of Subcommand objects.
"""
# A more helpful default usage.
if 'usage' not in kwargs:
kwargs['usage'] = u"""
%prog COMMAND [ARGS...]
%prog help COMMAND"""
kwargs['add_help_option'] = False
# Super constructor.
super(SubcommandsOptionParser, self).__init__(*args, **kwargs)
# Our root parser needs to stop on the first unrecognized argument.
self.disable_interspersed_args()
self.subcommands = []
def add_subcommand(self, *cmds):
"""Adds a Subcommand object to the parser's list of commands.
"""
for cmd in cmds:
cmd.root_parser = self
self.subcommands.append(cmd)
# Add the list of subcommands to the help message.
def format_help(self, formatter=None):
# Get the original help message, to which we will append.
out = super(SubcommandsOptionParser, self).format_help(formatter)
if formatter is None:
formatter = self.formatter
# Subcommands header.
result = ["\n"]
result.append(formatter.format_heading('Commands'))
formatter.indent()
# Generate the display names (including aliases).
# Also determine the help position.
disp_names = []
help_position = 0
subcommands = [c for c in self.subcommands if not c.hide]
subcommands.sort(key=lambda c: c.name)
for subcommand in subcommands:
name = subcommand.name
if subcommand.aliases:
name += ' (%s)' % ', '.join(subcommand.aliases)
disp_names.append(name)
# Set the help position based on the max width.
proposed_help_position = len(name) + formatter.current_indent + 2
if proposed_help_position <= formatter.max_help_position:
help_position = max(help_position, proposed_help_position)
# Add each subcommand to the output.
for subcommand, name in zip(subcommands, disp_names):
# Lifted directly from optparse.py.
name_width = help_position - formatter.current_indent - 2
if len(name) > name_width:
name = "%*s%s\n" % (formatter.current_indent, "", name)
indent_first = help_position
else:
name = "%*s%-*s " % (formatter.current_indent, "",
name_width, name)
indent_first = 0
result.append(name)
help_width = formatter.width - help_position
help_lines = textwrap.wrap(subcommand.help, help_width)
help_line = help_lines[0] if help_lines else ''
result.append("%*s%s\n" % (indent_first, "", help_line))
result.extend(["%*s%s\n" % (help_position, "", line)
for line in help_lines[1:]])
formatter.dedent()
# Concatenate the original help message with the subcommand
# list.
return out + "".join(result)
def _subcommand_for_name(self, name):
"""Return the subcommand in self.subcommands matching the
given name. The name may either be the name of a subcommand or
an alias. If no subcommand matches, returns None.
"""
for subcommand in self.subcommands:
if name == subcommand.name or \
name in subcommand.aliases:
return subcommand
return None
def parse_global_options(self, args):
"""Parse options up to the subcommand argument. Returns a tuple
of the options object and the remaining arguments.
"""
options, subargs = self.parse_args(args)
# Force the help command
if options.help:
subargs = ['help']
elif options.version:
subargs = ['version']
return options, subargs
def parse_subcommand(self, args):
"""Given the `args` left unused by a `parse_global_options`,
return the invoked subcommand, the subcommand options, and the
subcommand arguments.
"""
# Help is default command
if not args:
args = ['help']
cmdname = args.pop(0)
subcommand = self._subcommand_for_name(cmdname)
if not subcommand:
raise UserError(u"unknown command '{0}'".format(cmdname))
suboptions, subargs = subcommand.parse_args(args)
return subcommand, suboptions, subargs
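# Illustrative sketch (the command name is hypothetical):
#
#     stats = Subcommand('stats', help='show library statistics')
#     root = SubcommandsOptionParser()
#     root.add_subcommand(stats)
#     cmd, opts, args = root.parse_subcommand(['stats', 'some', 'query'])
#     # cmd is stats and args == ['some', 'query']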
optparse.Option.ALWAYS_TYPED_ACTIONS += ('callback',)
# The main entry point and bootstrapping.
def _load_plugins(config):
"""Load the plugins specified in the configuration.
"""
paths = config['pluginpath'].as_str_seq(split=False)
paths = [util.normpath(p) for p in paths]
log.debug(u'plugin paths: {0}', util.displayable_path(paths))
# On Python 3, the search paths need to be unicode.
paths = [util.py3_path(p) for p in paths]
# Extend the `beetsplug` package to include the plugin paths.
import beetsplug
beetsplug.__path__ = paths + beetsplug.__path__
# For backwards compatibility, also support plugin paths that
# *contain* a `beetsplug` package.
sys.path += paths
plugins.load_plugins(config['plugins'].as_str_seq())
plugins.send("pluginload")
return plugins
def _setup(options, lib=None):
    """Prepare the global state and update it with command line options.
Returns a list of subcommands, a list of plugins, and a library instance.
"""
# Configure the MusicBrainz API.
mb.configure()
config = _configure(options)
plugins = _load_plugins(config)
# Get the default subcommands.
from beets.ui.commands import default_commands
subcommands = list(default_commands)
subcommands.extend(plugins.commands())
if lib is None:
lib = _open_library(config)
plugins.send("library_opened", lib=lib)
library.Item._types.update(plugins.types(library.Item))
library.Album._types.update(plugins.types(library.Album))
return subcommands, plugins, lib
def _configure(options):
"""Amend the global configuration object with command line options.
"""
# Add any additional config files specified with --config. This
# special handling lets specified plugins get loaded before we
# finish parsing the command line.
if getattr(options, 'config', None) is not None:
overlay_path = options.config
del options.config
config.set_file(overlay_path)
else:
overlay_path = None
config.set_args(options)
# Configure the logger.
if config['verbose'].get(int):
log.set_global_level(logging.DEBUG)
else:
log.set_global_level(logging.INFO)
if overlay_path:
log.debug(u'overlaying configuration: {0}',
util.displayable_path(overlay_path))
config_path = config.user_config_path()
if os.path.isfile(config_path):
log.debug(u'user configuration: {0}',
util.displayable_path(config_path))
else:
log.debug(u'no user configuration found at {0}',
util.displayable_path(config_path))
log.debug(u'data directory: {0}',
util.displayable_path(config.config_dir()))
return config
def _open_library(config):
"""Create a new library instance from the configuration.
"""
dbpath = util.bytestring_path(config['library'].as_filename())
try:
lib = library.Library(
dbpath,
config['directory'].as_filename(),
get_path_formats(),
get_replacements(),
)
lib.get_item(0) # Test database connection.
except (sqlite3.OperationalError, sqlite3.DatabaseError):
log.debug(u'{}', traceback.format_exc())
raise UserError(u"database file {0} could not be opened".format(
util.displayable_path(dbpath)
))
log.debug(u'library database: {0}\n'
u'library directory: {1}',
util.displayable_path(lib.path),
util.displayable_path(lib.directory))
return lib
def _raw_main(args, lib=None):
"""A helper function for `main` without top-level exception
handling.
"""
parser = SubcommandsOptionParser()
parser.add_format_option(flags=('--format-item',), target=library.Item)
parser.add_format_option(flags=('--format-album',), target=library.Album)
parser.add_option('-l', '--library', dest='library',
help=u'library database file to use')
parser.add_option('-d', '--directory', dest='directory',
help=u"destination music directory")
parser.add_option('-v', '--verbose', dest='verbose', action='count',
help=u'log more details (use twice for even more)')
parser.add_option('-c', '--config', dest='config',
help=u'path to configuration file')
parser.add_option('-h', '--help', dest='help', action='store_true',
help=u'show this help message and exit')
parser.add_option('--version', dest='version', action='store_true',
help=optparse.SUPPRESS_HELP)
options, subargs = parser.parse_global_options(args)
# Special case for the `config --edit` command: bypass _setup so
# that an invalid configuration does not prevent the editor from
# starting.
if subargs and subargs[0] == 'config' \
and ('-e' in subargs or '--edit' in subargs):
from beets.ui.commands import config_edit
return config_edit()
test_lib = bool(lib)
subcommands, plugins, lib = _setup(options, lib)
parser.add_subcommand(*subcommands)
subcommand, suboptions, subargs = parser.parse_subcommand(subargs)
subcommand.func(lib, suboptions, subargs)
plugins.send('cli_exit', lib=lib)
if not test_lib:
# Clean up the library unless it came from the test harness.
lib._close()
def main(args=None):
"""Run the main command-line interface for beets. Includes top-level
exception handlers that print friendly error messages.
"""
try:
_raw_main(args)
except UserError as exc:
message = exc.args[0] if exc.args else None
log.error(u'error: {0}', message)
sys.exit(1)
except util.HumanReadableException as exc:
exc.log(log)
sys.exit(1)
except library.FileOperationError as exc:
# These errors have reasonable human-readable descriptions, but
# we still want to log their tracebacks for debugging.
log.debug('{}', traceback.format_exc())
log.error('{}', exc)
sys.exit(1)
except confit.ConfigError as exc:
log.error(u'configuration error: {0}', exc)
sys.exit(1)
except db_query.InvalidQueryError as exc:
log.error(u'invalid query: {0}', exc)
sys.exit(1)
except IOError as exc:
if exc.errno == errno.EPIPE:
# "Broken pipe". End silently.
pass
else:
raise
except KeyboardInterrupt:
# Silently ignore ^C except in verbose mode.
log.debug(u'{}', traceback.format_exc())
except db.DBAccessError as exc:
log.error(
u'database access error: {0}\n'
u'the library file might have a permissions problem',
exc
)
sys.exit(1)
|
|
from __future__ import unicode_literals
import datetime
import re
import sys
import warnings
from unittest import skipIf
from xml.dom.minidom import parseString
from django.core import serializers
from django.core.urlresolvers import reverse
from django.db.models import Max, Min
from django.http import HttpRequest
from django.template import (
Context, RequestContext, Template, TemplateSyntaxError, context_processors,
)
from django.test import (
TestCase, override_settings, skipIfDBFeature, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.utils import six, timezone
from .forms import (
EventForm, EventLocalizedForm, EventLocalizedModelForm, EventModelForm,
EventSplitForm,
)
from .models import (
AllDayEvent, Event, MaybeEvent, Session, SessionEvent, Timestamp,
)
try:
import pytz
except ImportError:
pytz = None
# These tests use the EAT (Eastern Africa Time) and ICT (Indochina Time)
# which don't have Daylight Saving Time, so we can represent them easily
# with FixedOffset, and use them directly as tzinfo in the constructors.
# settings.TIME_ZONE is forced to EAT. Most tests use a variant of
# datetime.datetime(2011, 9, 1, 13, 20, 30), which translates to
# 10:20:30 in UTC and 17:20:30 in ICT.
UTC = timezone.utc
EAT = timezone.get_fixed_timezone(180) # Africa/Nairobi
ICT = timezone.get_fixed_timezone(420) # Asia/Bangkok
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=False)
class LegacyDatabaseTests(TestCase):
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination actually never happens.
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt.replace(tzinfo=EAT), dt.replace(microsecond=0))
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination is no longer possible since timezone support
# was removed from the SQLite backend -- it didn't work.
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_utc_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# django.db.backends.utils.typecast_dt will just drop the
# timezone, so a round-trip in the database alters the data (!)
# interpret the naive datetime in local time and you get a wrong value
self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
# interpret the naive datetime in original time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=UTC), dt)
@skipUnlessDBFeature('supports_timezones')
@skipIfDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# interpret the naive datetime in local time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=EAT), dt)
# This combination is no longer possible since timezone support
# was removed from the SQLite backend -- it didn't work.
@skipUnlessDBFeature('supports_timezones')
@skipUnlessDBFeature('needs_datetime_string_cast')
def test_aware_datetime_in_other_timezone_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertIsNone(event.dt.tzinfo)
# django.db.backends.utils.typecast_dt will just drop the
# timezone, so a round-trip in the database alters the data (!)
# interpret the naive datetime in local time and you get a wrong value
self.assertNotEqual(event.dt.replace(tzinfo=EAT), dt)
# interpret the naive datetime in original time to get the correct value
self.assertEqual(event.dt.replace(tzinfo=ICT), dt)
@skipIfDBFeature('supports_timezones')
    def test_aware_datetime_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
with self.assertRaises(ValueError):
Event.objects.create(dt=dt)
def test_auto_now_and_auto_now_add(self):
now = datetime.datetime.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0),
datetime.datetime(2011, 1, 1, 4, 0, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0),
datetime.datetime(2011, 1, 1, 4, 30, 0)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertTrue(AllDayEvent.objects.filter(day__gte=dt).exists())
@override_settings(TIME_ZONE='Africa/Nairobi', USE_TZ=True)
class NewDatabaseTests(TestCase):
@requires_tz_support
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
def test_datetime_from_date(self):
dt = datetime.date(2011, 9, 1)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
self.assertEqual(event.dt, datetime.datetime(2011, 9, 1, tzinfo=EAT))
@requires_tz_support
@skipUnlessDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(tzinfo=EAT))
@requires_tz_support
@skipIfDBFeature('supports_microsecond_precision')
def test_naive_datetime_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
Event.objects.create(dt=dt)
self.assertEqual(len(recorded), 1)
msg = str(recorded[0].message)
self.assertTrue(msg.startswith("DateTimeField Event.dt received "
"a naive datetime"))
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
# naive datetimes are interpreted in local time
self.assertEqual(event.dt, dt.replace(microsecond=0, tzinfo=EAT))
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipUnlessDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
@skipIfDBFeature('supports_microsecond_precision')
def test_aware_datetime_in_local_timezone_with_microsecond_unsupported(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060, tzinfo=EAT)
Event.objects.create(dt=dt)
event = Event.objects.get()
# microseconds are lost during a round-trip in the database
self.assertEqual(event.dt, dt.replace(microsecond=0))
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
Event.objects.create(dt=dt)
event = Event.objects.get()
self.assertEqual(event.dt, dt)
def test_auto_now_and_auto_now_add(self):
now = timezone.now()
past = now - datetime.timedelta(seconds=2)
future = now + datetime.timedelta(seconds=2)
Timestamp.objects.create()
ts = Timestamp.objects.get()
self.assertLess(past, ts.created)
self.assertLess(past, ts.updated)
        self.assertGreater(future, ts.created)
self.assertGreater(future, ts.updated)
def test_query_filter(self):
dt1 = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
dt2 = datetime.datetime(2011, 9, 1, 14, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt1)
Event.objects.create(dt=dt2)
self.assertEqual(Event.objects.filter(dt__gte=dt1).count(), 2)
self.assertEqual(Event.objects.filter(dt__gt=dt1).count(), 1)
self.assertEqual(Event.objects.filter(dt__gte=dt2).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt2).count(), 0)
@skipIf(pytz is None, "this test requires pytz")
def test_query_filter_with_pytz_timezones(self):
tz = pytz.timezone('Europe/Paris')
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=tz)
Event.objects.create(dt=dt)
next = dt + datetime.timedelta(seconds=3)
prev = dt - datetime.timedelta(seconds=3)
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__exact=next).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, next)).count(), 0)
self.assertEqual(Event.objects.filter(dt__in=(prev, dt, next)).count(), 1)
self.assertEqual(Event.objects.filter(dt__range=(prev, next)).count(), 1)
@requires_tz_support
def test_query_filter_with_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)
Event.objects.create(dt=dt)
dt = dt.replace(tzinfo=None)
with warnings.catch_warnings(record=True) as recorded:
warnings.simplefilter('always')
# naive datetimes are interpreted in local time
self.assertEqual(Event.objects.filter(dt__exact=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__lte=dt).count(), 1)
self.assertEqual(Event.objects.filter(dt__gt=dt).count(), 0)
self.assertEqual(len(recorded), 3)
for warning in recorded:
msg = str(warning.message)
self.assertTrue(msg.startswith("DateTimeField Event.dt "
"received a naive datetime"))
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 2)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 2)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 2)
self.assertEqual(Event.objects.filter(dt__hour=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetime_lookups_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
# These two dates fall in the same day in EAT, but in different days,
# years and months in UTC.
self.assertEqual(Event.objects.filter(dt__year=2011).count(), 1)
self.assertEqual(Event.objects.filter(dt__month=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__day=1).count(), 1)
self.assertEqual(Event.objects.filter(dt__week_day=7).count(), 1)
self.assertEqual(Event.objects.filter(dt__hour=22).count(), 1)
self.assertEqual(Event.objects.filter(dt__minute=30).count(), 2)
self.assertEqual(Event.objects.filter(dt__second=0).count(), 2)
def test_query_aggregation(self):
# Only min and max make sense for datetimes.
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT))
result = Event.objects.all().aggregate(Min('dt'), Max('dt'))
self.assertEqual(result, {
'dt__min': datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT),
'dt__max': datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT),
})
def test_query_annotation(self):
# Only min and max make sense for datetimes.
morning = Session.objects.create(name='morning')
afternoon = Session.objects.create(name='afternoon')
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 23, 20, 20, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), session=afternoon)
SessionEvent.objects.create(dt=datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT), session=morning)
morning_min_dt = datetime.datetime(2011, 9, 1, 3, 20, 40, tzinfo=EAT)
afternoon_min_dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).order_by('dt'),
[morning_min_dt, afternoon_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__lt=afternoon_min_dt),
[morning_min_dt],
transform=lambda d: d.dt)
self.assertQuerysetEqual(
Session.objects.annotate(dt=Min('events__dt')).filter(dt__gte=afternoon_min_dt),
[afternoon_min_dt],
transform=lambda d: d.dt)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 0, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT),
datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT)],
transform=lambda d: d)
@skipUnlessDBFeature('has_zoneinfo_database')
def test_query_datetimes_in_other_timezone(self):
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=EAT))
Event.objects.create(dt=datetime.datetime(2011, 1, 1, 4, 30, 0, tzinfo=EAT))
with timezone.override(UTC):
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'year'),
[datetime.datetime(2010, 1, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'month'),
[datetime.datetime(2010, 12, 1, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'day'),
[datetime.datetime(2010, 12, 31, 0, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 0, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'hour'),
[datetime.datetime(2010, 12, 31, 22, 0, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 0, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'minute'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
self.assertQuerysetEqual(
Event.objects.datetimes('dt', 'second'),
[datetime.datetime(2010, 12, 31, 22, 30, 0, tzinfo=UTC),
datetime.datetime(2011, 1, 1, 1, 30, 0, tzinfo=UTC)],
transform=lambda d: d)
def test_raw_sql(self):
# Regression test for #17755
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
event = Event.objects.create(dt=dt)
self.assertQuerysetEqual(
Event.objects.raw('SELECT * FROM timezones_event WHERE dt = %s', [dt]),
[event],
transform=lambda d: d)
@requires_tz_support
def test_filter_date_field_with_aware_datetime(self):
# Regression test for #17742
day = datetime.date(2011, 9, 1)
AllDayEvent.objects.create(day=day)
# This is 2011-09-02T01:30:00+03:00 in EAT
dt = datetime.datetime(2011, 9, 1, 22, 30, 0, tzinfo=UTC)
self.assertFalse(AllDayEvent.objects.filter(day__gte=dt).exists())
def test_null_datetime(self):
# Regression test for #17294
e = MaybeEvent.objects.create()
self.assertEqual(e.dt, None)
@override_settings(TIME_ZONE='Africa/Nairobi')
class SerializationTests(TestCase):
# Backend-specific notes:
# - JSON supports only milliseconds, microseconds will be truncated.
# - PyYAML dumps the UTC offset correctly for timezone-aware datetimes,
# but when it loads this representation, it substracts the offset and
# returns a naive datetime object in UTC (http://pyyaml.org/ticket/202).
# Tests are adapted to take these quirks into account.
def assert_python_contains_datetime(self, objects, dt):
self.assertEqual(objects[0]['fields']['dt'], dt)
def assert_json_contains_datetime(self, json, dt):
self.assertIn('"fields": {"dt": "%s"}' % dt, json)
def assert_xml_contains_datetime(self, xml, dt):
field = parseString(xml).getElementsByTagName('field')[0]
self.assertXMLEqual(field.childNodes[0].wholeText, dt)
def assert_yaml_contains_datetime(self, yaml, dt):
# Depending on the yaml dumper, '!timestamp' might be absent
six.assertRegex(self, yaml,
r"- fields: {dt: !(!timestamp)? '%s'}" % re.escape(dt))
def test_naive_datetime(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_naive_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, 405060)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30.405")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30.405060")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30.405060")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt, dt)
def test_aware_datetime_with_microsecond(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, 405060, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30.405+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt.replace(microsecond=405000))
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30.405060+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30.405060+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_utc(self):
dt = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T10:20:30Z")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T10:20:30+00:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 10:20:30+00:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_local_timezone(self):
dt = datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T13:20:30+03:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 13:20:30+03:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
def test_aware_datetime_in_other_timezone(self):
dt = datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT)
data = serializers.serialize('python', [Event(dt=dt)])
self.assert_python_contains_datetime(data, dt)
obj = next(serializers.deserialize('python', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('json', [Event(dt=dt)])
self.assert_json_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('json', data)).object
self.assertEqual(obj.dt, dt)
data = serializers.serialize('xml', [Event(dt=dt)])
self.assert_xml_contains_datetime(data, "2011-09-01T17:20:30+07:00")
obj = next(serializers.deserialize('xml', data)).object
self.assertEqual(obj.dt, dt)
if not isinstance(serializers.get_serializer('yaml'), serializers.BadSerializer):
data = serializers.serialize('yaml', [Event(dt=dt)])
self.assert_yaml_contains_datetime(data, "2011-09-01 17:20:30+07:00")
obj = next(serializers.deserialize('yaml', data)).object
self.assertEqual(obj.dt.replace(tzinfo=UTC), dt)
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class TemplateTests(TestCase):
@requires_tz_support
def test_localtime_templatetag_and_filters(self):
"""
Test the {% localtime %} templatetag and related filters.
"""
datetimes = {
'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT),
'naive': datetime.datetime(2011, 9, 1, 13, 20, 30),
}
templates = {
'notag': Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}"),
'noarg': Template("{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'on': Template("{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
'off': Template("{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}"),
}
# Transform a list of keys in 'datetimes' to the expected template
# output. This makes the definition of 'results' more readable.
def t(*result):
return '|'.join(datetimes[key].isoformat() for key in result)
# Results for USE_TZ = True
results = {
'utc': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('utc', 'eat', 'utc', 'ict'),
},
'eat': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('eat', 'eat', 'utc', 'ict'),
},
'ict': {
'notag': t('eat', 'eat', 'utc', 'ict'),
'noarg': t('eat', 'eat', 'utc', 'ict'),
'on': t('eat', 'eat', 'utc', 'ict'),
'off': t('ict', 'eat', 'utc', 'ict'),
},
'naive': {
'notag': t('naive', 'eat', 'utc', 'ict'),
'noarg': t('naive', 'eat', 'utc', 'ict'),
'on': t('naive', 'eat', 'utc', 'ict'),
'off': t('naive', 'eat', 'utc', 'ict'),
}
}
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
# Changes for USE_TZ = False
results['utc']['notag'] = t('utc', 'eat', 'utc', 'ict')
results['ict']['notag'] = t('ict', 'eat', 'utc', 'ict')
with self.settings(USE_TZ=False):
for k1, dt in six.iteritems(datetimes):
for k2, tpl in six.iteritems(templates):
ctx = Context({'dt': dt, 'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, '%s / %s: %r != %r' % (k1, k2, actual, expected))
@skipIf(pytz is None, "this test requires pytz")
def test_localtime_filters_with_pytz(self):
"""
Test the |localtime, |utc, and |timezone filters with pytz.
"""
# Use a pytz timezone as local time
tpl = Template("{% load tz %}{{ dt|localtime }}|{{ dt|utc }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)})
with self.settings(TIME_ZONE='Europe/Paris'):
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00")
# Use a pytz timezone as argument
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
tpl = Template("{% load tz %}{{ dt|timezone:'Europe/Paris' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_localtime_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% localtime foo %}{% endlocaltime %}").render()
def test_localtime_filters_do_not_raise_exceptions(self):
"""
Test the |localtime, |utc, and |timezone filters on bad inputs.
"""
tpl = Template("{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}")
with self.settings(USE_TZ=True):
# bad datetime value
ctx = Context({'dt': None, 'tz': ICT})
self.assertEqual(tpl.render(ctx), "None|||")
ctx = Context({'dt': 'not a date', 'tz': ICT})
self.assertEqual(tpl.render(ctx), "not a date|||")
# bad timezone value
tpl = Template("{% load tz %}{{ dt|timezone:tz }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': None})
self.assertEqual(tpl.render(ctx), "")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), 'tz': 'not a tz'})
self.assertEqual(tpl.render(ctx), "")
@requires_tz_support
def test_timezone_templatetag(self):
"""
Test the {% timezone %} templatetag.
"""
tpl = Template(
"{% load tz %}"
"{{ dt }}|"
"{% timezone tz1 %}"
"{{ dt }}|"
"{% timezone tz2 %}"
"{{ dt }}"
"{% endtimezone %}"
"{% endtimezone %}"
)
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
'tz1': ICT, 'tz2': None})
self.assertEqual(tpl.render(ctx), "2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00")
@skipIf(pytz is None, "this test requires pytz")
def test_timezone_templatetag_with_pytz(self):
"""
Test the {% timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}")
# Use a pytz timezone as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': pytz.timezone('Europe/Paris')})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
# Use a pytz timezone name as argument
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
'tz': 'Europe/Paris'})
self.assertEqual(tpl.render(ctx), "2011-09-01T12:20:30+02:00")
def test_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% timezone %}{% endtimezone %}").render()
with self.assertRaises(ValueError if pytz is None else pytz.UnknownTimeZoneError):
Template("{% load tz %}{% timezone tz %}{% endtimezone %}").render(Context({'tz': 'foobar'}))
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_get_current_timezone_templatetag(self):
"""
Test the {% get_current_timezone %} templatetag.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Africa/Nairobi" if pytz else "EAT")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context()), "UTC")
tpl = Template("{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
with timezone.override(UTC):
self.assertEqual(tpl.render(Context({'tz': ICT})), "+0700")
@skipIf(pytz is None, "this test requires pytz")
def test_get_current_timezone_templatetag_with_pytz(self):
"""
Test the {% get_current_timezone %} templatetag with pytz.
"""
tpl = Template("{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}")
with timezone.override(pytz.timezone('Europe/Paris')):
self.assertEqual(tpl.render(Context()), "Europe/Paris")
tpl = Template("{% load tz %}{% timezone 'Europe/Paris' %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), "Europe/Paris")
def test_get_current_timezone_templatetag_invalid_argument(self):
with self.assertRaises(TemplateSyntaxError):
Template("{% load tz %}{% get_current_timezone %}").render()
@skipIf(sys.platform.startswith('win'), "Windows uses non-standard time zone names")
def test_tz_template_context_processor(self):
"""
Test the django.template.context_processors.tz template context processor.
"""
tpl = Template("{{ TIME_ZONE }}")
context = Context()
self.assertEqual(tpl.render(context), "")
request_context = RequestContext(HttpRequest(), processors=[context_processors.tz])
self.assertEqual(tpl.render(request_context), "Africa/Nairobi" if pytz else "EAT")
@requires_tz_support
def test_date_and_time_template_filters(self):
tpl = Template("{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 23:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-02 at 03:20:20")
def test_date_and_time_template_filters_honor_localtime(self):
tpl = Template("{% load tz %}{% localtime off %}{{ dt|date:'Y-m-d' }} at {{ dt|time:'H:i:s' }}{% endlocaltime %}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 20, 20, 20, tzinfo=UTC)})
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
with timezone.override(ICT):
self.assertEqual(tpl.render(ctx), "2011-09-01 at 20:20:20")
def test_localtime_with_time_zone_setting_set_to_none(self):
# Regression for #17274
tpl = Template("{% load tz %}{{ dt }}")
ctx = Context({'dt': datetime.datetime(2011, 9, 1, 12, 20, 30, tzinfo=EAT)})
with self.settings(TIME_ZONE=None):
# the actual value depends on the system time zone of the host
self.assertTrue(tpl.render(ctx).startswith("2011"))
@requires_tz_support
def test_now_template_tag_uses_current_time_zone(self):
# Regression for #17343
tpl = Template("{% now \"O\" %}")
self.assertEqual(tpl.render(Context({})), "+0300")
with timezone.override(ICT):
self.assertEqual(tpl.render(Context({})), "+0700")
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=False)
class LegacyFormsTests(TestCase):
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_non_existent_time(self):
form = EventForm({'dt': '2011-03-27 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 3, 27, 2, 30, 0))
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_ambiguous_time(self):
form = EventForm({'dt': '2011-10-30 02:30:00'})
with timezone.override(pytz.timezone('Europe/Paris')):
# this is obviously a bug
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 10, 30, 2, 30, 0))
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 13, 20, 30))
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 13, 20, 30))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True)
class NewFormsTests(TestCase):
@requires_tz_support
def test_form(self):
form = EventForm({'dt': '2011-09-01 13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_other_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30'})
with timezone.override(ICT):
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
def test_form_with_explicit_timezone(self):
form = EventForm({'dt': '2011-09-01 17:20:30+07:00'})
        # Datetime input formats don't allow providing a time zone.
self.assertFalse(form.is_valid())
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_non_existent_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-03-27 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-03-27 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@skipIf(pytz is None, "this test requires pytz")
def test_form_with_ambiguous_time(self):
with timezone.override(pytz.timezone('Europe/Paris')):
form = EventForm({'dt': '2011-10-30 02:30:00'})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors['dt'],
["2011-10-30 02:30:00 couldn't be interpreted in time zone "
"Europe/Paris; it may be ambiguous or it may not exist."])
@requires_tz_support
def test_split_form(self):
form = EventSplitForm({'dt_0': '2011-09-01', 'dt_1': '13:20:30'})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['dt'], datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_form(self):
form = EventLocalizedForm(initial={'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)})
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@requires_tz_support
def test_model_form(self):
EventModelForm({'dt': '2011-09-01 13:20:30'}).save()
e = Event.objects.get()
self.assertEqual(e.dt, datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
@requires_tz_support
def test_localized_model_form(self):
form = EventLocalizedModelForm(instance=Event(dt=datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT)))
with timezone.override(ICT):
self.assertIn("2011-09-01 17:20:30", str(form))
@override_settings(DATETIME_FORMAT='c', TIME_ZONE='Africa/Nairobi', USE_L10N=False, USE_TZ=True,
PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF='timezones.urls')
class AdminTests(TestCase):
fixtures = ['tz_users.xml']
def setUp(self):
self.client.login(username='super', password='secret')
@requires_tz_support
def test_changelist(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(EAT).isoformat())
def test_changelist_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin:timezones_event_changelist'))
self.assertContains(response, e.dt.astimezone(ICT).isoformat())
@requires_tz_support
def test_change_editable(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
response = self.client.get(reverse('admin:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(EAT).date().isoformat())
self.assertContains(response, e.dt.astimezone(EAT).time().isoformat())
def test_change_editable_in_other_timezone(self):
e = Event.objects.create(dt=datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC))
with timezone.override(ICT):
response = self.client.get(reverse('admin:timezones_event_change', args=(e.pk,)))
self.assertContains(response, e.dt.astimezone(ICT).date().isoformat())
self.assertContains(response, e.dt.astimezone(ICT).time().isoformat())
@requires_tz_support
def test_change_readonly(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
response = self.client.get(reverse('admin:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(EAT).isoformat())
def test_change_readonly_in_other_timezone(self):
Timestamp.objects.create()
# re-fetch the object for backends that lose microseconds (MySQL)
t = Timestamp.objects.get()
with timezone.override(ICT):
response = self.client.get(reverse('admin:timezones_timestamp_change', args=(t.pk,)))
self.assertContains(response, t.created.astimezone(ICT).isoformat())
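# A minimal sketch (not part of the test suite) of the conversion behaviour the
# tests above exercise: while a time zone is active, timezone.localtime converts
# an aware datetime into that zone for display.
def _localtime_example():
    aware = datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC)
    with timezone.override(ICT):
        return timezone.localtime(aware)  # 2011-09-01 17:20:30+07:00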
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yun, Liu<[email protected]>
import re
import subprocess
import unittest
import os
import zipfile
import shutil
from xml.etree import ElementTree
import json
import sys
sys.path.append("../")
import comm
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
def invokeCrosswalkPkg(self, params=None):
params = params or []
cmdline = []
if comm.HOST_PREFIX:
cmdline.append(comm.HOST_PREFIX)
cmdline.extend([os.path.join(comm.PackTools, 'crosswalk-pkg')])
cmdline.extend(params)
process = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, _ = process.communicate()
self.assertEquals(process.returncode, 0)
return output
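    # Note (added for clarity): the helper above shells out to the crosswalk-pkg
    # CLI, asserts a zero exit status, and returns the combined stdout/stderr so
    # the tests below can inspect it for version strings and build messages.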
def test_path_absolute(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
output = self.invokeCrosswalkPkg([
'--platforms=android',
'--android=' + comm.ANDROID_MODE,
'--crosswalk=' + comm.crosswalkzip,
'--targets=' + comm.BIT,
comm.ConstPath + '/../testapp/create_package_basic/',
])
apks = os.listdir(os.getcwd())
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
self.assertEquals(apkLength, 2)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.run(self)
comm.clear("org.xwalk.test")
self.assertIn("target android", output)
self.assertNotIn("candle", output)
self.assertNotIn("light", output)
def test_default_channel(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
output = self.invokeCrosswalkPkg([
'--platforms=android',
'--android=' + comm.ANDROID_MODE,
'--targets=' + comm.BIT,
comm.ConstPath + '/../testapp/create_package_basic/',
])
version = comm.check_crosswalk_version(self, "stable")
apks = os.listdir(os.getcwd())
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
self.assertEquals(apkLength, 2)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.run(self)
comm.clear("org.xwalk.test")
self.assertIn(version, output)
def test_crosswalk_release(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
self.invokeCrosswalkPkg([
'--platforms=android',
'--android=' + comm.ANDROID_MODE,
'--crosswalk=' + comm.crosswalkzip,
'--targets=' + comm.BIT,
comm.ConstPath + '/../testapp/create_package_basic/',
])
apks = os.listdir(os.getcwd())
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
self.assertEquals(apkLength, 2)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.run(self)
comm.clear("org.xwalk.test")
def test_crosswalk_build(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
crosswalkdir = zipfile.ZipFile(comm.crosswalkzip,'r')
for file in crosswalkdir.namelist():
crosswalkdir.extract(file, r'.')
crosswalkdir.close()
os.chdir('org.xwalk.test')
self.invokeCrosswalkPkg([
'--platforms=android',
'--android=' + comm.ANDROID_MODE,
'--crosswalk=' + comm.crosswalkzip[:-4] + '/',
'--targets=' + comm.BIT,
comm.ConstPath + '/../testapp/create_package_basic/',
])
apks = os.listdir(os.getcwd())
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
self.assertEquals(apkLength, 2)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.run(self)
comm.clear("org.xwalk.test")
shutil.rmtree(comm.crosswalkzip[:-4])
def test_release_without_argument(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
self.invokeCrosswalkPkg([
'--platforms=android',
'--android=' + comm.ANDROID_MODE,
'--release',
'--crosswalk=' + comm.crosswalkzip,
'--targets=' + comm.BIT,
comm.ConstPath + '/../testapp/create_package_basic/',
])
apks = os.listdir(os.getcwd())
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
self.assertEquals(apkLength, 2)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.run(self)
comm.clear("org.xwalk.test")
def test_crosswalk_version(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
self.invokeCrosswalkPkg([
'--platforms=android',
'--android=' + comm.ANDROID_MODE,
'--crosswalk=' + comm.crosswalkVersion,
'--targets=' + comm.BIT,
comm.ConstPath + '/../testapp/create_package_basic/',
])
apks = os.listdir(os.getcwd())
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
self.assertEquals(apkLength, 2)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.run(self)
comm.clear("org.xwalk.test")
def test_manifest_packageId(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
if os.path.exists(comm.ConstPath + "/../testapp/start_url/manifest.json"):
os.remove(comm.ConstPath + "/../testapp/start_url/manifest.json")
os.chdir('org.xwalk.test')
self.invokeCrosswalkPkg(['--platforms=android',
'--android=' + comm.ANDROID_MODE,
'--crosswalk=' + comm.crosswalkzip,
'--manifest=org.xwalk.test',
'--targets=' + comm.BIT,
comm.ConstPath + '/../testapp/start_url/'])
with open(comm.ConstPath + "/../testapp/start_url/manifest.json") as json_file:
data = json.load(json_file)
apks = os.listdir(os.getcwd())
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
self.assertEquals(apkLength, 2)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.run(self)
comm.clear("org.xwalk.test")
os.remove(comm.ConstPath + "/../testapp/start_url/manifest.json")
self.assertEquals(data['xwalk_package_id'].strip(os.linesep), "org.xwalk.test")
def test_keep_project(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
output = self.invokeCrosswalkPkg([
'--platforms=android',
'--android=' + comm.ANDROID_MODE,
'--keep',
'--crosswalk=' + comm.crosswalkzip,
'--targets=' + comm.BIT,
comm.ConstPath + '/../testapp/create_package_basic/',
])
apks = os.listdir(os.getcwd())
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if comm.BIT == "64":
self.assertIn("64", apks[i])
apkLength = apkLength + 1
self.assertEquals(apkLength, 2)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
project_dir = None
for line in output.split('\n'):
match = re.search(r'Keeping build tree in (.+)$', line)
if match is not None:
project_dir = match.group(1)
comm.run(self)
comm.clear("org.xwalk.test")
self.assertTrue(os.path.isdir(project_dir))
self.assertTrue(os.path.isdir(os.path.join(project_dir, "app")))
self.assertTrue(os.path.isdir(os.path.join(project_dir, "prj")))
def test_target_arch(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
if comm.BIT == "64":
self.invokeCrosswalkPkg([
'--platforms=android',
'--android=' + comm.ANDROID_MODE,
'--crosswalk=' + comm.crosswalkzip,
'--targets=arm64-v8a x86_64',
comm.ConstPath + '/../testapp/create_package_basic/',
])
else:
self.invokeCrosswalkPkg([
'--platforms=android',
'--android=' + comm.ANDROID_MODE,
'--crosswalk=' + comm.crosswalkzip,
'--targets=armeabi-v7a x86',
comm.ConstPath + '/../testapp/create_package_basic/',
])
apks = os.listdir(os.getcwd())
x86Length = 0
x86_64Length = 0
armLength = 0
arm_64Length = 0
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if "64" in apks[i]:
x86_64Length = x86_64Length + 1
else:
x86Length = x86Length + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if "64" in apks[i]:
arm_64Length = arm_64Length + 1
else:
armLength = armLength + 1
if comm.BIT == "64":
self.assertEquals(x86_64Length, 1)
self.assertEquals(arm_64Length, 1)
self.assertEquals(x86Length, 0)
self.assertEquals(armLength, 0)
else:
self.assertEquals(x86_64Length, 0)
self.assertEquals(arm_64Length, 0)
self.assertEquals(x86Length, 1)
self.assertEquals(armLength, 1)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.run(self)
comm.clear("org.xwalk.test")
def test_target_a_x(self):
comm.setUp()
os.chdir(comm.XwalkPath)
comm.clear("org.xwalk.test")
os.mkdir("org.xwalk.test")
os.chdir('org.xwalk.test')
self.invokeCrosswalkPkg([
'--platforms=android',
'--android=' + comm.ANDROID_MODE,
'--crosswalk=canary',
'--targets=a x',
comm.ConstPath + '/../testapp/create_package_basic/',
])
apks = os.listdir(os.getcwd())
x86Length = 0
x86_64Length = 0
armLength = 0
arm_64Length = 0
apkLength = 0
if comm.MODE != " --android-shared":
for i in range(len(apks)):
if apks[i].endswith(".apk") and "x86" in apks[i]:
if "64" in apks[i]:
x86_64Length = x86_64Length + 1
else:
x86Length = x86Length + 1
if apks[i].endswith(".apk") and "arm" in apks[i]:
if "64" in apks[i]:
arm_64Length = arm_64Length + 1
else:
armLength = armLength + 1
self.assertEquals(x86_64Length, 1)
self.assertEquals(arm_64Length, 1)
self.assertEquals(x86Length, 1)
self.assertEquals(armLength, 1)
else:
for i in range(len(apks)):
if apks[i].endswith(".apk") and "shared" in apks[i]:
apkLength = apkLength + 1
appVersion = apks[i].split('-')[1]
self.assertEquals(apkLength, 1)
comm.clear("org.xwalk.test")
def test_tools_version(self):
comm.setUp()
os.chdir(comm.XwalkPath)
self.assertEquals(self.invokeCrosswalkPkg(['--version']),
self.invokeCrosswalkPkg(['--v']))
version_output = self.invokeCrosswalkPkg(['--version'])
with open(comm.PackTools + "../package.json") as json_file:
data = json.load(json_file)
self.assertEquals(data['version'], version_output.strip())
if __name__ == '__main__':
unittest.main()
#!/usr/bin/python
#####
# tries predicting tree cover in Coto Brus
#####
import gdal
import scipy
import scipy.optimize  # imported explicitly; scipy.optimize.curve_fit is used in func_fit below
import numpy as np
from sklearn import tree
from sklearn import ensemble
from sklearn import linear_model
from sklearn import svm
from sklearn import metrics
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
import pickle
# some functions for plotting later
def func_linear(x, m, b):
return m * x + b
def func_fit(x, y, function):
opt, cov = scipy.optimize.curve_fit(function, x, y)
y_fit = function(np.array(x), *opt)
rsq = metrics.r2_score(y, y_fit)
rms = np.sqrt(metrics.mean_squared_error(y, y_fit))
return [y_fit, rsq, rms]
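# A minimal sketch (illustrative only) of how func_fit can be applied: fit a
# straight line to noisy synthetic data and recover the fit, R^2, and RMSE.
_x_demo = np.linspace(0, 1, 50)
_y_demo = 2.0 * _x_demo + 0.1 + np.random.normal(0, 0.05, 50)
_y_fit_demo, _rsq_demo, _rms_demo = func_fit(_x_demo, _y_demo, func_linear)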
# set base directory for files
base = '/home/cba/Downloads/tree-cover/'
# predictor data
#pred_file = base + 'coto_brus_predictors_masked2.tif'
pred_file = base + 'coto_brus_despeckle.tif'
pred_ref = gdal.Open(pred_file)
pred_prj = pred_ref.GetProjection()
pred_geo = pred_ref.GetGeoTransform()
nx = pred_ref.RasterXSize
ny = pred_ref.RasterYSize
nb = pred_ref.RasterCount
#pred_names = ["Palsar HH", "Palsar HV", "% Soil", "% Veg", "% Urban", "NIRv"]
pred_names = ["Palsar HH", "Palsar HV", "NIRv", "% Soil", "% Veg", "% Urban"]
# training data
train_file = base + 'coto_brus_tree_cover.tif'
train_ref = gdal.Open(train_file)
train_nd = train_ref.GetRasterBand(1).GetNoDataValue()
# base name of the output model files
model_file = base + 'coto_brus_tree_cover_model'
# read the data into memory and reduce dimensions
train_arr = train_ref.ReadAsArray()
gd = np.where(train_arr != train_nd)
train = train_arr[gd[0], gd[1]]
train_arr = None
pred_arr = pred_ref.ReadAsArray()
pred = pred_arr[:, gd[0], gd[1]]
pred_arr = None
pred = pred.transpose()
# transform the predictor data to 0-1 range
scaler = preprocessing.MinMaxScaler()
pred = scaler.fit_transform(pred)
# save the scaler to use when applying later
pickle.dump(scaler, open(base + 'tree_cover_scaler.sav', 'wb'))
# create a simple train/test split (overwritten below by the stratified split)
x_train, x_test, y_train, y_test = train_test_split(
pred, train, test_size = 0.8)
# create a more advanced train/test split based on even
# sampling across tree cover bins (here, 10%)
binsize = 0.1
bins = np.arange(0, 1.00001, binsize)
#bins = np.array([0, 0.1, 1.0001])
# use 90% of the least common bin size as the number of per-bin samples
train_hist = np.histogram(train, bins = bins)
nrnd = int(np.floor(min(train_hist[0]) * 0.9))
# to get good representation of low tree cover, add a bunch more
# samples from that class
lt10 = 10
nrnd_lt10 = lt10 * nrnd
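# Worked example of the sampling sizes above (illustrative numbers, not from the data):
# if the least common tree-cover bin holds 1000 pixels, nrnd = floor(1000 * 0.9) = 900,
# so each bin contributes 900 training samples except the lowest (0-10%) bin, which
# contributes nrnd_lt10 = 10 * 900 = 9000 (assuming that bin actually has that many pixels).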
# create arrays to store the train and test data
n_train = int(np.floor(nrnd * len(train_hist[0]) + (nrnd_lt10 - nrnd)))
n_test = train.shape[0] - n_train
x_train = np.zeros((n_train, nb))
y_train = np.zeros(n_train)
x_test = np.zeros((n_test, nb))
y_test = np.zeros(n_test)
# create a counter to store the indices for where the test data go in the test array
train_start = 0
test_start = 0
for i in range(len(bins)-1):
# find the indices for this bin, then randomize them
ll = np.where(train >= bins[i])
if i != len(bins)-2:
ul = np.where(train < bins[i+1])
else:
ul = np.where(train <= bins[i+1])
intersect = np.intersect1d(ul, ll)
np.random.shuffle(intersect)
# separate the indices for each bin into train and test sets
if i == 0:
int_train = intersect[:nrnd_lt10]
int_test = intersect[nrnd_lt10:]
else:
int_train = intersect[:nrnd]
int_test = intersect[nrnd:]
# extract these data from the response and predictor data
n_test = len(int_test)
if i == 0:
n_train = nrnd_lt10
else:
n_train = nrnd
x_train[train_start:train_start+n_train, :] = pred[int_train, :]
y_train[train_start:train_start+n_train] = train[int_train]
train_start += n_train
x_test[test_start:test_start+n_test] = pred[int_test, :]
y_test[test_start:test_start+n_test] = train[int_test]
test_start += n_test
# create the regression models
gb = ensemble.GradientBoostingRegressor(learning_rate=0.1, min_samples_split=2,
n_estimators=1000, verbose=2, loss='lad')
rf = ensemble.RandomForestRegressor(n_jobs = 7, verbose = 2, n_estimators=15,
criterion='mae')
et = ensemble.ExtraTreesRegressor(n_jobs = 7, verbose = 1)
br = ensemble.BaggingRegressor(n_jobs = 7, verbose = 1)
ab = ensemble.AdaBoostRegressor()
ol = linear_model.LinearRegression(n_jobs = 7)
models = [gb, rf, et, br, ab, ol]  # keep this order aligned with model_names below
model_names = ['GradientBoosting', 'RandomForest', 'ExtraTrees',
'Bagging', 'AdaBoost', 'LinearModel']
output_pred = {}
output_test = {}
for i in range(len(models)):
# report starting
print("-----")
print("Processing {} model".format(model_names[i]))
# train the model
models[i].fit(x_train, y_train)
model_file_name = "{}_{}.sav".format(model_file, model_names[i])
# save the model
pickle.dump(models[i], open(model_file_name, 'wb'))
# test the model
y_pred = models[i].predict(x_test)
output_test[model_names[i]] = y_pred
# output the full model to a dictionary
opred = models[i].predict(pred)
output_pred[model_names[i]] = opred
# get metrics
r_squared = metrics.r2_score(y_test, y_pred)
explained_var = metrics.explained_variance_score(y_test, y_pred)
rmse = np.sqrt(metrics.mean_squared_error(y_test, y_pred))
mae = metrics.mean_absolute_error(y_test, y_pred)
# print model results
print("r-squared : {:0.3f}".format(r_squared))
print("explained var: {:0.3f}".format(explained_var))
print("rmse : {:0.3f}".format(rmse))
print("mean abs err : {:0.3f}".format(mae))
#print("var. importance")
#for j in range(len(pred_names)):
# print("{} : {:0.3f}".format(pred_names[j], rf_def.feature_importances_[j]))
# write to an output raster
output_file_name = "{}_{}.tif".format(model_file, model_names[i])
oref = gdal.GetDriverByName("GTiff").Create(output_file_name,
nx, ny, 1, gdal.GDT_Float32)
oref.SetProjection(pred_prj)
oref.SetGeoTransform(pred_geo)
ob = oref.GetRasterBand(1)
outarr = np.zeros((ny, nx))
outarr[gd[0], gd[1]] = opred
ob.WriteArray(outarr)
outarr = None
ob = None
oref = None
# plot the output
#plt.figure(1)
#lin_fit = func_fit(y_test, y_pred, func_linear)
#plt.hexbin(y_pred, y_test, cmap = 'plasma', alpha = 0.95, bins = 'log',
# label = "rmse: {:0.3f}".format(rmse), gridsize = 20)
#plt.plot(lin_fit[0], y_test, c = "black", label = "r-squared: {:0.3f}".format(lin_fit[1]))
#plt.xlabel("Predicted")
#plt.ylabel("Actual")
#plt.title("{} Tree Cover".format(model_names[i]))
#plt.legend()
#plt.show()
#plt.tight_layout()
#outfile = "{}_{}.png".format(model_file, model_names[i])
#plt.savefig(outfile)
#plt.close()
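# A minimal sketch (not part of the original script) of how the saved scaler and one of
# the pickled models could be reloaded later to predict on new, unscaled features;
# 'new_features' is a hypothetical (n_samples, n_bands) array:
#
# scaler = pickle.load(open(base + 'tree_cover_scaler.sav', 'rb'))
# model = pickle.load(open(model_file + '_RandomForest.sav', 'rb'))
# predictions = model.predict(scaler.transform(new_features))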
|
|
import math
import sys
import numpy
import scipy.stats
import Likelihood
import cellDetect
try:
import astropy.io.fits as pyfits
except ImportError:
import pyfits
import logging
log = logging.getLogger("main")
class FixedBinSearch(object):
def __init__(self, X, Y, t, timebin, eventfile, expomap, hwu, region=None):
# Store the hardware unit
self.hwu = hwu
# Read the events
self.X = X
self.Y = Y
self.time = t
self.timebin = float(timebin)
# Read the source catalog
sources = []
with pyfits.open(eventfile) as f:
srcX = []
srcY = []
for row in f['REGION'].data:
srcX.append(row.field("X")[0])
srcY.append(row.field("Y")[0])
sources = numpy.array(zip(srcX, srcY))
# Transform RA,Dec in X,Y for the sources
self.eventfile = eventfile
# wcs = XMMWCS.XMMWCS(self.eventfile)
# sources = numpy.array( wcs.sky2xy(zip(ras,decs)) )
log.debug("Read %s sources from the source catalog" % (len(sources)))
if region is None:
self.xmin = self.X.min()
self.xmax = self.X.max()
self.ymin = self.Y.min()
self.ymax = self.Y.max()
# Now keep only the sources within this region
idx = ((sources[:, 0] >= self.xmin) &
(sources[:, 0] <= self.xmax) &
(sources[:, 1] >= self.ymin) &
(sources[:, 1] <= self.ymax))
self.sources = sources[idx]
else:
# Use the region to filter the sources
filt = region.get_filter()
self.sources = []
for src in sources:
if filt.inside1(src[0], src[1]):
self.sources.append(src)
# Use the region to determine the extreme of X and Y
shape = region[0]
# NOTE: Assuming all units are in pixels
if shape.name == 'circle':
xc, yc, radius = shape.coord_list
# sometimes for no reason the radius is negative. Fix that
radius = abs(radius)
elif shape.name == 'ellipse':
xc, yc, r1, r2, ang = shape.coord_list
radius = max(abs(r1), abs(r2))
else:
raise RuntimeError("Region of shape %s is not supported" % (shape.name))
pass
self.xmin = xc - radius
self.xmax = xc + radius
self.ymin = yc - radius
self.ymax = yc + radius
nsrc = len(self.sources)
nmax = 100
if nsrc > nmax:
log.info("There were %s sources in the region, they are too many. Keeping only %s" % (nsrc, nmax))
self.sources = self.sources[:nmax]
log.debug("Kept %s source(s) within the region" % (len(self.sources)))
self.expomap = expomap
def scan(self, regfilt, threshold=5.0):
bins = numpy.arange(self.time.min(), self.time.max() + 1e-4, self.timebin)
xbins = numpy.arange(self.xmin, self.xmax, 25)
ybins = numpy.arange(self.ymin, self.ymax, 25)
region = Likelihood.Region(self.xmin, self.xmax,
self.ymin, self.ymax,
40, regfilt,
self.expomap, self.eventfile)
ls = Likelihood.Likelihood(self.X, self.Y, region)
# Build the likelihood model
# Bkg model (isotropic)
iso = Likelihood.Isotropic("bkg", 1.0)
m = Likelihood.GlobalModel("likeModel") + iso
knownSources = []
for i, src in enumerate(self.sources):
thisSrc = Likelihood.PointSource("src%i" % (i + 1), src[0], src[1], self.eventfile, self.hwu)
m += thisSrc
if i >= 10:
log.info("Fixing normalization of source %s" % (i + 1))
thisSrc["src%i_norm" % (i + 1)].fix()
knownSources.append(thisSrc)
ls.setModel(m)
# Minimize the mlogLike to get the background level
like0 = ls.minimize()
figIntegrated = ls.plot()
excessesFound = []
figs = [figIntegrated]
for t1, t2 in zip(bins[:-1], bins[1:]):
sys.stderr.write(".")
idx = (self.time >= t1) & (self.time < t2)
# db = DbScanner(self.X[idx],self.Y[idx])
# db = SAODetect(self.X[idx],self.Y[idx])
# db = cellDetect.CellDetect(self.X[idx],self.Y[idx],200,3)
ls = Likelihood.Likelihood(self.X[idx], self.Y[idx], region)
totCounts = ls.getTotalCounts()
if totCounts < 3:
# No need to perform any fit. Too few counts in this
# interval
log.debug("Skip interval %s - %s, less than 3 counts here" % (t1, t2))
continue
# Build the likelihood model
# Bkg model (isotropic)
iso = Likelihood.Isotropic("bkg", 0.1)
m = Likelihood.GlobalModel("likeModel") + iso
for i, src in enumerate(knownSources):
m += src
ls.setModel(m)
# Minimize the mlogLike to get the background level
like0 = ls.minimize(verbose=0)
# ls.plot()
bkgLevel = iso['bkg_amp'].value / pow(region.binsize, 2.0)
error = iso['bkg_amp'].error / pow(region.binsize, 2.0)
# print("Bkg: %s +/- %s" %(bkgLevel, error))
db = cellDetect.CellDetect(self.X[idx], self.xmin, self.xmax,
self.Y[idx], self.ymin, self.ymax,
200, 3)
centroids = db.findExcesses(bkgLevel * pow(200, 2), error * pow(200, 2))
newSources = []
if len(centroids) > 0:
# Verify if this source is truly significant
TSs = []
for (x, y) in centroids:
sys.stderr.write("-")
# Avoid computing the TS for a source too close to the ones already
# known
d = map(lambda src: math.sqrt(pow(src.xpos - x, 2) + pow(src.ypos - y, 2)), knownSources)
dd = filter(lambda x: x < 5.0 / 0.05, d)
if len(dd) > 0:
# There is a source close to this centroid. No need
# to investigate further
continue
thisSrc = Likelihood.PointSource("testsrc", x, y, self.eventfile, self.hwu,
knownSources[0].outfile)
ls.model += thisSrc
like1 = ls.minimize(verbose=0)
TSs.append(2 * (like0 - like1))
# print("(X,Y) = (%s,%s) -> TS = %s" %(x,y,TSs[-1]))
ls.model.removeSource("testsrc")
# Using the approximation significance = sqrt(TS), i.e. require TS >= threshold**2
if TSs[-1] >= pow(threshold, 2):
newSources.append([x, y, TSs[-1]])
if len(newSources) > 0:
db.centroids = numpy.array(newSources)
fig = db.plot(xbins, ybins)
figs.append(fig)
for (x, y, ts) in newSources:
excessesFound.append([t1, t2, x, y, math.sqrt(ts)])
sys.stderr.write("\n")
return excessesFound, figs
def get_pvalue(significance):
return scipy.stats.norm().sf(significance)
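# Added note: scipy.stats.norm().sf(x) is the one-sided Gaussian tail probability,
# e.g. get_pvalue(5.0) ~= 2.87e-7 for a 5-sigma excess.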
def consolidateTimeIntervals(allExcesses, pixelSize):
h = 0
while True:
if h >= allExcesses.shape[0]:
break
# Take h excess as reference
ref = allExcesses[h]
# Compute the distance between the reference
# and all the others
d = numpy.sqrt(numpy.power(ref[2] - allExcesses[h:, 2], 2) +
numpy.power(ref[3] - allExcesses[h:, 3], 2))
idx = (d <= 10.0 / pixelSize)
if numpy.sum(idx) > 0:
# Select all excesses close to the reference
thisExcesses = allExcesses[h:][idx]
# If they are contiguous also in the time domain,
# collapse them
toBeRemoved = []
for i, exc in enumerate(thisExcesses):
if exc[0] - ref[1] == 0:
# Contiguous also in time
# Collapse these excesses to one
# Update end time
ref[1] = exc[1]
# Update significance by combining the two significances
# (formula from:
# http://www.loujost.com/Statistics%20and%20Physics/Significance%20Levels/CombiningPValues.htm)
# If the sigma value in exc[-1] is too high, the get_pvalue function
# will return zero because the probability is so low that it underflows
# the precision of the machine. Hence, we put a lower limit at 1e-20
k1 = max(get_pvalue(exc[-1]), 1e-20)
k0 = max(get_pvalue(ref[-1]), 1e-20)
k = k0 * k1
new_pvalue = k - k * math.log(k)
ref[-1] = scipy.stats.norm().isf(new_pvalue)
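# Added note: for two independent p-values with product k, P(p1 * p2 <= k) = k - k*ln(k),
# which is the combined p-value computed above before it is converted back to a sigma
# value with the inverse survival function.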
toBeRemoved.append(i + h)
allExcesses = numpy.delete(allExcesses, toBeRemoved, axis=0)
h += 1
return allExcesses
|
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.resources.types import (
dynamic_search_ads_search_term_view,
)
from google.ads.googleads.v8.services.types import (
dynamic_search_ads_search_term_view_service,
)
from .transports.base import (
DynamicSearchAdsSearchTermViewServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import DynamicSearchAdsSearchTermViewServiceGrpcTransport
class DynamicSearchAdsSearchTermViewServiceClientMeta(type):
"""Metaclass for the DynamicSearchAdsSearchTermViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[DynamicSearchAdsSearchTermViewServiceTransport]]
_transport_registry[
"grpc"
] = DynamicSearchAdsSearchTermViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[DynamicSearchAdsSearchTermViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class DynamicSearchAdsSearchTermViewServiceClient(
metaclass=DynamicSearchAdsSearchTermViewServiceClientMeta
):
"""Service to fetch dynamic search ads views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DynamicSearchAdsSearchTermViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
DynamicSearchAdsSearchTermViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> DynamicSearchAdsSearchTermViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
DynamicSearchAdsSearchTermViewServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def dynamic_search_ads_search_term_view_path(
customer_id: str,
ad_group_id: str,
search_term_fingerprint: str,
headline_fingerprint: str,
landing_page_fingerprint: str,
page_url_fingerprint: str,
) -> str:
"""Return a fully-qualified dynamic_search_ads_search_term_view string."""
return "customers/{customer_id}/dynamicSearchAdsSearchTermViews/{ad_group_id}~{search_term_fingerprint}~{headline_fingerprint}~{landing_page_fingerprint}~{page_url_fingerprint}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
search_term_fingerprint=search_term_fingerprint,
headline_fingerprint=headline_fingerprint,
landing_page_fingerprint=landing_page_fingerprint,
page_url_fingerprint=page_url_fingerprint,
)
@staticmethod
def parse_dynamic_search_ads_search_term_view_path(
path: str,
) -> Dict[str, str]:
"""Parse a dynamic_search_ads_search_term_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/dynamicSearchAdsSearchTermViews/(?P<ad_group_id>.+?)~(?P<search_term_fingerprint>.+?)~(?P<headline_fingerprint>.+?)~(?P<landing_page_fingerprint>.+?)~(?P<page_url_fingerprint>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[
str, DynamicSearchAdsSearchTermViewServiceTransport, None
] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the dynamic search ads search term view service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.DynamicSearchAdsSearchTermViewServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(
transport, DynamicSearchAdsSearchTermViewServiceTransport
):
# transport is a DynamicSearchAdsSearchTermViewServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = DynamicSearchAdsSearchTermViewServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def get_dynamic_search_ads_search_term_view(
self,
request: dynamic_search_ads_search_term_view_service.GetDynamicSearchAdsSearchTermViewRequest = None,
*,
resource_name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> dynamic_search_ads_search_term_view.DynamicSearchAdsSearchTermView:
r"""Returns the requested dynamic search ads search term view in
full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.GetDynamicSearchAdsSearchTermViewRequest`):
The request object. Request message for
[DynamicSearchAdsSearchTermViewService.GetDynamicSearchAdsSearchTermView][google.ads.googleads.v8.services.DynamicSearchAdsSearchTermViewService.GetDynamicSearchAdsSearchTermView].
resource_name (:class:`str`):
Required. The resource name of the
dynamic search ads search term view to
fetch.
This corresponds to the ``resource_name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.resources.types.DynamicSearchAdsSearchTermView:
A dynamic search ads search term
view.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any([resource_name]):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a dynamic_search_ads_search_term_view_service.GetDynamicSearchAdsSearchTermViewRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
dynamic_search_ads_search_term_view_service.GetDynamicSearchAdsSearchTermViewRequest,
):
request = dynamic_search_ads_search_term_view_service.GetDynamicSearchAdsSearchTermViewRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if resource_name is not None:
request.resource_name = resource_name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_dynamic_search_ads_search_term_view
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("resource_name", request.resource_name),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("DynamicSearchAdsSearchTermViewServiceClient",)
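# A minimal usage sketch (not part of the generated client; the service-account file
# name and the resource-name components below are placeholders):
#
# client = DynamicSearchAdsSearchTermViewServiceClient.from_service_account_file(
#     "service_account.json")
# resource_name = (
#     DynamicSearchAdsSearchTermViewServiceClient
#     .dynamic_search_ads_search_term_view_path(
#         "1234567890", "111", "222", "333", "444", "555"))
# view = client.get_dynamic_search_ads_search_term_view(resource_name=resource_name)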
|
|
# -*- coding: utf-8 -*-
from unittest import TestCase
from jsonschema import RefResolver
from mock import patch, Mock
from pluct.resource import Resource, ObjectResource, ArrayResource
from pluct.session import Session
from pluct.schema import Schema
class BaseTestCase(TestCase):
def setUp(self):
self.session = Session()
def resource_from_data(self, url, data=None, schema=None):
resource = Resource.from_data(
url=url, data=data, schema=schema, session=self.session)
return resource
def resource_from_response(self, response, schema):
resource = Resource.from_response(
response, session=self.session, schema=schema)
return resource
class ResourceInitTestCase(BaseTestCase):
def test_blocks_init_of_base_class(self):
with self.assertRaises(NotImplementedError):
Resource()
class ResourceTestCase(BaseTestCase):
def setUp(self):
super(ResourceTestCase, self).setUp()
self.data = {
"name": "repos",
"platform": "js",
}
self.raw_schema = {
'type': "object",
'required': ["platform"],
'title': "some title",
'properties': {
u'name': {u'type': u'string'},
u'platform': {u'type': u'string'}
},
'links': [
{
"href": "/apps/{name}/log",
"method": "GET",
"rel": "log"
},
{
"href": "/apps/{name}/env",
"method": "GET",
"rel": "env"
}
]}
self.schema = Schema(
href="url.com", raw_schema=self.raw_schema, session=self.session)
self.url = "http://app.com/content"
self.result = self.resource_from_data(
url=self.url, data=self.data, schema=self.schema)
def test_get_should_returns_a_resource(self):
self.assertIsInstance(self.result, Resource)
def test_missing_attribute(self):
with self.assertRaises(AttributeError):
self.result.not_found
def test_str(self):
self.assertEqual(str(self.data), str(self.result))
def test_data(self):
self.assertEqual(self.data, self.result.data)
def test_iter(self):
iterated = [i for i in self.result]
self.assertEqual(iterated, self.data.keys())
def test_schema(self):
self.assertEqual(self.schema.url, self.result.schema.url)
def test_is_valid_schema_error(self):
old = self.result.schema['required']
try:
self.result.schema['required'] = ["ble"]
self.assertFalse(self.result.is_valid())
finally:
self.result.schema['required'] = old
def test_is_valid_invalid(self):
data = {
u'doestnotexists': u'repos',
}
result = self.resource_from_data('/url', data=data, schema=self.schema)
self.assertFalse(result.is_valid())
def test_is_valid(self):
self.assertTrue(self.result.is_valid())
def test_resolve_pointer(self):
self.assertEqual(self.result.resolve_pointer("/name"), "repos")
def test_resource_should_be_instance_of_dict(self):
self.assertIsInstance(self.result, dict)
def test_resource_should_be_instance_of_schema(self):
self.assertIsInstance(self.result, Resource)
def test_is_valid_call_validate_with_resolver_instance(self):
with patch('pluct.resources.validate') as mock_validate:
self.result.is_valid()
self.assertTrue(mock_validate.called)
resolver = mock_validate.call_args[-1]['resolver']
self.assertIsInstance(resolver, RefResolver)
http_handler, https_handler = resolver.handlers.values()
self.assertEquals(http_handler, self.result.session_request_json)
self.assertEquals(https_handler, self.result.session_request_json)
def test_session_request_json(self):
mock_request_return = Mock()
with patch.object(self.result.session, 'request') as mock_request:
mock_request.return_value = mock_request_return
self.result.session_request_json(self.url)
self.assertTrue(mock_request.called)
self.assertTrue(mock_request_return.json.called)
class ParseResourceTestCase(BaseTestCase):
def setUp(self):
super(ParseResourceTestCase, self).setUp()
self.item_schema = {
'type': 'object',
'properties': {
'id': {
'type': 'integer'
}
},
'links': [{
"href": "http://localhost/foos/{id}/",
"method": "GET",
"rel": "item",
}]
}
self.raw_schema = {
'title': "title",
'type': "object",
'properties': {
u'objects': {
u'type': u'array',
u'items': self.item_schema,
},
u'values': {
u'type': u'array'
}
}
}
self.schema = Schema(
href="url.com", raw_schema=self.raw_schema, session=self.session)
def test_wraps_array_objects_as_resources(self):
data = {
'objects': [
{'id': 111}
]
}
app = self.resource_from_data(
url="appurl.com", data=data, schema=self.schema)
item = app['objects'][0]
self.assertIsInstance(item, ObjectResource)
self.assertEqual(item.data['id'], 111)
self.assertEqual(item.schema, self.item_schema)
def test_eq_and_ne_operators(self):
data = {
'objects': [
{'id': 111}
]
}
app = self.resource_from_data(
url="appurl.com", data=data, schema=self.schema)
# Uses __eq__
self.assertDictContainsSubset(dict(objects=[dict(id=111)]), app)
# Uses __ne__
self.assertDictEqual(dict(objects=[dict(id=111)]), app)
def test_wraps_array_objects_as_resources_even_without_items_key(self):
data = {
'values': [
{'id': 1}
]
}
resource = self.resource_from_data(
url="appurl.com", data=data, schema=self.schema)
item = resource['values'][0]
self.assertIsInstance(item, Resource)
self.assertEqual(item.data['id'], 1)
@patch("requests.get")
def test_doesnt_wrap_non_objects_as_resources(self, get):
data = {
'values': [
1,
'string',
['array']
]
}
resource_list = self.resource_from_data(
url="appurl.com", data=data, schema=self.schema)
values = resource_list['values']
self.assertEqual(values, data['values'])
class FromResponseTestCase(BaseTestCase):
def setUp(self):
super(FromResponseTestCase, self).setUp()
self._response = Mock()
self._response.url = 'http://example.com'
content_type = 'application/json; profile=http://example.com/schema'
self._response.headers = {
'content-type': content_type
}
self.schema = Schema('/', raw_schema={}, session=self.session)
def test_should_return_resource_from_response(self):
self._response.json.return_value = {}
returned_resource = self.resource_from_response(
self._response, schema=self.schema)
self.assertEqual(returned_resource.url, 'http://example.com')
self.assertEqual(returned_resource.data, {})
def test_should_return_resource_from_response_with_no_json_data(self):
self._response.json = Mock(side_effect=ValueError())
returned_resource = self.resource_from_response(
self._response, schema=self.schema)
self.assertEqual(returned_resource.url, 'http://example.com')
self.assertEqual(returned_resource.data, {})
def test_resource_with_an_array_without_schema(self):
data = {
u'units': [
{u'name': u'someunit'}
],
u'name': u'registry',
}
s = Schema(
href='url',
raw_schema={
'title': 'app schema',
'type': 'object',
'required': ['name'],
'properties': {'name': {'type': 'string'}}
},
session=self.session)
response = self.resource_from_data("url", data, s)
self.assertDictEqual(data, response.data)
class ResourceFromDataTestCase(BaseTestCase):
def test_should_create_array_resource_from_list(self):
data = []
resource = self.resource_from_data('/', data=data)
self.assertIsInstance(resource, ArrayResource)
self.assertEqual(resource.url, '/')
self.assertEqual(resource.data, data)
def test_should_create_object_resource_from_dict(self):
data = {}
resource = self.resource_from_data('/', data=data)
self.assertIsInstance(resource, ObjectResource)
self.assertEqual(resource.url, '/')
self.assertEqual(resource.data, data)
|
|
# Copyright 2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
File: MongoSplitter.py
Author: NYU ITP Team
Description: Will calculate splits for a given collection/database
and store/return them in MongoSplit objects
'''
from pymongo import uri_parser
from split import MongoInputSplit
from mongodisco.mongo_util import get_collection, get_connection, get_database
import logging
import bson
def calculate_splits(config):
"""reads config to find out what type of split to perform"""
#if the user does not specify an inputURI we will need to construct it from
#the db/collection name TODO
uri = config.get("input_uri", "mongodb://localhost/test.in")
config['input_uri'] = uri
uri_info = uri_parser.parse_uri(uri)
#database_name = uri_info['database']
collection_name = uri_info['collection']
db = get_database(uri)
stats = db.command("collstats", collection_name)
is_sharded = False if "sharded" not in stats else stats["sharded"]
use_shards = config.get("use_shards", False)
use_chunks = config.get("use_chunks", False)
slave_ok = config.get("slave_ok", False)
logging.info(" Calculate Splits Code ... Use Shards? - %s\nUse Chunks? \
- %s\nCollection Sharded? - %s" % (use_shards, use_chunks, is_sharded))
logging.info("WRAPP")
logging.info(config)
logging.info("WRAPP")
if config.get("create_input_splits"):
logging.info("Creation of Input Splits is enabled.")
if is_sharded and (use_shards or use_chunks):
if use_shards and use_chunks:
logging.warn("Combining 'use chunks' and 'read from shards \
directly' can have unexpected & erratic behavior in a live \
system due to chunk migrations. ")
logging.info("Sharding mode calculation entering.")
return calculate_sharded_splits(config, use_shards, use_chunks, uri)
# perfectly ok for sharded setups to run with a normally calculated split.
#May even be more efficient for some cases
else:
logging.info("Using Unsharded Split mode \
(Calculating multiple splits though)")
return calculate_unsharded_splits(config, uri)
else:
logging.info("Creation of Input Splits is disabled;\
Non-Split mode calculation entering.")
return calculate_single_split(config)
def calculate_unsharded_splits(config, uri):
"""@todo: Docstring for calculate_unsharded_splits
:returns: @todo
Note: collection_name seems unnecessary --CW
"""
splits = [] # will return this
logging.info("Calculating unsharded splits")
coll = get_collection(uri)
q = {} if not "query" in config else config.get("query")
# create the command to do the splits
# command to split should look like this VV
# SON([('splitVector', u'test.test_data'), ('maxChunkSize', 2),
# ('force', True), ('keyPattern', {'x': 1})])
split_key = config.get('split_key')
split_size = config.get('split_size')
full_name = coll.full_name
logging.info("Calculating unsharded splits on collection %s with Split Key %s" %
(full_name, split_key))
logging.info("Max split size :: %sMB" % split_size)
cmd = bson.son.SON()
cmd["splitVector"] = full_name
cmd["maxChunkSize"] = split_size
cmd["keyPattern"] = split_key
cmd["force"] = False
split_max = config.get('split_max')
split_min = config.get('split_min')
if split_min is not None and split_max is not None:
cmd["min"] = split_min
cmd["max"] = split_max
logging.debug("Issuing Command: %s" % cmd)
data = coll.database.command(cmd)
logging.debug("%r" % data)
# results should look like this
# {u'ok': 1.0, u'splitKeys': [{u'_id': ObjectId('4f49775348d9846c5e582b00')},
# {u'_id': ObjectId('4f49775548d9846c5e58553b')}]}
if data.get("err"):
raise Exception(data.get("err"))
elif data.get("ok") != 1.0:
raise Exception("Unable to calculate splits")
split_data = data.get('splitKeys')
if not split_data:
logging.warning("WARNING: No Input Splits were calculated by the split code. \
Proceeding with a *single* split. Data may be too small, try lowering \
'mongo.input.split_size' if this is undesirable.")
else:
logging.info("Calculated %s splits" % len(split_data))
last_key = split_min
for bound in split_data:
splits.append(_split(config, q, last_key, bound))
last_key = bound
splits.append(_split(config, q, last_key, split_max))
return [s.format_uri_with_query() for s in splits]
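# Illustrative note (added): with split keys k1 < k2 returned by splitVector, the loop
# above yields three splits bounded by (split_min, k1), (k1, k2) and (k2, split_max),
# so the resulting ranges tile the collection's key space without gaps.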
def _split(config=None, q={}, min=None, max=None):
""" constructs a split object to be used later
:returns: an actual MongoSplit object
"""
print "_split being created"
query = bson.son.SON()
query["$query"] = q
if min:
query["$min"] = min
if max:
query["$max"] = max
logging.info("Assembled Query: ", query)
return MongoInputSplit(
config.get("input_uri"),
config.get("input_key"),
query,
config.get("fields"),
config.get("sort"),
config.get("limit", 0),
config.get("skip", 0),
config.get("timeout", True),
config.get("slave_ok",False))
def calculate_single_split(config):
splits = []
logging.info("calculating single split")
query = bson.son.SON()
query["$query"] = config.get("query", {})
splits.append(MongoInputSplit(
config.get("input_uri"),
config.get("input_key"),
query,
config.get("fields"),
config.get("sort"),
config.get("limit", 0),
config.get("skip", 0),
config.get("timeout", True),
config.get("slave_ok",False)))
logging.debug("Calculated %d split objects" % len(splits))
logging.debug("Dump of calculated splits ... ")
for s in splits:
logging.debug(" Split: %s" % s.__str__())
return [s.format_uri_with_query() for s in splits]
def calculate_sharded_splits(config, use_shards, use_chunks, uri):
"""Calculates splits fetching them directly from a sharded setup
:returns: A list of sharded splits
"""
splits = []
if use_chunks:
splits = fetch_splits_via_chunks(config, uri, use_shards)
elif use_shards:
logging.warn("Fetching Input Splits directly from shards is potentially \
dangerous for data consistency should migrations occur during the retrieval.")
splits = fetch_splits_from_shards(config, uri)
else:
logging.error("Neither useChunks nor useShards enabled; failed to pick a valid state.")
if splits is None:
logging.error("Failed to create/calculate Input Splits from Shard Chunks; final splits content is 'None'.")
logging.debug("Calculated splits and returning them - splits: %r" % splits)
return splits
def fetch_splits_from_shards(config, uri):
"""Internal method to fetch splits from shareded db
:returns: The splits
"""
logging.warn("WARNING getting splits that connect directly to the backend mongods is risky and might not produce correct results")
connection = get_connection(uri)
configDB = connection["config"]
shardsColl = configDB["shards"]
shardSet = []
splits = []
cur = shardsColl.find()
for row in cur:
host = row.get('host')
slashIndex = host.find("/")
if slashIndex > 0:
host = host[slashIndex + 1:]
shardSet.append(host)
splits = []
for host in shardSet:
new_uri = get_new_URI(uri,host)
config['input_uri'] = new_uri
splits += calculate_unsharded_splits(config,new_uri)
# This approach is preferable to the commented-out alternative below
return splits
'''
splits.append(MongoInputSplit(new_uri,
config.get("input_key"),
config.get("query"),
config.get("fields"),
config.get("sort"),
config.get("limit", 0),
config.get("skip", 0),
config.get("timeout", True)))
return [s.format_uri_with_query() for s in splits]
'''
def fetch_splits_via_chunks(config, uri, use_shards):
"""Retrieves split objects based on chunks in mongo
:returns: The splits
"""
originalQuery = config.get("query")
if use_shards:
logging.warn("WARNING getting splits that connect directly to the \
backend mongods is risky and might not produce correct results")
logging.debug("fetch_splits_via_chunks: originalQuery: %s" % originalQuery)
connection = get_connection(uri)
configDB = connection["config"]
shardMap = {}
if use_shards:
shardsColl = configDB["shards"]
cur = shardsColl.find()
for row in cur:
host = row.get('host')
slashIndex = host.find("/")
if slashIndex > 0:
host = host[slashIndex + 1:]
shardMap[row.get('_id')] = host
logging.debug("MongoInputFormat.getSplitsUsingChunks(): shard map is: %s" % shardMap)
chunksCollection = configDB["chunks"]
logging.info(configDB.collection_names())
query = bson.son.SON()
uri_info = uri_parser.parse_uri(uri)
query["ns"] = uri_info['database'] + '.' + uri_info['collection']
cur = chunksCollection.find(query)
logging.info("query is ", query)
logging.info(cur.count())
logging.info(chunksCollection.find().count())
numChunks = 0
splits = []
for row in cur:
numChunks += 1
minObj = row.get('min')
shardKeyQuery = bson.son.SON()
min = bson.son.SON()
max = bson.son.SON()
for key in minObj:
tMin = minObj[key]
tMax = (row.get('max'))[key]
#@to-do do type comparison first?
min[key] = tMin
max[key] = tMax
if originalQuery is None:
originalQuery = bson.son.SON()
shardKeyQuery["$query"] = originalQuery
shardKeyQuery["$min"] = min
shardKeyQuery["$max"] = max
inputURI = config.get("input_uri")
if use_shards:
shardName = row.get('shard')
host = shardMap[shardName]
inputURI = get_new_URI(inputURI, host)
splits.append(MongoInputSplit(
inputURI,
config.get("input_key"),
shardKeyQuery,
config.get("fields"),
config.get("sort"),
config.get("limit", 0),
config.get("skip", 0),
config.get("timeout", True),
config.get("slave_ok",False)))
# return splits in uri format for disco
return [s.format_uri_with_query() for s in splits]
def get_new_URI(original_URI, new_URI):
"""
:returns: a new Mongo_URI
"""
MONGO_URI_PREFIX = "mongodb://"
orig_URI_string = original_URI[len(MONGO_URI_PREFIX):]
server_end = -1
server_start = 0
"""to find the last index of / in the original URI string """
idx = orig_URI_string.rfind("/")
if idx < 0:
server_end = len(orig_URI_string)
else:
server_end = idx
idx = orig_URI_string.find("@")
server_start = idx + 1
sb = orig_URI_string[0:server_start] + new_URI + orig_URI_string[server_end:]
ans = MONGO_URI_PREFIX + sb
logging.debug("get_new_URI(): original " + original_URI + " new uri: " + ans)
return ans
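# Illustrative example (added; values are hypothetical):
# get_new_URI("mongodb://user:pw@router:27017/db.coll", "shard1:27018")
# returns "mongodb://user:pw@shard1:27018/db.coll" -- the server portion is swapped
# while any credentials prefix and the trailing namespace are preserved.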
|
|
"""
Base class for optimization.
"""
from abc import ABCMeta, abstractmethod
from collections import namedtuple
from copy import deepcopy
from functools import reduce
from string import ascii_letters, digits
from types import MethodType
from boltons.iterutils import pairwise
import numpy as np
from scipy.optimize import Bounds, basinhopping, differential_evolution, minimize, shgo
from .. import Distribution, insert_rvf, modify_outcomes
from ..algorithms.channelcapacity import channel_capacity
from ..exceptions import ditException, OptimizationException
from ..helpers import flatten, normalize_rvs, parse_rvs
from ..math import prod, sample_simplex
from ..utils import partitions, powerset
from ..utils.optimization import (BasinHoppingCallBack,
BasinHoppingInnerCallBack,
Uniquifier,
accept_test,
basinhop_status,
colon,
)
__all__ = (
'BaseOptimizer',
'BaseConvexOptimizer',
'BaseNonConvexOptimizer',
'BaseAuxVarOptimizer',
)
svdvals = lambda m: np.linalg.svd(m, compute_uv=False)
class BaseOptimizer(metaclass=ABCMeta):
"""
Base class for performing optimizations.
"""
def __init__(self, dist, rvs=None, crvs=None, rv_mode=None):
"""
Initialize the optimizer.
Parameters
----------
dist : Distribution
The distribution to optimize over.
rvs : iterable of iterables
The variables of interest.
crvs : iterable
The variables to be conditioned on.
rv_mode : str, None
Specifies how to interpret `rvs` and `crvs`. Valid options are:
{'indices', 'names'}. If equal to 'indices', then the elements of
`crvs` and `rvs` are interpreted as random variable indices. If
equal to 'names', then the elements are interpreted as random
variable names. If `None`, then the value of `dist._rv_mode` is
consulted, which defaults to 'indices'.
"""
rvs, crvs, rv_mode = normalize_rvs(dist, rvs, crvs, rv_mode)
self._dist = dist.copy(base='linear')
self._alphabet = self._dist.alphabet
self._original_shape = list(map(len, self._dist.alphabet))
self._true_rvs = [parse_rvs(self._dist, rv, rv_mode=rv_mode)[1] for rv in rvs]
self._true_crvs = parse_rvs(self._dist, crvs, rv_mode=rv_mode)[1]
self._dist = modify_outcomes(self._dist, tuple)
# compress all random variables down to single vars
self._unqs = []
for var in self._true_rvs + [self._true_crvs]:
unq = Uniquifier()
self._dist = insert_rvf(self._dist, lambda x: (unq(tuple(x[i] for i in var)),))
self._unqs.append(unq)
self._dist.make_dense()
self._full_shape = list(map(len, self._dist.alphabet))
self._full_pmf = self._dist.pmf.reshape(self._full_shape)
self._n = dist.outcome_length()
self._pmf = self._full_pmf.sum(axis=tuple(range(self._n)))
self._shape = self._pmf.shape
self._full_vars = set(range(len(self._full_shape)))
self._all_vars = set(range(len(rvs) + 1))
self._rvs = set(range(len(rvs)))
self._crvs = {len(rvs)}
self._proxy_vars = tuple(range(self._n, self._n + len(rvs) + 1))
self._additional_options = {}
self.constraints = []
###########################################################################
# Required methods in subclasses
@abstractmethod
def construct_initial(self):
"""
Select the default method of generating an initial condition.
Returns
-------
x : np.ndarray
An optimization vector.
"""
pass
@abstractmethod
def _optimization_backend(self, x0, minimizer_kwargs, niter):
"""
Abstract method for performing an optimization.
Parameters
----------
x0 : np.ndarray
An initial optimization vector.
minimizer_kwargs : dict
A dictionary of keyword arguments to pass to the optimizer.
niter : int
If applicable, the number of iterations to make.
Returns
-------
result : OptimizeResult, None
Returns the result of the optimization, or None if it failed.
"""
pass
@abstractmethod
def _objective(self):
"""
Construct the objective function.
Returns
-------
obj : func
The objective function.
"""
pass
###########################################################################
# Various initial conditions
def construct_random_initial(self):
"""
Construct a random optimization vector.
Returns
-------
x : np.ndarray
A random optimization vector.
"""
vec = sample_simplex(self._optvec_size)
return vec
def construct_uniform_initial(self):
"""
Construct a uniform optimization vector.
Returns
-------
x : np.ndarray
A uniform optimization vector.
"""
vec = np.ones(self._optvec_size) / self._optvec_size
return vec
###########################################################################
# Convenience functions for constructing objectives.
@staticmethod
def _h(p):
"""
Compute the entropy of `p`.
Parameters
----------
p : np.ndarray
A vector of probabilities.
Returns
-------
h : float
The entropy.
"""
return -np.nansum(p * np.log2(p))
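# Added note: np.nansum discards the NaN produced by 0 * log2(0), so zero-probability
# outcomes contribute nothing, matching the usual convention 0 log 0 = 0.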
def _entropy(self, rvs, crvs=None):
"""
Compute the conditional entropy, :math:`H[X|Y]`.
Parameters
----------
rvs : collection
The indices to consider as the X variable.
crvs : collection
The indices to consider as the Y variable.
Returns
-------
h : func
The conditional entropy.
"""
if crvs is None:
crvs = set()
idx_joint = tuple(self._all_vars - (rvs | crvs))
idx_crvs = tuple(self._all_vars - crvs)
def entropy(pmf):
"""
Compute the specified entropy.
Parameters
----------
pmf : np.ndarray
The joint probability distribution.
Returns
-------
h : float
The entropy.
"""
pmf_joint = pmf.sum(axis=idx_joint, keepdims=True)
pmf_crvs = pmf_joint.sum(axis=idx_crvs, keepdims=True)
h_joint = self._h(pmf_joint)
h_crvs = self._h(pmf_crvs)
ch = h_joint - h_crvs
return ch
return entropy
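# Added note: the returned closure computes H[X|Y] via the chain rule H[X|Y] = H[X,Y] - H[Y].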
def _mutual_information(self, rv_x, rv_y):
"""
Compute the mutual information, :math:`I[X:Y]`.
Parameters
----------
rv_x : collection
The indices to consider as the X variable.
rv_y : collection
The indices to consider as the Y variable.
Returns
-------
mi : func
The mutual information.
"""
idx_xy = tuple(self._all_vars - (rv_x | rv_y))
idx_x = tuple(self._all_vars - rv_x)
idx_y = tuple(self._all_vars - rv_y)
def mutual_information(pmf):
"""
Compute the specified mutual information.
Parameters
----------
pmf : np.ndarray
The joint probability distribution.
Returns
-------
mi : float
The mutual information.
"""
pmf_xy = pmf.sum(axis=idx_xy, keepdims=True)
pmf_x = pmf_xy.sum(axis=idx_x, keepdims=True)
pmf_y = pmf_xy.sum(axis=idx_y, keepdims=True)
mi = np.nansum(pmf_xy * np.log2(pmf_xy / (pmf_x * pmf_y)))
return mi
return mutual_information
def _conditional_mutual_information(self, rv_x, rv_y, rv_z):
"""
Compute the conditional mutual information, :math:`I[X:Y|Z]`.
Parameters
----------
rv_x : collection
The indices to consider as the X variable.
rv_y : collection
The indices to consider as the Y variable.
rv_z : collection
The indices to consider as the Z variable.
Returns
-------
cmi : func
The conditional mutual information.
"""
if not rv_z:
return self._mutual_information(rv_x=rv_x, rv_y=rv_y)
idx_xyz = tuple(self._all_vars - (rv_x | rv_y | rv_z))
idx_xz = tuple(self._all_vars - (rv_x | rv_z))
idx_yz = tuple(self._all_vars - (rv_y | rv_z))
idx_z = tuple(self._all_vars - rv_z)
def conditional_mutual_information(pmf):
"""
Compute the specified conditional mutual information.
Parameters
----------
pmf : np.ndarray
The joint probability distribution.
Returns
-------
cmi : float
The conditional mutual information.
"""
pmf_xyz = pmf.sum(axis=idx_xyz, keepdims=True)
pmf_xz = pmf_xyz.sum(axis=idx_xz, keepdims=True)
pmf_yz = pmf_xyz.sum(axis=idx_yz, keepdims=True)
pmf_z = pmf_xz.sum(axis=idx_z, keepdims=True)
cmi = np.nansum(pmf_xyz * np.log2(pmf_z * pmf_xyz / pmf_xz / pmf_yz))
return cmi
return conditional_mutual_information
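# Added note: the quantity computed above is the standard identity
# I[X:Y|Z] = H[X,Z] + H[Y,Z] - H[X,Y,Z] - H[Z], evaluated as the sum of
# p(x,y,z) * log2( p(z) p(x,y,z) / (p(x,z) p(y,z)) ) over the joint pmf.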
def _coinformation(self, rvs, crvs=None):
"""
Compute the coinformation.
Parameters
----------
rvs : set
The random variables to compute the coinformation of.
crvs : set
The random variables to condition on.
Returns
-------
ci : func
The coinformation.
"""
if crvs is None:
crvs = set()
idx_joint = tuple(self._all_vars - (rvs | crvs))
idx_crvs = tuple(self._all_vars - crvs)
idx_subrvs = [tuple(self._all_vars - set(ss)) for ss in sorted(powerset(rvs), key=len)[1:-1]]
power = [(-1)**len(ss) for ss in sorted(powerset(rvs), key=len)[1:-1]]
power += [(-1)**len(rvs)]
power += [-sum(power)]
def coinformation(pmf):
"""
Compute the specified co-information.
Parameters
----------
pmf : np.ndarray
The joint probability distribution.
Returns
-------
ci : float
The co-information.
"""
pmf_joint = pmf.sum(axis=idx_joint, keepdims=True)
pmf_crvs = pmf_joint.sum(axis=idx_crvs, keepdims=True)
pmf_subrvs = [pmf_joint.sum(axis=idx, keepdims=True) for idx in idx_subrvs] + [pmf_joint, pmf_crvs]
pmf_ci = reduce(np.multiply, [pmf**p for pmf, p in zip(pmf_subrvs, power)])
ci = np.nansum(pmf_joint * np.log2(pmf_ci))
return ci
return coinformation
def _total_correlation(self, rvs, crvs=None):
"""
Compute the total correlation.
Parameters
----------
rvs : set
The random variables to compute the total correlation of.
crvs : set
The random variables to condition on.
Returns
-------
tc : func
The total correlation.
"""
if crvs is None:
crvs = set()
idx_joint = tuple(self._all_vars - (rvs | crvs))
idx_margs = [tuple(self._all_vars - ({rv} | crvs)) for rv in rvs]
idx_crvs = tuple(self._all_vars - crvs)
n = len(rvs) - 1
def total_correlation(pmf):
"""
Compute the specified total correlation.
Parameters
----------
pmf : np.ndarray
The joint probability distribution.
Returns
-------
tc : float
The total correlation.
"""
pmf_joint = pmf.sum(axis=idx_joint, keepdims=True)
pmf_margs = [pmf_joint.sum(axis=marg, keepdims=True) for marg in idx_margs]
pmf_crvs = pmf_margs[0].sum(axis=idx_crvs, keepdims=True)
h_crvs = self._h(pmf_crvs.ravel())
h_margs = sum(self._h(p.ravel()) for p in pmf_margs)
h_joint = self._h(pmf_joint.ravel())
tc = h_margs - h_joint - n * h_crvs
return tc
return total_correlation
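# Added note: with n = len(rvs) - 1, the expression above is equivalent to the usual
# conditional total correlation TC[X_1 : ... : X_k | Z] = sum_i H[X_i|Z] - H[X_1,...,X_k|Z],
# rewritten in terms of the joint entropies that include Z.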
def _dual_total_correlation(self, rvs, crvs=None):
"""
Compute the dual total correlation.
Parameters
----------
rvs : set
The random variables to compute the dual total correlation of.
crvs : set
The random variables to condition on.
Returns
-------
dtc : func
The dual total correlation.
"""
if crvs is None:
crvs = set()
idx_joint = tuple(self._all_vars - (rvs | crvs))
idx_margs = [tuple(self._all_vars - ((rvs - {rv}) | crvs)) for rv in rvs]
idx_crvs = tuple(self._all_vars - crvs)
n = len(rvs) - 1
def dual_total_correlation(pmf):
"""
Compute the specified dual total correlation.
Parameters
----------
pmf : np.ndarray
The joint probability distribution.
Returns
-------
dtc : float
The dual total correlation.
"""
pmf_joint = pmf.sum(axis=idx_joint, keepdims=True)
pmf_margs = [pmf_joint.sum(axis=marg, keepdims=True) for marg in idx_margs]
pmf_crvs = pmf_joint.sum(axis=idx_crvs, keepdims=True)
h_crvs = self._h(pmf_crvs)
h_joint = self._h(pmf_joint) - h_crvs
h_margs = [self._h(marg) - h_crvs for marg in pmf_margs]
dtc = sum(h_margs) - n * h_joint
return dtc
return dual_total_correlation
def _caekl_mutual_information(self, rvs, crvs=None):
"""
Compute the CAEKL mutual information.
Parameters
----------
rvs : set
The random variables to compute the CAEKL mutual information of.
crvs : set
The random variables to condition on.
Returns
-------
caekl : func
The CAEKL mutual information.
"""
if crvs is None:
crvs = set()
parts = [p for p in partitions(rvs) if len(p) > 1]
idx_parts = {}
for part in parts:
for p in part:
if p not in idx_parts:
idx_parts[p] = tuple(self._all_vars - (p | crvs))
part_norms = [len(part) - 1 for part in parts]
idx_joint = tuple(self._all_vars - (rvs | crvs))
idx_crvs = tuple(self._all_vars - crvs)
def caekl_mutual_information(pmf):
"""
Compute the specified CAEKL mutual information.
Parameters
----------
pmf : np.ndarray
The joint probability distribution.
Returns
-------
caekl : float
The CAEKL mutual information.
"""
pmf_joint = pmf.sum(axis=idx_joint, keepdims=True)
pmf_parts = {p: pmf_joint.sum(axis=idx, keepdims=True) for p, idx in idx_parts.items()}
pmf_crvs = pmf_joint.sum(axis=idx_crvs, keepdims=True)
h_crvs = self._h(pmf_crvs)
h_joint = self._h(pmf_joint) - h_crvs
pairs = zip(parts, part_norms)
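            # J(X | C) = min over non-trivial partitions pi of
            # (sum_{B in pi} H(X_B | C) - H(X | C)) / (|pi| - 1);
            # each candidate below is one partition's normalized value.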
candidates = [(sum(self._h(pmf_parts[p]) - h_crvs for p in part) - h_joint) / norm for part, norm in pairs]
caekl = min(candidates)
return caekl
return caekl_mutual_information
def _maximum_correlation(self, rv_x, rv_y):
"""
Compute the maximum correlation.
Parameters
----------
rv_x : collection
The index to consider as the X variable.
rv_y : collection
The index to consider as the Y variable.
Returns
-------
mc : func
The maximum correlation.
"""
idx_xy = tuple(self._all_vars - (rv_x | rv_y))
idx_x = tuple(self._all_vars - rv_x)
idx_y = tuple(self._all_vars - rv_y)
def maximum_correlation(pmf):
"""
Compute the specified maximum correlation.
Parameters
----------
pmf : np.ndarray
The joint probability distribution.
Returns
-------
            mc : float
                The maximum correlation.
"""
pmf_xy = pmf.sum(axis=idx_xy)
pmf_x = pmf.sum(axis=idx_x)[:, np.newaxis]
pmf_y = pmf.sum(axis=idx_y)[np.newaxis, :]
Q = pmf_xy / (np.sqrt(pmf_x) * np.sqrt(pmf_y))
Q[np.isnan(Q)] = 0
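            # The leading singular value of Q is always 1 (attained by the
            # square-root-marginal vectors), so the maximum correlation is
            # the second singular value.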
mc = svdvals(Q)[1]
return mc
return maximum_correlation
def _conditional_maximum_correlation(self, rv_x, rv_y, rv_z):
"""
Compute the conditional maximum correlation.
Parameters
----------
rv_x : collection
The index to consider as the X variable.
rv_y : collection
The index to consider as the Y variable.
rv_z : collection
The index to consider as the Z variable.
Returns
-------
cmc : func
The conditional maximum correlation.
"""
idx_xyz = tuple(self._all_vars - (rv_x | rv_y | rv_z))
idx_xz = tuple(self._all_vars - (rv_x | rv_z))
idx_yz = tuple(self._all_vars - (rv_y | rv_z))
def conditional_maximum_correlation(pmf):
"""
            Compute the specified conditional maximum correlation.
Parameters
----------
pmf : np.ndarray
The joint probability distribution.
Returns
-------
            cmc : float
                The conditional maximum correlation.
"""
p_xyz = pmf.sum(axis=idx_xyz)
p_xz = pmf.sum(axis=idx_xz)[:, np.newaxis, :]
p_yz = pmf.sum(axis=idx_yz)[np.newaxis, :, :]
Q = np.where(p_xyz, p_xyz / (np.sqrt(p_xz * p_yz)), 0)
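            # For each value of Z take the second singular value of the
            # corresponding slice (its leading singular value is again 1),
            # then keep the largest over Z.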
cmc = max(svdvals(np.squeeze(m))[1] for m in np.dsplit(Q, Q.shape[2]))
return cmc
return conditional_maximum_correlation
def _total_variation(self, rv_x, rv_y):
"""
Compute the total variation, :math:`TV[X||Y]`.
Parameters
----------
rv_x : collection
The indices to consider as the X variable.
rv_y : collection
The indices to consider as the Y variable.
Returns
-------
tv : func
The total variation.
Note
----
The pmfs are assumed to be over the same alphabet.
"""
idx_xy = tuple(self._all_vars - (rv_x | rv_y))
idx_x = tuple(self._all_vars - rv_x)
idx_y = tuple(self._all_vars - rv_y)
def total_variation(pmf):
"""
Compute the specified total variation.
Parameters
----------
pmf : np.ndarray
The joint probability distribution.
Returns
-------
tv : float
The total variation.
"""
pmf_xy = pmf.sum(axis=idx_xy, keepdims=True)
pmf_x = pmf_xy.sum(axis=idx_x)
pmf_y = pmf_xy.sum(axis=idx_y)
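            # TV[X||Y] = (1/2) * sum_a |p_X(a) - p_Y(a)|, with both marginals
            # taken over the shared alphabet (see the Note above).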
tv = abs(pmf_x - pmf_y).sum() / 2
return tv
return total_variation
###########################################################################
# Optimization methods.
def optimize(self, x0=None, niter=None, maxiter=None, polish=1e-6, callback=False):
"""
Perform the optimization.
Parameters
----------
x0 : np.ndarray
An initial optimization vector.
niter : int
The number of optimization iterations to perform.
maxiter : int
The number of steps for an optimization subroutine to perform.
polish : float
The threshold for valid optimization elements. If 0, no polishing is
performed.
callback : bool
Whether to use a callback to track the performance of the optimization.
Generally, this should be False as it adds some significant time to the
optimization.
Returns
-------
result : OptimizeResult
The result of the optimization.
"""
try:
callable(self.objective)
except AttributeError:
self.objective = MethodType(self._objective(), self)
x0 = x0.copy() if x0 is not None else self.construct_initial()
icb = BasinHoppingInnerCallBack() if callback else None
minimizer_kwargs = {'bounds': [(0, 1)] * x0.size,
'callback': icb,
'constraints': self.constraints,
'options': {},
}
try: # pragma: no cover
if callable(self._jacobian):
minimizer_kwargs['jac'] = self._jacobian
else: # compute jacobians for objective, constraints using numdifftools
import numdifftools as ndt
minimizer_kwargs['jac'] = ndt.Jacobian(self.objective)
for const in minimizer_kwargs['constraints']:
const['jac'] = ndt.Jacobian(const['fun'])
except AttributeError:
pass
additions = deepcopy(self._additional_options)
options = additions.pop('options', {})
minimizer_kwargs['options'].update(options)
minimizer_kwargs.update(additions)
if maxiter:
minimizer_kwargs['options']['maxiter'] = maxiter
self._callback = BasinHoppingCallBack(minimizer_kwargs.get('constraints', {}), icb)
result = self._optimization_backend(x0, minimizer_kwargs, niter)
if result:
self._optima = result.x
else: # pragma: no cover
msg = "No optima found."
raise OptimizationException(msg)
if polish:
self._polish(cutoff=polish)
return result
def _optimize_shotgun(self, x0, minimizer_kwargs, niter):
"""
Perform a non-convex optimization. This uses a "shotgun" approach,
minimizing several random initial conditions and selecting the minimal
result.
Parameters
----------
x0 : ndarray, None
Initial optimization vector. If None, use a random vector.
minimizer_kwargs : dict
A dictionary of keyword arguments to pass to the optimizer.
niter : int
If applicable, the number of iterations to make.
Returns
-------
result : OptimizeResult, None
The result of the optimization. Returns None if the optimization
failed.
TODO
----
* Rather than random initial conditions, use latin hypercube sampling.
* Make parallel in some fashion (threads, processes).
"""
if niter is None:
niter = self._default_hops
results = []
if x0 is not None:
res = minimize(fun=self.objective,
x0=x0,
**minimizer_kwargs
)
if res.success:
results.append(res)
niter -= 1
ics = (self.construct_random_initial() for _ in range(niter))
for initial in ics:
res = minimize(fun=self.objective,
x0=initial,
**minimizer_kwargs
)
if res.success:
results.append(res)
try:
result = min(results, key=lambda r: self.objective(r.x))
except ValueError: # pragma: no cover
result = None
return result
def _polish(self, cutoff=1e-6):
"""
Improve the solution found by the optimizer.
Parameters
----------
cutoff : float
Set probabilities lower than this to zero, reducing the total
optimization dimension.
"""
x0 = self._optima.copy()
count = (x0 < cutoff).sum()
x0[x0 < cutoff] = 0
x0[x0 > 1 - cutoff] = 1
lb = np.array([1 if np.isclose(x, 1) else 0 for x in x0])
ub = np.array([0 if np.isclose(x, 0) else 1 for x in x0])
minimizer_kwargs = {
'bounds': Bounds(lb, ub),
'tol': None,
'callback': None,
'constraints': self.constraints,
}
try: # pragma: no cover
if callable(self._jacobian):
minimizer_kwargs['jac'] = self._jacobian
else: # compute jacobians for objective, constraints using numdifftools
import numdifftools as ndt
minimizer_kwargs['jac'] = ndt.Jacobian(self.objective)
for const in minimizer_kwargs['constraints']:
const['jac'] = ndt.Jacobian(const['fun'])
except AttributeError:
pass
res = minimize(
fun=self.objective,
x0=x0,
**minimizer_kwargs
)
if res.success:
self._optima = res.x.copy()
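            # If polishing pushed additional probabilities below the cutoff,
            # recurse so they get zeroed out as well.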
if count < (res.x < cutoff).sum():
self._polish(cutoff=cutoff)
class BaseConvexOptimizer(BaseOptimizer):
"""
Implement convex optimization.
"""
def _optimization_backend(self, x0, minimizer_kwargs, niter):
"""
Perform a convex optimization.
Parameters
----------
x0 : ndarray, None
Initial optimization vector. If None, use a random vector.
minimizer_kwargs : dict
A dictionary of keyword arguments to pass to the optimizer.
niter : int
If applicable, the number of iterations to make.
Returns
-------
result : OptimizeResult, None
The result of the optimization. Returns None if the optimization failed.
"""
if niter is None:
# even though this is convex, there might still be some optimization
# issues, so we use niter > 1.
niter = 2
return self._optimize_shotgun(x0, minimizer_kwargs, niter=niter)
class BaseNonConvexOptimizer(BaseOptimizer):
"""
Implement non-convex optimization.
"""
_shotgun = False
def _optimization_basinhopping(self, x0, minimizer_kwargs, niter):
"""
Perform a non-convex optimization. This uses scipy.optimize.basinhopping,
a simulated annealing-like algorithm.
Parameters
----------
x0 : ndarray, None
Initial optimization vector. If None, use a random vector.
minimizer_kwargs : dict
A dictionary of keyword arguments to pass to the optimizer.
niter : int
If applicable, the number of iterations to make.
Returns
-------
result : OptimizeResult, None
The result of the optimization. Returns None if the optimization failed.
"""
if niter is None:
niter = self._default_hops
if self._shotgun:
res_shotgun = self._optimize_shotgun(x0.copy(), minimizer_kwargs, self._shotgun)
if res_shotgun:
x0 = res_shotgun.x.copy()
else:
res_shotgun = None
result = basinhopping(func=self.objective,
x0=x0,
minimizer_kwargs=minimizer_kwargs,
niter=niter,
accept_test=accept_test,
callback=self._callback,
)
success, _ = basinhop_status(result)
if not success: # pragma: no cover
result = self._callback.minimum() or res_shotgun
return result
def _optimization_diffevo(self, x0, minimizer_kwargs, niter): # pragma: no cover
"""
        Perform a non-convex optimization using scipy.optimize.differential_evolution.
        Parameters
----------
x0 : np.ndarray
An optimization vector.
minimizer_kwargs : dict
A dictionary of keyword arguments to pass to the optimizer.
niter : int
If applicable, the number of iterations to make.
Returns
-------
result : OptimizeResult, None
The result of the optimization. Returns None if the optimization
failed.
"""
if 'constraints' in minimizer_kwargs:
msg = "Differential Evolution can only be used in unconstrained optimization."
raise OptimizationException(msg)
if niter is None:
niter = self._default_hops
result = differential_evolution(func=self.objective,
bounds=minimizer_kwargs['bounds'],
maxiter=minimizer_kwargs['options']['maxiter'],
popsize=niter,
tol=minimizer_kwargs['options']['ftol'],
)
if result.success:
return result
def _optimization_shgo(self, x0, minimizer_kwargs, niter):
"""
Perform a non-convex optimization. This uses the relatively new
scipy.optimize.shgo.
Parameters
----------
x0 : ndarray, None
Initial optimization vector. If None, use a random vector.
minimizer_kwargs : dict
A dictionary of keyword arguments to pass to the optimizer.
niter : int
If applicable, the number of iterations to make.
Returns
-------
result : OptimizeResult, None
The result of the optimization. Returns None if the optimization failed.
"""
if niter is None:
niter = self._default_hops
result = shgo(func=self.objective,
bounds=minimizer_kwargs['bounds'],
constraints=minimizer_kwargs['constraints'],
iters=niter,
)
if result.success: # pragma: no cover
return result
_optimization_backend = _optimization_basinhopping
AuxVar = namedtuple('AuxVar', ['bases', 'bound', 'shape', 'mask', 'size'])
class BaseAuxVarOptimizer(BaseNonConvexOptimizer):
"""
Base class that performs many methods related to optimizing auxiliary variables.
"""
###########################################################################
# Register the auxiliary variables.
def _construct_auxvars(self, auxvars):
"""
Register the auxiliary variables.
Parameters
----------
auxvars : [(tuple, int)]
The bases and bounds for each auxiliary variable.
"""
self._aux_vars = []
for bases, bound in auxvars:
shape = [self._shape[i] for i in bases] + [bound]
mask = np.ones(shape) / bound
self._aux_vars.append(AuxVar(bases, bound, shape, mask, prod(shape)))
self._shape += (bound,)
self._full_shape += (bound,)
self._all_vars |= {len(self._all_vars)}
self._arvs = self._all_vars - (self._rvs | self._crvs)
self._aux_bounds = [av.bound for av in self._aux_vars]
self._optvec_size = sum(av.size for av in self._aux_vars)
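        # Heuristic: scale the default number of basin hops with the number
        # of joint auxiliary-variable bound combinations.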
self._default_hops = prod(self._aux_bounds)
self._parts = list(pairwise(np.cumsum([0] + [av.size for av in self._aux_vars])))
self._construct_slices()
if len(self._aux_vars) == 1:
self.construct_joint = self._construct_joint_single
def _construct_slices(self):
"""
Construct the slices used to construct the joint pmf.
"""
arvs = sorted(self._arvs)
self._full_slices = []
for i, (auxvar, var) in enumerate(zip(self._aux_vars, arvs)):
relevant_vars = {self._n + b for b in auxvar.bases}
index = sorted(self._full_vars) + [self._n + a for a in arvs[:i + 1]]
var += self._n
self._full_slices.append(tuple(colon if i in relevant_vars | {var} else np.newaxis for i in index))
self._slices = []
for i, (auxvar, var) in enumerate(zip(self._aux_vars, arvs)):
relevant_vars = auxvar.bases
index = sorted(self._rvs | self._crvs | set(arvs[:i + 1]))
self._slices.append(tuple(colon if i in relevant_vars | {var} else np.newaxis for i in index))
###########################################################################
# Constructing the joint distribution.
def _construct_channels(self, x):
"""
Construct the conditional distributions which produce the
auxiliary variables.
Parameters
----------
x : np.ndarray
An optimization vector
Yields
------
channel : np.ndarray
A conditional distribution.
"""
parts = [x[a:b] for a, b in self._parts]
for part, auxvar in zip(parts, self._aux_vars):
channel = part.reshape(auxvar.shape)
channel /= channel.sum(axis=(-1,), keepdims=True)
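            # Rows that summed to zero become NaN after normalization;
            # replace them with the uniform mask so each row remains a
            # valid conditional distribution.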
channel[np.isnan(channel)] = auxvar.mask[np.isnan(channel)]
yield channel
def construct_joint(self, x):
"""
Construct the joint distribution.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
joint : np.ndarray
The joint distribution resulting from the distribution passed
in and the optimization vector.
"""
joint = self._pmf
channels = self._construct_channels(x.copy())
for channel, slc in zip(channels, self._slices):
joint = joint[..., np.newaxis] * channel[slc]
return joint
def _construct_joint_single(self, x):
"""
Construct the joint distribution.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
joint : np.ndarray
The joint distribution resulting from the distribution passed
in and the optimization vector.
"""
_, _, shape, mask, _ = self._aux_vars[0]
channel = x.copy().reshape(shape)
channel /= channel.sum(axis=-1, keepdims=True)
channel[np.isnan(channel)] = mask[np.isnan(channel)]
joint = self._pmf[..., np.newaxis] * channel[self._slices[0]]
return joint
def construct_full_joint(self, x):
"""
Construct the joint distribution.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
joint : np.ndarray
The joint distribution resulting from the distribution passed
in and the optimization vector.
"""
joint = self._full_pmf
channels = self._construct_channels(x.copy())
for channel, slc in zip(channels, self._full_slices):
joint = joint[..., np.newaxis] * channel[slc]
return joint
###########################################################################
# Various initial conditions
def construct_random_initial(self):
"""
Construct a random optimization vector.
Returns
-------
x : np.ndarray
A random optimization vector.
"""
vecs = []
for av in self._aux_vars:
vec = sample_simplex(av.shape[-1], prod(av.shape[:-1]))
vecs.append(vec.ravel())
return np.concatenate(vecs, axis=0)
def construct_uniform_initial(self):
"""
Construct a uniform optimization vector.
Returns
-------
x : np.ndarray
A uniform optimization vector.
"""
vecs = []
for av in self._aux_vars:
vec = np.ones(av.shape) / av.bound
vecs.append(vec.ravel())
return np.concatenate(vecs, axis=0)
def construct_copy_initial(self):
"""
Construct a copy optimization vector.
Returns
-------
x : np.ndarray
A copy optimization vector.
"""
vecs = []
for av in self._aux_vars:
shape = [prod(av.shape[:-1]), av.shape[-1]]
vec = np.eye(*shape) / av.bound
vecs.append(vec.ravel())
return np.concatenate(vecs, axis=0)
def construct_constant_initial(self):
"""
Construct a constant optimization vector.
Returns
-------
x : np.ndarray
A constant optimization vector.
"""
vecs = []
for av in self._aux_vars:
vec = np.zeros(av.shape)
vec[..., 0] = 1
vecs.append(vec.ravel())
return np.concatenate(vecs, axis=0)
# Default to using a random initial condition:
construct_initial = construct_random_initial
###########################################################################
# Construct the optimized distribution.
def construct_distribution(self, x=None, cutoff=1e-5):
"""
Construct the distribution.
Parameters
----------
x : np.ndarray, None
An optimization vector. If None, use `self._optima`.
cutoff : float
Ignore probabilities smaller than this.
Returns
-------
d : Distribution
The original distribution, plus its auxiliary variables.
"""
if x is None:
x = self._optima
alphabets = list(self._alphabet)
try:
# if all outcomes are strings, make new variable strings too.
''.join(flatten(alphabets))
for bound in self._aux_bounds:
alphabets += [(digits + ascii_letters)[:bound]]
string = True
except TypeError:
for bound in self._aux_bounds:
alphabets += [list(range(bound))]
string = False
joint = self.construct_full_joint(x)
outcomes, pmf = zip(*[(o, p) for o, p in np.ndenumerate(joint) if p > cutoff])
# normalize, in case cutoffs removed a significant amount of pmf
pmf = np.asarray(pmf)
pmf /= pmf.sum()
d = Distribution(outcomes, pmf)
mapping = {}
for i, unq in zip(sorted(self._n + i for i in self._rvs | self._crvs), self._unqs):
if len(unq.inverse) > 1:
n = d.outcome_length()
d = insert_rvf(d, lambda o: unq.inverse[o[i]])
mapping[i] = tuple(range(n, n + len(unq.inverse[0])))
new_map = {}
for rv, rvs in zip(sorted(self._rvs), self._true_rvs):
i = rv + self._n
for a, b in zip(rvs, mapping[i]):
new_map[a] = b
mapping = [[(new_map[i] if i in new_map else i) for i in range(len(self._full_shape))
if i not in self._proxy_vars]]
d = d.coalesce(mapping, extract=True)
if string:
try:
d = modify_outcomes(d, lambda o: ''.join(map(str, o)))
except ditException:
pass
return d
###########################################################################
# Constraints which may or may not be useful in certain contexts.
def _constraint_deterministic(self):
"""
Constructor for a constraint which specifies that auxiliary variables
must be a deterministic function of the random variables.
Returns
-------
constraint : func
The constraint function.
"""
entropy = self._entropy(self._arvs, self._rvs | self._crvs)
def constraint(x):
"""
Constraint which ensure that the auxiliary variables are a function
of the random variables.
Parameters
----------
x : np.ndarray
An optimization vector.
            Returns
            -------
            closeness : float
                The squared conditional entropy H(auxiliary | rvs, crvs);
                zero exactly when the constraint is satisfied.
            """
pmf = self.construct_joint(x)
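            # H(auxiliary | rvs, crvs) is zero exactly when the auxiliary
            # variables are deterministic functions of the originals;
            # squaring keeps the equality constraint smooth and non-negative.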
return entropy(pmf) ** 2
return constraint
###########################################################################
    # TODO: make these work
def _channel_capacity(self, x): # pragma: no cover
"""
Compute the channel capacity of the mapping z -> z_bar.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
ccd : [float]
The channel capacity of each auxiliary variable.
TODO
----
Make this compute the channel capacity of any/all auxvar(s)
"""
ccs = []
for channel in self._construct_channels(x):
ccs.append(channel_capacity(channel)[0])
return ccs
def _post_process(self, style='entropy', minmax='min', niter=10, maxiter=None): # pragma: no cover
"""
Find a solution to the minimization with a secondary property.
Parameters
----------
style : 'entropy', 'channel'
The measure to perform the secondary optimization on. If 'entropy',
the entropy of z_bar is optimized. If 'channel', the channel capacity
of p(z_bar|z) is optimized.
minmax : 'min', 'max'
Whether to minimize or maximize the objective.
niter : int
The number of basin hops to perform.
maxiter : int
The number of minimization steps to perform.
Notes
-----
This seems to not work well. Presumably the channel space, once
restricted to matching the correct objective, is very fragmented.
TODO
----
This should really use some sort of multiobjective optimization.
"""
entropy = self._entropy(self._arvs)
def objective_entropy(x):
"""
Post-process the entropy.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
ent : float
The entropy.
"""
ent = entropy(self.construct_joint(x))
return sign * ent
def objective_channelcapacity(x):
"""
Post-process the channel capacity.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
cc : float
The sum of the channel capacities.
"""
cc = sum(self._channel_capacity(x))
return sign * cc
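        # The optimizer always minimizes, so maximization of the secondary
        # objective is handled by flipping its sign.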
sign = +1 if minmax == 'min' else -1
if style == 'channel':
objective = objective_channelcapacity
elif style == 'entropy':
objective = objective_entropy
else:
msg = "Style {} is not understood.".format(style)
raise OptimizationException(msg)
true_objective = self.objective(self._optima)
def constraint_match_objective(x):
"""
Constraint to ensure that the new solution is not worse than that
found before.
Parameters
----------
x : np.ndarray
An optimization vector.
Returns
-------
obj : float
The l2 deviation of the current objective from the true.
"""
obj = (self.objective(x) - true_objective)**2
return obj
constraint = [{'type': 'eq',
'fun': constraint_match_objective,
}]
# set up required attributes
try:
self.constraints += constraint
except AttributeError:
self.constraints = constraint
self.__old_objective, self.objective = self.objective, objective
self.optimize(x0=self._optima.copy(), niter=niter, maxiter=maxiter)
# and remove them again.
self.constraints = self.constraints[:-1]
if not self.constraints:
del self.constraints
self.objective = self.__old_objective
del self.__old_objective
|
|
"""Represent the main entry point for the pipeline tool."""
# Copyright (c) 2017 Thomas Lehmann
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
# to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# pylint: disable=too-many-instance-attributes
import sys
import platform
import os
import logging
import multiprocessing
import click
from spline.matrix import Matrix, MatrixProcessData
from spline.pipeline import Pipeline
from spline.components.hooks import Hooks
from spline.components.config import ApplicationOptions
from spline.tools.loader import Loader
from spline.tools.logger import Logger
from spline.tools.filters import find_matrix
from spline.tools.event import Event
from spline.tools.report.collector import Collector
from spline.tools.version import VersionsCheck, VersionsReport
from spline.validation import Validator
class Application(object):
"""Pipeline application."""
def __init__(self, options):
"""
Initialize application with command line options.
Args:
options (ApplicationOptions): given command line options.
"""
self.event = Event.create(__name__)
self.options = options
self.logging_level = logging.DEBUG
self.setup_logging()
self.logger = Logger.get_logger(__name__)
def setup_logging(self):
"""Setup of application logging."""
is_custom_logging = len(self.options.logging_config) > 0
is_custom_logging = is_custom_logging and os.path.isfile(self.options.logging_config)
is_custom_logging = is_custom_logging and not self.options.dry_run
if is_custom_logging:
Logger.configure_by_file(self.options.logging_config)
else:
logging_format = "%(asctime)-15s - %(name)s - %(message)s"
if self.options.dry_run:
logging_format = "%(name)s - %(message)s"
Logger.configure_default(logging_format, self.logging_level)
def validate_document(self, definition):
"""
Validate given pipeline document.
        The method tries to load, parse, and validate the spline document.
        The validator verifies the Python structure, B{not} the file format.
Args:
definition (str): path and filename of a yaml file containing a valid spline definition.
Returns:
dict: loaded and validated spline document.
Note:
            If validation fails, the application exits!
See Also:
spline.validation.Validator
"""
initial_document = {}
try:
initial_document = Loader.load(definition)
except RuntimeError as exception:
self.logger.error(str(exception))
sys.exit(1)
document = Validator().validate(initial_document)
if document is None:
self.logger.info("Schema validation for '%s' has failed", definition)
sys.exit(1)
self.logger.info("Schema validation for '%s' succeeded", definition)
return document
def run_matrix(self, matrix_definition, document):
"""
        Running the pipeline via a matrix.
Args:
matrix_definition (dict): one concrete matrix item.
document (dict): spline document (complete) as loaded from yaml file.
"""
matrix = Matrix(matrix_definition, 'matrix(parallel)' in document)
process_data = MatrixProcessData()
process_data.options = self.options
process_data.pipeline = document['pipeline']
process_data.model = {} if 'model' not in document else document['model']
process_data.hooks = Hooks(document)
return matrix.process(process_data)
def shutdown(self, collector, success):
"""Shutdown of the application."""
self.event.delegate(success)
if collector is not None:
collector.queue.put(None)
collector.join()
if not success:
sys.exit(1)
def run(self, definition):
"""Processing the pipeline."""
self.logger.info("Running with Python %s", sys.version.replace("\n", ""))
self.logger.info("Running on platform %s", platform.platform())
self.logger.info("Current cpu count is %d", multiprocessing.cpu_count())
self.logger.info("Processing pipeline definition '%s'", definition)
document = self.validate_document(definition)
if self.options.validate_only:
self.logger.info("Stopping after validation as requested!")
return []
self.provide_temporary_scripts_path()
versions = VersionsCheck().process(document)
VersionsReport().process(versions)
collector = Application.create_and_run_collector(document, self.options)
matrix = find_matrix(document)
output = []
if len(matrix) == 0:
model = {} if 'model' not in document else document['model']
pipeline = Pipeline(model=model, options=self.options)
pipeline.hooks = Hooks(document)
result = pipeline.process(document['pipeline'])
else:
result = self.run_matrix(matrix, document)
output = result['output']
self.shutdown(collector, success=result['success'])
return output
def provide_temporary_scripts_path(self):
"""When configured trying to ensure that path does exist."""
if len(self.options.temporary_scripts_path) > 0:
if os.path.isfile(self.options.temporary_scripts_path):
self.logger.error("Error: configured script path seems to be a file!")
                # it's ok to exit here because this runs before the collector is started
sys.exit(1)
if not os.path.isdir(self.options.temporary_scripts_path):
os.makedirs(self.options.temporary_scripts_path)
@staticmethod
def create_and_run_collector(document, options):
"""Create and run collector process for report data."""
collector = None
if not options.report == 'off':
collector = Collector()
collector.store.configure(document)
Event.configure(collector_queue=collector.queue)
collector.start()
return collector
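# Example invocation (illustrative only; the entry point name is an
# assumption, but the flags are the ones defined below):
#   spline --definition=pipeline.yaml --tags=build,test --report=html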
@click.command()
@click.option('--definition', type=click.Path(exists=True, file_okay=True, dir_okay=False),
metavar='<path/filename>',
default='pipeline.yaml', help="Pipeline definition in yaml format (default: pipeline.yaml)")
@click.option('--tags', type=click.STRING, default='', metavar="tag[,tag,...]",
help="Comma separated list of tags for filtering individual tasks")
@click.option('--matrix-tags', type=click.STRING, default='', metavar="tag[,tag,...]",
help="Comma separated list of tags for filtering individual matrix runs")
@click.option('--validate-only', is_flag=True, default=False,
help="When used validates given pipeline definition only")
@click.option('--logging-config', default="", type=click.STRING,
help="Path and filename of logging configuration")
@click.option('--event-logging', is_flag=True, default=False,
help="When enabled then it does log event details")
@click.option('--dry-run', is_flag=True, default=False,
help="When enabled then no Bash script is executed but shown")
@click.option('--debug', is_flag=True, default=False,
help="When enabled then using 'set -x' for debugging Bash scripts")
@click.option('--strict', is_flag=True, default=False,
help="When enabled then using strict mode for Bash")
@click.option('--report', default='off', type=click.Choice(['off', 'html']),
help="Adjusting report and format (default: off)")
@click.option('--temporary-scripts-path', default='', type=str, metavar='<path>',
help="When not set using system temp path, otherwise defined one")
def main(**kwargs):
"""The Pipeline tool."""
options = ApplicationOptions(**kwargs)
Event.configure(is_logging_enabled=options.event_logging)
application = Application(options)
application.run(options.definition)
if __name__ == "__main__":
main()
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test suite for XenAPI."""
import functools
import json
import os
import re
import stubout
import ast
from nova import db
from nova import context
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.compute import instance_types
from nova.compute import power_state
from nova import exception
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import vm_utils
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs
from nova.tests import fake_utils
LOG = logging.getLogger('nova.tests.test_xenapi')
FLAGS = flags.FLAGS
def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
"""
vm_utils.with_vdi_attached_here needs to be stubbed out because it
calls down to the filesystem to attach a vdi. This provides a
decorator to handle that.
"""
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
orig_with_vdi_attached_here = vm_utils.with_vdi_attached_here
vm_utils.with_vdi_attached_here = lambda *x: should_return
function(self, *args, **kwargs)
vm_utils.with_vdi_attached_here = orig_with_vdi_attached_here
return decorated_function
class XenAPIVolumeTestCase(test.TestCase):
"""Unit tests for Volume operations."""
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass')
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
self.values = {'id': 1,
'project_id': self.user_id,
'user_id': 'fake',
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'local_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
def _create_volume(self, size='0'):
"""Create a volume object."""
vol = {}
vol['size'] = size
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['host'] = 'localhost'
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
return db.volume_create(self.context, vol)
def test_create_iscsi_storage(self):
"""This shows how to test helper classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = volume_utils.VolumeHelper
helper.XenAPI = session.get_imported_xenapi()
vol = self._create_volume()
info = helper.parse_volume_info(vol['id'], '/dev/sdc')
label = 'SR-%s' % vol['id']
description = 'Test-SR'
sr_ref = helper.create_iscsi_storage(session, info, label, description)
srs = xenapi_fake.get_all('SR')
self.assertEqual(sr_ref, srs[0])
db.volume_destroy(context.get_admin_context(), vol['id'])
def test_parse_volume_info_raise_exception(self):
"""This shows how to test helper classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = volume_utils.VolumeHelper
helper.XenAPI = session.get_imported_xenapi()
vol = self._create_volume()
# oops, wrong mount point!
self.assertRaises(volume_utils.StorageError,
helper.parse_volume_info,
vol['id'],
'/dev/sd')
db.volume_destroy(context.get_admin_context(), vol['id'])
def test_attach_volume(self):
"""This shows how to test Ops classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.values)
vm = xenapi_fake.create_vm(instance.name, 'Running')
result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc')
def check():
# check that the VM has a VBD attached to it
# Get XenAPI record for VBD
vbds = xenapi_fake.get_all('VBD')
vbd = xenapi_fake.get_record('VBD', vbds[0])
vm_ref = vbd['VM']
self.assertEqual(vm_ref, vm)
check()
def test_attach_volume_raise_exception(self):
"""This shows how to test when exceptions are raised."""
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.values)
xenapi_fake.create_vm(instance.name, 'Running')
self.assertRaises(Exception,
conn.attach_volume,
instance.name,
volume['id'],
'/dev/sdc')
def tearDown(self):
super(XenAPIVolumeTestCase, self).tearDown()
self.stubs.UnsetAll()
def reset_network(*args):
pass
def _find_rescue_vbd_ref(*args):
pass
class XenAPIVMTestCase(test.TestCase):
"""Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
self.network = utils.import_object(FLAGS.network_manager)
self.stubs = stubout.StubOutForTesting()
self.flags(xenapi_connection_url='test_url',
xenapi_connection_password='test_pass',
instance_name_template='%d')
xenapi_fake.reset()
xenapi_fake.create_local_srs()
xenapi_fake.create_local_pifs()
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs)
stubs.stubout_is_vdi_pv(self.stubs)
self.stubs.Set(vmops.VMOps, 'reset_network', reset_network)
self.stubs.Set(vmops.VMOps, '_find_rescue_vbd_ref',
_find_rescue_vbd_ref)
stubs.stub_out_vm_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
fake_utils.stub_out_utils_execute(self.stubs)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.get_connection(False)
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEquals(instances, [])
def test_get_diagnostics(self):
instance = self._create_instance()
self.conn.get_diagnostics(instance)
def test_instance_snapshot_fails_with_no_primary_vdi(self):
def create_bad_vbd(vm_ref, vdi_ref):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': 'fake',
'currently_attached': False}
vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd)
stubs.stubout_instance_snapshot(self.stubs)
instance = self._create_instance()
name = "MySnapshot"
self.assertRaises(exception.Error, self.conn.snapshot,
self.context, instance, name)
def test_instance_snapshot(self):
stubs.stubout_instance_snapshot(self.stubs)
instance = self._create_instance()
name = "MySnapshot"
template_vm_ref = self.conn.snapshot(self.context, instance, name)
def ensure_vm_was_torn_down():
vm_labels = []
for vm_ref in xenapi_fake.get_all('VM'):
vm_rec = xenapi_fake.get_record('VM', vm_ref)
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
self.assertEquals(vm_labels, ['1'])
def ensure_vbd_was_torn_down():
vbd_labels = []
for vbd_ref in xenapi_fake.get_all('VBD'):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
self.assertEquals(vbd_labels, ['1'])
def ensure_vdi_was_torn_down():
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
name_label = vdi_rec["name_label"]
self.assert_(not name_label.endswith('snapshot'))
def check():
ensure_vm_was_torn_down()
ensure_vbd_was_torn_down()
ensure_vdi_was_torn_down()
check()
def create_vm_record(self, conn, os_type, instance_id=1):
instances = conn.list_instances()
self.assertEquals(instances, [str(instance_id)])
# Get Nova record for VM
vm_info = conn.get_info(instance_id)
# Get XenAPI record for VM
vms = [rec for ref, rec
in xenapi_fake.get_all_records('VM').iteritems()
if not rec['is_control_domain']]
vm = vms[0]
self.vm_info = vm_info
self.vm = vm
def check_vm_record(self, conn, check_injection=False):
# Check that m1.large above turned into the right thing.
instance_type = db.instance_type_get_by_name(conn, 'm1.large')
mem_kib = long(instance_type['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = instance_type['vcpus']
self.assertEquals(self.vm_info['max_mem'], mem_kib)
self.assertEquals(self.vm_info['mem'], mem_kib)
self.assertEquals(self.vm['memory_static_max'], mem_bytes)
self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))
# Check that the VM is running according to Nova
self.assertEquals(self.vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to XenAPI.
self.assertEquals(self.vm['power_state'], 'Running')
if check_injection:
xenstore_data = self.vm['xenstore_data']
key = 'vm-data/networking/DEADBEEF0000'
xenstore_value = xenstore_data[key]
tcpip_data = ast.literal_eval(xenstore_value)
self.assertEquals(tcpip_data,
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00'})
def check_vm_params_for_windows(self):
self.assertEquals(self.vm['platform']['nx'], 'true')
self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')
# check that these are not set
self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], '')
self.assertEquals(self.vm['PV_kernel'], '')
self.assertEquals(self.vm['PV_ramdisk'], '')
def check_vm_params_for_linux(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
self.assertEquals(self.vm['PV_args'], '')
self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
# check that these are not set
self.assertEquals(self.vm['PV_kernel'], '')
self.assertEquals(self.vm['PV_ramdisk'], '')
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
def check_vm_params_for_linux_with_external_kernel(self):
self.assertEquals(self.vm['platform']['nx'], 'false')
self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
self.assertNotEquals(self.vm['PV_kernel'], '')
self.assertNotEquals(self.vm['PV_ramdisk'], '')
# check that these are not set
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
def _list_vdis(self):
url = FLAGS.xenapi_connection_url
username = FLAGS.xenapi_connection_username
password = FLAGS.xenapi_connection_password
session = xenapi_conn.XenAPISession(url, username, password)
return session.call_xenapi('VDI.get_all')
def _check_vdis(self, start_list, end_list):
for vdi_ref in end_list:
if not vdi_ref in start_list:
self.fail('Found unexpected VDI:%s' % vdi_ref)
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
hostname="test", architecture="x86-64", instance_id=1,
check_injection=False,
create_record=True, empty_dns=False):
stubs.stubout_loopingcall_start(self.stubs)
if create_record:
values = {'id': instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': image_ref,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'local_gb': 20,
'instance_type_id': instance_type_id,
'os_type': os_type,
'hostname': hostname,
'architecture': architecture}
instance = db.instance_create(self.context, values)
else:
instance = db.instance_get(self.context, instance_id)
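        # network_info is a list of (network, info) two-tuples, which appears
        # to be the format the XenAPI driver of this Nova era expects (see
        # also _create_instance below).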
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': True},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
if empty_dns:
network_info[0][1]['dns'] = []
self.conn.spawn(self.context, instance, network_info)
self.create_vm_record(self.conn, os_type, instance_id)
self.check_vm_record(self.conn, check_injection)
self.assertTrue(instance.os_type)
self.assertTrue(instance.architecture)
def test_spawn_empty_dns(self):
""""Test spawning with an empty dns list"""
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
empty_dns=True)
self.check_vm_params_for_linux()
def test_spawn_not_enough_memory(self):
self.assertRaises(exception.InsufficientFreeMemory,
self._test_spawn,
1, 2, 3, "4") # m1.xlarge
def test_spawn_fail_cleanup_1(self):
"""Simulates an error while downloading an image.
Verifies that VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
stubs.stubout_fetch_image_glance_disk(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
self._check_vdis(vdi_recs_start, vdi_recs_end)
def test_spawn_fail_cleanup_2(self):
"""Simulates an error while creating VM record.
It verifies that VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
stubs.stubout_create_vm(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
self._check_vdis(vdi_recs_start, vdi_recs_end)
@stub_vm_utils_with_vdi_attached_here
def test_spawn_raw_glance(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_linux(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_swapdisk(self):
# Change the default host_call_plugin to one that'll return
# a swap disk
orig_func = stubs.FakeSessionForVMTests.host_call_plugin
stubs.FakeSessionForVMTests.host_call_plugin = \
stubs.FakeSessionForVMTests.host_call_plugin_swap
try:
# We'll steal the above glance linux test
self.test_spawn_vhd_glance_linux()
finally:
# Make sure to put this back
stubs.FakeSessionForVMTests.host_call_plugin = orig_func
# We should have 2 VBDs.
self.assertEqual(len(self.vm['VBDs']), 2)
# Now test that we have 1.
self.tearDown()
self.setUp()
self.test_spawn_vhd_glance_linux()
self.assertEqual(len(self.vm['VBDs']), 1)
def test_spawn_vhd_glance_windows(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_iso_glance(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_ISO, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_netinject_file(self):
self.flags(flat_injected=True)
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _tee_handler(cmd, **kwargs):
input = kwargs.get('process_input', None)
self.assertNotEqual(input, None)
config = [line.strip() for line in input.split("\n")]
# Find the start of eth0 configuration and check it
index = config.index('auto eth0')
self.assertEquals(config[index + 1:index + 8], [
'iface eth0 inet static',
'address 192.168.0.100',
'netmask 255.255.255.0',
'broadcast 192.168.0.255',
'gateway 192.168.0.1',
'dns-nameservers 192.168.0.1',
''])
self._tee_executed = True
return '', ''
fake_utils.fake_execute_set_repliers([
# Capture the tee .../etc/network/interfaces command
(r'tee.*interfaces', _tee_handler),
])
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
check_injection=True)
self.assertTrue(self._tee_executed)
def test_spawn_netinject_xenstore(self):
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
# When mounting, create real files under the mountpoint to simulate
# files in the mounted filesystem
# mount point will be the last item of the command list
self._tmpdir = cmd[len(cmd) - 1]
LOG.debug(_('Creating files in %s to simulate guest agent' %
self._tmpdir))
os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
# Touch the file using open
open(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'), 'w').close()
return '', ''
def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
            # Umount would normally make files in the mounted filesystem
            # disappear, so do that here
LOG.debug(_('Removing simulated guest agent files in %s' %
self._tmpdir))
os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'))
os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
os.rmdir(os.path.join(self._tmpdir, 'usr'))
return '', ''
def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
self._tee_executed = True
return '', ''
fake_utils.fake_execute_set_repliers([
(r'mount', _mount_handler),
(r'umount', _umount_handler),
(r'tee.*interfaces', _tee_handler)])
self._test_spawn(1, 2, 3, check_injection=True)
# tee must not run in this case, where an injection-capable
# guest agent is detected
self.assertFalse(self._tee_executed)
def test_spawn_vlanmanager(self):
self.flags(image_service='nova.image.glance.GlanceImageService',
network_manager='nova.network.manager.VlanManager',
vlan_interface='fake0')
def dummy(*args, **kwargs):
pass
self.stubs.Set(vmops.VMOps, 'create_vifs', dummy)
# Reset network table
xenapi_fake.reset_table('network')
# Instance id = 2 will use vlan network (see db/fakes.py)
ctxt = self.context.elevated()
instance = self._create_instance(2, False)
networks = self.network.db.network_get_all(ctxt)
for network in networks:
self.network.set_network_host(ctxt, network)
self.network.allocate_for_instance(ctxt,
instance_id=2,
host=FLAGS.host,
vpn=None,
instance_type_id=1,
project_id=self.project_id)
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
instance_id=2,
create_record=False)
# TODO(salvatore-orlando): a complete test here would require
# a check for making sure the bridge for the VM's VIF is
# consistent with bridge specified in nova db
def test_spawn_with_network_qos(self):
self._create_instance()
for vif_ref in xenapi_fake.get_all('VIF'):
vif_rec = xenapi_fake.get_record('VIF', vif_ref)
self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
str(3 * 1024))
def test_rescue(self):
instance = self._create_instance()
conn = xenapi_conn.get_connection(False)
conn.rescue(self.context, instance, None, [])
def test_unrescue(self):
instance = self._create_instance()
conn = xenapi_conn.get_connection(False)
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(Exception, conn.unrescue, instance, None)
def test_revert_migration(self):
instance = self._create_instance()
class VMOpsMock():
def __init__(self):
self.revert_migration_called = False
def revert_migration(self, instance):
self.revert_migration_called = True
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
conn = xenapi_conn.get_connection(False)
conn._vmops = VMOpsMock()
conn.revert_migration(instance)
self.assertTrue(conn._vmops.revert_migration_called)
def _create_instance(self, instance_id=1, spawn=True):
"""Creates and spawns a test instance."""
stubs.stubout_loopingcall_start(self.stubs)
values = {
'id': instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'local_gb': 20,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
instance = db.instance_create(self.context, values)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
if spawn:
self.conn.spawn(self.context, instance, network_info)
return instance
class XenAPIDiffieHellmanTestCase(test.TestCase):
"""Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = vmops.SimpleDH()
self.bob = vmops.SimpleDH()
def test_shared(self):
alice_pub = self.alice.get_public()
bob_pub = self.bob.get_public()
alice_shared = self.alice.compute_shared(bob_pub)
bob_shared = self.bob.compute_shared(alice_pub)
self.assertEquals(alice_shared, bob_shared)
def _test_encryption(self, message):
enc = self.alice.encrypt(message)
self.assertFalse(enc.endswith('\n'))
dec = self.bob.decrypt(enc)
self.assertEquals(dec, message)
def test_encrypt_simple_message(self):
self._test_encryption('This is a simple message.')
def test_encrypt_message_with_newlines_at_end(self):
self._test_encryption('This message has a newline at the end.\n')
def test_encrypt_many_newlines_at_end(self):
self._test_encryption('Message with lotsa newlines.\n\n\n')
def test_encrypt_newlines_inside_message(self):
self._test_encryption('Message\nwith\ninterior\nnewlines.')
def test_encrypt_with_leading_newlines(self):
self._test_encryption('\n\nMessage with leading newlines.')
def test_encrypt_really_long_message(self):
self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
def tearDown(self):
super(XenAPIDiffieHellmanTestCase, self).tearDown()
class XenAPIMigrateInstance(test.TestCase):
"""Unit test for verifying migration-related actions."""
def setUp(self):
super(XenAPIMigrateInstance, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.flags(target_host='127.0.0.1',
xenapi_connection_url='test_url',
xenapi_connection_password='test_pass')
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': None,
'ramdisk_id': None,
'local_gb': 5,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
fake_utils.stub_out_utils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
stubs.stubout_get_this_vm_uuid(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.values)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
conn = xenapi_conn.get_connection(False)
conn.migrate_disk_and_power_off(instance, '127.0.0.1')
def test_revert_migrate(self):
instance = db.instance_create(self.context, self.values)
self.called = False
self.fake_vm_start_called = False
self.fake_revert_migration_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
def fake_revert_migration(*args, **kwargs):
self.fake_revert_migration_called = True
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(vmops.VMOps, 'revert_migration', fake_revert_migration)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
conn.finish_migration(self.context, instance,
dict(base_copy='hurr', cow='durr'),
network_info, resize_instance=True)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, True)
conn.revert_migration(instance)
self.assertEqual(self.fake_revert_migration_called, True)
def test_finish_migrate(self):
instance = db.instance_create(self.context, self.values)
self.called = False
self.fake_vm_start_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
conn.finish_migration(self.context, instance,
dict(base_copy='hurr', cow='durr'),
network_info, resize_instance=True)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, True)
def test_finish_migrate_no_local_storage(self):
tiny_type_id = \
instance_types.get_instance_type_by_name('m1.tiny')['id']
self.values.update({'instance_type_id': tiny_type_id, 'local_gb': 0})
instance = db.instance_create(self.context, self.values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
conn.finish_migration(self.context, instance,
dict(base_copy='hurr', cow='durr'),
network_info, resize_instance=True)
def test_finish_migrate_no_resize_vdi(self):
instance = db.instance_create(self.context, self.values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
'gateway': '192.168.0.1',
'gateway6': 'dead:beef::1',
'ip6s': [{'enabled': '1',
'ip': 'dead:beef::dcad:beff:feef:0',
'netmask': '64'}],
'ips': [{'enabled': '1',
'ip': '192.168.0.100',
'netmask': '255.255.255.0'}],
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
# Resize instance would be determined by the compute call
conn.finish_migration(self.context, instance,
dict(base_copy='hurr', cow='durr'),
network_info, resize_instance=False)
class XenAPIImageTypeTestCase(test.TestCase):
"""Test ImageType class."""
def test_to_string(self):
"""Can convert from type id to type string."""
self.assertEquals(
vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
vm_utils.ImageType.KERNEL_STR)
def test_from_string(self):
"""Can convert from string to type id."""
self.assertEquals(
vm_utils.ImageType.from_string(vm_utils.ImageType.KERNEL_STR),
vm_utils.ImageType.KERNEL)
class XenAPIDetermineDiskImageTestCase(test.TestCase):
"""Unit tests for code that detects the ImageType."""
def setUp(self):
super(XenAPIDetermineDiskImageTestCase, self).setUp()
glance_stubs.stubout_glance_client(self.stubs)
class FakeInstance(object):
pass
self.fake_instance = FakeInstance()
self.fake_instance.id = 42
self.fake_instance.os_type = 'linux'
self.fake_instance.architecture = 'x86-64'
def assert_disk_type(self, disk_type):
ctx = context.RequestContext('fake', 'fake')
dt = vm_utils.VMHelper.determine_disk_image_type(
self.fake_instance, ctx)
self.assertEqual(disk_type, dt)
def test_instance_disk(self):
"""If a kernel is specified, the image type is DISK (aka machine)."""
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE
self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
self.assert_disk_type(vm_utils.ImageType.DISK)
def test_instance_disk_raw(self):
"""
If the kernel isn't specified, and we're not using Glance, then
DISK_RAW is assumed.
"""
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
def test_glance_disk_raw(self):
"""
If we're using Glance, then defer to the image_type field, which in
this case will be 'raw'.
"""
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
def test_glance_disk_vhd(self):
"""
If we're using Glance, then defer to the image_type field, which in
this case will be 'vhd'.
"""
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
class CompareVersionTestCase(test.TestCase):
def test_less_than(self):
"""Test that cmp_version compares a as less than b"""
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0)
def test_greater_than(self):
"""Test that cmp_version compares a as greater than b"""
self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0)
def test_equal(self):
"""Test that cmp_version compares a as equal to b"""
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0)
def test_non_lexical(self):
"""Test that cmp_version compares non-lexically"""
self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0)
def test_length(self):
"""Test that cmp_version compares by length as last resort"""
self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0)
class FakeXenApi(object):
"""Fake XenApi for testing HostState."""
class FakeSR(object):
def get_record(self, ref):
return {'virtual_allocation': 10000,
'physical_utilisation': 20000}
SR = FakeSR()
class FakeSession(object):
"""Fake Session class for HostState testing."""
def async_call_plugin(self, *args):
return None
def wait_for_task(self, *args):
vm = {'total': 10,
'overhead': 20,
'free': 30,
'free-computed': 40}
return json.dumps({'host_memory': vm})
def get_xenapi(self):
return FakeXenApi()
class HostStateTestCase(test.TestCase):
"""Tests HostState, which holds metrics from XenServer that get
reported back to the Schedulers."""
def _fake_safe_find_sr(self, session):
"""None SR ref since we're ignoring it in FakeSR."""
return None
def test_host_state(self):
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(vm_utils, 'safe_find_sr', self._fake_safe_find_sr)
host_state = xenapi_conn.HostState(FakeSession())
stats = host_state._stats
self.assertEquals(stats['disk_total'], 10000)
self.assertEquals(stats['disk_used'], 20000)
self.assertEquals(stats['host_memory_total'], 10)
self.assertEquals(stats['host_memory_overhead'], 20)
self.assertEquals(stats['host_memory_free'], 30)
self.assertEquals(stats['host_memory_free_computed'], 40)
|
|
"""Controller interfaces with the Alarm.com API via pyalarmdotcomajax."""
from __future__ import annotations
import asyncio
from datetime import timedelta
import logging
import aiohttp
import async_timeout
from homeassistant.components import persistent_notification
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from pyalarmdotcomajax import ADCController
from pyalarmdotcomajax.const import (
ADCDeviceType,
ADCGarageDoorCommand,
ADCLockCommand,
ADCPartitionCommand,
AuthResult,
)
from pyalarmdotcomajax.errors import (
AuthenticationFailed,
DataFetchFailed,
UnexpectedDataStructure,
)
from custom_components.alarmdotcom.errors import PartialInitialization
from . import const as adci
log = logging.getLogger(__name__)
# TODO: Commands and statuses here and in each platform file are too tightly coupled to pyalarmdotcomajax constants. Should have own constants instead.
class ADCIController:
"""Manages a single Alarm.com instance."""
_coordinator: DataUpdateCoordinator
_alarm: ADCController
def __init__(self, hass: HomeAssistant, config_entry: ConfigEntry | None = None):
"""Initialize the system."""
self.hass: HomeAssistant = hass
        self.config_entry: ConfigEntry | None = config_entry
@property
def devices(self) -> adci.ADCIEntities:
"""Return dictionary of sensors for this controller."""
return self._coordinator.data # type: ignore
@property
def provider_name(self) -> str | None:
"""Return name of Alarm.com provider."""
return result if (result := self._alarm.provider_name) else None
@property
def user_id(self) -> str | None:
"""Return Alarm.com ID for logged-in user."""
return result if (result := self._alarm.user_id) else None
@property
def user_email(self) -> str | None:
"""Return email address for logged-in user."""
return result if (result := self._alarm.user_email) else None
@property
def two_factor_cookie(self) -> str | None:
"""Return email address for logged-in user."""
return result if (result := self._alarm.two_factor_cookie) else None
async def async_login(
self,
username: str,
password: str,
twofactorcookie: str,
forcebypass: adci.ADCIArmingOption | None = None,
noentrydelay: adci.ADCIArmingOption | None = None,
silentarming: adci.ADCIArmingOption | None = None,
) -> AuthResult:
"""Log into Alarm.com."""
try:
self._alarm = ADCController(
username=username,
password=password,
websession=async_get_clientsession(self.hass),
forcebypass=adci.ADCIArmingOption(forcebypass).to_adc
if forcebypass is not None
else adci.ADCIArmingOption.NEVER.value,
noentrydelay=adci.ADCIArmingOption(noentrydelay).to_adc
if noentrydelay is not None
else adci.ADCIArmingOption.NEVER.value,
silentarming=adci.ADCIArmingOption(silentarming).to_adc
if silentarming is not None
else adci.ADCIArmingOption.NEVER.value,
twofactorcookie=twofactorcookie,
)
async with async_timeout.timeout(15):
login_result = await self._alarm.async_login()
return login_result
except ConnectionError as err:
raise UpdateFailed("Error communicating with alarm.com") from err
except AuthenticationFailed as err:
raise ConfigEntryAuthFailed("Invalid account credentials.") from err
except (asyncio.TimeoutError, aiohttp.ClientError):
# Handled by Home Assistant Update Coordinator.
raise
async def async_send_otp(self, code: int) -> None:
"""Submit two-factor authentication code and return two factor authentication cookie."""
try:
await self._alarm.async_submit_otp(
code, f"Home Assistant ({self.hass.config.location_name})"
)
except (DataFetchFailed, AuthenticationFailed):
log.error("OTP submission failed.")
raise
async def async_setup(self, reload: bool = False) -> bool:
"""Set up Alarm.com system instance."""
        if not self.config_entry:
            raise PartialInitialization
        log.debug(
            "%s: Setting up controller with config_entry: %s",
            __name__,
            self.config_entry.data,
        )
await self.async_login(
username=self.config_entry.data[adci.CONF_USERNAME],
password=self.config_entry.data[adci.CONF_PASSWORD],
twofactorcookie=self.config_entry.data.get(adci.CONF_2FA_COOKIE),
forcebypass=self.config_entry.options.get(adci.CONF_FORCE_BYPASS),
noentrydelay=self.config_entry.options.get(adci.CONF_NO_DELAY),
silentarming=self.config_entry.options.get(adci.CONF_SILENT_ARM),
)
if not reload:
log.debug("%s: Registered update listener.", __name__)
self.config_entry.async_on_unload(
self.config_entry.add_update_listener(self._async_update_listener)
)
        # This is indicative of a problem. Just flag for now.
if self.config_entry.title is None:
log.error("Config entry has no title.")
# Coordinator manages updates for all Alarmdotcom components
self._coordinator = DataUpdateCoordinator(
self.hass,
log,
# name=self.config_entry.title,
name="alarmdotcom",
update_method=self.async_update,
update_interval=timedelta(minutes=1),
)
# Fetch initial data so we have data when entities subscribe.
# Will throw exception and try again later on failure.
await self._coordinator.async_config_entry_first_refresh()
return True
async def async_update(self) -> adci.ADCIEntities:
"""Pull fresh data from Alarm.com for coordinator."""
if not self.config_entry:
raise PartialInitialization
try:
await self._alarm.async_update()
except (UnexpectedDataStructure, ConnectionError) as err:
log.error(
"%s: Update failed: %s",
__name__,
err,
)
raise UpdateFailed("Error communicating with api.") from err
# TypeError encountered when importing from configuration.yaml using
# a provider that requires 2FA with an account that does not
# have 2FA set up.
except TypeError as err:
raise ConfigEntryAuthFailed(
"Two-factor authentication must be enabled in order to log in with this"
" provider."
) from err
except PermissionError as err:
raise ConfigEntryAuthFailed(
"Account has insufficient permissions."
) from err
# Typically captured during login. Should only be captured here when
# updating after migration from configuration.yaml.
except AuthenticationFailed as err:
raise ConfigEntryAuthFailed("Invalid account credentials.") from err
except (asyncio.TimeoutError, aiohttp.ClientError):
# Handled by Home Assistant Update Coordinator
raise
entity_data: dict = {}
system_ids: set[str] = set()
partition_ids: set[str] = set()
sensor_ids: set[str] = set()
lock_ids: set[str] = set()
garage_door_ids: set[str] = set()
low_battery_ids: set[str] = set()
malfunction_ids: set[str] = set()
log.debug("Processing systems.")
# Process systems
for src_sys in self._alarm.systems:
log.debug("%s: %s", src_sys.id_, src_sys.name)
dest_sys: adci.ADCISystemData = {
"unique_id": src_sys.id_,
"name": src_sys.name,
"malfunction": src_sys.malfunction,
"unit_id": src_sys.unit_id,
"mac_address": src_sys.mac_address,
}
entity_data[src_sys.id_] = dest_sys
system_ids.add(src_sys.id_)
log.debug("Processing partitions.")
# Process partitions
for src_part in self._alarm.partitions:
log.debug("%s: %s", src_part.id_, src_part.name)
dest_part: adci.ADCIPartitionData = {
"unique_id": src_part.id_,
"name": src_part.name,
"state": src_part.state,
"malfunction": src_part.malfunction,
"parent_id": src_part.system_id,
"mac_address": src_part.mac_address,
"raw_state_text": src_part.raw_state_text,
"desired_state": src_part.desired_state,
"uncleared_issues": src_part.uncleared_issues,
"async_arm_away_callback": src_part.async_alarm_arm_away,
"async_arm_stay_callback": src_part.async_alarm_arm_stay,
"async_disarm_callback": src_part.async_alarm_disarm,
"async_arm_night_callback": src_part.async_alarm_arm_night,
}
entity_data[src_part.id_] = dest_part
partition_ids.add(src_part.id_)
log.debug("Processing sensors.")
# Process sensors
for src_sensor in self._alarm.sensors:
log.debug("%s: %s", src_sensor.id_, src_sensor.name)
dest_sensor: adci.ADCISensorData = {
"unique_id": src_sensor.id_,
"name": src_sensor.name,
"state": src_sensor.state,
"malfunction": src_sensor.malfunction,
"parent_id": src_sensor.partition_id,
"battery_low": src_sensor.battery_low or src_sensor.battery_critical,
"mac_address": src_sensor.mac_address,
"raw_state_text": src_sensor.raw_state_text,
"device_subtype": src_sensor.device_subtype,
}
entity_data[src_sensor.id_] = dest_sensor
sensor_ids.add(src_sensor.id_)
log.debug("Processing locks.")
# Process locks
for src_lock in self._alarm.locks:
log.debug("%s: %s", src_lock.id_, src_lock.name)
dest_lock: adci.ADCILockData = {
"unique_id": src_lock.id_,
"name": src_lock.name,
"state": src_lock.state,
"malfunction": src_lock.malfunction,
"parent_id": src_lock.partition_id,
"battery_low": src_lock.battery_low or src_lock.battery_critical,
"mac_address": src_lock.mac_address,
"raw_state_text": src_lock.raw_state_text,
"desired_state": src_lock.desired_state,
"async_lock_callback": src_lock.async_lock,
"async_unlock_callback": src_lock.async_unlock,
}
entity_data[src_lock.id_] = dest_lock
lock_ids.add(src_lock.id_)
log.debug("Processing garage doors.")
# Process garage doors
for src_garage in self._alarm.garage_doors:
log.debug("%s: %s", src_garage.id_, src_garage.name)
dest_garage: adci.ADCIGarageDoorData = {
"unique_id": src_garage.id_,
"name": src_garage.name,
"state": src_garage.state,
"malfunction": src_garage.malfunction,
"parent_id": src_garage.partition_id,
"mac_address": src_garage.mac_address,
"raw_state_text": src_garage.raw_state_text,
"desired_state": src_garage.desired_state,
"async_close_callback": src_garage.async_close,
"async_open_callback": src_garage.async_open,
}
entity_data[src_garage.id_] = dest_garage
garage_door_ids.add(src_garage.id_)
log.debug("Processing low battery sensors.")
# Process "virtual" battery sensors for sensors and locks.
for parent_id in sensor_ids.union(lock_ids):
battery_parent: adci.ADCISensorData | adci.ADCILockData = entity_data[
parent_id
]
dest_batt: adci.ADCISensorData = {
"unique_id": f"{battery_parent.get('unique_id')}_low_battery",
"name": f"{battery_parent.get('name')}: Battery",
"state": battery_parent.get("battery_low"),
"parent_id": battery_parent["unique_id"],
}
log.debug("%s: %s", dest_batt.get("unique_id"), dest_batt.get("name"))
entity_data[dest_batt["unique_id"]] = dest_batt
low_battery_ids.add(dest_batt["unique_id"])
log.debug("Processing malfunction sensors.")
# Process "virtual" malfunction sensors for sensors, locks, and partitions.
for parent_id in sensor_ids.union(lock_ids, partition_ids):
malfunction_parent: adci.ADCISensorData | adci.ADCILockData | adci.ADCIPartitionData = entity_data[
parent_id
]
dest_malfunction: adci.ADCISensorData = {
"unique_id": f"{malfunction_parent.get('unique_id')}_malfunction",
"name": f"{malfunction_parent.get('name')}: Malfunction",
"parent_id": malfunction_parent["unique_id"],
"state": malfunction_parent.get("malfunction"),
}
log.debug(
"%s: %s",
dest_malfunction.get("unique_id"),
dest_malfunction.get("name"),
)
entity_data[dest_malfunction["unique_id"]] = dest_malfunction
malfunction_ids.add(dest_malfunction["unique_id"])
# Load objects to devices dict:
devices: adci.ADCIEntities = {
"entity_data": entity_data,
"system_ids": system_ids,
"partition_ids": partition_ids,
"sensor_ids": sensor_ids,
"lock_ids": lock_ids,
"garage_door_ids": garage_door_ids,
"low_battery_ids": low_battery_ids,
"malfunction_ids": malfunction_ids,
}
return devices
async def _async_update_listener(
self, hass: HomeAssistant, entry: ConfigEntry
) -> None:
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
# #
# Actions
# #
async def _async_send_action(
self,
entity_id: str,
action: ADCPartitionCommand | ADCLockCommand | ADCGarageDoorCommand,
device_type: ADCDeviceType,
) -> bool:
"""Send action request to API & handle errors."""
if not self.config_entry:
raise PartialInitialization
try:
response: bool = await self._alarm.async_send_action(
device_type=device_type, event=action, device_id=entity_id
)
except PermissionError as err:
log.error("%s: Inadequate permissions. %s", __name__, str(err))
# TODO: This notification needs work. Actions should be user-readable. Device type should be a HA device type (alarm control panel instead of partition). Device name should be shown.
error_msg = (
"Your Alarm.com user does not have permission to"
f" {action.value.lower()} your {device_type.name.lower()}. Please log"
" in to Alarm.com to grant the appropriate permissions to your"
" account."
)
persistent_notification.async_create(
self.hass,
error_msg,
title="Alarm.com Error",
notification_id="alarmcom_permission_error",
)
return False
await self._coordinator.async_refresh()
return response
async def async_lock_action(self, entity_id: str, action: ADCLockCommand) -> bool:
"""Do something with a lock."""
if not self.config_entry:
raise PartialInitialization
if entity_id not in self.devices.get("lock_ids", {}) or action not in [
ADCLockCommand.LOCK,
ADCLockCommand.UNLOCK,
]:
return False
return await self._async_send_action(entity_id, action, ADCDeviceType.LOCK)
async def async_partition_action(
self, entity_id: str, action: ADCPartitionCommand
) -> bool:
"""Do something with a partition."""
if not self.config_entry:
raise PartialInitialization
if entity_id not in self.devices.get("partition_ids", {}) or action not in [
ADCPartitionCommand.ARM_STAY,
ADCPartitionCommand.DISARM,
ADCPartitionCommand.ARM_NIGHT,
ADCPartitionCommand.ARM_AWAY,
]:
return False
return await self._async_send_action(entity_id, action, ADCDeviceType.PARTITION)
    async def async_garage_door_action(
        self, entity_id: str, action: ADCGarageDoorCommand
    ) -> bool:
"""Do something with a garage door."""
if not self.config_entry:
raise PartialInitialization
if entity_id not in self.devices.get("garage_door_ids", {}) or action not in [
ADCGarageDoorCommand.OPEN,
ADCGarageDoorCommand.CLOSE,
]:
return False
return await self._async_send_action(
entity_id, action, ADCDeviceType.GARAGE_DOOR
)
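# --- Usage sketch (illustrative only; not called by this integration) --------
# A minimal sketch of how this controller is intended to be driven, assuming Home
# Assistant supplies an initialized `hass` and a `ConfigEntry`. The device id
# "lock-1" is a hypothetical example; real ids come from `controller.devices`.
async def _example_drive_controller(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Illustrative only: log in, start the coordinator, then lock one device."""
    controller = ADCIController(hass, entry)
    await controller.async_setup()  # logs in and schedules one-minute refreshes
    return await controller.async_lock_action("lock-1", ADCLockCommand.LOCK)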
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import mock
from oslo_utils.fixture import uuidsentinel as uuids
from nova import objects
from nova.objects import fields
from nova.scheduler.filters import numa_topology_filter
from nova import test
from nova.tests.unit.scheduler import fakes
class TestNUMATopologyFilter(test.NoDBTestCase):
def setUp(self):
super(TestNUMATopologyFilter, self).setUp()
self.filt_cls = numa_topology_filter.NUMATopologyFilter()
def _get_spec_obj(self, numa_topology, network_metadata=None):
image_meta = objects.ImageMeta(properties=objects.ImageMetaProps())
spec_obj = objects.RequestSpec(numa_topology=numa_topology,
pci_requests=None,
instance_uuid=uuids.fake,
flavor=objects.Flavor(extra_specs={}),
image=image_meta)
if network_metadata:
spec_obj.network_metadata = network_metadata
return spec_obj
def test_numa_topology_filter_pass(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.5})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_numa_instance_no_numa_host_fail(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1', {'pci_stats': None})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_numa_host_no_numa_instance_pass(self):
spec_obj = self._get_spec_obj(numa_topology=None)
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_fit(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([2]), memory=512),
objects.InstanceNUMACell(id=2, cpuset=set([3]), memory=512)
])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.5})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_memory(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]),
memory=1024),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_cpu(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3, 4, 5]),
memory=512)])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 1,
'ram_allocation_ratio': 1.5})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_pass_set_limit(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 21,
'ram_allocation_ratio': 1.3})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
limits = host.limits['numa_topology']
self.assertEqual(limits.cpu_allocation_ratio, 21)
self.assertEqual(limits.ram_allocation_ratio, 1.3)
@mock.patch('nova.objects.instance_numa_topology.InstanceNUMACell'
'.cpu_pinning_requested',
return_value=True)
def _do_test_numa_topology_filter_cpu_policy(
self, numa_topology, cpu_policy, cpu_thread_policy, passes,
mock_pinning_requested):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)
])
spec_obj = objects.RequestSpec(numa_topology=instance_topology,
pci_requests=None,
instance_uuid=uuids.fake)
extra_specs = [
{},
{
'hw:cpu_policy': cpu_policy,
'hw:cpu_thread_policy': cpu_thread_policy,
}
]
image_props = [
{},
{
'hw_cpu_policy': cpu_policy,
'hw_cpu_thread_policy': cpu_thread_policy,
}
]
host = fakes.FakeHostState('host1', 'node1', {
'numa_topology': numa_topology,
'pci_stats': None,
'cpu_allocation_ratio': 1,
'ram_allocation_ratio': 1.5})
assertion = self.assertTrue if passes else self.assertFalse
# test combinations of image properties and extra specs
for specs, props in itertools.product(extra_specs, image_props):
# ...except for the one where no policy is specified
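            # (three combinations remain: extra specs only, image properties
            # only, and both together)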
if specs == props == {}:
continue
fake_flavor = objects.Flavor(memory_mb=1024, extra_specs=specs)
fake_image_props = objects.ImageMetaProps(**props)
fake_image = objects.ImageMeta(properties=fake_image_props)
spec_obj.image = fake_image
spec_obj.flavor = fake_flavor
assertion(self.filt_cls.host_passes(host, spec_obj))
self.assertIsNone(spec_obj.numa_topology.cells[0].cpu_pinning)
def test_numa_topology_filter_fail_cpu_thread_policy_require(self):
cpu_policy = fields.CPUAllocationPolicy.DEDICATED
cpu_thread_policy = fields.CPUThreadAllocationPolicy.REQUIRE
numa_topology = fakes.NUMA_TOPOLOGY
self._do_test_numa_topology_filter_cpu_policy(
numa_topology, cpu_policy, cpu_thread_policy, False)
def test_numa_topology_filter_pass_cpu_thread_policy_require(self):
cpu_policy = fields.CPUAllocationPolicy.DEDICATED
cpu_thread_policy = fields.CPUThreadAllocationPolicy.REQUIRE
for numa_topology in fakes.NUMA_TOPOLOGIES_W_HT:
self._do_test_numa_topology_filter_cpu_policy(
numa_topology, cpu_policy, cpu_thread_policy, True)
def test_numa_topology_filter_pass_cpu_thread_policy_others(self):
cpu_policy = fields.CPUAllocationPolicy.DEDICATED
cpu_thread_policy = fields.CPUThreadAllocationPolicy.PREFER
numa_topology = fakes.NUMA_TOPOLOGY
for cpu_thread_policy in [
fields.CPUThreadAllocationPolicy.PREFER,
fields.CPUThreadAllocationPolicy.ISOLATE]:
self._do_test_numa_topology_filter_cpu_policy(
numa_topology, cpu_policy, cpu_thread_policy, True)
def test_numa_topology_filter_pass_mempages(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([3]),
memory=128, pagesize=4),
objects.InstanceNUMACell(id=1, cpuset=set([1]),
memory=128, pagesize=16)
])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.5})
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_mempages(self):
instance_topology = objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(id=0, cpuset=set([3]),
memory=128, pagesize=8),
objects.InstanceNUMACell(id=1, cpuset=set([1]),
memory=128, pagesize=16)
])
spec_obj = self._get_spec_obj(numa_topology=instance_topology)
host = fakes.FakeHostState('host1', 'node1',
{'numa_topology': fakes.NUMA_TOPOLOGY,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.5})
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
def _get_fake_host_state_with_networks(self):
network_a = objects.NetworkMetadata(physnets=set(['foo', 'bar']),
tunneled=False)
network_b = objects.NetworkMetadata(physnets=set(), tunneled=True)
host_topology = objects.NUMATopology(cells=[
objects.NUMACell(id=1, cpuset=set([1, 2]), memory=2048,
cpu_usage=2, memory_usage=2048, mempages=[],
siblings=[set([1]), set([2])],
pinned_cpus=set([]),
network_metadata=network_a),
objects.NUMACell(id=2, cpuset=set([3, 4]), memory=2048,
cpu_usage=2, memory_usage=2048, mempages=[],
siblings=[set([3]), set([4])],
pinned_cpus=set([]),
network_metadata=network_b)])
return fakes.FakeHostState('host1', 'node1', {
'numa_topology': host_topology,
'pci_stats': None,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.5})
def test_numa_topology_filter_pass_networks(self):
host = self._get_fake_host_state_with_networks()
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512),
objects.InstanceNUMACell(id=1, cpuset=set([3]), memory=512)])
network_metadata = objects.NetworkMetadata(
physnets=set(['foo']), tunneled=False)
spec_obj = self._get_spec_obj(numa_topology=instance_topology,
network_metadata=network_metadata)
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
# this should pass because while the networks are affined to different
# host NUMA nodes, our guest itself has multiple NUMA nodes
network_metadata = objects.NetworkMetadata(
physnets=set(['foo', 'bar']), tunneled=True)
spec_obj = self._get_spec_obj(numa_topology=instance_topology,
network_metadata=network_metadata)
self.assertTrue(self.filt_cls.host_passes(host, spec_obj))
def test_numa_topology_filter_fail_networks(self):
host = self._get_fake_host_state_with_networks()
instance_topology = objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(id=0, cpuset=set([1]), memory=512)])
# this should fail because the networks are affined to different host
# NUMA nodes but our guest only has a single NUMA node
network_metadata = objects.NetworkMetadata(
physnets=set(['foo']), tunneled=True)
spec_obj = self._get_spec_obj(numa_topology=instance_topology,
network_metadata=network_metadata)
self.assertFalse(self.filt_cls.host_passes(host, spec_obj))
|
|
import pygame, random, sys, copy
from pygame.locals import *
from collections import deque
# initialize pygame
pygame.init()
gameClock = pygame.time.Clock()
# set up main window
WINDOWWIDTH = 300
WINDOWHEIGHT = 300
windowSurface = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT), 0, 32)
pygame.display.set_caption('snake')
# set game area
gameArea = pygame.Rect(17, 17, 266, 266) # game area is set up as a 12x12 grid
# set up colours
BLACK = (0, 0, 0)
GRAY = (200, 200, 200)
WHITE = (255, 255, 255)
# set up directions
UP = 1
DOWN = 2
LEFT = 3
RIGHT = 4
# set up snake
SNAKESIZE = 20
STARTINGX = gameArea.left + 178
STARTINGY = gameArea.top + 134
INITLENGTH = 3
INITDIR = LEFT
SNAKEMS = SNAKESIZE + 2
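# one movement step equals one grid cell: a 20 px segment plus a 2 px gap (22 px),
# matching the 22 px spacing used by createFood() below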
# set font
smallFont = pygame.font.SysFont(None, 12)
medFont = pygame.font.SysFont(None, 18)
largeFont = pygame.font.SysFont(None, 24)
# set initial text
name = smallFont.render('By YS Chua', True, WHITE)
nameRect = name.get_rect()
nameRect.top = gameArea.bottom + 2
nameRect.right = gameArea.right
instr = smallFont.render('Press keys 1-5 to change level', True, WHITE)
instrRect = instr.get_rect()
instrRect.centerx = gameArea.centerx
instrRect.bottom = gameArea.top - 2
score = smallFont.render('Score: 0', True, WHITE)
scoreRect = score.get_rect()
scoreRect.top = gameArea.bottom + 2
scoreRect.centerx = gameArea.centerx
level = smallFont.render('Level: 1', True, WHITE)
levelRect = level.get_rect()
levelRect.top = gameArea.bottom + 2
levelRect.left = gameArea.left
def cloneList(someList):
# creates an object clone for each object in the list
clone = []
for item in someList:
clone.append(copy.copy(item))
return clone
def createFood(x, y):
# create a food on 12x12 grid position x, y
gameAreaX = gameArea.left + 7 + 22 * (x - 1)
gameAreaY = gameArea.top + 7 + 22 * (y - 1)
return pygame.Rect(gameAreaX, gameAreaY, 10, 10)
def createRandomFood(snakeBody):
# create a food on random location on game area
rerollFood = True
while rerollFood:
# this is to ensure no food is created under snake
rerollFood = False
foodItem = createFood(random.randint(1, 12), random.randint(1, 12))
for snakePart in snakeBody:
if snakePart.colliderect(foodItem):
rerollFood = True
return foodItem
def reset():
# initialize snake
global gameOverStatus, snakeBody, snakeDir, snakeHead, food, consumedFoodQ, nextFood, scoreCounter
scoreCounter = 0
gameOverStatus = False
snakeBody = []
food = [] # list that always contains only one food rect
consumedFoodQ = deque([]) # queue containing coordinates of consumed foods that have yet to be added to snake's length
nextFood = () # coordinates of next food to be consumed in queue
snakeDir = INITDIR
snakeBody.append(pygame.Rect(STARTINGX, STARTINGY, SNAKESIZE, SNAKESIZE))
snakeHead = snakeBody[0]
for i in range(1, INITLENGTH):
snakeBody.append(pygame.Rect(STARTINGX + i * (SNAKESIZE + 2), STARTINGY, SNAKESIZE, SNAKESIZE))
food.append(createRandomFood(snakeBody))
reset()
gameSpeed = 2 # initial speed
# game loop
while True:
# event handling
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
if event.type == KEYDOWN:
if event.key == K_UP:
snakeDir = UP
elif event.key == K_DOWN:
snakeDir = DOWN
elif event.key == K_RIGHT:
snakeDir = RIGHT
elif event.key == K_LEFT:
snakeDir = LEFT
if event.key == K_1:
gameSpeed = 2
elif event.key == K_2:
gameSpeed = 4
elif event.key == K_3:
gameSpeed = 6
elif event.key == K_4:
gameSpeed = 8
elif event.key == K_5:
gameSpeed = 10
if event.type == KEYUP:
if event.key == K_SPACE:
reset()
if not gameOverStatus:
# store current copy of snake
snakeClone = cloneList(snakeBody)
# movement
if snakeDir == UP:
if snakeHead.left == snakeBody[1].left and snakeHead.top == (snakeBody[1].bottom + 2):
# prevent going to opposite direction, causing collision
snakeHead.top += SNAKEMS # DOWN
else:
snakeHead.top -= SNAKEMS # UP
elif snakeDir == DOWN:
if snakeHead.left == snakeBody[1].left and snakeHead.bottom == (snakeBody[1].top - 2):
snakeHead.top -= SNAKEMS # UP
else:
snakeHead.top += SNAKEMS # DOWN
elif snakeDir == RIGHT:
if snakeHead.right == (snakeBody[1].left - 2) and snakeHead.top == snakeBody[1].top:
snakeHead.right -= SNAKEMS # LEFT
else:
snakeHead.right += SNAKEMS # RIGHT
elif snakeDir == LEFT:
if snakeHead.left == (snakeBody[1].right + 2) and snakeHead.top == snakeBody[1].top:
snakeHead.right += SNAKEMS # RIGHT
else:
snakeHead.right -= SNAKEMS # LEFT
for i in range(len(snakeBody) - 1):
# this is the snake body following the head
snakeBody[i + 1].x = snakeClone[i].x
snakeBody[i + 1].y = snakeClone[i].y
# collision detection
for i in range(len(snakeBody) - 1):
# collision with snake body
if snakeHead.colliderect(snakeBody[i + 1]):
gameOverStatus = True
break
if not gameArea.collidepoint(snakeHead.center):
# collision with border
gameOverStatus = True
if snakeHead.colliderect(food[0]):
# collision with food
consumedFoodQ.append((food[0].centerx, food[0].centery))
food.remove(food[0])
scoreCounter += 1
# create new food if previous was consumed
if not food:
food.append(createRandomFood(snakeBody))
# check to add length to snake
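        # (a new segment is appended only once the tail occupies the cell where
        # the food was eaten, so the snake grows from its tail rather than its head)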
if len(consumedFoodQ) != 0 or nextFood != ():
if nextFood == ():
nextFood = consumedFoodQ.popleft()
if snakeClone[-1].centerx == nextFood[0] and snakeClone[-1].centery == nextFood[1]:
snakeBody.append(pygame.Rect(snakeClone[-1].x, snakeClone[-1].y, SNAKESIZE, SNAKESIZE))
nextFood = ()
# update text
score = smallFont.render('Score: %s ' % scoreCounter, True, WHITE)
    level = smallFont.render('Level: %d ' % (gameSpeed // 2), True, WHITE)
if not gameOverStatus:
# draw background
windowSurface.fill(BLACK)
windowSurface.blit(name, nameRect)
windowSurface.blit(instr, instrRect)
windowSurface.blit(score, scoreRect)
windowSurface.blit(level, levelRect)
pygame.draw.rect(windowSurface, WHITE, gameArea, 1)
# draw food
pygame.draw.rect(windowSurface, WHITE, food[0])
# draw snake
for snakePart in snakeBody:
pygame.draw.rect(windowSurface, WHITE, snakePart)
pygame.draw.rect(windowSurface, GRAY, snakeHead)
# draw window
pygame.display.update()
# game speed
gameClock.tick(gameSpeed)
if gameOverStatus:
# display game over text
gameOver = largeFont.render(' Game Over! ', True, WHITE, BLACK)
startNewGame = medFont.render(' Press space to start a new game ', True, WHITE, BLACK)
gameOverRect = gameOver.get_rect()
startNewGameRect = startNewGame.get_rect()
gameOverRect.centerx = gameArea.centerx
gameOverRect.bottom = gameArea.centery
startNewGameRect.centerx = gameArea.centerx
startNewGameRect.top = gameArea.centery
windowSurface.blit(gameOver, gameOverRect)
windowSurface.blit(startNewGame, startNewGameRect)
pygame.display.update()
gameClock.tick(2)
|
|
#! /usr/bin/env python3
"""
Counterparty setup script - currently supports Ubuntu Linux and Windows
"""
import os
import sys
import getopt
import logging
import shutil
import zipfile
import platform
import tempfile
import subprocess
import stat
import string
import random
import tarfile
import urllib
import urllib.request
try: #ignore import errors on windows
import pwd
import grp
except ImportError:
pass
from setup_util import *
PYTHON3_VER = None
DEFAULT_CONFIG = "[Default]\nbackend-rpc-connect=localhost\nbackend-rpc-port=8332\nbackend-rpc-user=rpc\nbackend-rpc-password=1234\nrpc-host=localhost\nrpc-port=4000\nrpc-user=rpc\nrpc-password=xcppw1234"
DEFAULT_CONFIG_TESTNET = "[Default]\nbackend-rpc-connect=localhost\nbackend-rpc-port=18332\nbackend-rpc-user=rpc\nbackend-rpc-password=1234\nrpc-host=localhost\nrpc-port=14000\nrpc-user=rpc\nrpc-password=xcppw1234\ntestnet=1"
DEFAULT_CONFIG_COUNTERBLOCKD = "[Default]\nbackend-rpc-connect=localhost\nbackend-rpc-port=8332\nbackend-rpc-user=rpc\nbackend-rpc-password=rpcpw1234\ncounterpartyd-rpc-host=localhost\ncounterpartyd-rpc-port=4000\ncounterpartyd-rpc-user=rpc\ncounterpartyd-rpc-password=xcppw1234\nrpc-host=0.0.0.0\nsocketio-host=0.0.0.0\nsocketio-chat-host=0.0.0.0\nredis-enable-apicache=0"
DEFAULT_CONFIG_COUNTERBLOCKD_TESTNET = "[Default]\nbackend-rpc-connect=localhost\nbackend-rpc-port=18332\nbackend-rpc-user=rpc\nbackend-rpc-password=1234\ncounterpartyd-rpc-host=localhost\ncounterpartyd-rpc-port=14000\ncounterpartyd-rpc-user=rpc\ncounterpartyd-rpc-password=xcppw1234\nrpc-host=0.0.0.0\nsocketio-host=0.0.0.0\nsocketio-chat-host=0.0.0.0\nredis-enable-apicache=0\ntestnet=1"
def _get_app_cfg_paths(appname, run_as_user):
import appdirs #installed earlier
cfg_path = os.path.join(
appdirs.user_data_dir(appauthor='Counterparty', appname=appname, roaming=True) \
if os.name == "nt" else ("%s/.config/%s" % (os.path.expanduser("~%s" % run_as_user), appname)),
"%s.conf" % appname.replace('-testnet', ''))
data_dir = os.path.dirname(cfg_path)
return (data_dir, cfg_path)
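#illustrative example (home directory path is an assumption): on Linux with
# run_as_user='xcp', _get_app_cfg_paths('counterpartyd-testnet', 'xcp') returns
# ('/home/xcp/.config/counterpartyd-testnet',
#  '/home/xcp/.config/counterpartyd-testnet/counterpartyd.conf')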
def do_prerun_checks():
#make sure this is running on a supported OS
if os.name not in ("nt", "posix"):
logging.error("Build script only supports Linux or Windows at this time")
sys.exit(1)
if os.name == "posix" and platform.dist()[0] != "Ubuntu":
logging.error("Non-Ubuntu install detected. Only Ubuntu Linux is supported at this time")
sys.exit(1)
#under *nix, script must be run as root
if os.name == "posix" and os.geteuid() != 0:
logging.error("This script must be run as root (use 'sudo' to run)")
sys.exit(1)
if os.name == "posix" and "SUDO_USER" not in os.environ:
logging.error("Please use `sudo` to run this script.")
#establish python version
global PYTHON3_VER
if os.name == "nt":
PYTHON3_VER = "3.3"
elif os.name == "posix" and platform.dist()[0] == "Ubuntu" and platform.linux_distribution()[1] == "12.04":
#ubuntu 12.04 -- python 3.3 will be installed in install_dependencies, as flask requires it
PYTHON3_VER = "3.3"
else:
allowed_vers = ["3.4", "3.3"]
for ver in allowed_vers:
if which("python%s" % ver):
PYTHON3_VER = ver
logging.info("Found Python version %s" % PYTHON3_VER)
break
else:
logging.error("Cannot find your Python version in your path. You need one of the following versions: %s"
% ', '.join(allowed_vers))
sys.exit(1)
def get_paths(with_counterblockd):
paths = {}
paths['sys_python_path'] = os.path.dirname(sys.executable)
paths['base_path'] = os.path.normpath(os.path.dirname(os.path.realpath(sys.argv[0])))
#^ the dir of where counterparty source was downloaded to
logging.debug("base path: '%s'" % paths['base_path'])
#find the location of the virtualenv command and make sure it exists
if os.name == "posix":
paths['virtualenv_path'] = "/usr/bin/virtualenv"
paths['virtualenv_args'] = "--system-site-packages --python=python%s" % PYTHON3_VER
elif os.name == "nt":
paths['virtualenv_path'] = os.path.join(paths['sys_python_path'], "Scripts", "virtualenv.exe")
paths['virtualenv_args'] = "--system-site-packages"
#^ we use system-site-packages here because we need to be able to import cx_freeze from the sys packages
#compose the rest of the paths...
paths['dist_path'] = os.path.join(paths['base_path'], "dist")
logging.debug("dist path: '%s'" % paths['dist_path'])
paths['env_path'] = os.path.join(paths['base_path'], "env") # home for the virtual environment
logging.debug("env path: '%s'" % paths['env_path'])
paths['bin_path'] = os.path.join(paths['base_path'], "bin")
logging.debug("bin path: '%s'" % paths['bin_path'])
#the pip executable that we'll be using does not exist yet, but it will, once we've created the virtualenv
paths['pip_path'] = os.path.join(paths['env_path'], "Scripts" if os.name == "nt" else "bin", "pip.exe" if os.name == "nt" else "pip")
paths['python_path'] = os.path.join(paths['env_path'], "Scripts" if os.name == "nt" else "bin", "python.exe" if os.name == "nt" else "python3")
#for now, counterblockd currently uses Python 2.7 due to gevent-socketio's lack of support for Python 3
#because of this, it needs its own virtual environment
if with_counterblockd:
paths['virtualenv_args.counterblockd'] = "--system-site-packages --python=python2.7"
paths['env_path.counterblockd'] = os.path.join(paths['base_path'], "env.counterblockd") # home for the virtual environment
paths['pip_path.counterblockd'] = os.path.join(paths['env_path.counterblockd'], "Scripts" if os.name == "nt" else "bin", "pip.exe" if os.name == "nt" else "pip")
paths['python_path.counterblockd'] = os.path.join(paths['env_path.counterblockd'], "Scripts" if os.name == "nt" else "bin", "python.exe" if os.name == "nt" else "python")
return paths
def checkout(branch, paths, run_as_user, with_counterblockd, is_update):
git_repo_clone("counterpartyd", "https://github.com/CounterpartyXCP/counterpartyd.git",
os.path.join(paths['dist_path'], "counterpartyd"), branch=branch, for_user=run_as_user)
if with_counterblockd:
git_repo_clone("counterblockd", "https://github.com/CounterpartyXCP/counterblockd.git",
os.path.join(paths['dist_path'], "counterblockd"), branch=branch, for_user=run_as_user)
if is_update: #update mode specified... update ourselves (counterpartyd_build) as well
git_repo_clone("counterpartyd_build", "https://github.com/CounterpartyXCP/counterpartyd_build.git",
paths['base_path'], branch=branch, for_user=run_as_user)
sys.path.insert(0, os.path.join(paths['dist_path'], "counterpartyd")) #can now import counterparty modules
def install_dependencies(paths, with_counterblockd, noninteractive):
if os.name == "posix" and platform.dist()[0] == "Ubuntu":
ubuntu_release = platform.linux_distribution()[1]
logging.info("UBUNTU LINUX %s: Installing Required Packages..." % ubuntu_release)
runcmd("apt-get -y update")
if ubuntu_release in ("14.04", "13.10"):
runcmd("apt-get -y install runit software-properties-common python-software-properties git-core wget cx-freeze \
python3 python3-setuptools python3-dev python3-pip build-essential python3-sphinx python-virtualenv libsqlite3-dev python3-apsw python3-zmq")
if with_counterblockd:
#counterblockd currently uses Python 2.7 due to gevent-socketio's lack of support for Python 3
runcmd("apt-get -y install python python-dev python-setuptools python-pip python-sphinx python-zmq libzmq3 libzmq3-dev libxml2-dev libxslt-dev zlib1g-dev libimage-exiftool-perl libevent-dev cython")
if noninteractive:
db_locally = 'y'
else:
while True:
db_locally = input("counterblockd: Run mongo and redis locally? (y/n): ")
if db_locally.lower() not in ('y', 'n'):
                            logging.error("Please enter 'y' or 'n'")
else:
break
if db_locally.lower() == 'y':
#install mongo-10gen (newer than what ubuntu has), pegged to a specific version
MONGO_VERSION = "2.6.4"
runcmd("apt-get -y remove mongodb mongodb-server") #remove ubuntu stock packages, if installed
runcmd("apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 7F0CEB10")
runcmd("/bin/bash -c \"echo 'deb http://downloads-distro.mongodb.org/repo/ubuntu-upstart dist 10gen' | sudo tee /etc/apt/sources.list.d/mongodb.list\"")
runcmd("apt-get update")
runcmd("apt-get -y install mongodb-org=%s mongodb-org-server=%s mongodb-org-shell=%s mongodb-org-mongos=%s mongodb-org-tools=%s" % (
MONGO_VERSION, MONGO_VERSION, MONGO_VERSION, MONGO_VERSION, MONGO_VERSION))
for p in ('mongodb-org', 'mongodb-org-server', 'mongodb-org-shell', 'mongodb-org-mongos', 'mongodb-org-tools'):
runcmd("echo \"%s hold\" | sudo dpkg --set-selections" % p)
#replace use of mongo init script with our runit version
runcmd("""bash -c "echo 'manual' > /etc/init/mongod.override" """)
runcmd("service mongod stop", abort_on_failure=False)
config_runit_for_service(paths['dist_path'], "mongod", manual_control=False)
#also install redis
runcmd("apt-get -y install redis-server")
elif ubuntu_release == "12.04":
#12.04 deps. 12.04 doesn't include python3-pip, so we need to use the workaround at http://stackoverflow.com/a/12262143
runcmd("apt-get -y install runit software-properties-common python-software-properties git-core wget cx-freeze \
python3 python3-setuptools python3-dev build-essential python3-sphinx python-virtualenv libsqlite3-dev")
#install python 3.3 (required for flask)
runcmd("add-apt-repository -y ppa:fkrull/deadsnakes")
runcmd("apt-get update; apt-get -y install python3.3 python3.3-dev")
runcmd("ln -sf /usr/bin/python3.3 /usr/bin/python3")
#now actually run the ez_setup.py script as pip3 is broken
runcmd("rm -f /tmp/ez_setup.py; curl -o /tmp/ez_setup.py https://bootstrap.pypa.io/ez_setup.py")
runcmd("python3 /tmp/ez_setup.py")
runcmd("easy_install-3.3 pip==1.4.1") #pip1.5 breaks things due to its use of wheel by default
#for some reason, it installs "pip" to /usr/local/bin, instead of "pip3"
runcmd("cp -a /usr/local/bin/pip /usr/local/bin/pip3")
##ASPW
#12.04 also has no python3-apsw module as well (unlike 13.10), so we need to do this one manually
# Ubuntu 12.04 (Precise) ships with sqlite3 version 3.7.9 - the apsw version needs to match that exactly
runcmd("pip3 install https://github.com/rogerbinns/apsw/zipball/f5bf9e5e7617bc7ff2a5b4e1ea7a978257e08c95#egg=apsw")
##LIBZMQ
#12.04 has no python3-zmq module
runcmd("apt-get -y install libzmq-dev")
runcmd("pip3 install pyzmq")
#also update virtualenv
runcmd("pip3 install virtualenv")
paths['virtualenv_path'] = "/usr/local/bin/virtualenv-3.3" #HACK
else:
logging.error("Unsupported Ubuntu version, please use 14.04 LTS, 13.10 or 12.04 LTS")
sys.exit(1)
#install sqlite utilities (not technically required, but nice to have)
runcmd("apt-get -y install sqlite sqlite3 libleveldb-dev")
#now that pip is installed, install necessary deps outside of the virtualenv (e.g. for this script)
runcmd("pip3 install appdirs==1.2.0")
elif os.name == 'nt':
logging.info("WINDOWS: Installing Required Packages...")
if not os.path.exists(os.path.join(paths['sys_python_path'], "Scripts", "easy_install.exe")):
#^ ez_setup.py doesn't seem to tolerate being run after it's already been installed... errors out
#run the ez_setup.py script to download and install easy_install.exe so we can grab virtualenv and more...
runcmd("pushd %%TEMP%% && %s %s && popd" % (sys.executable,
os.path.join(paths['dist_path'], "windows", "ez_setup.py")))
#now easy_install is installed, install virtualenv, and pip
runcmd("%s virtualenv==1.11.6 pip==1.4.1" % (os.path.join(paths['sys_python_path'], "Scripts", "easy_install.exe")))
#now that pip is installed, install necessary deps outside of the virtualenv (e.g. for this script)
runcmd("%s install appdirs==1.2.0" % (os.path.join(paths['sys_python_path'], "Scripts", "pip.exe")))
def create_virtualenv(paths, with_counterblockd):
def create_venv(env_path, pip_path, python_path, virtualenv_args, reqs_filename, delete_if_exists=True):
if paths['virtualenv_path'] is None or not os.path.exists(paths['virtualenv_path']):
logging.debug("ERROR: virtualenv missing (%s)" % (paths['virtualenv_path'],))
sys.exit(1)
if delete_if_exists and os.path.exists(env_path):
logging.warning("Deleting existing virtualenv...")
rmtree(env_path)
assert not os.path.exists(os.path.join(env_path, 'bin'))
logging.info("Creating virtualenv at '%s' ..." % env_path)
runcmd("%s %s %s" % (paths['virtualenv_path'], virtualenv_args, env_path))
#pip should now exist
if not os.path.exists(pip_path):
logging.error("pip does not exist at path '%s'" % pip_path)
sys.exit(1)
#install packages from manifest via pip
runcmd("%s install -r %s" % (pip_path, os.path.join(paths['dist_path'], reqs_filename)))
create_venv(paths['env_path'], paths['pip_path'], paths['python_path'], paths['virtualenv_args'],
os.path.join(paths['dist_path'], "counterpartyd", "pip-requirements.txt"))
if with_counterblockd: #as counterblockd uses python 2.x, it needs its own virtualenv
runcmd("rm -rf %s && mkdir -p %s" % (paths['env_path.counterblockd'], paths['env_path.counterblockd']))
create_venv(paths['env_path.counterblockd'], paths['pip_path.counterblockd'], paths['python_path.counterblockd'],
paths['virtualenv_args.counterblockd'],
os.path.join(paths['dist_path'], "counterblockd", "pip-requirements.txt"), delete_if_exists=False)
def setup_startup(paths, run_as_user, with_counterblockd, with_testnet, noninteractive):
if os.name == "posix":
runcmd("ln -sf %s/run.py /usr/local/bin/counterpartyd" % paths['base_path'])
if with_counterblockd:
#make a short script to launch counterblockd
f = open("/usr/local/bin/counterblockd", 'w')
f.write("#!/bin/sh\n%s/run.py counterblockd \"$@\"" % paths['base_path'])
f.close()
runcmd("chmod +x /usr/local/bin/counterblockd")
elif os.name == "nt":
#create a batch script
batch_contents = "echo off%sREM Launch counterpartyd (source build) under windows%s%s %s %%*" % (
os.linesep, os.linesep, os.path.join(paths['sys_python_path'], "python.exe"), os.path.join(paths['base_path'], "run.py"))
f = open("%s" % os.path.join(os.environ['WINDIR'], "counterpartyd.bat"), "w")
f.write(batch_contents)
f.close()
if noninteractive:
start_choice = 'y'
else:
while True:
start_choice = input("Start counterpartyd automatically on system startup? (y/n): ")
if start_choice.lower() not in ('y', 'n'):
                logging.error("Please enter 'y' or 'n'")
else:
break
if start_choice.lower() == 'n':
return
#windows - no automatic startup from the script itself (auto startup is used when installing from installer)
if os.name == "nt":
#add a shortcut to run.py to the startup group
import win32com.client
import ctypes.wintypes
        CSIDL_STARTUP = 7 # Startup folder
        SHGFP_TYPE_CURRENT = 0 # Want current, not default value
        buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
        ctypes.windll.shell32.SHGetFolderPathW(0, CSIDL_STARTUP, 0, SHGFP_TYPE_CURRENT, buf)
logging.debug("Installing startup shortcut(s) to '%s'" % buf.value)
ws = win32com.client.Dispatch("wscript.shell")
scut = ws.CreateShortcut(os.path.join(buf.value, 'run_counterpartyd.lnk'))
scut.TargetPath = '"c:/Python32/python.exe"'
scut.Arguments = os.path.join(paths['base_path'], 'run.py') + ' server'
scut.Save()
if with_testnet:
ws = win32com.client.Dispatch("wscript.shell")
            scut = ws.CreateShortcut(os.path.join(buf.value, 'run_counterpartyd_testnet.lnk'))
scut.TargetPath = '"c:/Python32/python.exe"'
            data_dir = _get_app_cfg_paths('counterpartyd-testnet', run_as_user)[0]
            scut.Arguments = os.path.join(paths['base_path'], 'run.py') \
                + (' --testnet --data-dir="%s" server' % data_dir)
scut.Save()
else:
logging.info("Setting up init scripts...")
assert run_as_user
user_homedir = os.path.expanduser("~" + run_as_user)
config_runit_for_service(paths['dist_path'], "counterpartyd", manual_control=True)
config_runit_for_service(paths['dist_path'], "counterpartyd-testnet", enabled=with_testnet, manual_control=True)
config_runit_for_service(paths['dist_path'], "counterblockd", enabled=with_counterblockd, manual_control=True)
config_runit_for_service(paths['dist_path'], "counterblockd-testnet", enabled=with_counterblockd and with_testnet, manual_control=True)
runcmd("sed -ri \"s/USER=xcpd/USER=%s/g\" /etc/service/counterpartyd/run" % run_as_user)
runcmd("sed -ri \"s/USER_HOME=\/home\/xcp/USER_HOME=%s/g\" /etc/service/counterpartyd/run" % user_homedir.replace('/', '\/'))
if with_testnet:
runcmd("sed -ri \"s/USER=xcpd/USER=%s/g\" /etc/service/counterpartyd-testnet/run" % run_as_user)
runcmd("sed -ri \"s/USER_HOME=\/home\/xcp/USER_HOME=%s/g\" /etc/service/counterpartyd-testnet/run" % user_homedir.replace('/', '\/'))
if with_counterblockd:
runcmd("sed -ri \"s/USER=xcpd/USER=%s/g\" /etc/service/counterblockd/run" % run_as_user)
runcmd("sed -ri \"s/USER_HOME=\/home\/xcp/USER_HOME=%s/g\" /etc/service/counterblockd/run" % user_homedir.replace('/', '\/'))
if with_counterblockd and with_testnet:
runcmd("sed -ri \"s/USER=xcpd/USER=%s/g\" /etc/service/counterblockd-testnet/run" % run_as_user)
runcmd("sed -ri \"s/USER_HOME=\/home\/xcp/USER_HOME=%s/g\" /etc/service/counterblockd-testnet/run" % user_homedir.replace('/', '\/'))
def create_default_datadir_and_config(paths, run_as_user, with_bootstrap_db, with_counterblockd, with_testnet):
def create_config(appname, default_config):
data_dir, cfg_path = _get_app_cfg_paths(appname, run_as_user)
if not os.path.exists(data_dir):
os.makedirs(data_dir)
config_missing = not os.path.exists(cfg_path)
if not os.path.exists(cfg_path):
logging.info("Creating new configuration file at: %s" % cfg_path)
#create a default config file
cfg = open(cfg_path, 'w')
cfg.write(default_config)
cfg.close()
if os.name != "nt": #set permissions on non-windows
uid = pwd.getpwnam(run_as_user).pw_uid
gid = grp.getgrnam(run_as_user).gr_gid
#set directory ownership
os.chown(data_dir, uid, gid)
#set proper file ownership and mode
os.chown(cfg_path, uid, gid)
os.chmod(cfg_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP) #660
else:
#on windows, open notepad to the config file to help out
import win32api
try:
win32api.WinExec('NOTEPAD.exe "%s"' % cfg_path)
except:
pass
logging.info("NOTE: %s config file has been created at '%s'" % (appname, cfg_path))
else:
logging.info("%s config file already exists at: '%s'" % (appname, cfg_path))
if appname in ("counterpartyd", "counterpartyd-testnet") and config_missing and with_bootstrap_db:
fetch_counterpartyd_bootstrap_db(data_dir, testnet=appname=="counterpartyd-testnet")
create_config('counterpartyd', DEFAULT_CONFIG)
if with_testnet:
create_config('counterpartyd-testnet', DEFAULT_CONFIG_TESTNET)
if with_counterblockd:
create_config('counterblockd', DEFAULT_CONFIG_COUNTERBLOCKD)
if with_testnet:
create_config('counterblockd-testnet', DEFAULT_CONFIG_COUNTERBLOCKD_TESTNET)
def usage():
print("SYNTAX: %s [-h] [--noninteractive] [--branch=AUTO|master|develop|etc] [--with-bootstrap-db] [--with-counterblockd] [--with-testnet] [--for-user=] [setup|update]" % sys.argv[0])
print("* The 'setup' command will setup and install counterpartyd as a source installation (including automated setup of its dependencies)")
print("* The 'update' command updates the git repo for both counterpartyd, counterpartyd_build, and counterblockd (if --with-counterblockd is specified)")
print("* 'setup' is chosen by default if neither the 'update' or 'setup' arguments are specified.")
print("* If you want to install counterblockd along with counterpartyd, specify the --with-counterblockd option")
def main():
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s|%(levelname)s: %(message)s')
do_prerun_checks()
run_as_user = None
if os.name != "nt":
run_as_user = os.environ["SUDO_USER"] #default value, can be overridden via the --for-user= switch
assert run_as_user
#parse any command line objects
command = None
    with_bootstrap_db = False #don't fetch the bootstrap DB by default
with_counterblockd = False
with_testnet = False
    noninteractive = False #interactive by default; --noninteractive enables headless operation
branch = "AUTO" #default
try:
opts, args = getopt.getopt(sys.argv[1:], "h",
["help", "with-bootstrap-db", "with-counterblockd", "with-testnet", "noninteractive", "for-user=", "branch="])
    except getopt.GetoptError as err:
        logging.error(str(err))
        usage()
        sys.exit(2)
mode = None
for o, a in opts:
if o in ("--with-bootstrap-db",):
with_bootstrap_db = True
elif o in ("--with-counterblockd",):
with_counterblockd = True
elif o in ("--with-testnet",):
with_testnet = True
elif o in ("--branch",):
branch = a
elif o in ("--for-user",):
assert os.name != "nt" #not supported
#allow overriding run_as_user
try: #make sure user exists
assert(a)
pwd.getpwnam(a)
except:
logging.error(("User '%s' does not appear to exist" % a) if a else 'You must specify a username')
sys.exit(2)
else:
run_as_user = a
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("--noninteractive"):
noninteractive = True
else:
assert False, "Unhandled or unimplemented switch or option"
if len(args) not in [0, 1]:
usage()
sys.exit(2)
if args and args[0] == "update":
command = "update"
else: #either setup specified explicitly, or no command specified
command = "setup"
assert command in ["update", "setup"]
paths = get_paths(with_counterblockd)
if command == "update": #auto update from git
logging.info("Updating relevant Counterparty repos")
checkout(branch, paths, run_as_user, with_counterblockd, command == "update")
else: #setup mode
assert command == "setup"
logging.info("Installing Counterparty from source%s..." % (
(" for user '%s'" % run_as_user) if os.name != "nt" else '',))
checkout(branch, paths, run_as_user, with_counterblockd, command == "update")
install_dependencies(paths, with_counterblockd, noninteractive)
create_virtualenv(paths, with_counterblockd)
setup_startup(paths, run_as_user, with_counterblockd, with_testnet, noninteractive)
create_default_datadir_and_config(paths, run_as_user, with_bootstrap_db, with_counterblockd, with_testnet)
logging.info("SETUP DONE. (It's time to kick ass, and chew bubblegum... and I'm all outta gum.)")
if __name__ == "__main__":
main()
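# Illustrative invocations (not part of the original script). "<this_script>" is a placeholder
# for whatever filename this installer is saved as; the flags shown are the ones parsed by
# getopt above, and 'xcpuser' is a hypothetical username. On non-Windows systems the script
# must be run via sudo, since run_as_user defaults to the SUDO_USER environment variable.
#
#   sudo python <this_script> --with-bootstrap-db --with-counterblockd --with-testnet setup
#   sudo python <this_script> --branch=develop --for-user=xcpuser update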
|
|
import os
from datetime import datetime
import pytz
from tempfile import NamedTemporaryFile
from uuid import uuid4
from urllib.parse import urlencode
from django.utils.deconstruct import deconstructible
from django.conf import settings
from django.core.files.storage import Storage
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from django_irods import icommands
from .icommands import Session, GLOBAL_SESSION, GLOBAL_ENVIRONMENT, SessionException, IRodsEnv
@deconstructible
class IrodsStorage(Storage):
def __init__(self, option=None):
if option == 'federated':
# resource should be saved in federated zone
self.set_fed_zone_session()
else:
self.session = GLOBAL_SESSION
self.environment = GLOBAL_ENVIRONMENT
icommands.ACTIVE_SESSION = self.session
@property
def getUniqueTmpPath(self):
# return a unique temporary path under IRODS_ROOT directory
return os.path.join(getattr(settings, 'IRODS_ROOT', '/tmp'), uuid4().hex)
@staticmethod
def get_absolute_path_query(path, parent=False):
"""
        Get an iquest query string built from the absolute path of the input path for the HydroShare iRODS data zone
        :param path: input path to be converted to an absolute path if needed
        :param parent: whether the query string should check COLL_PARENT_NAME rather than COLL_NAME
        :return: iquest query string built from the logical path of the input path
"""
        # iquest has a bug where it cannot handle collection names containing a single quote, as reported at
        # https://github.com/irods/irods/issues/4887. This is a workaround and can be removed after the bug is fixed
if "'" in path:
path = path.replace("'", "%")
qry_str = "COLL_PARENT_NAME like '{}'" if parent else "COLL_NAME like '{}'"
else:
qry_str = "COLL_PARENT_NAME = '{}'" if parent else "COLL_NAME = '{}'"
if os.path.isabs(path):
# iRODS federated logical path which is already absolute path
return qry_str.format(path)
return qry_str.format(os.path.join(settings.IRODS_HOME_COLLECTION, path))
def set_user_session(self, username=None, password=None, host=settings.IRODS_HOST,
port=settings.IRODS_PORT, def_res=None, zone=settings.IRODS_ZONE,
userid=0, sess_id=None):
homedir = "/" + zone + "/home/" + username
userEnv = IRodsEnv(
pk=userid,
host=host,
port=port,
def_res=def_res,
home_coll=homedir,
cwd=homedir,
username=username,
zone=zone,
auth=password,
irods_default_hash_scheme='MD5'
)
if sess_id is None:
self.session = Session(session_id=uuid4())
self.environment = self.session.create_environment(myEnv=userEnv)
else:
self.session = Session(session_id=sess_id)
if self.session.session_file_exists():
self.environment = userEnv
else:
self.environment = self.session.create_environment(myEnv=userEnv)
self.session.run('iinit', None, self.environment.auth)
icommands.ACTIVE_SESSION = self.session
# Set iRODS session to wwwHydroProxy for irods_storage input object for iRODS federated
# zone direct file operations
def set_fed_zone_session(self):
if settings.REMOTE_USE_IRODS:
self.set_user_session(username=settings.IRODS_USERNAME,
password=settings.IRODS_AUTH,
host=settings.IRODS_HOST,
port=settings.IRODS_PORT,
def_res=settings.HS_IRODS_USER_ZONE_DEF_RES,
zone=settings.IRODS_ZONE,
sess_id='federated_session')
def delete_user_session(self):
if self.session != GLOBAL_SESSION and self.session.session_file_exists():
self.session.delete_environment()
def download(self, name):
return self._open(name, mode='rb')
def getFile(self, src_name, dest_name):
self.session.run("iget", None, '-f', src_name, dest_name)
def runBagitRule(self, rule_name, input_path, input_resource):
"""
        run iRODS bagit rule which generates bag-related files without bundling
:param rule_name: the iRODS rule name to run
:param input_path: input parameter to the rule that indicates the collection path to
create bag for
:param input_resource: input parameter to the rule that indicates the default resource
to store generated bag files
:return: None
"""
# SessionException will be raised from run() in icommands.py
self.session.run("irule", None, '-F', rule_name, input_path, input_resource)
def zipup(self, in_name, out_name):
"""
run iRODS ibun command to generate zip file for the bag
:param in_name: input parameter to indicate the collection path to generate zip
:param out_name: the output zipped file name
:return: None
"""
self.session.run("imkdir", None, '-p', out_name.rsplit('/', 1)[0])
# SessionException will be raised from run() in icommands.py
self.session.run("ibun", None, '-cDzip', '-f', out_name, in_name)
def unzip(self, zip_file_path, unzipped_folder=''):
"""
run iRODS ibun command to unzip files into a new folder
:param zip_file_path: path of the zipped file to be unzipped
:param unzipped_folder: Optional defaults to the basename of zip_file_path when not
provided. The folder to unzip to.
:return: the folder files were unzipped to
"""
abs_path = os.path.dirname(zip_file_path)
if not unzipped_folder:
unzipped_folder = os.path.splitext(os.path.basename(zip_file_path))[0].strip()
unzipped_folder = self._get_nonexistant_path(os.path.join(abs_path, unzipped_folder))
# SessionException will be raised from run() in icommands.py
self.session.run("ibun", None, '-xDzip', zip_file_path, unzipped_folder)
return unzipped_folder
def _get_nonexistant_path(self, path):
if not self.exists(path):
return path
i = 1
new_path = "{}-{}".format(path, i)
while self.exists(new_path):
i += 1
new_path = "{}-{}".format(path, i)
return new_path
def setAVU(self, name, attName, attVal, attUnit=None):
"""
set AVU on resource collection - this is used for on-demand bagging by indicating
whether the resource has been modified via AVU pairs
Parameters:
:param
name: the resource collection name to set AVU.
attName: the attribute name to set
attVal: the attribute value to set
attUnit: the attribute Unit to set, default is None, but can be set to
indicate additional info
"""
# SessionException will be raised from run() in icommands.py
if attUnit:
self.session.run("imeta", None, 'set', '-C', name, attName, attVal, attUnit)
else:
self.session.run("imeta", None, 'set', '-C', name, attName, attVal)
    def getAVU(self, name, attName):
        """
        get the value of an AVU on a resource collection - this is used for on-demand bagging
        by checking whether the resource has been modified via AVU pairs
        Parameters:
        :param
        name: the resource collection name to get the AVU from.
        attName: the attribute name to look up
        :return: the attribute value, or None if the attribute does not exist
        """
# SessionException will be raised from run() in icommands.py
stdout = self.session.run("imeta", None, 'ls', '-C', name, attName)[0].split("\n")
ret_att = stdout[1].strip()
if ret_att == 'None': # queried attribute does not exist
return None
else:
vals = stdout[2].split(":")
return vals[1].strip()
def copyFiles(self, src_name, dest_name, ires=None):
"""
Parameters:
:param
src_name: the iRODS data-object or collection name to be copied from.
dest_name: the iRODS data-object or collection name to be copied to
        copyFiles() copies an irods data-object (file) or collection (directory)
to another data-object or collection
"""
if src_name and dest_name:
if '/' in dest_name:
splitstrs = dest_name.rsplit('/', 1)
if not self.exists(splitstrs[0]):
self.session.run("imkdir", None, '-p', splitstrs[0])
if ires:
self.session.run("icp", None, '-rf', '-R', ires, src_name, dest_name)
else:
self.session.run("icp", None, '-rf', src_name, dest_name)
return
def moveFile(self, src_name, dest_name):
"""
Parameters:
:param
src_name: the iRODS data-object or collection name to be moved from.
dest_name: the iRODS data-object or collection name to be moved to
moveFile() moves/renames an irods data-object (file) or collection
(directory) to another data-object or collection
"""
if src_name and dest_name:
if '/' in dest_name:
splitstrs = dest_name.rsplit('/', 1)
if not self.exists(splitstrs[0]):
self.session.run("imkdir", None, '-p', splitstrs[0])
self.session.run("imv", None, src_name, dest_name)
return
def saveFile(self, from_name, to_name, create_directory=False, data_type_str=''):
"""
Parameters:
:param
from_name: the temporary file name in local disk to be uploaded from.
to_name: the data object path in iRODS to be uploaded to
        create_directory: create directory as needed when set to True. Default is False
        data_type_str: optional iRODS data type string; when provided it is passed to iput via -D
        Note: if only a directory needs to be created without saving a file, from_name should be empty
        and to_name should have "/" as the last character
        """
if create_directory:
splitstrs = to_name.rsplit('/', 1)
self.session.run("imkdir", None, '-p', splitstrs[0])
if len(splitstrs) <= 1:
return
if from_name:
try:
if data_type_str:
self.session.run("iput", None, '-D', data_type_str, '-f', from_name, to_name)
else:
self.session.run("iput", None, '-f', from_name, to_name)
            except:
                # IRODS 4.0.2: sometimes iput fails on the first try; a second try seems to fix it.
                if data_type_str:
                    self.session.run("iput", None, '-D', data_type_str, '-f', from_name, to_name)
                else:
                    self.session.run("iput", None, '-f', from_name, to_name)
return
def _open(self, name, mode='rb'):
tmp = NamedTemporaryFile()
self.session.run("iget", None, '-f', name, tmp.name)
return tmp
def _save(self, name, content):
self.session.run("imkdir", None, '-p', name.rsplit('/', 1)[0])
with NamedTemporaryFile(delete=False) as f:
for chunk in content.chunks():
f.write(chunk)
f.flush()
f.close()
try:
self.session.run("iput", None, '-f', f.name, name)
except:
# IRODS 4.0.2, sometimes iput fails on the first try. A second try seems to fix it.
self.session.run("iput", None, '-f', f.name, name)
os.unlink(f.name)
return name
def delete(self, name):
self.session.run("irm", None, "-rf", name)
def exists(self, name):
try:
stdout = self.session.run("ils", None, name)[0]
return stdout != ""
except SessionException:
return False
def _list_files(self, path):
"""
internal method to only list data objects/files under path
:param path: iRODS collection/directory path
:return: ordered filename_list and filesize_list
"""
fname_list = []
fsize_list = []
# the query below returns name and size (separated in comma) of all data
# objects/files under the path collection/directory
qrystr = "select DATA_NAME, DATA_SIZE where DATA_REPL_STATUS != '0' " \
"AND {}".format(IrodsStorage.get_absolute_path_query(path))
stdout = self.session.run("iquest", None, "--no-page", "%s,%s",
qrystr)[0].split("\n")
for i in range(len(stdout)):
if not stdout[i] or "CAT_NO_ROWS_FOUND" in stdout[i]:
break
file_info = stdout[i].rsplit(',', 1)
fname_list.append(file_info[0])
fsize_list.append(file_info[1])
return fname_list, fsize_list
def _list_subdirs(self, path):
"""
internal method to only list sub-collections/sub-directories under path
:param path: iRODS collection/directory path
:return: sub-collection/directory name list
"""
subdir_list = []
# the query below returns name of all sub-collections/sub-directories
# under the path collection/directory
qrystr = "select COLL_NAME where {}".format(IrodsStorage.get_absolute_path_query(path, parent=True))
stdout = self.session.run("iquest", None, "--no-page", "%s",
qrystr)[0].split("\n")
for i in range(len(stdout)):
if not stdout[i] or "CAT_NO_ROWS_FOUND" in stdout[i]:
break
dirname = stdout[i]
# remove absolute path prefix to only show relative sub-dir name
idx = dirname.find(path)
if idx > 0:
dirname = dirname[idx + len(path) + 1:]
subdir_list.append(dirname)
return subdir_list
def listdir(self, path):
"""
return list of sub-collections/sub-directories, data objects/files and their sizes
:param path: iRODS collection/directory path
:return: (sub_directory_list, file_name_list, file_size_list)
"""
        # remove any trailing slashes; otherwise, iquest would fail
path = path.strip()
while path.endswith('/'):
path = path[:-1]
        # first check whether the path is an iRODS collection/directory; if not, raise
        # SessionException, otherwise proceed to get the files and sub-dirs under it
qrystr = "select COLL_NAME where {}".format(IrodsStorage.get_absolute_path_query(path))
stdout = self.session.run("iquest", None, "%s", qrystr)[0]
if "CAT_NO_ROWS_FOUND" in stdout:
raise SessionException(-1, '', 'folder {} does not exist'.format(path))
fname_list, fsize_list = self._list_files(path)
subdir_list = self._list_subdirs(path)
listing = (subdir_list, fname_list, fsize_list)
return listing
def size(self, name):
"""
return the size of the data object/file with file name being passed in
:param name: file name
:return: the size of the file
"""
file_info = name.rsplit('/', 1)
if len(file_info) < 2:
raise ValidationError('{} is not a valid file path to retrieve file size '
'from iRODS'.format(name))
coll_name = file_info[0]
file_name = file_info[1]
qrystr = "select DATA_SIZE where DATA_REPL_STATUS != '0' AND " \
"{} AND DATA_NAME = '{}'".format(IrodsStorage.get_absolute_path_query(coll_name), file_name)
stdout = self.session.run("iquest", None, "%s", qrystr)[0]
if "CAT_NO_ROWS_FOUND" in stdout:
raise ValidationError("{} cannot be found in iRODS to retrieve "
"file size".format(name))
return int(float(stdout))
def checksum(self, full_name, force_compute=True):
"""
Compute/Update checksum of file object and return the checksum
:param full_name: the data object name with full collection path in order to locate data object from current
working directory
:return: checksum of the file object
"""
# first force checksum (re)computation
if force_compute:
self.session.run("ichksum", None, "-f", full_name)
# retrieve checksum using iquest
# get data object name only from the full_name input parameter to be used by iquest
if '/' in full_name:
file_info = full_name.rsplit('/', 1)
coll_name_query = IrodsStorage.get_absolute_path_query(file_info[0])
obj_name = file_info[1]
else:
coll_name_query = "COLL_NAME = '{}'".format(settings.IRODS_HOME_COLLECTION)
obj_name = full_name
qrystr = "SELECT DATA_CHECKSUM WHERE {} AND DATA_NAME = '{}'".format(coll_name_query, obj_name)
stdout = self.session.run("iquest", None, "%s", qrystr)[0]
if "CAT_NO_ROWS_FOUND" in stdout:
raise ValidationError("{} cannot be found in iRODS to retrieve "
"checksum".format(obj_name))
# remove potential '\n' from stdout
checksum = stdout.strip('\n')
if not checksum:
if force_compute:
raise ValidationError("checksum for {} cannot be found in iRODS".format(obj_name))
# checksum hasn't been computed, so force the checksum computation
return self.checksum(full_name, force_compute=True)
return checksum
def url(self, name, url_download=False, zipped=False, aggregation=False):
reverse_url = reverse('rest_download', kwargs={'path': name})
query_params = {'url_download': url_download, "zipped": zipped, 'aggregation': aggregation}
return reverse_url + '?' + urlencode(query_params)
def get_available_name(self, name, max_length=None):
"""
Reject duplicate file names rather than renaming them.
"""
if self.exists(name):
raise ValidationError(str.format("File {} already exists.", name))
return name
def get_modified_time(self, name):
"""
        Return the last modified time (as a datetime in UTC timezone) of the file specified by name.
:param name: data object (file) name with full collection path in order to locate file from current
working directory
:return: last modified time of the file in UTC timezone
"""
if '/' in name:
file_info = name.rsplit('/', 1)
coll_name_query = IrodsStorage.get_absolute_path_query(file_info[0])
obj_name = file_info[1]
else:
coll_name_query = "COLL_NAME = '{}'".format(settings.IRODS_HOME_COLLECTION)
obj_name = name
qrystr = "SELECT DATA_MODIFY_TIME WHERE {} AND DATA_NAME = '{}'".format(coll_name_query, obj_name)
stdout = self.session.run("iquest", None, "%s", qrystr)[0]
if "CAT_NO_ROWS_FOUND" in stdout:
raise ValidationError("{} cannot be found in iRODS".format(name))
# remove potential '\n' from stdout
timestamp = float(stdout.split("\n", 1)[0])
utc_dt = datetime.fromtimestamp(timestamp, pytz.utc)
return utc_dt
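# Illustrative usage sketch (not part of the original module). The resource and file paths
# below are hypothetical; a federated-zone session would use IrodsStorage('federated') instead.
#
#   storage = IrodsStorage()
#   storage.saveFile('/tmp/local_copy.txt', 'myresource/data/contents/file.txt')
#   subdirs, fnames, fsizes = storage.listdir('myresource/data/contents')
#   if storage.exists('myresource/data/contents/file.txt'):
#       print(storage.size('myresource/data/contents/file.txt'))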
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import uuid
import mock
from oslo_config import fixture
from oslotest import base
from oslotest import mockpatch
from rally.common import db
from rally import plugins
from tests.unit import fakes
class DatabaseFixture(fixture.Config):
"""Create clean DB before starting test."""
def setUp(self):
super(DatabaseFixture, self).setUp()
db_url = os.environ.get("RALLY_UNITTEST_DB_URL", "sqlite://")
db.engine_reset()
self.conf.set_default("connection", db_url, group="database")
db.schema_cleanup()
db.schema_create()
class TestCase(base.BaseTestCase):
"""Test case base class for all unit tests."""
def setUp(self):
super(TestCase, self).setUp()
self.addCleanup(mock.patch.stopall)
plugins.load()
def _test_atomic_action_timer(self, atomic_actions, name):
action_duration = atomic_actions.get(name)
self.assertIsNotNone(action_duration)
self.assertIsInstance(action_duration, float)
def assertSequenceEqual(self, iterable_1, iterable_2, msg=None):
self.assertEqual(tuple(iterable_1), tuple(iterable_2), msg)
class DBTestCase(TestCase):
"""Base class for tests which use DB."""
def setUp(self):
super(DBTestCase, self).setUp()
self.useFixture(DatabaseFixture())
# TODO(boris-42): This should be moved to test.plugins.test module
# or similar
class ScenarioTestCase(TestCase):
"""Base class for Scenario tests using mocked self.clients."""
benchmark_utils = "rally.task.utils"
patch_benchmark_utils = True
def client_factory(self, client_type, version=None, admin=False):
"""Create a new client object."""
return mock.MagicMock(client_type=client_type,
version=version,
admin=admin)
def clients(self, client_type, version=None, admin=False):
"""Get a mocked client."""
key = (client_type, version, admin)
if key not in self._clients:
self._clients[key] = self.client_factory(client_type,
version=version,
admin=admin)
return self._clients[key]
def admin_clients(self, client_type, version=None):
"""Get a mocked admin client."""
return self.clients(client_type, version=version, admin=True)
def client_created(self, client_type, version=None, admin=False):
"""Determine if a client has been created.
This can be used to see if a scenario calls
'self.clients("foo")', without checking to see what was done
with the client object returned by that call.
"""
key = (client_type, version, admin)
return key in self._clients
def get_client_mocks(self):
base_path = "rally.plugins.openstack"
return [
mock.patch(
"%s.scenario.OpenStackScenario.clients" % base_path,
mock.Mock(side_effect=self.clients)),
mock.patch(
"%s.scenario.OpenStackScenario.admin_clients" % base_path,
mock.Mock(side_effect=self.admin_clients))
]
def setUp(self):
super(ScenarioTestCase, self).setUp()
if self.patch_benchmark_utils:
self.mock_resource_is = mockpatch.Patch(
self.benchmark_utils + ".resource_is")
self.mock_get_from_manager = mockpatch.Patch(
self.benchmark_utils + ".get_from_manager")
self.mock_wait_for = mockpatch.Patch(
self.benchmark_utils + ".wait_for")
self.mock_wait_for_delete = mockpatch.Patch(
self.benchmark_utils + ".wait_for_delete")
self.mock_wait_for_status = mockpatch.Patch(
self.benchmark_utils + ".wait_for_status")
self.useFixture(self.mock_resource_is)
self.useFixture(self.mock_get_from_manager)
self.useFixture(self.mock_wait_for)
self.useFixture(self.mock_wait_for_delete)
self.useFixture(self.mock_wait_for_status)
self.mock_sleep = mockpatch.Patch("time.sleep")
self.useFixture(self.mock_sleep)
self._clients = {}
self._client_mocks = self.get_client_mocks()
for patcher in self._client_mocks:
patcher.start()
self.context = get_test_context()
def tearDown(self):
for patcher in self._client_mocks:
patcher.stop()
super(ScenarioTestCase, self).tearDown()
class ContextClientAdapter(object):
def __init__(self, endpoint, test_case):
self.endpoint = endpoint
self.test_case = test_case
def mock_client(self, name, version=None):
admin = self.endpoint.startswith("admin")
client = self.test_case.clients(name, version=version, admin=admin)
if not isinstance(client.return_value, mock.Mock):
return client.return_value
if client.side_effect is not None:
# NOTE(pboldin): if a client has side_effects that means the
            # user wants some of the returned values overridden (look at
# the test_existing_users for instance)
return client()
return client
def __getattr__(self, name):
        # NOTE(pboldin): __getattr__ magic is called last, after the value
        # has been looked up in __dict__
return lambda version=None: self.mock_client(name, version)
class ContextTestCase(ScenarioTestCase):
def setUp(self):
super(ContextTestCase, self).setUp()
self._adapters = {}
def context_client(self, endpoint, api_info=None):
if endpoint not in self._adapters:
self._adapters[endpoint] = ContextClientAdapter(endpoint, self)
return self._adapters[endpoint]
def get_client_mocks(self):
return [
mock.patch(
"rally.osclients.Clients",
mock.Mock(side_effect=self.context_client))
]
class FakeClientsScenarioTestCase(ScenarioTestCase):
"""Base class for Scenario tests using fake (not mocked) self.clients."""
def client_factory(self, client_type, version=None, admin=False):
return getattr(self._fake_clients, client_type)()
def setUp(self):
super(FakeClientsScenarioTestCase, self).setUp()
self._fake_clients = fakes.FakeClients()
def get_test_context(**kwargs):
kwargs["task"] = {"uuid": str(uuid.uuid4())}
return kwargs
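# Illustrative usage sketch (not part of the original module): a scenario unit test would
# subclass ScenarioTestCase so that self.clients/self.admin_clients return per-key mocks.
# FooScenario and its create_foo() method are hypothetical names used only for illustration.
#
#   class FooScenarioTestCase(ScenarioTestCase):
#       def test_create_foo(self):
#           scenario = FooScenario(self.context)
#           scenario.create_foo()
#           self.assertTrue(self.client_created("nova"))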
|
|
# -*- coding: utf-8 -*-
"""
Django settings for on_admin project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('on_admin')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
# Apps specific for this project go here.
LOCAL_APPS = (
'on_admin.users', # custom users app
'on_admin.home',
'on_admin.dashboard',
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'on_admin.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
("""Andus""", '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///on_admin"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
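# For reference (a deployment assumption, not part of the original settings): the DATABASE_URL
# read by env.db() follows the dj-database-url format, e.g.
#   export DATABASE_URL=postgres://dbuser:dbpassword@localhost:5432/on_admin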
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Singapore'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'allauth.account.context_processors.account',
'allauth.socialaccount.context_processors.socialaccount',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
LOGIN_REDIRECT_URL = 'dashboard:redirect'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Your common stuff: Below this line define 3rd party library settings
EMAIL_USE_TLS = False
EMAIL_HOST = 'smtp.mailgun.org'
EMAIL_HOST_USER = '[email protected]'
EMAIL_HOST_PASSWORD = 'wolfen83'
EMAIL_PORT = 587
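# A hedged suggestion (not part of the original settings): credentials such as
# EMAIL_HOST_PASSWORD are usually pulled from the environment rather than hard-coded,
# e.g. with the same django-environ helper used above:
#
#   EMAIL_HOST_PASSWORD = env('DJANGO_EMAIL_HOST_PASSWORD', default='')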
|
|
import os
import sys
import copy
import Common
import KernelParameters
import AutoGemmParameters
import argparse
##############################################################################
# Make OpenCL Kernel String
##############################################################################
def makeOpenCLKernelString(kernel):
endLine = "\\n\"\n\""
####################################
# parameters valid?
  if not kernel.isValid():
return kernel.getName() + " invalid"
####################################
# initializations
kStr = ""
kStr += endLine
kStr += "/* %s */" % kernel.getName()
kStr += endLine
####################################
# Double precision pragma
prec = kernel.getName()[0].lower()
if prec == "d" or prec == "z":
kStr += endLine
kStr += "#pragma OPENCL EXTENSION cl_khr_fp64 : enable" + endLine
####################################
# kernel parameters
kStr += endLine
kStr += "/* kernel parameters */" + endLine
#if kernel.order == "clblasColumnMajor":
# kStr += "#define COLUMN_MAJOR 1" + endLine
#else:
# kStr += "#define COLUMN_MAJOR 0" + endLine
#if kernel.transA == "T":
# kStr += "#define TRANSPOSE_A 1" + endLine
#else:
# kStr += "#define TRANSPOSE_A 0" + endLine
#if kernel.transB == "T":
# kStr += "#define TRANSPOSE_B 1" + endLine
#else:
# kStr += "#define TRANSPOSE_B 0" + endLine
#kStr += "" + endLine
kStr += "#define WG_NUM_ROWS %d%s" % (kernel.workGroupNumRows, endLine )
kStr += "#define WG_NUM_COLS %d%s" % (kernel.workGroupNumCols, endLine )
kStr += "#define MICRO_TILE_NUM_ROWS %d%s" % (kernel.microTileNumRows, endLine )
kStr += "#define MICRO_TILE_NUM_COLS %d%s" % (kernel.microTileNumCols, endLine )
kStr += "#define MACRO_TILE_NUM_ROWS %s%s" % ((kernel.workGroupNumRows * kernel.microTileNumRows), endLine )
kStr += "#define MACRO_TILE_NUM_COLS %s%s" % ((kernel.workGroupNumCols * kernel.microTileNumCols), endLine )
kStr += "#define NUM_UNROLL_ITER %s%s" % (kernel.unroll, endLine )
kStr += "" + endLine
kStr += "#define LOCAL_ROW_PAD %s%s" % (kernel.localRowPad, endLine)
kStr += "#define LOCAL_COL_PAD %s%s" % (kernel.localColPad, endLine)
####################################
# global memory indices
# A
kStr += endLine
kStr += "/* global memory indices */" + endLine
if (kernel.order=="clblasColumnMajor")==(kernel.transA=="N"):
kStr += "#define GET_GLOBAL_INDEX_A(ROW,COL) ((COL)*lda+(ROW))" + endLine
else:
kStr += "#define GET_GLOBAL_INDEX_A(ROW,COL) ((ROW)*lda+(COL))" + endLine
# B
if (kernel.order=="clblasColumnMajor")==(kernel.transB=="N"):
kStr += "#define GET_GLOBAL_INDEX_B(ROW,COL) ((COL)*ldb+(ROW))" + endLine
else:
kStr += "#define GET_GLOBAL_INDEX_B(ROW,COL) ((ROW)*ldb+(COL))" + endLine
# C
if (kernel.order=="clblasColumnMajor"):
kStr += "#define GET_GLOBAL_INDEX_C(ROW,COL) ((COL)*ldc+(ROW))" + endLine
else:
kStr += "#define GET_GLOBAL_INDEX_C(ROW,COL) ((ROW)*ldc+(COL))" + endLine
####################################
# local memory indices
# A
kStr += endLine
kStr += "/* local memory indices */" + endLine
kStr += "#define GET_LOCAL_INDEX_A(ROW,COL) ((ROW) + (COL)*((MACRO_TILE_NUM_ROWS)+(LOCAL_COL_PAD)) )" + endLine
# B
kStr += "#define GET_LOCAL_INDEX_B(ROW,COL) ((COL) + (ROW)*((MACRO_TILE_NUM_COLS)+(LOCAL_ROW_PAD)) )" + endLine
####################################
# data types
kStr += endLine
kStr += "/* data types */" + endLine
kStr += "#define DATA_TYPE_STR %s%s" \
% (Common.openclDataType[kernel.precision], endLine)
if kernel.precision=="s" or kernel.precision=="d":
# real arithmetic
kStr += "#define TYPE_MAD(MULA,MULB,DST) DST = mad(MULA,MULB,DST);" + endLine
if kernel.beta==1:
kStr += "#define TYPE_MAD_WRITE(DST,ALPHA,REG,BETA) DST = (ALPHA)*(REG) + (BETA)*(DST);" + endLine
else:
kStr += "#define TYPE_MAD_WRITE(DST,ALPHA,REG,BETA) DST = (ALPHA)*(REG);" + endLine
else:
# complex arithmetic
if kernel.transA!="C" and kernel.transB!="C":
# neither conjugate
kStr += (
"#define TYPE_MAD(MULA,MULB,DST) \\\\" + endLine +
" DST.s0 = mad( MULA.s0, MULB.s0, DST.s0 ); \\\\" + endLine +
" DST.s0 = mad( -MULA.s1, MULB.s1, DST.s0 ); \\\\" + endLine +
" DST.s1 = mad( MULA.s0, MULB.s1, DST.s1 ); \\\\" + endLine +
" DST.s1 = mad( MULA.s1, MULB.s0, DST.s1 );" + endLine )
elif kernel.transA=="C" and kernel.transB!="C":
# A conjugate (negate imaginary A.s1)
kStr += (
"#define TYPE_MAD(MULA,MULB,DST) \\\\" + endLine +
" DST.s0 = mad( MULA.s0, MULB.s0, DST.s0 ); \\\\" + endLine +
" DST.s0 = mad( MULA.s1, MULB.s1, DST.s0 ); \\\\" + endLine +
" DST.s1 = mad( MULA.s0, MULB.s1, DST.s1 ); \\\\" + endLine +
" DST.s1 = mad( -MULA.s1, MULB.s0, DST.s1 );" + endLine )
elif kernel.transA!="C" and kernel.transB=="C":
# B conjugate (negate imaginary B.s1)
kStr += (
"#define TYPE_MAD(MULA,MULB,DST) \\\\" + endLine +
" DST.s0 = mad( MULA.s0, MULB.s0, DST.s0 ); \\\\" + endLine +
" DST.s0 = mad( -MULA.s1, -MULB.s1, DST.s0 ); \\\\" + endLine +
" DST.s1 = mad( MULA.s0, -MULB.s1, DST.s1 ); \\\\" + endLine +
" DST.s1 = mad( MULA.s1, MULB.s0, DST.s1 );" + endLine )
else:
# A & B conjugate (negate imaginary .s1)
kStr += (
"#define TYPE_MAD(MULA,MULB,DST) \\\\" + endLine +
" DST.s0 = mad( MULA.s0, MULB.s0, DST.s0 ); \\\\" + endLine +
" DST.s0 = mad( MULA.s1, -MULB.s1, DST.s0 ); \\\\" + endLine +
" DST.s1 = mad( MULA.s0, -MULB.s1, DST.s1 ); \\\\" + endLine +
" DST.s1 = mad( -MULA.s1, MULB.s0, DST.s1 );" + endLine )
if kernel.beta==1:
kStr += (
"#define TYPE_MAD_WRITE( DST, ALPHA, REG, BETA ) \\\\" + endLine +
" /* (1) */ \\\\" + endLine +
" type_mad_tmp = REG.s0; \\\\" + endLine +
" REG.s0 *= ALPHA.s0; \\\\" + endLine +
" REG.s0 = mad( -ALPHA.s1, REG.s1, REG.s0 ); \\\\" + endLine +
" REG.s1 *= ALPHA.s0; \\\\" + endLine +
" REG.s1 = mad( ALPHA.s1, type_mad_tmp, REG.s1 ); \\\\" + endLine +
" /* (2) */ \\\\" + endLine +
" REG.s0 = mad( BETA.s0, DST.s0, REG.s0 ); \\\\" + endLine +
" REG.s0 = mad( -BETA.s1, DST.s1, REG.s0 ); \\\\" + endLine +
" REG.s1 = mad( BETA.s1, DST.s0, REG.s1 ); \\\\" + endLine +
" REG.s1 = mad( BETA.s0, DST.s1, REG.s1 ); \\\\" + endLine +
" /* (3) */ \\\\" + endLine +
" DST = REG;" + endLine )
else:
kStr += (
"#define TYPE_MAD_WRITE( DST, ALPHA, REG, BETA ) \\\\" + endLine +
" /* (1) */ \\\\" + endLine +
" type_mad_tmp = REG.s0; \\\\" + endLine +
" REG.s0 *= ALPHA.s0; \\\\" + endLine +
" REG.s0 = mad( -ALPHA.s1, REG.s1, REG.s0 ); \\\\" + endLine +
" REG.s1 *= ALPHA.s0; \\\\" + endLine +
" REG.s1 = mad( ALPHA.s1, type_mad_tmp, REG.s1 ); \\\\" + endLine +
" DST = REG;" + endLine )
####################################
# micro-tile
kStr += endLine
kStr += "/* %dx%d micro-tile */%s" % (kernel.microTileNumRows, kernel.microTileNumCols, endLine)
kStr += "#define MICRO_TILE \\\\" + endLine
for a in range(0, int(kernel.microTileNumRows)):
kStr += " rA[%d] = localA[offA + %d*WG_NUM_ROWS]; \\\\%s" % (a, a, endLine)
for b in range(0, int(kernel.microTileNumCols)):
kStr += " rB[%d] = localB[offB + %d*WG_NUM_COLS]; \\\\%s" % (b, b, endLine)
kStr += " offA += (MACRO_TILE_NUM_ROWS+LOCAL_COL_PAD); \\\\" + endLine
kStr += " offB += (MACRO_TILE_NUM_COLS+LOCAL_ROW_PAD); \\\\" + endLine
for a in range(0, int(kernel.microTileNumRows)):
for b in range(0, int(kernel.microTileNumCols)):
kStr += " TYPE_MAD(rA[%d],rB[%d],rC[%d][%d]); \\\\%s" % (a, b, a, b, endLine)
kStr += " mem_fence(CLK_LOCAL_MEM_FENCE);" + endLine
kStr += endLine
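  # For illustration (an example, not emitted by this script verbatim): with
  # microTileNumRows == microTileNumCols == 2, the OpenCL text generated above expands,
  # once the C-string escaping is resolved, to roughly:
  #
  #   #define MICRO_TILE \
  #     rA[0] = localA[offA + 0*WG_NUM_ROWS]; \
  #     rA[1] = localA[offA + 1*WG_NUM_ROWS]; \
  #     rB[0] = localB[offB + 0*WG_NUM_COLS]; \
  #     rB[1] = localB[offB + 1*WG_NUM_COLS]; \
  #     offA += (MACRO_TILE_NUM_ROWS+LOCAL_COL_PAD); \
  #     offB += (MACRO_TILE_NUM_COLS+LOCAL_ROW_PAD); \
  #     TYPE_MAD(rA[0],rB[0],rC[0][0]); \
  #     TYPE_MAD(rA[0],rB[1],rC[0][1]); \
  #     TYPE_MAD(rA[1],rB[0],rC[1][0]); \
  #     TYPE_MAD(rA[1],rB[1],rC[1][1]); \
  #     mem_fence(CLK_LOCAL_MEM_FENCE);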
####################################
# function signature
####################################
kStr += "__attribute__((reqd_work_group_size(WG_NUM_COLS,WG_NUM_ROWS,1)))" + endLine
kStr += "__kernel void %s" % ( kernel.getName() )
kStr += "(" + endLine
# arguments
kStr += (
" __global DATA_TYPE_STR const * restrict A," + endLine +
" __global DATA_TYPE_STR const * restrict B," + endLine +
" __global DATA_TYPE_STR * C," + endLine +
" DATA_TYPE_STR const alpha," + endLine +
" DATA_TYPE_STR const beta," + endLine +
" uint const M," + endLine +
" uint const N," + endLine +
" uint const K," + endLine +
" uint const lda," + endLine +
" uint const ldb," + endLine +
" uint const ldc," + endLine +
" uint const offsetA," + endLine +
" uint const offsetB," + endLine +
" uint const offsetC" + endLine +
") {" + endLine )
####################################
# apply offsets
kStr += endLine
kStr += (
" /* apply offsets */" + endLine +
" A += offsetA;" + endLine +
" B += offsetB;" + endLine +
" C += offsetC;" + endLine )
####################################
# allocate registers
kStr += endLine
kStr += (
" /* allocate registers */" + endLine +
" DATA_TYPE_STR rC[MICRO_TILE_NUM_ROWS][MICRO_TILE_NUM_COLS] = { {0} };" + endLine +
" DATA_TYPE_STR rA[MICRO_TILE_NUM_ROWS];" + endLine +
" DATA_TYPE_STR rB[MICRO_TILE_NUM_COLS];" + endLine )
####################################
# allocate local memory
kStr += endLine
kStr += (
" /* allocate local memory */" + endLine +
" __local DATA_TYPE_STR localA[NUM_UNROLL_ITER*(MACRO_TILE_NUM_ROWS+LOCAL_COL_PAD)];" + endLine +
" __local DATA_TYPE_STR localB[NUM_UNROLL_ITER*(MACRO_TILE_NUM_COLS+LOCAL_ROW_PAD)];" + endLine )
####################################
# work item indices
kStr += endLine
kStr += " /* work item indices */" + endLine
if kernel.isRowKernel():
kStr += " uint groupRow = M / " + str(kernel.workGroupNumRows*kernel.microTileNumRows) + "; // last row" + endLine
else:
kStr += " uint groupRow = get_group_id(0);" + endLine
if kernel.isColKernel():
kStr += " uint groupCol = N / " + str(kernel.workGroupNumCols*kernel.microTileNumCols) + "; // last column" + endLine
else:
kStr += " uint groupCol = get_group_id(1);" + endLine
####################################
# z-order - TODO doesn't improve caching, only lowers occupancy
if False:
kStr += (
" // convert work-group order to z-order" + endLine +
" unsigned int morton = get_group_id(1) * get_num_groups(0) + get_group_id(0);" + endLine +
" groupRow = morton;" + endLine +
" groupCol = ( groupRow >> 1 );" + endLine +
" groupRow &= 0x55555555;" + endLine +
" groupCol &= 0x55555555;" + endLine +
" groupRow |= ( groupRow >> 1 );" + endLine +
" groupCol |= ( groupCol >> 1 );" + endLine +
" groupRow &= 0x33333333;" + endLine +
" groupCol &= 0x33333333;" + endLine +
" groupRow |= ( groupRow >> 2 );" + endLine +
" groupCol |= ( groupCol >> 2 );" + endLine +
" groupRow &= 0x0f0f0f0f;" + endLine +
" groupCol &= 0x0f0f0f0f;" + endLine +
" groupRow |= ( groupRow >> 4 );" + endLine +
" groupCol |= ( groupCol >> 4 );" + endLine +
" groupRow &= 0x00ff00ff;" + endLine +
" groupCol &= 0x00ff00ff;" + endLine +
" groupRow |= ( groupRow >> 8 );" + endLine +
" groupCol |= ( groupCol >> 8 );" + endLine +
" groupRow &= 0x0000ffff;" + endLine +
" groupCol &= 0x0000ffff;" + endLine + endLine
)
kStr += (
" uint localRow = get_local_id(0);" + endLine +
" uint localCol = get_local_id(1);" + endLine +
" uint localSerial = localRow + localCol*WG_NUM_ROWS;" + endLine )
####################################
# global indices being loaded
kStr += endLine
kStr += " /* global indices being loaded */" + endLine
if (kernel.order=="clblasColumnMajor")==(kernel.transA=="N"):
kStr += (
"#define globalARow(LID) (groupRow*MACRO_TILE_NUM_ROWS + (localSerial+(LID)*WG_NUM_ROWS*WG_NUM_COLS)%MACRO_TILE_NUM_ROWS)" + endLine +
"#define globalACol(LID) ((localSerial+(LID)*WG_NUM_ROWS*WG_NUM_COLS)/MACRO_TILE_NUM_ROWS)" + endLine )
else:
kStr += (
"#define globalARow(LID) (groupRow*MACRO_TILE_NUM_ROWS + (localSerial+(LID)*WG_NUM_ROWS*WG_NUM_COLS)/NUM_UNROLL_ITER)" + endLine +
"#define globalACol(LID) ((localSerial+(LID)*WG_NUM_ROWS*WG_NUM_COLS)%NUM_UNROLL_ITER)" + endLine )
if (kernel.order=="clblasColumnMajor")==(kernel.transB=="N"):
kStr += (
"#define globalBRow(LID) ((localSerial+(LID)*WG_NUM_ROWS*WG_NUM_COLS)%NUM_UNROLL_ITER)" + endLine +
"#define globalBCol(LID) (groupCol*MACRO_TILE_NUM_COLS + (localSerial+(LID)*WG_NUM_ROWS*WG_NUM_COLS)/NUM_UNROLL_ITER)" + endLine )
else:
kStr += (
"#define globalBRow(LID) ((localSerial+(LID)*WG_NUM_ROWS*WG_NUM_COLS)/MACRO_TILE_NUM_COLS)" + endLine +
"#define globalBCol(LID) (groupCol*MACRO_TILE_NUM_COLS + (localSerial+(LID)*WG_NUM_ROWS*WG_NUM_COLS)%MACRO_TILE_NUM_COLS)" + endLine )
#kStr += (
# " A += GET_GLOBAL_INDEX_A( globalARow, globalACol );" + endLine +
# " B += GET_GLOBAL_INDEX_B( globalBRow, globalBCol );" + endLine )
####################################
# loop over k
kStr += endLine
kStr += (
" /* loop over k */" + endLine +
" uint block_k = K / NUM_UNROLL_ITER;" + endLine +
" do {" + endLine )
####################################
# local indices being written
kStr += endLine
kStr += " /* local indices being written */" + endLine
if (kernel.order=="clblasColumnMajor")==(kernel.transA=="N"):
kStr += (
"#define localARow (localSerial % MACRO_TILE_NUM_ROWS)" + endLine +
"#define localACol (localSerial / MACRO_TILE_NUM_ROWS)" + endLine +
"#define localAStride (WG_NUM_ROWS*WG_NUM_COLS)" + endLine )
else:
kStr += (
"#define localARow (localSerial / NUM_UNROLL_ITER)" + endLine +
"#define localACol (localSerial % NUM_UNROLL_ITER)" + endLine +
"#define localAStride (WG_NUM_ROWS*WG_NUM_COLS/NUM_UNROLL_ITER)" + endLine )
if (kernel.order=="clblasColumnMajor")==(kernel.transB=="N"):
kStr += (
"#define localBRow ( localSerial % NUM_UNROLL_ITER )" + endLine +
"#define localBCol ( localSerial / NUM_UNROLL_ITER )" + endLine +
"#define localBStride (WG_NUM_ROWS*WG_NUM_COLS/NUM_UNROLL_ITER)" + endLine )
else:
kStr += (
"#define localBRow ( localSerial / MACRO_TILE_NUM_COLS )" + endLine +
"#define localBCol ( localSerial % MACRO_TILE_NUM_COLS )" + endLine +
"#define localBStride (WG_NUM_ROWS*WG_NUM_COLS)" + endLine )
kStr += (
" __local DATA_TYPE_STR *lA = localA + GET_LOCAL_INDEX_A(localARow, localACol);" + endLine +
" __local DATA_TYPE_STR *lB = localB + GET_LOCAL_INDEX_B(localBRow, localBCol);" + endLine +
" barrier(CLK_LOCAL_MEM_FENCE);" + endLine )
####################################
# load global -> local
# threads to do loading = (workGroupNumRows*workGroupNumCols)
# A elements to be loaded = workGroupNumRows*microTileNumRows*unroll
# B elements to be loaded = workGroupNumCols*microTileNumCols*unroll
kStr += endLine
kStr += " /* load global -> local */" + endLine
numALoads = (kernel.workGroupNumRows*kernel.microTileNumRows*kernel.unroll) \
// (kernel.workGroupNumRows*kernel.workGroupNumCols) # // -- integer divide
numALoadsR = (kernel.workGroupNumRows*kernel.microTileNumRows*kernel.unroll) \
% (kernel.workGroupNumRows*kernel.workGroupNumCols)
numBLoads = (kernel.workGroupNumCols*kernel.microTileNumCols*kernel.unroll) \
// (kernel.workGroupNumRows*kernel.workGroupNumCols) # // - integer divide
numBLoadsR = (kernel.workGroupNumCols*kernel.microTileNumCols*kernel.unroll) \
% (kernel.workGroupNumRows*kernel.workGroupNumCols)
# TODO - zeroString for real and complex
if kernel.precision == "c":
zeroString = "(float2)(0.f, 0.f)"
elif kernel.precision == "z":
zeroString = "(double2)(0.0, 0.0)"
else:
zeroString = "0.0"
for a in range(0, int(numALoads)):
kStr += " lA[ %d*localAStride ] = " % a
if kernel.isRowKernel():
kStr += "( globalARow(%d) >= M) ? %s : " % ( a, zeroString )
kStr += "A[ GET_GLOBAL_INDEX_A( globalARow(%d), globalACol(%d) ) ];%s" % (a, a, endLine)
if numALoadsR:
kStr += " if ( localSerial + " + str(numALoads) + "*WG_NUM_ROWS*WG_NUM_COLS < (WG_NUM_ROWS*MICRO_TILE_NUM_ROWS*NUM_UNROLL_ITER) ) {" + endLine
kStr += " lA[ %d*localAStride ] = " % numALoads
if kernel.isRowKernel():
kStr += "( globalARow(%d) >= M) ? %s : " % ( numALoads, zeroString )
kStr += "A[ GET_GLOBAL_INDEX_A( globalARow(%d), globalACol(%d) ) ];%s" % (numALoads, numALoads, endLine)
kStr += " }" + endLine
for b in range(0, int(numBLoads)):
kStr += " lB[ %d*localBStride ] = " % b
if kernel.isColKernel():
kStr += "( globalBCol(%d) >= N) ? %s : " % ( b, zeroString )
kStr += "B[ GET_GLOBAL_INDEX_B( globalBRow(%d), globalBCol(%d) ) ];%s" % (b, b, endLine)
if numBLoadsR:
kStr += " if ( localSerial + " + str(numBLoads) + "*WG_NUM_ROWS*WG_NUM_COLS < (WG_NUM_COLS*MICRO_TILE_NUM_COLS*NUM_UNROLL_ITER) ) {" + endLine
kStr += " lB[ %d*localBStride ] = " % numBLoads
if kernel.isColKernel():
kStr += "(globalBCol(%d) >= N) ? %s : " % ( numBLoads, zeroString )
kStr += "B[ GET_GLOBAL_INDEX_B( globalBRow(%d), globalBCol(%d) ) ];%s" % (numBLoads, numBLoads, endLine)
kStr += " }" + endLine
kStr += (
" barrier(CLK_LOCAL_MEM_FENCE);" + endLine +
" uint offA = localRow;" + endLine +
" uint offB = localCol;" + endLine )
####################################
# do mads
kStr += endLine
kStr += " /* do mads */" + endLine
for u in range(0, int(kernel.unroll)):
kStr += " MICRO_TILE" + endLine
####################################
# shift to next k block
kStr += endLine
kStr += " /* shift to next k block */" + endLine
if (kernel.order=="clblasColumnMajor")==(kernel.transA=="N"):
kStr += " A += lda*NUM_UNROLL_ITER;" + endLine
else:
kStr += " A += NUM_UNROLL_ITER;" + endLine
if (kernel.order=="clblasColumnMajor")==(kernel.transB=="N"):
kStr += " B += NUM_UNROLL_ITER;" + endLine
else:
kStr += " B += ldb*NUM_UNROLL_ITER;" + endLine
####################################
# end loop
kStr += endLine
kStr += " } while (--block_k > 0);" + endLine
kStr += endLine
####################################
# which global Cij index
kStr += endLine
kStr += " /* which global Cij index */" + endLine
kStr += " uint globalCRow = groupRow * MACRO_TILE_NUM_ROWS + localRow;" + endLine
kStr += " uint globalCCol = groupCol * MACRO_TILE_NUM_COLS + localCol;" + endLine
####################################
# write global Cij
kStr += endLine
kStr += " /* write global Cij */" + endLine
if kernel.precision=="c":
kStr += " float type_mad_tmp;" + endLine
if kernel.precision=="z":
kStr += " double type_mad_tmp;" + endLine
for a in range(0, int(kernel.microTileNumRows)):
for b in range(0, int(kernel.microTileNumCols)):
if kernel.isRowKernel():
kStr += " if (globalCRow+%d*WG_NUM_ROWS < M)" % a
if kernel.isColKernel():
kStr += " if (globalCCol+%d*WG_NUM_COLS < N)" % b
if kernel.isRowKernel() or kernel.isColKernel():
kStr += "{"
kStr += " TYPE_MAD_WRITE( C[ GET_GLOBAL_INDEX_C( globalCRow+%d*WG_NUM_ROWS, globalCCol+%d*WG_NUM_COLS) ], alpha, rC[%d][%d], beta )" % (a, b, a, b)
if kernel.isRowKernel() or kernel.isColKernel():
kStr += "}"
kStr += endLine
####################################
# end kernel
kStr += endLine
kStr += "}" + endLine
return kStr
##############################################################################
# Write OpenCL kernel to file
##############################################################################
def writeOpenCLKernelToFile(kernel):
kernelName = kernel.getName()
kernelString = makeOpenCLKernelString(kernel)
kernelFileName = Common.getKernelSourcePath() + kernelName +"_src.cpp"
kernelFile = open(kernelFileName, "w")
kernelFile.write( Common.getAutoGemmHeader() )
kernelFile.write("#ifndef KERNEL_" + kernelName.upper() + "_SRC_H\n")
kernelFile.write("#define KERNEL_" + kernelName.upper() + "_SRC_H\n")
kernelFile.write("\n")
kernelFile.write("const unsigned int %s_workGroupNumRows = %u;\n" % (kernel.getName(), kernel.workGroupNumRows ) )
kernelFile.write("const unsigned int %s_workGroupNumCols = %u;\n" % (kernel.getName(), kernel.workGroupNumCols ) )
kernelFile.write("const unsigned int %s_microTileNumRows = %u;\n" % (kernel.getName(), kernel.microTileNumRows ) )
kernelFile.write("const unsigned int %s_microTileNumCols = %u;\n" % (kernel.getName(), kernel.microTileNumCols ) )
kernelFile.write("const unsigned int %s_unroll = %u;\n" % (kernel.getName(), kernel.unroll) )
kernelFile.write("\n")
kernelFile.write("const char * const %s_src =\"" % (kernelName) )
kernelFile.write(kernelString)
kernelFile.write("\";\n")
kernelFile.write("\n")
kernelFile.write("#else\n")
# kernelFile.write("#pragma message(\"AutoGemmKernelSources.cpp: %s was overriden by user kernel.\")\n" % kernel.getName() )
kernelFile.write("#endif\n")
kernelFile.close()
##############################################################################
# Write OpenCL kernel to file
##############################################################################
def writeOpenCLKernels():
if not os.path.exists( Common.getKernelSourcePath() ):
os.makedirs( Common.getKernelSourcePath() )
if not os.path.exists( Common.getKernelBinaryPath() ):
os.makedirs( Common.getKernelBinaryPath() )
numKernels = 0
# for each precision
kernel = KernelParameters.KernelParameters()
for precision in AutoGemmParameters.precisions:
kernel.precision = precision
# valid tiles for this precision
tiles = AutoGemmParameters.getTilesForPrecision(precision)
    # for non-tile parameters
for order in AutoGemmParameters.orders:
kernel.order = order
for transA in AutoGemmParameters.transposes[precision]:
kernel.transA = transA
for transB in AutoGemmParameters.transposes[precision]:
kernel.transB = transB
for beta in AutoGemmParameters.betas:
kernel.beta = beta
# for tile parameters
for tile in tiles:
# tile kernel
kernel.useTile(tile)
writeOpenCLKernelToFile(kernel)
# row kernel
rowKernel = copy.copy(kernel)
rowKernel.macroTileNumRows = 1
writeOpenCLKernelToFile(rowKernel)
# col kernel
colKernel = copy.copy(kernel)
colKernel.macroTileNumCols = 1
writeOpenCLKernelToFile(colKernel)
# corner kernel
cornerKernel = copy.copy(kernel)
cornerKernel.macroTileNumRows = 1
cornerKernel.macroTileNumCols = 1
writeOpenCLKernelToFile(cornerKernel)
numKernels += 4
print("AutoGemm.py: generated %d kernels" % numKernels)
################################################################################
# Main
################################################################################
if __name__ == "__main__":
ap = argparse.ArgumentParser(description="KernelOpenCL")
ap.add_argument("precision", choices=["s","d","c","z"], help="precision" )
ap.add_argument("order", choices=["row","col"], help="order: row major or column major" )
ap.add_argument("transA", choices=["N","T", "C"], help="transA" )
ap.add_argument("transB", choices=["N","T", "C"], help="transB" )
ap.add_argument("beta", choices=[0, 1], type=int, help="0 for beta is zero, 1 for beta is non-zero" )
ap.add_argument("workGroupNumRows", type=int )
ap.add_argument("workGroupNumCols", type=int )
ap.add_argument("microTileNumRows", type=int )
ap.add_argument("microTileNumCols", type=int )
ap.add_argument("unroll", type=int, help="number of iterations to unroll the loop over k" )
ap.add_argument("outputPath", default=".", help="output path; %s will be appended to path" % Common.getRelativeKernelSourcePath() )
args = ap.parse_args()
kernel = KernelParameters.KernelParameters()
kernel.precision = args.precision
if args.order == "col":
kernel.order = "clblasColumnMajor"
else:
kernel.order = "clblasRowMajor"
kernel.transA = args.transA
kernel.transB = args.transB
kernel.beta = args.beta
kernel.workGroupNumRows = args.workGroupNumRows
kernel.workGroupNumCols = args.workGroupNumCols
kernel.microTileNumRows = args.microTileNumRows
kernel.microTileNumCols = args.microTileNumCols
kernel.unroll = args.unroll
Common.setOutputPath(args.outputPath)
kernel.macroTileNumRows = kernel.workGroupNumRows * kernel.microTileNumRows
kernel.macroTileNumCols = kernel.workGroupNumCols * kernel.microTileNumCols
if not os.path.exists( Common.getKernelSourcePath() ):
os.makedirs( Common.getKernelSourcePath() )
writeOpenCLKernelToFile(kernel)
kernelName = kernel.getName()
kernelFileName = Common.getKernelSourcePath() + kernelName +"_src.cpp"
print("kernel \"%s\" written to %s" % (kernelName, kernelFileName))
|
|
from copy import deepcopy
from typing import List, Callable, Mapping, Union, Optional
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from sklearn.base import RegressorMixin, BaseEstimator
from bartpy.data import Data
from bartpy.initializers.initializer import Initializer
from bartpy.initializers.sklearntreeinitializer import SklearnTreeInitializer
from bartpy.model import Model
from bartpy.samplers.leafnode import LeafNodeSampler
from bartpy.samplers.modelsampler import ModelSampler, Chain
from bartpy.samplers.schedule import SampleSchedule
from bartpy.samplers.sigma import SigmaSampler
from bartpy.samplers.treemutation import TreeMutationSampler
from bartpy.samplers.unconstrainedtree.treemutation import get_tree_sampler
from bartpy.sigma import Sigma
def run_chain(model: 'SklearnModel', X: np.ndarray, y: np.ndarray):
"""
Run a single chain for a model
Primarily used as a building block for constructing a parallel run of multiple chains
"""
model.model = model._construct_model(X, y)
return model.sampler.samples(model.model,
model.n_samples,
model.n_burn,
model.thin,
model.store_in_sample_predictions,
model.store_acceptance_trace)
def delayed_run_chain():
return run_chain
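# A minimal sketch (an assumption about how these helpers are wired together, not code from
# this module): SklearnModel.fit dispatches one run_chain call per chain through joblib,
# roughly like
#
#   extract = Parallel(n_jobs=-1)(
#       delayed(run_chain)(model, X, y) for _ in range(model.n_chains))
#
# and then concatenates the per-chain results with SklearnModel._combine_chains below.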
class SklearnModel(BaseEstimator, RegressorMixin):
"""
The main access point to building BART models in BartPy
Parameters
----------
n_trees: int
the number of trees to use, more trees will make a smoother fit, but slow training and fitting
n_chains: int
the number of independent chains to run
more chains will improve the quality of the samples, but will require more computation
sigma_a: float
shape parameter of the prior on sigma
sigma_b: float
scale parameter of the prior on sigma
n_samples: int
how many recorded samples to take
n_burn: int
how many samples to run without recording to reach convergence
thin: float
        proportion of samples to store (e.g. 0.1 keeps one in ten samples).
use this to save memory when running large models
p_grow: float
probability of choosing a grow mutation in tree mutation sampling
p_prune: float
probability of choosing a prune mutation in tree mutation sampling
alpha: float
prior parameter on tree structure
beta: float
prior parameter on tree structure
store_in_sample_predictions: bool
whether to store full prediction samples
set to False if you don't need in sample results - saves a lot of memory
store_acceptance_trace: bool
whether to store acceptance rates of the gibbs samples
        useful for diagnostics; it is cheap to keep unless you are very memory constrained
tree_sampler: TreeMutationSampler
Method of sampling used on trees
defaults to `bartpy.samplers.unconstrainedtree`
initializer: Initializer
Class that handles the initialization of tree structure and leaf values
n_jobs: int
how many cores to use when computing MCMC samples
set to `-1` to use all cores
"""
def __init__(self,
n_trees: int = 200,
n_chains: int = 4,
sigma_a: float = 0.001,
sigma_b: float = 0.001,
n_samples: int = 200,
n_burn: int = 200,
thin: float = 0.1,
alpha: float = 0.95,
beta: float = 2.,
store_in_sample_predictions: bool=False,
store_acceptance_trace: bool=False,
tree_sampler: TreeMutationSampler=get_tree_sampler(0.5, 0.5),
initializer: Optional[Initializer]=None,
n_jobs=-1):
self.n_trees = n_trees
self.n_chains = n_chains
self.sigma_a = sigma_a
self.sigma_b = sigma_b
self.n_burn = n_burn
self.n_samples = n_samples
self.p_grow = 0.5
self.p_prune = 0.5
self.alpha = alpha
self.beta = beta
self.thin = thin
self.n_jobs = n_jobs
self.store_in_sample_predictions = store_in_sample_predictions
self.store_acceptance_trace = store_acceptance_trace
self.columns = None
self.tree_sampler = tree_sampler
self.initializer = initializer
self.schedule = SampleSchedule(self.tree_sampler, LeafNodeSampler(), SigmaSampler())
self.sampler = ModelSampler(self.schedule)
self.sigma, self.data, self.model, self._prediction_samples, self._model_samples, self.extract = [None] * 6
def fit(self, X: Union[np.ndarray, pd.DataFrame], y: np.ndarray) -> 'SklearnModel':
"""
Learn the model based on training data
Parameters
----------
X: pd.DataFrame
training covariates
y: np.ndarray
training targets
Returns
-------
SklearnModel
self with trained parameter values
"""
self.model = self._construct_model(X, y)
self.extract = Parallel(n_jobs=self.n_jobs)(self.f_delayed_chains(X, y))
self.combined_chains = self._combine_chains(self.extract)
self._model_samples, self._prediction_samples = self.combined_chains["model"], self.combined_chains["in_sample_predictions"]
self._acceptance_trace = self.combined_chains["acceptance"]
return self
@staticmethod
def _combine_chains(extract: List[Chain]) -> Chain:
keys = list(extract[0].keys())
combined = {}
for key in keys:
combined[key] = np.concatenate([chain[key] for chain in extract], axis=0)
return combined
@staticmethod
def _convert_covariates_to_data(X: np.ndarray, y: np.ndarray) -> Data:
        if isinstance(X, pd.DataFrame):
            X = X.values
return Data(deepcopy(X), deepcopy(y), normalize=True)
def _construct_model(self, X: np.ndarray, y: np.ndarray) -> Model:
if len(X) == 0 or X.shape[1] == 0:
raise ValueError("Empty covariate matrix passed")
self.data = self._convert_covariates_to_data(X, y)
self.sigma = Sigma(self.sigma_a, self.sigma_b, self.data.y.normalizing_scale)
self.model = Model(self.data,
self.sigma,
n_trees=self.n_trees,
alpha=self.alpha,
beta=self.beta,
initializer=self.initializer)
return self.model
def f_delayed_chains(self, X: np.ndarray, y: np.ndarray):
"""
Access point for getting access to delayed methods for running chains
Useful for when you want to run multiple instances of the model in parallel
e.g. when calculating a null distribution for feature importance
Parameters
----------
X: np.ndarray
Covariate matrix
y: np.ndarray
Target array
Returns
-------
List[Callable[[], ChainExtract]]
"""
return [delayed(x)(self, X, y) for x in self.f_chains()]
def f_chains(self) -> List[Callable[[], Chain]]:
"""
List of methods to run MCMC chains
Useful for running multiple models in parallel
Returns
-------
List[Callable[[], Extract]]
List of method to run individual chains
Length of n_chains
"""
return [delayed_run_chain() for _ in range(self.n_chains)]
def predict(self, X: np.ndarray=None) -> np.ndarray:
"""
Predict the target corresponding to the provided covariate matrix
If X is None, will predict based on training covariates
Prediction is based on the mean of all samples
Parameters
----------
X: pd.DataFrame
covariates to predict from
Returns
-------
np.ndarray
predictions for the X covariates
"""
if X is None and self.store_in_sample_predictions:
return self.data.y.unnormalize_y(np.mean(self._prediction_samples, axis=0))
elif X is None and not self.store_in_sample_predictions:
raise ValueError(
"In sample predictions only possible if model.store_in_sample_predictions is `True`. Either set the parameter to True or pass a non-None X parameter")
else:
return self._out_of_sample_predict(X)
def residuals(self, X=None, y=None) -> np.ndarray:
"""
Array of error for each observation
Parameters
----------
X: np.ndarray
Covariate matrix
y: np.ndarray
Target array
Returns
-------
np.ndarray
Error for each observation
"""
if y is None:
return self.model.data.y.unnormalized_y - self.predict(X)
else:
return y - self.predict(X)
def l2_error(self, X=None, y=None) -> np.ndarray:
"""
Calculate the squared errors for each row in the covariate matrix
Parameters
----------
X: np.ndarray
Covariate matrix
y: np.ndarray
Target array
Returns
-------
np.ndarray
Squared error for each observation
"""
return np.square(self.residuals(X, y))
def rmse(self, X, y) -> float:
"""
        The root of the total squared error of the model,
        i.e. the square root of the sum of squared errors over all observations
Parameters
----------
X: np.ndarray
Covariate matrix
y: np.ndarray
Target array
Returns
-------
float
The total summed L2 error for the model
"""
return np.sqrt(np.sum(self.l2_error(X, y)))
def _out_of_sample_predict(self, X):
return self.data.y.unnormalize_y(np.mean([x.predict(X) for x in self._model_samples], axis=0))
def fit_predict(self, X, y):
self.fit(X, y)
if self.store_in_sample_predictions:
return self.predict()
else:
return self.predict(X)
@property
def model_samples(self) -> List[Model]:
"""
Array of the model as it was after each sample.
        Useful for:
- examining the state of trees, nodes and sigma throughout the sampling
- out of sample prediction
Returns None if the model hasn't been fit
Returns
-------
List[Model]
"""
return self._model_samples
@property
def acceptance_trace(self) -> List[Mapping[str, float]]:
"""
List of Mappings from variable name to acceptance rates
Each entry is the acceptance rate of the variable in each iteration of the model
Returns
-------
List[Mapping[str, float]]
"""
return self._acceptance_trace
@property
def prediction_samples(self) -> np.ndarray:
"""
Matrix of prediction samples at each point in sampling
Useful for assessing convergence, calculating point estimates etc.
Returns
-------
np.ndarray
prediction samples with dimensionality n_samples * n_points
"""
        return self._prediction_samples
def from_extract(self, extract: List[Chain], X: np.ndarray, y: np.ndarray) -> 'SklearnModel':
"""
Create a copy of the model using an extract
Useful for doing operations on extracts created in external processes like feature selection
Parameters
----------
extract: Extract
samples produced by delayed chain methods
X: np.ndarray
Covariate matrix
y: np.ndarray
Target variable
Returns
-------
SklearnModel
Copy of the current model with samples
"""
new_model = deepcopy(self)
combined_chain = self._combine_chains(extract)
        new_model._model_samples, new_model._prediction_samples = combined_chain["model"], combined_chain["in_sample_predictions"]
        new_model._acceptance_trace = combined_chain["acceptance"]
new_model.data = self._convert_covariates_to_data(X, y)
return new_model
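if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): fit a tiny BART model
    # on synthetic data and report its in-sample error. Hyperparameters are kept
    # deliberately small so the example finishes quickly; real use should rely on
    # the defaults documented above.
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(size=(100, 3))
    y_demo = 2.0 * X_demo[:, 0] - X_demo[:, 1] + rng.normal(scale=0.1, size=100)
    demo_model = SklearnModel(n_trees=5, n_chains=1, n_samples=20, n_burn=20, n_jobs=1)
    demo_model.fit(X_demo, y_demo)
    print("demo RMSE:", demo_model.rmse(X_demo, y_demo))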
|
|
#!/usr/bin/env python
import os
import logging
import mango.unittest
import scipy as sp
import numpy as np
import mango
import mango.data
import mango.optimize
import mango.mpi as mpi
from mango.application.cylinder_fit import *
logger, rootLogger = mpi.getLoggers(__name__)
if (mango.haveRestricted):
class CylinderFitGradientMetricTest(mango.unittest.TestCase):
"""
Test for the :class:`mango.application.cylinder_fit.CylinderFitGradientMetric`.
"""
def setUp(self):
pass
def testNonDistributedMetric(self):
if ((mpi.world == None) or (mpi.world.Get_size() == 1)):
img = mango.zeros((128,64,64), mtype="tomo")
img.md.setVoxelSize((1,1,1), "voxel")
c = img.origin + img.shape*0.5
r = np.min(img.shape)*0.4
a = (1,0,0)
mango.data.fill_circular_cylinder(img, c, r, img.shape[0]*1.1, fill=32000, coordsys="abs", unit="voxel")
metric = cylinder_fit_gradient_metric()
metric.setGradientFromImage(img)
x = metric.getDefaultX()
rootLogger.info("Default x = %s" % (x,))
x[metric.CENTRE_X_IDX] = c[2]
x[metric.CENTRE_Y_IDX] = c[1]
x[metric.CENTRE_Z_IDX] = c[0]
x[metric.AXIS_X_IDX] = a[2]
x[metric.AXIS_Y_IDX] = a[1]
x[metric.AXIS_Z_IDX] = a[0]
bestSoln = (0,0)
for rr in range(-5, 6):
x[metric.RADIUS_IDX] = r + rr
val = metric(x)
if (val > bestSoln[0]):
bestSoln = (val, x[metric.RADIUS_IDX])
rootLogger.info("metric[%s] = %16.2f" % (x, val))
self.assertEqual(bestSoln[1], r)
self.assertLess(0, bestSoln[0])
def testNonDistributedOptimization(self):
if ((mpi.world == None) or (mpi.world.Get_size() == 1)):
img = mango.zeros((128,64,64), mtype="tomo")
img.md.setVoxelSize((1,1,1), "voxel")
c = img.origin + img.shape*0.5
r = np.min(img.shape)*0.4
a = (1,0,0)
mango.data.fill_circular_cylinder(img, c, r, img.shape[0]*1.1, fill=32000, coordsys="abs", unit="voxel")
metric = cylinder_fit_gradient_metric()
metric.setGradientFromImage(img)
x = metric.getDefaultX()
rootLogger.info("Default x = %s" % (x,))
x[metric.RADIUS_IDX] = r
x[metric.CENTRE_X_IDX] = c[2]
x[metric.CENTRE_Y_IDX] = c[1]
x[metric.CENTRE_Z_IDX] = c[0]
x[metric.AXIS_X_IDX] = a[2]
x[metric.AXIS_Y_IDX] = a[1]
x[metric.AXIS_Z_IDX] = a[0]
solnX = x.copy()
bestRes = None
for rr in range(-5, 6):
x[metric.RADIUS_IDX] = r + rr
res = sp.optimize.minimize(lambda x: -sp.absolute(metric(x)), x[0:6], method="Powell", options={'xtol':1.0e-1, 'ftol':1.0e-2})
#res = sp.optimize.minimize(metric, x[0:6], jac=False, method="BFGS", options={'gtol':1.0e3, 'eps':(0.05,0.05,0.05,0.001,0.001,0.001)})
rootLogger.info("result.success = %s, result.message = %s" % (res.success, res.message))
rootLogger.info("optimized (metric_val, x) = (%16.2f, %s) = " % (res.fun, res.x))
rootLogger.info("res.nfev = %8d" % (res.nfev,))
if ((bestRes == None) or (res.fun < bestRes.fun)):
bestRes = res
bestRes.x[3:] /= sp.sqrt(sp.sum(bestRes.x[3:]*bestRes.x[3:]))
rootLogger.info("soln x = %s" % (solnX[0:bestRes.x.size],))
rootLogger.info("best x = %s" % (bestRes.x,))
self.assertTrue(sp.all(sp.absolute(bestRes.x - solnX[0:bestRes.x.size]) < 0.1))
self.assertGreater(0, bestRes.fun)
def testDistributedOptimization(self):
if ((mpi.size % 4) == 0):
mpidims = (0,2,2)
elif ((mpi.size % 2) == 0):
mpidims = (0,0,2)
else:
mpidims = (0,0,0)
img = mango.zeros((120+8*mpi.size,64,64), mtype="tomo", mpidims=mpidims)
img.md.setVoxelSize((1,1,1), "voxel")
c = img.origin + img.shape*0.5
r = np.min(img.shape)*0.4
a = (1,0,0)
mango.data.fill_circular_cylinder(img, c, r, img.shape[0]*1.1, fill=32000, coordsys="abs", unit="voxel")
metric = cylinder_fit_gradient_metric()
metric = DistributedGradCylFitMetric(metric, comm=img.mpi.comm, root=0)
metric.setGradientFromImage(img)
x = metric.getDefaultX()
rootLogger.info("Default x = %s" % (x,))
x[metric.RADIUS_IDX] = r
x[metric.CENTRE_X_IDX] = c[2]
x[metric.CENTRE_Y_IDX] = c[1]
x[metric.CENTRE_Z_IDX] = c[0]
x[metric.AXIS_X_IDX] = a[2]
x[metric.AXIS_Y_IDX] = a[1]
x[metric.AXIS_Z_IDX] = a[0]
solnX = x.copy()
bestRes = None
for rr in range(-5, 6):
x[metric.RADIUS_IDX] = r + rr
res = mango.optimize.distributed_minimize(metric, x[0:5], method="Powell", options={'xtol':1.0e-1, 'ftol':1.0e-2})
#res = sp.optimize.minimize(metric, x[0:6], jac=False, method="BFGS", options={'gtol':1.0e3, 'eps':(0.05,0.05,0.05,0.001,0.001,0.001)})
rootLogger.info("result.success = %s, result.message = %s" % (res.success, res.message))
rootLogger.info("optimized (metric_val, x) = (%16.2f, %s) = " % (res.fun, res.x))
rootLogger.info("res.nfev = %8d" % (res.nfev,))
if ((bestRes == None) or (res.fun < bestRes.fun)):
bestRes = res
#bestRes.x[3:] /= sp.sqrt(sp.sum(bestRes.x[3:]*bestRes.x[3:]))
rootLogger.info("soln x = %s" % (solnX[0:bestRes.x.size],))
rootLogger.info("best x = %s" % (bestRes.x,))
self.assertTrue(sp.all(sp.absolute(bestRes.x - solnX[0:bestRes.x.size]) < 0.1))
self.assertGreater(0, bestRes.fun)
class CylinderFitTest(mango.unittest.TestCase):
"""
Test for the :func:`mango.application.cylinder_fit.cylinder_fit` function.
"""
def setUp(self):
pass
def testDistributedMetricFit1(self):
dir = self.createTmpDir("CylinderFitTest_testDistributedMetricFit1")
#dir = "."
rootLogger.info("Generating synthetic image...")
#img = mango.zeros((128+((mpi.size//8)*16),64,64), mtype="tomo")
img = mango.zeros((256+((mpi.size//8)*16),128,128), mtype="tomo")
#img = mango.zeros((1024,512,512), mtype="tomo")
img.md.setVoxelSize((0.002,0.002,0.002), "mm")
c = (img.origin + img.shape*0.5)*img.md.getVoxelSize()
r = np.min(img.shape*img.md.getVoxelSize())*0.4
a = (1,0,0)
img.subd_h.asarray()[...] = 10000
mango.data.fill_circular_cylinder(img, c, r, img.shape[0]*1.1, fill=25000, coordsys="abs", unit=img.md.getVoxelSizeUnit())
img.subd.asarray()[...] += mango.data.gaussian_noise_like(img, mean=0, stdd=300).subd.asarray()
mango.io.writeDds(os.path.join(dir,"tomoCyl1.nc"), img)
rootLogger.info("Done generating synthetic image.")
fit, cylImg = cylinder_fit(img, 1, retcylimg=True, metricThreshRelTol=0.50)
mango.io.writeDds(os.path.join(dir,"segmentedCyl1AnnularMsk.nc"), cylImg)
rootLogger.info("Img voxel size = %s %s" % (img.md.getVoxelSize(), img.md.getVoxelSizeUnit()))
rootLogger.info("Cyl img voxel size = %s %s" % (cylImg.md.getVoxelSize(), cylImg.md.getVoxelSizeUnit()))
rootLogger.info("c=%s, r=%s, a=%s" % (c,r,a))
rootLogger.info("Fit Results:\n%s" % ("\n".join(map(str,fit)),))
f0 = fit[0]
self.assertAlmostEqual(c[1], f0[1][0][1], 3)
self.assertAlmostEqual(c[2], f0[1][0][2], 3)
self.assertAlmostEqual(r, f0[1][1], 3)
self.assertAlmostEqual(sp.absolute(sp.dot(a, f0[1][3])), 1.0, 3)
def _testDistributedMetricFit3(self):
dir = self.createTmpDir("CylinderFitTest_testDistributedMetricFit3")
#dir = "."
rootLogger.info("Generating synthetic image...")
#img = mango.zeros((128+((mpi.size//8)*16),64,64), mtype="tomo")
img = mango.zeros((256+((mpi.size//8)*16),128,128), mtype="tomo")
#img = mango.zeros((1024,512,512), mtype="tomo")
img.md.setVoxelSize((0.002,0.002,0.002), "mm")
c = (img.origin + img.shape*0.5)*img.md.getVoxelSize()
r = np.min(img.shape*img.md.getVoxelSize())*0.4
a = sp.array((1.,0.,0.))
cList = [c, c, c+(img.shape*0.02)*img.md.getVoxelSize()]
rList = [r, 0.8*r, 0.65*r]
aList = [a, a, sp.array([1.0, 0.01,0.02])]
fList = [20000, 15000, 18000]
img.subd_h.asarray()[...] = 10000
for i in range(len(cList)):
rMtx = mango.math.rotation_matrix_from_cross_prod(sp.array([1.,0.,0.]), aList[i])
mango.data.fill_circular_cylinder(img, cList[i], rList[i], img.shape[0]*1.25, fill=fList[i], rotation=rMtx, coordsys="abs", unit=img.md.getVoxelSizeUnit())
img.subd.asarray()[...] += mango.data.gaussian_noise_like(img, mean=0, stdd=300).subd.asarray()
mango.io.writeDds(os.path.join(dir, "tomoCyl3.nc"), img)
rootLogger.info("Done generating synthetic image.")
fit, cylImg = \
cylinder_fit(
img,
numcyl=3,
retcylimg=True,
distributedMetricEvaluation=True,
metricThreshRelTol=0.5
)
mango.io.writeDds(os.path.join(dir,"segmentedCyl3AnnularMsk.nc"), cylImg)
rootLogger.info("Voxel size = %s %s" % (img.md.getVoxelSize(), img.md.getVoxelSizeUnit()))
for i in range(len(cList)):
rootLogger.info("c%s=%s, r%s=%s, a%s=%s" % (i,cList[i],i,rList[i],i,aList[i]))
rootLogger.info("Fit Results:\n%s" % ("\n".join(map(str,fit)),))
def _testNonDistributedMetricFit3(self):
dir = self.createTmpDir("CylinderFitTest_testNonDistributedMetricFit3")
#dir = "."
rootLogger.info("Generating synthetic image...")
#img = mango.zeros((128+((mpi.size//8)*16),64,64), mtype="tomo")
img = mango.zeros((256+((mpi.size//8)*16),128,128), mtype="tomo")
#img = mango.zeros((1024,512,512), mtype="tomo")
img.md.setVoxelSize((0.002,0.002,0.002), "mm")
c = (img.origin + img.shape*0.5)*img.md.getVoxelSize()
r = np.min(img.shape*img.md.getVoxelSize())*0.4
a = sp.array((1.,0.,0.))
cList = [c, c, c+(img.shape*0.02)*img.md.getVoxelSize()]
rList = [r, 0.8*r, 0.65*r]
aList = [a, a, sp.array([1.0, 0.01,0.02])]
fList = [20000, 15000, 18000]
img.subd_h.asarray()[...] = 10000
for i in range(len(cList)):
rMtx = mango.math.rotation_matrix_from_cross_prod(sp.array([1.,0.,0.]), aList[i])
mango.data.fill_circular_cylinder(img, cList[i], rList[i], img.shape[0]*1.25, fill=fList[i], rotation=rMtx, coordsys="abs", unit=img.md.getVoxelSizeUnit())
img.subd.asarray()[...] += mango.data.gaussian_noise_like(img, mean=0, stdd=300).subd.asarray()
mango.io.writeDds(os.path.join(dir, "tomoCyl3.nc"), img)
rootLogger.info("Done generating synthetic image.")
fit, cylImg = \
cylinder_fit(
img,
numcyl=3,
retcylimg=True,
distributedMetricEvaluation=False,
metricThreshRelTol=0.5
)
mango.io.writeDds(os.path.join(dir,"segmentedCyl3AnnularMsk.nc"), cylImg)
rootLogger.info("Voxel size = %s %s" % (img.md.getVoxelSize(), img.md.getVoxelSizeUnit()))
for i in range(len(cList)):
rootLogger.info("c%s=%s, r%s=%s, a%s=%s" % (i,cList[i],i,rList[i],i,aList[i]))
rootLogger.info("Fit Results:\n%s" % ("\n".join(map(str,fit)),))
class CylinderFitMultiResTest(mango.unittest.TestCase):
"""
Test for the :func:`mango.application.cylinder_fit.cylinder_fit_multi_res` function.
"""
def setUp(self):
pass
def testMultiResFit1(self):
dir = self.createTmpDir("CylinderFitMultiResTest_testMultiResFit1")
#dir = "."
rootLogger.info("Generating synthetic image...")
#img = mango.zeros((128+((mpi.size//8)*16),64,64), mtype="tomo")
img = mango.zeros((512+((mpi.size//8)*16),256,256), mtype="tomo")
#img = mango.zeros((1024,512,512), mtype="tomo")
img.md.setVoxelSize((0.002,0.002,0.002), "mm")
c = (img.origin + img.shape*0.5)*img.md.getVoxelSize()
r = np.min(img.shape*img.md.getVoxelSize())*0.4
a = (1,0,0)
img.subd_h.asarray()[...] = 10000
mango.data.fill_circular_cylinder(img, c, r, img.shape[0]*1.1, fill=25000, coordsys="abs", unit=img.md.getVoxelSizeUnit())
img.subd.asarray()[...] += mango.data.gaussian_noise_like(img, mean=0, stdd=300).subd.asarray()
mango.io.writeDds(os.path.join(dir,"tomoCyl1.nc"), img)
rootLogger.info("Done generating synthetic image.")
fit, cylImg = cylinder_fit_multi_res(img, 1, resolutions=[64, 128, 512], retcylimg=True, metricThreshRelTol=0.50)
mango.io.writeDds(os.path.join(dir,"segmentedCyl1AnnularMsk.nc"), cylImg)
rootLogger.info("Img voxel size = %s %s" % (img.md.getVoxelSize(), img.md.getVoxelSizeUnit()))
rootLogger.info("Cyl img voxel size = %s %s" % (cylImg.md.getVoxelSize(), cylImg.md.getVoxelSizeUnit()))
rootLogger.info("c=%s, r=%s, a=%s" % (c,r,a))
rootLogger.info("Fit Results:\n%s" % ("\n".join(map(str,fit)),))
f0 = fit[0]
self.assertAlmostEqual(c[1], f0[1][0][1], 3)
self.assertAlmostEqual(c[2], f0[1][0][2], 3)
self.assertAlmostEqual(r, f0[1][1], 3)
self.assertAlmostEqual(sp.absolute(sp.dot(a, f0[1][3])), 1.0, 3)
def _testMultiResFit3(self):
dir = self.createTmpDir("CylinderFitMultiResTest_testMultiResFit3")
#dir = "."
rootLogger.info("Generating synthetic image...")
#img = mango.zeros((128+((world.size//8)*16),64,64), mtype="tomo")
            img = mango.zeros((256+((mpi.size//8)*16),128,128), mtype="tomo")
#img = mango.zeros((1024,512,512), mtype="tomo")
img.md.setVoxelSize((0.002,0.002,0.002), "mm")
c = (img.origin + img.shape*0.5)*img.md.getVoxelSize()
r = np.min(img.shape*img.md.getVoxelSize())*0.4
a = sp.array((1.,0.,0.))
cList = [c, c, c+(img.shape*0.02)*img.md.getVoxelSize()]
rList = [r, 0.8*r, 0.65*r]
aList = [a, a, sp.array([1.0, 0.01,0.02])]
fList = [20000, 15000, 18000]
img.subd_h.asarray()[...] = 10000
for i in range(len(cList)):
rMtx = mango.math.rotation_matrix_from_cross_prod(sp.array([1.,0.,0.]), aList[i])
mango.data.fill_circular_cylinder(img, cList[i], rList[i], img.shape[0]*1.25, fill=fList[i], rotation=rMtx, coordsys="abs", unit=img.md.getVoxelSizeUnit())
img.subd.asarray()[...] += mango.data.gaussian_noise_like(img, mean=0, stdd=300).subd.asarray()
mango.io.writeDds(os.path.join(dir, "tomoCyl3.nc"), img)
rootLogger.info("Done generating synthetic image.")
fit, cylImg = \
cylinder_fit_multi_res(
img,
3,
resolutions=[64, 128, 512],
retcylimg=True,
metricThreshRelTol=0.50
)
mango.io.writeDds(os.path.join(dir,"segmentedCyl3AnnularMsk.nc"), cylImg)
rootLogger.info("Voxel size = %s %s" % (img.md.getVoxelSize(), img.md.getVoxelSizeUnit()))
for i in range(len(cList)):
rootLogger.info("c%s=%s, r%s=%s, a%s=%s" % (i,cList[i],i,rList[i],i,aList[i]))
rootLogger.info("Fit Results:\n%s" % ("\n".join(map(str,fit)),))
if __name__ == "__main__":
mpi.initialiseLoggers(
[__name__, "mango.application"],
logLevel=logging.INFO
)
mango.setLoggingVerbosityLevel("high")
mango.unittest.main()
|
|
#!/usr/bin/env python3
'''
Icinga (Nagios) plugin that checks the total amount of current, concurrent
sessions on a Cisco ASA and evaluates them against 'warning' and 'critical' value
thresholds.
'''
__version__ = 'v0.2'
__author__ = '[email protected]'
import sys
import argparse
import subprocess
def ok(msg):
print('OK:', msg)
sys.exit(0)
def warning(msg):
print('WARNING:', msg)
sys.exit(1)
def critical(msg):
print('CRITICAL:', msg)
sys.exit(2)
def unknown(msg):
print('UNKNOWN:', msg)
sys.exit(3)
def error(msg):
print('ERROR:', msg)
sys.exit(3)
def check_asa_sessions(snmp_check_values):
try:
# Build snmpwalk command: get sessions
command_output_sessions = subprocess.check_output(
[
'snmpwalk', '-v', '2c', '-c',
snmp_check_values['snmp_community'],
snmp_check_values['snmp_host'],
snmp_check_values['snmp_oid_asa_sessions']
]
)
# Build snmpwalk command: get model/type
command_output_model = subprocess.check_output(
[
'snmpwalk', '-v', '2c', '-c',
snmp_check_values['snmp_community'],
snmp_check_values['snmp_host'],
snmp_check_values['snmp_oid_asa_model']
]
)
    except Exception:
msg = 'Something went wrong with subprocess command \'snmpwalk\''
msg += '\nIs the host ' + snmp_check_values['snmp_host'] + ' reachable?'
msg += '\nIs it configured to accept SNMP polls from this host?'
msg += '\nIs SNMP community string \'' + snmp_check_values['snmp_community'] + '\' valid?'
error(msg)
try:
# Parse command output: current concurrent sessions
current_asa_sessions = command_output_sessions.decode().split()[3]
# Parse command output: ASA model/type
asa_model = command_output_model.decode().split()[3].split('"')[1]
    except Exception:
msg = 'Something went wrong parsing data. Probably wrong SNMP OID for this device.'
error(msg)
# Use model/type to determine 'critical high' threshold automatically ..
# .. But only if no 'critical high' threshold was set manually
    if not snmp_check_values['high_threshold_set']:
        if asa_model in snmp_check_values:
            snmp_check_values['critical_high'] = snmp_check_values[asa_model]
        else:
            snmp_check_values['critical_high'] = snmp_check_values['UNKNOWN_MODEL']
# DEBUG OUTPUT
    if snmp_check_values['debug']:
print('\n // DEBUG: settings //\n')
for key, value in sorted(snmp_check_values.items()):
print(' {key:22} : {value:<}'.format(**locals()))
print()
print('\n // DEBUG: command output //\n')
print(' Raw data:\n ', command_output_sessions)
print(' Parsed to:\n ', current_asa_sessions)
print('\n Raw data:\n ', command_output_model)
print(' Parsed to:\n ', asa_model)
print()
# Default output message and .. | .. perfdata
msg = 'Current sessions: ' + current_asa_sessions + ' MODEL: ' + asa_model + ' | current_sessions=' + current_asa_sessions
# Evaluate thresholds and generate plugin status message for Icinga
if int(current_asa_sessions) <= snmp_check_values['critical_low']:
critical(msg)
if snmp_check_values['critical_low'] < int(current_asa_sessions) <= snmp_check_values['warning_low']:
warning(msg)
if snmp_check_values['warning_low'] < int(current_asa_sessions) < snmp_check_values['warning_high']:
ok(msg)
if snmp_check_values['warning_high'] <= int(current_asa_sessions) < snmp_check_values['critical_high']:
warning(msg)
if int(current_asa_sessions) >= snmp_check_values['critical_high']:
critical(msg)
def main():
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Icinga (Nagios) plugin that checks the total amount of current, concurrent \
sessions on a Cisco ASA and evaluates them against \'warning\' and \'critical\' value \
thresholds.',
epilog='Written in Python 3.'
)
parser.add_argument('--version', action='version', version=__version__)
parser.add_argument('--debug', action='store_true', help='debug output')
parser.add_argument('SNMP_COMMUNITY', type=str, help='the SNMP community string of the remote device')
parser.add_argument('HOST', type=str, help='the IP of the remote host you want to check')
parser.add_argument('-w', '--warning', type=int, help='set high warning threshold')
parser.add_argument('-c', '--critical', type=int, help='set high critical threshold')
parser.add_argument('-wl', type=int, help='set low warning threshold')
parser.add_argument('-cl', type=int, help='set low critical threshold')
args = parser.parse_args()
# Required values
snmp_check_values = {
'snmp_community' : args.SNMP_COMMUNITY,
'snmp_host' : args.HOST,
'snmp_oid_asa_model' : '.1.3.6.1.2.1.47.1.1.1.1.13.1',
'snmp_oid_asa_sessions' : '.1.3.6.1.4.1.9.9.147.1.2.2.2.1.5.40.6',
'warning_low' : -1, # Default: don't whine about low values
'warning_high' : 50000,
'critical_low' : -2, # Default: don't whine about low values
'critical_high' : 100000,
'high_threshold_set' : False,
'debug' : False,
'ASA5505' : 10000,
'ASA5510' : 50000,
'ASA5512' : 280000,
'ASA5520' : 280000,
'ASA5540' : 400000,
'ASA5550' : 650000,
'UNKNOWN_MODEL' : 800000
}
# Any thresholds set?
    if args.wl is not None:
        snmp_check_values['warning_low'] = args.wl
    if args.cl is not None:
        snmp_check_values['critical_low'] = args.cl
    if args.warning is not None:
        snmp_check_values['warning_high'] = args.warning
    if args.critical is not None:
        snmp_check_values['critical_high'] = args.critical
# If 'critical high' is manually set now, don't use default thresholds later
snmp_check_values['high_threshold_set'] = True
# Debug mode enabled?
if args.debug:
snmp_check_values['debug'] = True
# Do it. Do it nau.
check_asa_sessions(snmp_check_values)
if __name__ == '__main__':
main()
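# Example invocation (hypothetical script name, community string and host):
#   ./check_asa_sessions.py public 192.0.2.1 -w 40000 -c 80000
# This polls the ASA at 192.0.2.1 with SNMP community 'public' and returns
# WARNING at or above 40000 and CRITICAL at or above 80000 concurrent sessions.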
# Copyright (c) 2014, [email protected]
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
from django.conf import settings
from nose.tools import eq_
from pyquery import PyQuery as pq
from tidings.models import Watch
from kitsune.flagit.models import FlaggedObject
from kitsune.kbadge.tests import AwardFactory, BadgeFactory
from kitsune.questions.events import QuestionReplyEvent
from kitsune.questions.tests import QuestionFactory
from kitsune.sumo.tests import get
from kitsune.sumo.urlresolvers import reverse
from kitsune.users.models import Profile
from kitsune.users.tests import TestCaseBase, UserFactory, add_permission
from kitsune.wiki.tests import RevisionFactory
class EditProfileTests(TestCaseBase):
    def test_edit_my_profile(self):
user = UserFactory()
url = reverse("users.edit_my_profile")
self.client.login(username=user.username, password="testpass")
data = {
"username": "jonh_doe",
"name": "John Doe",
"public_email": True,
"bio": "my bio",
"website": "http://google.com/",
"twitter": "",
"community_mozilla_org": "",
"people_mozilla_org": "",
"matrix_handle": "johndoe",
"timezone": "America/New_York",
"country": "US",
"city": "Disney World",
"locale": "en-US",
}
response = self.client.post(url, data)
eq_(302, response.status_code)
profile = Profile.objects.get(user=user)
for key in data:
if key not in ["timezone", "username"]:
assert data[key] == getattr(profile, key), "%r != %r (for key '%s')" % (
data[key],
getattr(profile, key),
key,
)
eq_(data["timezone"], profile.timezone.zone)
eq_(data["username"], profile.user.username)
def test_user_cant_edit_others_profile_without_permission(self):
u1 = UserFactory()
url = reverse("users.edit_profile", args=[u1.username])
u2 = UserFactory()
self.client.login(username=u2.username, password="testpass")
# Try GET
r = self.client.get(url)
eq_(403, r.status_code)
# Try POST
r = self.client.post(url, {})
eq_(403, r.status_code)
def test_user_can_edit_others_profile_with_permission(self):
user1 = UserFactory()
url = reverse("users.edit_profile", args=[user1.username])
user2 = UserFactory()
add_permission(user2, Profile, "change_profile")
self.client.login(username=user2.username, password="testpass")
# Try GET
resp = self.client.get(url)
eq_(200, resp.status_code)
# Try POST
data = {
"username": "jonh_doe",
"name": "John Doe",
"public_email": True,
"bio": "my bio",
"website": "http://google.com/",
"twitter": "",
"community_mozilla_org": "",
"people_mozilla_org": "",
"matrix_handle": "johndoe",
"timezone": "America/New_York",
"country": "US",
"city": "Disney World",
"locale": "en-US",
}
resp = self.client.post(url, data)
eq_(302, resp.status_code)
profile = Profile.objects.get(user=user1)
for key in data:
if key not in ["timezone", "username"]:
assert data[key] == getattr(profile, key), "%r != %r (for key '%s')" % (
data[key],
getattr(profile, key),
key,
)
eq_(data["timezone"], profile.timezone.zone)
eq_(data["username"], profile.user.username)
class ViewProfileTests(TestCaseBase):
def setUp(self):
self.u = UserFactory(profile__name="", profile__website="")
self.profile = self.u.profile
    def test_view_profile(self):
r = self.client.get(reverse("users.profile", args=[self.u.username]))
eq_(200, r.status_code)
doc = pq(r.content)
eq_(0, doc("#edit-profile-link").length)
eq_(self.u.username, doc("h2.user").text())
# No name set => no optional fields.
eq_(0, doc(".contact").length)
# Check canonical url
eq_(
"%s/en-US/user/%s" % (settings.CANONICAL_URL, self.u.username),
doc('link[rel="canonical"]')[0].attrib["href"],
)
def test_view_profile_mine(self):
"""Logged in, on my profile, I see an edit link."""
self.client.login(username=self.u.username, password="testpass")
r = self.client.get(reverse("users.profile", args=[self.u.username]))
eq_(200, r.status_code)
doc = pq(r.content)
eq_(
1,
len(doc(f"#user-nav li a[href='{reverse('users.edit_my_profile', locale='en-US')}']")),
)
self.client.logout()
def test_bio_links_nofollow(self):
self.profile.bio = "http://getseo.com, [http://getseo.com]"
self.profile.save()
r = self.client.get(reverse("users.profile", args=[self.u.username]))
eq_(200, r.status_code)
doc = pq(r.content)
eq_(2, len(doc('.bio a[rel="nofollow"]')))
def test_bio_links_no_img(self):
# bug 1427813
self.profile.bio = '<p>my dude image <img src="https://www.example.com/the-dude.jpg"></p>'
self.profile.save()
r = self.client.get(reverse("users.profile", args=[self.u.username]))
eq_(200, r.status_code)
assert b"<p>my dude image </p>" in r.content
def test_num_documents(self):
"""Verify the number of documents contributed by user."""
u = UserFactory()
RevisionFactory(creator=u)
RevisionFactory(creator=u)
r = self.client.get(reverse("users.profile", args=[u.username]))
eq_(200, r.status_code)
assert b"2 documents" in r.content
def test_deactivate_button(self):
"""Check that the deactivate button is shown appropriately"""
u = UserFactory()
r = self.client.get(reverse("users.profile", args=[u.username]))
assert b"Deactivate this user" not in r.content
add_permission(self.u, Profile, "deactivate_users")
self.client.login(username=self.u.username, password="testpass")
r = self.client.get(reverse("users.profile", args=[u.username]))
assert b"Deactivate this user" in r.content
u.is_active = False
u.save()
r = self.client.get(reverse("users.profile", args=[u.username]))
assert b"This user has been deactivated." in r.content
r = self.client.get(reverse("users.profile", args=[self.u.username]))
assert b"Deactivate this user" not in r.content
def test_badges_listed(self):
"""Verify that awarded badges appear on the profile page."""
badge_title = "awesomesauce badge"
b = BadgeFactory(title=badge_title)
u = UserFactory()
AwardFactory(user=u, badge=b)
r = self.client.get(reverse("users.profile", args=[u.username]))
assert badge_title.encode() in r.content
class FlagProfileTests(TestCaseBase):
    def test_flagged_and_deleted_profile(self):
u = UserFactory()
p = u.profile
flag_user = UserFactory()
# Flag a profile and delete it
f = FlaggedObject(content_object=p, reason="spam", creator_id=flag_user.id)
f.save()
p.delete()
# Verify flagit queue
u = UserFactory()
add_permission(u, FlaggedObject, "can_moderate")
self.client.login(username=u.username, password="testpass")
response = get(self.client, "flagit.queue")
eq_(200, response.status_code)
doc = pq(response.content)
eq_(1, len(doc("#flagged-queue form.update")))
class EditWatchListTests(TestCaseBase):
"""Test manage watch list"""
def setUp(self):
self.user = UserFactory()
self.client.login(username=self.user.username, password="testpass")
self.question = QuestionFactory(creator=self.user)
QuestionReplyEvent.notify(self.user, self.question)
def test_GET(self):
r = self.client.get(reverse("users.edit_watch_list"))
eq_(200, r.status_code)
assert "question: " + self.question.title in r.content.decode("utf8")
def test_POST(self):
w = Watch.objects.get(object_id=self.question.id, user=self.user)
eq_(w.is_active, True)
self.client.post(reverse("users.edit_watch_list"))
w = Watch.objects.get(object_id=self.question.id, user=self.user)
eq_(w.is_active, False)
self.client.post(reverse("users.edit_watch_list"), {"watch_%s" % w.id: "1"})
w = Watch.objects.get(object_id=self.question.id, user=self.user)
eq_(w.is_active, True)
|
|
import argparse
import os
import shutil
import sys
import psutil  # needed by cpuStats() below
import nltk
from collections import defaultdict
from tqdm import tqdm
import gc
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data.dataloader import DataLoader
from dpp_nets.utils.language import Vocabulary, BeerDataset, simple_collate, custom_collate_reinforce
from dpp_nets.layers.layers import ChunkTrainerRelReinforce
parser = argparse.ArgumentParser(description='REINFORCE Trainer')
parser.add_argument('-a', '--aspect', type=str, choices=['aspect1', 'aspect2', 'aspect3', 'all', 'short'],
help='what is the target?', required=True)
parser.add_argument('-m', '--mode', type=str, choices=['words', 'chunks', 'sents'],
help='what is the mode?', required=True)
parser.add_argument('-b', '--batch-size', default=100, type=int,
metavar='N', help='mini-batch size (default: 100)')
parser.add_argument( '--start-epoch', default=0, type=int,
metavar='N', help='start epoch')
parser.add_argument('--epochs', default=20, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--lr', '--learning_rate', default=1e-4, type=float,
metavar='', help='initial learning rate')
parser.add_argument('--reg', type=float, required=True,
metavar='reg', help='regularization constant')
parser.add_argument('--reg_mean', type=float, required=True,
metavar='reg_mean', help='regularization_mean')
parser.add_argument('-r', '--remote', type=int,
help='training locally or on cluster?', required=True)
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--resume', type=str, default=None, metavar='S',
help='specify model name')
parser.add_argument('-e', '--euler', type=str, default=None, metavar='S',
help='specify model name')
parser.add_argument('-ai', '--alpha_iter', type=int, default=2, metavar='S',
help='alpha_iter')
def main():
global vocab, args
lowest_loss = 100 # arbitrary high number as upper bound for loss
# Check for GPU
args = parser.parse_args()
args.cuda = torch.cuda.is_available()
# Set Seed
torch.manual_seed(args.seed)
if args.cuda:
torch.cuda.manual_seed(args.seed)
# Set-up data
if args.remote:
train_path = '/home/paulusm/data/beer_reviews/' + 'reviews.' + args.aspect + '.train.' + args.mode + '.txt.gz'
val_path = '/home/paulusm/data/beer_reviews/' + 'reviews.' + args.aspect + '.heldout.' + args.mode + '.txt.gz'
embd_path = '/home/paulusm/data/beer_reviews/' + 'review+wiki.filtered.200.txt.gz'
word_path = '/home/paulusm/data/beer_reviews/' + 'reviews.' + 'all' + '.train.' + 'words.txt.gz'
if args.euler:
train_path = '/cluster' + train_path
val_path = '/cluster' + val_path
embd_path = '/cluster' + embd_path
word_path = '/cluster' + word_path
else:
train_path = '/Users/Max/data/beer_reviews/' + 'reviews.' + args.aspect + '.train.' + args.mode + '.txt.gz'
val_path = '/Users/Max/data/beer_reviews/' + 'reviews.' + args.aspect + '.heldout.' + args.mode + '.txt.gz'
embd_path = '/Users/Max/data/beer_reviews/' + 'review+wiki.filtered.200.txt.gz'
word_path = '/Users/Max/data/beer_reviews/' + 'reviews.' + 'all' + '.train.' + 'words.txt.gz'
# Set-up vocabulary
vocab = Vocabulary()
vocab.loadPretrained(embd_path)
vocab.setStops()
vocab.loadCorpus(word_path)
vocab.updateEmbedding()
vocab.setCuda(args.cuda)
# Set up datasets and -loader
train_set = BeerDataset(train_path)
val_set = BeerDataset(val_path)
kwargs = {'num_workers': 0, 'pin_memory': False} if args.cuda else {}
if args.cuda and args.mode == 'chunks':
args.batch_size = 100
train_loader = torch.utils.data.DataLoader(train_set, collate_fn=simple_collate, batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(val_set, collate_fn=simple_collate, batch_size=args.batch_size, **kwargs)
# Network parameters
EMBD_DIM = 200
KERNEL_DIM = 200
HIDDEN_DIM = 500
ENC_DIM = 200
TARGET_DIM = 3 if args.aspect in set(['all', 'short']) else 1
ALPHA_ITER = args.alpha_iter
# Conventional trainer
    # 'sents' mode currently uses the same trainer configuration as the other modes.
    trainer = ChunkTrainerRelReinforce(EMBD_DIM, HIDDEN_DIM, KERNEL_DIM, ENC_DIM, TARGET_DIM, ALPHA_ITER)
trainer.activation = nn.Sigmoid()
trainer.reg = args.reg
trainer.reg_mean = args.reg_mean
if args.cuda:
trainer.cuda()
print("created trainer")
# Set-up optimizer
params = [{'params': vocab.EmbeddingBag.parameters()}, {'params': trainer.parameters()}]
optimizer = torch.optim.Adam(params, lr=args.lr)
# Set-up Savers
train_loss, train_pred_loss, train_reg_loss = defaultdict(list), defaultdict(list), defaultdict(list)
val_loss, val_pred_loss, val_reg_loss = defaultdict(list), defaultdict(list), defaultdict(list)
# optionally resume from a checkpoint
if args.resume:
if args.remote:
check_path = '/home/paulusm/checkpoints/beer_reviews/' + args.resume
if args.euler:
check_path = '/cluster' + check_path
else:
check_path = '/Users/Max/checkpoints/beer_reviews/' + args.resume
if os.path.isfile(check_path):
print("=> loading checkpoint '{}'".format(check_path))
if args.cuda:
checkpoint = torch.load(check_path)
else:
checkpoint = torch.load(check_path, map_location=lambda storage, loc: storage)
args.start_epoch = len(checkpoint['train_loss'])
#lowest_loss = checkpoint['lowest_loss']
trainer.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
vocab.EmbeddingBag.load_state_dict(checkpoint['embedding'])
train_loss, train_pred_loss, train_reg_loss = checkpoint['train_loss'], checkpoint['train_pred_loss'], checkpoint['train_reg_loss'],
val_loss, val_pred_loss, val_reg_loss = checkpoint['val_loss'], checkpoint['val_pred_loss'], checkpoint['val_reg_loss'],
            if not (args.reg == checkpoint['reg'] and args.reg_mean == checkpoint['reg_mean'] and args.mode == checkpoint['mode']):
                print('Warning: checkpoint reg/reg_mean/mode settings differ from the current arguments')
            if args.mode != checkpoint['mode']:
                print('You confused mode')
                assert args.mode == checkpoint['mode']
else:
print("=> no checkpoint found at '{}'".format(args.resume))
### Loop
for epoch in range(args.start_epoch, args.start_epoch + args.epochs):
# adjust_learning_rate(optimizer, epoch)
print('started training')
loss, pred_loss, reg_loss = train(train_loader, trainer, optimizer)
train_loss[epoch].append(loss)
train_pred_loss[epoch].append(pred_loss)
train_reg_loss[epoch].append(reg_loss)
print('started validation')
loss, pred_loss, reg_loss = validate(val_loader, trainer)
val_loss[epoch].append(loss)
val_pred_loss[epoch].append(pred_loss)
val_reg_loss[epoch].append(reg_loss)
is_best = pred_loss < lowest_loss
lowest_loss = min(pred_loss, lowest_loss)
checkpoint = {'train_loss': train_loss,
'train_pred_loss': train_pred_loss,
'train_reg_loss': train_reg_loss,
'val_loss': val_loss,
'val_pred_loss': val_pred_loss,
'val_reg_loss': val_reg_loss,
'embedding': vocab.EmbeddingBag.state_dict(),
'model': trainer.state_dict(),
'optimizer': optimizer.state_dict(),
'model_type': type(trainer),
'mode': args.mode,
'seed': args.seed,
'aspect': args.aspect,
'reg': args.reg,
'reg_mean': args.reg_mean,
'lowest_loss': lowest_loss}
save_checkpoint(checkpoint, is_best)
print("saved a checkpoint")
print('*'*20, 'SUCCESS','*'*20)
def memReport():
for obj in gc.get_objects():
if torch.is_tensor(obj) or (hasattr(obj, 'data') and torch.is_tensor(obj.data)):
print(type(obj), obj.size())
def cpuStats():
print(sys.version)
print(psutil.cpu_percent())
print(psutil.virtual_memory()) # physical memory usage
pid = os.getpid()
py = psutil.Process(pid)
memoryUse = py.memory_info()[0] / 2. ** 30 # memory use in GB...I think
print('memory GB:', memoryUse)
def train(loader, trainer, optimizer):
trainer.train()
total_loss = 0.0
total_pred_loss = 0.0
total_reg_loss = 0.0
exceptions = 0
for i, batch in tqdm(enumerate(loader, 1)):
reviews, target = custom_collate_reinforce(batch, vocab, args.alpha_iter, args.cuda)
loss = trainer(reviews, target)
optimizer.zero_grad()
try:
loss.backward()
optimizer.step()
        except Exception:
            print('An error occurred in this batch. Batch was ignored.')
            exceptions += 1
loss, pred_loss, reg_loss = trainer.loss.data[0], trainer.pred_loss.data[0], trainer.reg_loss.data[0]
delta = loss - total_loss
total_loss += (delta / i)
delta = pred_loss - total_pred_loss
total_pred_loss += (delta / i)
delta = reg_loss - total_reg_loss
total_reg_loss += (delta / i)
print("trained one batch")
gc.collect()
# cpuStats()
# memReport()
print('No of exceptions in this epoch:', exceptions)
return total_loss, total_pred_loss, total_reg_loss
def validate(loader, trainer):
trainer.eval()
total_loss = 0.0
total_pred_loss = 0.0
total_reg_loss = 0.0
for i, batch in enumerate(loader, 1):
review, target = custom_collate_reinforce(batch, vocab, args.alpha_iter, args.cuda)
trainer(review, target)
loss = trainer.loss.data[0]
pred_loss = trainer.pred_loss.data[0]
reg_loss = trainer.reg_loss.data[0]
delta = loss - total_loss
total_loss += (delta / i)
delta = pred_loss - total_pred_loss
total_pred_loss += (delta / i)
delta = reg_loss - total_reg_loss
total_reg_loss += (delta / i)
# print("validated one batch")
return total_loss, total_pred_loss, total_reg_loss
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR multiplied by factor 0.1 for every 10 epochs"""
if not ((epoch + 1) % 10):
factor = 0.1
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * factor
def save_checkpoint(state, is_best, filename='reinforce_chunk_checkpoint.pth.tar'):
"""
State is a dictionary that cotains valuable information to be saved.
"""
if args.remote:
destination = '/home/paulusm/checkpoints/beer_reviews/reinforce/' + str(args.aspect) + str(args.mode) + str(args.seed) + 'reg' + str(args.reg) + 'reg_mean' + str(args.reg_mean) + 'lr' + str(args.lr) + str(args.alpha_iter) + 'reinforce_ckp.pth.tar'
if args.euler:
destination = '/cluster' + destination
else:
destination = '/Users/Max/checkpoints/beer_reviews/reinforce/' + str(args.aspect) + str(args.mode) + str(args.seed) + 'reg' + str(args.reg) + 'reg_mean' + str(args.reg_mean) + 'lr' + str(args.lr) + str(args.alpha_iter) + 'reinforce_ckp.pth.tar'
torch.save(state, destination)
if is_best:
if args.remote:
best_destination = '/home/paulusm/checkpoints/beer_reviews/reinforce/' + str(args.aspect) + str(args.mode) + str(args.seed) + 'reg' + str(args.reg) + 'reg_mean' + str(args.reg_mean) + 'lr' + str(args.lr) + str(args.alpha_iter) + 'reinforce_best_ckp.pth.tar'
if args.euler:
best_destination = '/cluster' + best_destination
else:
best_destination = '/Users/Max/checkpoints/beer_reviews/reinforce/' + str(args.aspect) + str(args.mode) + str(args.seed) + 'reg' + str(args.reg) + 'reg_mean' + str(args.reg_mean) + 'lr' + str(args.lr) + str(args.alpha_iter) + 'reinforce_best_ckp.pth.tar'
shutil.copyfile(destination, best_destination)
if __name__ == '__main__':
main()
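# Example invocation (hypothetical script name; the data directories above are hard-coded):
#   python train_reinforce.py -a aspect1 -m words -r 0 --reg 0.1 --reg_mean 0.2
# '-r 0' selects the local (non-cluster) data paths; '--reg' and '--reg_mean' are the
# required regularization constants, and '-a'/'-m' pick the target aspect and input mode.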
|
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Qubole operator"""
import re
from typing import FrozenSet, Iterable, Optional
from airflow.contrib.hooks.qubole_hook import (
COMMAND_ARGS, HYPHEN_ARGS, POSITIONAL_ARGS, QuboleHook, flatten_list,
)
from airflow.hooks.base_hook import BaseHook
from airflow.models import BaseOperator, BaseOperatorLink
from airflow.models.taskinstance import TaskInstance
from airflow.utils.decorators import apply_defaults
class QDSLink(BaseOperatorLink):
"""Link to QDS"""
name = 'Go to QDS'
def get_link(self, operator, dttm):
"""
Get link to qubole command result page.
:param operator: operator
:param dttm: datetime
:return: url link
"""
ti = TaskInstance(task=operator, execution_date=dttm)
conn = BaseHook.get_connection(
getattr(operator, "qubole_conn_id", None) or operator.kwargs['qubole_conn_id'])
if conn and conn.host:
host = re.sub(r'api$', 'v2/analyze?command_id=', conn.host)
else:
host = 'https://api.qubole.com/v2/analyze?command_id='
qds_command_id = ti.xcom_pull(task_ids=operator.task_id, key='qbol_cmd_id')
url = host + str(qds_command_id) if qds_command_id else ''
return url
class QuboleOperator(BaseOperator):
"""
Execute tasks (commands) on QDS (https://qubole.com).
:param qubole_conn_id: Connection id which consists of qds auth_token
:type qubole_conn_id: str
kwargs:
:command_type: type of command to be executed, e.g. hivecmd, shellcmd, hadoopcmd
:tags: array of tags to be assigned with the command
:cluster_label: cluster label on which the command will be executed
:name: name to be given to command
:notify: whether to send email on command completion or not (default is False)
**Arguments specific to command types**
hivecmd:
:query: inline query statement
:script_location: s3 location containing query statement
:sample_size: size of sample in bytes on which to run query
:macros: macro values which were used in query
:hive-version: Specifies the hive version to be used. eg: 0.13,1.2,etc.
prestocmd:
:query: inline query statement
:script_location: s3 location containing query statement
:macros: macro values which were used in query
hadoopcmd:
        :sub_command: must be one of these ["jar", "s3distcp", "streaming"] followed by
1 or more args
shellcmd:
:script: inline command with args
:script_location: s3 location containing query statement
:files: list of files in s3 bucket as file1,file2 format. These files will be
copied into the working directory where the qubole command is being
executed.
:archives: list of archives in s3 bucket as archive1,archive2 format. These
will be unarchived into the working directory where the qubole command is
being executed
:parameters: any extra args which need to be passed to script (only when
script_location is supplied)
pigcmd:
:script: inline query statement (latin_statements)
:script_location: s3 location containing pig query
:parameters: any extra args which need to be passed to script (only when
script_location is supplied
sparkcmd:
:program: the complete Spark Program in Scala, R, or Python
        :cmdline: spark-submit command line; all required information must be specified
            in cmdline itself.
:sql: inline sql query
:script_location: s3 location containing query statement
:language: language of the program, Scala, R, or Python
        :app_id: ID of a Spark job server app
:arguments: spark-submit command line arguments
:user_program_arguments: arguments that the user program takes in
:macros: macro values which were used in query
:note_id: Id of the Notebook to run
dbtapquerycmd:
:db_tap_id: data store ID of the target database, in Qubole.
:query: inline query statement
:macros: macro values which were used in query
dbexportcmd:
:mode: Can be 1 for Hive export or 2 for HDFS/S3 export
:schema: Db schema name assumed accordingly by database if not specified
:hive_table: Name of the hive table
:partition_spec: partition specification for Hive table.
:dbtap_id: data store ID of the target database, in Qubole.
:db_table: name of the db table
:db_update_mode: allowinsert or updateonly
:db_update_keys: columns used to determine the uniqueness of rows
:export_dir: HDFS/S3 location from which data will be exported.
:fields_terminated_by: hex of the char used as column separator in the dataset
:use_customer_cluster: To use cluster to run command
:customer_cluster_label: the label of the cluster to run the command on
        :additional_options: Additional Sqoop options, which need to be enclosed in
            double or single quotes, e.g. '--map-column-hive id=int,data=string'
dbimportcmd:
:mode: 1 (simple), 2 (advance)
:hive_table: Name of the hive table
:schema: Db schema name assumed accordingly by database if not specified
:hive_serde: Output format of the Hive Table
:dbtap_id: data store ID of the target database, in Qubole.
:db_table: name of the db table
:where_clause: where clause, if any
:parallelism: number of parallel db connections to use for extracting data
:extract_query: SQL query to extract data from db. $CONDITIONS must be part
of the where clause.
:boundary_query: Query to be used get range of row IDs to be extracted
:split_column: Column used as row ID to split data into ranges (mode 2)
:use_customer_cluster: To use cluster to run command
:customer_cluster_label: the label of the cluster to run the command on
        :additional_options: Additional Sqoop options, which need to be enclosed in
            double or single quotes
.. note:
Following fields are template-supported : ``query``, ``script_location``,
``sub_command``, ``script``, ``files``, ``archives``, ``program``, ``cmdline``,
``sql``, ``where_clause``, ``extract_query``, ``boundary_query``, ``macros``,
``tags``, ``name``, ``parameters``, ``dbtap_id``, ``hive_table``, ``db_table``,
``split_column``, ``note_id``, ``db_update_keys``, ``export_dir``,
``partition_spec``, ``qubole_conn_id``, ``arguments``, ``user_program_arguments``.
You can also use ``.txt`` files for template driven use cases.
.. note:
In QuboleOperator there is a default handler for task failures and retries,
which generally kills the command running at QDS for the corresponding task
instance. You can override this behavior by providing your own failure and retry
handler in task definition.
"""
template_fields = ('query', 'script_location', 'sub_command', 'script', 'files',
'archives', 'program', 'cmdline', 'sql', 'where_clause', 'tags',
'extract_query', 'boundary_query', 'macros', 'name', 'parameters',
'dbtap_id', 'hive_table', 'db_table', 'split_column', 'note_id',
'db_update_keys', 'export_dir', 'partition_spec', 'qubole_conn_id',
'arguments', 'user_program_arguments', 'cluster_label') # type: Iterable[str]
template_ext = ('.txt',) # type: Iterable[str]
ui_color = '#3064A1'
ui_fgcolor = '#fff'
qubole_hook_allowed_args_list = ['command_type', 'qubole_conn_id', 'fetch_logs']
operator_extra_links = (
QDSLink(),
)
# The _serialized_fields are lazily loaded when get_serialized_fields() method is called
__serialized_fields: Optional[FrozenSet[str]] = None
@apply_defaults
def __init__(self, qubole_conn_id="qubole_default", *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.kwargs['qubole_conn_id'] = qubole_conn_id
self.hook = None
filtered_base_kwargs = self._get_filtered_args(kwargs)
super().__init__(*args, **filtered_base_kwargs)
if self.on_failure_callback is None:
self.on_failure_callback = QuboleHook.handle_failure_retry
if self.on_retry_callback is None:
self.on_retry_callback = QuboleHook.handle_failure_retry
def _get_filtered_args(self, all_kwargs):
qubole_args = flatten_list(COMMAND_ARGS.values()) + HYPHEN_ARGS + \
flatten_list(POSITIONAL_ARGS.values()) + self.qubole_hook_allowed_args_list
return {key: value for key, value in all_kwargs.items() if key not in qubole_args}
def execute(self, context):
return self.get_hook().execute(context)
def on_kill(self, ti=None):
if self.hook:
self.hook.kill(ti)
else:
self.get_hook().kill(ti)
def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
"""get_results from Qubole"""
return self.get_hook().get_results(ti, fp, inline, delim, fetch)
def get_log(self, ti):
"""get_log from Qubole"""
return self.get_hook().get_log(ti)
def get_jobs_id(self, ti):
"""get jobs_id from Qubole"""
return self.get_hook().get_jobs_id(ti)
def get_hook(self):
"""Reinitialising the hook, as some template fields might have changed"""
return QuboleHook(*self.args, **self.kwargs)
def __getattribute__(self, name):
if name in QuboleOperator.template_fields:
if name in self.kwargs:
return self.kwargs[name]
else:
return ''
else:
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
if name in QuboleOperator.template_fields:
self.kwargs[name] = value
else:
object.__setattr__(self, name, value)
@classmethod
def get_serialized_fields(cls):
"""Serialized QuboleOperator contain exactly these fields."""
if not cls.__serialized_fields:
cls.__serialized_fields = frozenset(super().get_serialized_fields() | {"qubole_conn_id"})
return cls.__serialized_fields
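if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): how a QuboleOperator
    # might be declared inside a DAG. The dag id, task id and cluster label are
    # illustrative assumptions; the default 'qubole_default' connection is assumed
    # to be configured in Airflow.
    from datetime import datetime
    from airflow import DAG

    with DAG(dag_id='qubole_operator_example', start_date=datetime(2020, 1, 1),
             schedule_interval=None) as dag:
        show_tables = QuboleOperator(
            task_id='hive_show_tables',
            command_type='hivecmd',
            query='show tables',
            cluster_label='default',
        )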
|
|
# -*- coding: utf8 -*-
import io
import os
import shutil
def split_url_path(path):
"""
Separates URL path to repository name and path.
# Parameters
path (str): The path from URL.
# Return
tuple (str, str): The repository name and the path to be listed.
"""
separator = '/'
parts = path.split(separator)
return separator.join(parts[0:2]), separator.join(parts[2:])
def list_folder(cwd):
"""
List folder on *cwd* path as list of *File*.
# Parameters
cwd (str): The absolute path to be listed.
# Return
list (File): The list of files and folders listed in path.
"""
data = sorted(os.listdir(cwd))
dirs = []
files = []
for filename in data:
file = File(filename, cwd)
if file.type == File.TYPE_FOLDER:
dirs.append(file)
else:
files.append(file)
result = dirs + files
return result
def create_folder(cwd, folder_name):
"""
Creates folder named *folder_name* on defined *cwd* path.
    If it does not exist, it creates it and returns the new folder path.
    If it already exists, it returns an empty str.
# Parameters
cwd (str): The absolute path, where folder should be created.
folder_name (str): The name of folder to be created.
# Return
str: Path of newly created folder, if it does not already exist.
"""
path = os.path.join(cwd, folder_name)
if not os.path.exists(path):
os.makedirs(path)
return path
return ''
def create_file(cwd, file_name):
"""
    Creates a file named *file_name* under the *cwd* path.
    If it does not exist, it is created and the new file path is returned.
    If it already exists, an empty str is returned.
# Parameters
cwd (str): The absolute path, where file should be created.
file_name (str): The name of file to be created.
# Return
str: Path of newly created file, if it does not already exist.
"""
path = os.path.join(cwd, file_name)
if not os.path.exists(path):
open(path, 'w').close()
return path
return ''
def create_file_path(file_path):
"""
    Creates the file defined by *file_path*.
    If it does not exist, it is created and the new file path is returned.
    If it already exists, an empty str is returned.
    # Parameters
        file_path (str): The absolute path of the file to be created.
# Return
str: Path of newly created file, if it does not already exist.
"""
if not os.path.exists(file_path):
open(file_path, 'w').close()
return file_path
return ''
def read_file(path):
"""
    Reads the file located at *path*.
    If it does not exist, an empty str is returned.
# Parameters
path (str): The absolute path of file to be read.
# Return
str: Text content of file.
"""
if os.path.isfile(path):
file = open(path, mode='r')
content = file.read()
file.close()
return content
return ''
def write_file(path, data, encoding='utf8'):
"""
    Writes *data* into the file located at *path*, but only if the file already exists.
    As a workaround for CRLF line endings, it strips the \r symbol before writing.
# Parameters
path (str): The absolute path of file to be written.
data (str): Text content to be written.
"""
if os.path.isfile(path):
file = io.open(path, mode='w', encoding=encoding)
file.write(data.replace('\r', ''))
file.close()
def rename(path, new_path):
"""
    Performs a rename operation from *path* to *new_path*. The operation is
    performed only if no file/folder with the same name already exists at *new_path*.
# Parameters
path (str): Old path to be renamed.
new_path (str): New path to be renamed to.
"""
if (os.path.isfile(path) and not os.path.isfile(new_path)) or (os.path.isdir(path) and not os.path.isdir(new_path)):
os.rename(path, new_path)
def delete(path):
"""
Performs delete operation on file or folder stored on *path*.
If on *path* is file, it performs os.remove().
If on *path* is folder, it performs shutil.rmtree().
# Parameters
path (str): The absolute path of file or folder to be deleted.
"""
if os.path.isfile(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def human_readable_size(filesize = 0):
"""
    Converts the number of bytes given in *filesize* to a human-readable format,
    e.g. 2048 is converted to "2 kB".
# Parameters:
filesize (int): The size of file in bytes.
# Return:
str: Human-readable size.
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P']:
if filesize < 1024:
return "{} {}B".format("{:.2f}".format(filesize).rstrip('0').rstrip('.'), unit)
filesize = filesize / 1024
    # Fallback for values beyond the petabyte range covered by the loop above.
    return "{} {}B".format("{:.2f}".format(filesize).rstrip('0').rstrip('.'), 'E')
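# A few sample conversions, derived from the formatting above:
#   human_readable_size(512)      -> '512 B'
#   human_readable_size(1536)     -> '1.5 kB'
#   human_readable_size(1048576)  -> '1 MB'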
class File:
TYPE_FOLDER = 'folder'
TYPE_FILE = 'file'
def __init__(self, filename, path):
full_path = os.path.join(path, filename)
self.type = File.TYPE_FOLDER if os.path.isdir(full_path) else File.TYPE_FILE
self.filename = filename
self.path = path
self.filesize = os.path.getsize(full_path) if self.type == File.TYPE_FILE else 0
self.filesize_readable = human_readable_size(self.filesize)
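# Minimal usage sketch (not part of the original module); the folder and file
# names below are hypothetical.
if __name__ == '__main__':
    demo_dir = create_folder(os.getcwd(), 'demo_folder')
    if demo_dir:
        demo_file = create_file(demo_dir, 'example.txt')
        write_file(demo_file, 'hello world\n')
    for entry in list_folder(os.getcwd()):
        print('{} {} ({})'.format(entry.type, entry.filename,
                                  entry.filesize_readable))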
|
|
"""Unit tests for top_k_accuracy.py
Written by Grant Van Horn.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import random
import unittest
import numpy as np
import top_k_accuracy
def make_accuracy_data(num_classes, num_test_samples, num_correct_predictions, k):
"""Convenience method to create the labels and predictions array.
Args:
num_classes (int): The number of classes in the dataset.
num_test_samples (int): The number of labels and predictions to generate.
num_correct_predictions (int): The number of predictions that are correct.
k (int): The number of class labels in a prediction.
Returns:
list : The labels.
list : The predictions.
"""
assert k <= num_classes, "k too big"
assert num_correct_predictions <= num_test_samples, ""\
"`num_correct_predictions` is larger than `num_test_samples`"
if k == num_classes:
assert num_test_samples == num_correct_predictions, ""\
"`num_correct_predictions` should be equal to `num_test_samples` "\
"when `k` equals `num_classes`"
labels = []
predictions = []
class_labels = range(num_classes)
    # Determine which indexes will be correct
if num_correct_predictions > 0:
correct_prediction_idxs = set(random.sample(range(num_test_samples),
num_correct_predictions))
else:
correct_prediction_idxs = set()
# Fill in the labels and prediction lists
for i in range(num_test_samples):
gt_label = random.choice(class_labels)
labels.append(gt_label)
preds = random.sample(class_labels, k)
if i in correct_prediction_idxs:
if gt_label not in preds:
preds[0] = gt_label
else:
if gt_label in preds:
preds = [p for p in preds if p != gt_label]
                # use a wrong (but valid) label so this sample stays incorrect
                preds.append((gt_label + 1) % num_classes)
predictions.append(preds)
return labels, predictions
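# Illustrative output shape (values are random; this particular run is hypothetical):
#   labels, predictions = make_accuracy_data(num_classes=5, num_test_samples=3,
#                                            num_correct_predictions=2, k=2)
#   labels       -> [3, 0, 4]
#   predictions  -> [[3, 1], [2, 4], [4, 0]]   # exactly two rows contain the true label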
class TestAccuracy(unittest.TestCase):
def test_top_1_perfect(self):
labels, predictions = make_accuracy_data(
num_classes=10,
num_test_samples=100,
num_correct_predictions=100,
k=1)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
# Try with strings
labels = map(str, labels)
predictions = [map(str, preds) for preds in predictions]
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
# Try with numpy
labels = np.array(labels)
predictions = np.array(predictions)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
def test_top_3_perfect(self):
labels, predictions = make_accuracy_data(
num_classes=10,
num_test_samples=100,
num_correct_predictions=100,
k=3)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
# Try with strings
labels = map(str, labels)
predictions = [map(str, preds) for preds in predictions]
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
# Try with numpy
labels = np.array(labels)
predictions = np.array(predictions)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
def test_top_5_half_right(self):
labels, predictions = make_accuracy_data(
num_classes=10,
num_test_samples=10,
num_correct_predictions=5,
k=5)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0.5)
# Try with strings
labels = map(str, labels)
predictions = [map(str, preds) for preds in predictions]
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0.5)
# Try with numpy
labels = np.array(labels)
predictions = np.array(predictions)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0.5)
def test_top_5_none_correct(self):
labels, predictions = make_accuracy_data(
num_classes=10,
num_test_samples=100,
num_correct_predictions=0,
k=5)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0.)
# Try with strings
labels = map(str, labels)
predictions = [map(str, preds) for preds in predictions]
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0.)
# Try with numpy
labels = np.array(labels)
predictions = np.array(predictions)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0.)
def test_top_5_with_5_classes(self):
labels, predictions = make_accuracy_data(
num_classes=5,
num_test_samples=100,
num_correct_predictions=100,
k=5)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
# Try with strings
labels = map(str, labels)
predictions = [map(str, preds) for preds in predictions]
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
# Try with numpy
labels = np.array(labels)
predictions = np.array(predictions)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 1.)
def test_empty_labels(self):
labels = []
predictions = []
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0)
# Try with numpy
labels = np.array(labels)
predictions = np.array(predictions)
accuracy = top_k_accuracy.compute_top_k_accuracy(labels, predictions)
self.assertEqual(accuracy, 0)
@unittest.expectedFailure
def test_unmatched_lengths(self):
labels, predictions = make_accuracy_data(
num_classes=10,
num_test_samples=100,
num_correct_predictions=0,
k=3)
# add one extra prediction to the prediction matrix
predictions.append([0, 1, 2])
# Should throw an Exception
top_k_accuracy.compute_top_k_accuracy(labels, predictions)
def make_csv_file(output_path, field_names, row_data):
"""Write the data to a csv file.
Args:
output_path (str) : File path to write the csv file.
field_names [str] : The column field names.
        row_data ([{}]) : A list of dicts containing values for the field names.
"""
with open(output_path, 'w') as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=field_names)
writer.writeheader()
for data in row_data:
writer.writerow(data)
def make_submission_file(output_path, submission_data):
"""Create a submission csv file.
Args:
output_path (str) : File path to write the csv file.
submission_data [{}] : A list of dicts containing values for the keys
"image_id" and "preditions".
"""
make_csv_file(output_path, field_names=["image_id", "predictions"],
row_data=submission_data)
def make_solution_file(output_path, solution_data):
"""Create a solution csv file.
Args:
output_path (str) : File path to write the csv file.
solution_data [{}] : A list of dicts containing values for the keys
"image_id", "truth" and "usage".
"""
make_csv_file(output_path, field_names=["image_id", "truth", "usage"],
row_data=solution_data)
def make_submission_entry(image_id, predictions):
"""Convenience function to create a prediction dictionary that can be
written to a csv file.
Args:
image_id : The image id.
predictions [] : A list of predictions.
Returns:
{} : A dict that can be used by a csv.DictWriter to write the data.
"""
predictions_str = " ".join(map(str, predictions))
return {
"image_id" : image_id,
"predictions" : predictions_str
}
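# Example of the resulting row dict (hypothetical values):
#   make_submission_entry(7, [3, 1, 4]) -> {"image_id": 7, "predictions": "3 1 4"}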
def make_submission_files(num_classes, k, num_public_samples,
num_correct_public_predictions, num_private_samples,
num_correct_private_samples):
"""Convenience method to create the submission and solution csv files.
Args:
num_classes (int): The number of classes in the dataset.
k (int): The number of predictions to consider.
num_public_samples (int): The number of "Public" samples.
num_correct_public_predictions (int): The number of "Public" samples
the user gets correct.
num_private_samples (int): The number of "Private" samples.
num_correct_private_samples (int): The number of "Private" samples the
user gets correct.
Returns:
str: A file path to a submission file.
str: A file path to a solution file.
"""
public_labels, public_predictions = make_accuracy_data(
num_classes=num_classes,
num_test_samples=num_public_samples,
num_correct_predictions=num_correct_public_predictions,
k=k)
private_labels, private_predictions = make_accuracy_data(
num_classes=num_classes,
num_test_samples=num_private_samples,
num_correct_predictions=num_correct_private_samples,
k=k)
solution_data = []
for i, label in enumerate(public_labels):
solution_data.append({
"image_id" : i,
"truth" : label,
"usage" : "Public"
})
for i, label in enumerate(private_labels):
solution_data.append({
"image_id" : i + num_public_samples,
"truth" : label,
"usage" : "Private"
})
submission_data = []
for i, preds in enumerate(public_predictions + private_predictions):
image_id = i
submission_data.append(make_submission_entry(image_id, preds))
submission_fp = '/tmp/submission_file.txt'
make_submission_file(submission_fp, submission_data)
solution_fp = '/tmp/solution_file.txt'
make_solution_file(solution_fp, solution_data)
return submission_fp, solution_fp
class TestSubmissions(unittest.TestCase):
def test_top_1_perfect(self):
k = 1
submission_fp, solution_fp = make_submission_files(
num_classes=10,
k=k,
num_public_samples=100,
num_correct_public_predictions=100,
num_private_samples=100,
num_correct_private_samples=100)
public_acc, private_acc = top_k_accuracy.evaluate(submission_fp,
solution_fp, k=k)
self.assertEqual(public_acc, 1.)
self.assertEqual(private_acc, 1.)
def test_top_3_perfect(self):
k = 3
submission_fp, solution_fp = make_submission_files(
num_classes=10,
k=k,
num_public_samples=100,
num_correct_public_predictions=100,
num_private_samples=100,
num_correct_private_samples=100)
public_acc, private_acc = top_k_accuracy.evaluate(submission_fp,
solution_fp, k=k)
self.assertEqual(public_acc, 1.)
self.assertEqual(private_acc, 1.)
def test_top_5_half_right(self):
k = 5
submission_fp, solution_fp = make_submission_files(
num_classes=10,
k=k,
num_public_samples=100,
num_correct_public_predictions=50,
num_private_samples=100,
num_correct_private_samples=50)
public_acc, private_acc = top_k_accuracy.evaluate(submission_fp,
solution_fp, k=k)
self.assertEqual(public_acc, 0.5)
self.assertEqual(private_acc, 0.5)
def test_top_5_none_right(self):
k = 5
submission_fp, solution_fp = make_submission_files(
num_classes=10,
k=k,
num_public_samples=100,
num_correct_public_predictions=0,
num_private_samples=100,
num_correct_private_samples=0)
public_acc, private_acc = top_k_accuracy.evaluate(submission_fp,
solution_fp, k=k)
self.assertEqual(public_acc, 0.)
self.assertEqual(private_acc, 0.)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This file is part of pyCMBS.
(c) 2012- Alexander Loew
For COPYING and LICENSE details, please refer to the LICENSE file
"""
import matplotlib as mpl
mpl.rcParams['backend'] = 'Agg'
from pycmbs.data import Data
import matplotlib.pyplot as plt
import sys
import os
import pylab
import pickle
from pycmbs.plots import GlecklerPlot, GlobalMeanPlot
from pycmbs.benchmarking.report import Report
from pycmbs.benchmarking.utils import get_T63_landseamask
from pycmbs.benchmarking import models
from pycmbs.benchmarking import config
from pycmbs.benchmarking import analysis
from pycmbs.benchmarking.models import CMIP5Data, CMIP5RAWData, CMIP5RAW_SINGLE
from pycmbs.benchmarking.models import JSBACH_BOT # , JSBACH_RAW
from pycmbs.benchmarking.models import JSBACH_RAW2, CMIP3Data, JSBACH_SPECIAL
from pycmbs.benchmarking.models import MeanModel
from pycmbs.benchmarking.utils import get_temporary_directory
########################################################################
"""
HOWTO
Add a new variable:
1) register variable in ./configuration/model_data_routines.json
2) implement for each Data object a routine how to read the data
3) implement an analysis script that performs the actual analysis
4) register this analysis script in the file
./configuration/analysis_scripts.json
"""
########################################################################
# START
# read command line options ...
########################################################################
def create_dummy_configuration():
"""
creates a dummy configuration
1) creates dummy configuration file
2) copy template directory with INI files
"""
import shutil
d = os.environ['PYCMBSPATH'] # directory of pyCMBS installation
cwd = os.getcwd()
odir = cwd + os.sep + 'configuration'
# copy samples of INI files from repository
if os.path.exists(odir):
os.system('rm -rf ' + odir)
shutil.copytree(d + os.sep + 'benchmarking' +
os.sep + 'configuration', odir)
# create dummy configuration file
CFGW = config.CFGWriter(cwd + os.sep + 'template.cfg',
generator='pyCMBS CONFIGURATION WRITER')
CFGW.save(temp_dir='<put here the temporary data processing directory>',
vars=['albedo', 'sis'], start_date='2000-01-01',
stop_date='2007-09-30',
models=[{'id': 'MPI-ESM', 'type': 'CMIP5', 'experiment': 'AMIP',
'path': '/this/is/the/root/path/to/the/model/data/'},
{'id': 'MPI-ESMv01', 'type': 'JSBACH',
'experiment': 'HIST',
'path': '/this/is/the/root/path/to/the/model/data/'}
])
os.system('rm -rf ' + odir + os.sep + '.svn')
def main():
plt.close('all')
if len(sys.argv) > 1:
if len(sys.argv) == 2:
            # a single argument was provided as an option
if sys.argv[1] == 'init':
# copy INI files and a template configuration file
# to current directory
create_dummy_configuration()
sys.exit()
else:
file = sys.argv[1] # name of config file
if not os.path.exists(file):
raise ValueError('Configuration file can not be \
found: %s' % file)
else:
raise ValueError('Currently not more than one command \
line parameter supported!')
else: # default
print('*******************************************')
print('* WELCOME to pycmbs.py *')
print('* Happy benchmarking ... *')
print('*******************************************')
        print('')
        print('please specify a configuration filename as argument')
sys.exit()
####################################################################
# CONFIGURATION and OPTIONS
####################################################################
# read configuration file
CF = config.ConfigFile(file)
# read plotting options
PCFG = config.PlotOptions()
PCFG.read(CF)
plot_options = PCFG
####################################################################
# REMOVE previous Data warnings
####################################################################
outdir = CF.options['outputdir']
if outdir[-1] != os.sep:
outdir += os.sep
os.environ['PYCMBS_OUTPUTDIR'] = outdir
os.environ['PYCMBS_OUTPUTFORMAT'] = CF.options['report_format']
os.environ['DATA_WARNING_FILE'] = outdir + 'data_warnings_' \
+ CF.options['report'] + '.log'
if os.path.exists(os.environ['DATA_WARNING_FILE']):
os.remove(os.environ['DATA_WARNING_FILE'])
for thevar in plot_options.options.keys():
if thevar in plot_options.options.keys():
print('Variable: %s' % thevar)
for k in plot_options.options[thevar].keys():
print(' Observation: %s' % k)
if CF.options['basemap']:
f_fast = False
else:
f_fast = True
shift_lon = use_basemap = not f_fast
########################################################################
# TIMES
########################################################################
s_start_time = CF.start_date
s_stop_time = CF.stop_date
start_time = pylab.num2date(pylab.datestr2num(s_start_time))
stop_time = pylab.num2date(pylab.datestr2num(s_stop_time))
########################################################################
# INIT METHODS
########################################################################
# names of analysis scripts for all variables ---
scripts = CF.get_analysis_scripts()
# get dictionary with methods how to read data for model variables to be
# analyzed
variables = CF.variables
varmethods = CF.get_methods4variables(CF.variables)
# READ DATA
# create a Model instance for each model specified
# in the configuration file
#
# read the data for all variables and return a list
# of Data objects for further processing
model_cnt = 1
proc_models = []
for i in range(len(CF.models)):
# assign model information from configuration
data_dir = CF.dirs[i]
model = CF.models[i]
experiment = CF.experiments[i]
# create model object and read data
        # results are stored in individual variables named modelXXXXX
if CF.dtypes[i].upper() == 'CMIP5':
themodel = CMIP5Data(data_dir, model, experiment, varmethods,
intervals=CF.intervals, lat_name='lat',
lon_name='lon', label=model,
start_time=start_time,
stop_time=stop_time,
shift_lon=shift_lon)
elif CF.dtypes[i].upper() == 'CMIP5RAW':
themodel = CMIP5RAWData(data_dir, model, experiment, varmethods,
intervals=CF.intervals, lat_name='lat',
lon_name='lon', label=model,
start_time=start_time,
stop_time=stop_time,
shift_lon=shift_lon)
elif 'CMIP5RAWSINGLE' in CF.dtypes[i].upper():
themodel = CMIP5RAW_SINGLE(data_dir, model, experiment, varmethods,
intervals=CF.intervals, lat_name='lat',
lon_name='lon', label=model,
start_time=start_time,
stop_time=stop_time,
shift_lon=shift_lon)
elif CF.dtypes[i].upper() == 'JSBACH_BOT':
themodel = JSBACH_BOT(data_dir, varmethods, experiment,
intervals=CF.intervals,
start_time=start_time,
stop_time=stop_time,
name=model, shift_lon=shift_lon)
elif CF.dtypes[i].upper() == 'JSBACH_RAW':
themodel = JSBACH_RAW(data_dir, varmethods, experiment,
intervals=CF.intervals,
name=model,
shift_lon=shift_lon,
start_time=start_time,
stop_time=stop_time
)
elif CF.dtypes[i].upper() == 'JSBACH_RAW2':
themodel = JSBACH_RAW2(data_dir, varmethods, experiment,
intervals=CF.intervals,
start_time=start_time,
stop_time=stop_time,
name=model, shift_lon=shift_lon) # ,
# model_dict=model_dict)
elif CF.dtypes[i].upper() == 'JSBACH_SPECIAL':
themodel = JSBACH_SPECIAL(data_dir, varmethods, experiment,
intervals=CF.intervals,
start_time=start_time,
stop_time=stop_time,
name=model, shift_lon=shift_lon) # ,
# model_dict=model_dict)
elif CF.dtypes[i].upper() == 'CMIP3':
themodel = CMIP3Data(data_dir, model, experiment, varmethods,
intervals=CF.intervals, lat_name='lat',
lon_name='lon', label=model,
start_time=start_time,
stop_time=stop_time,
shift_lon=shift_lon)
else:
raise ValueError('Invalid model type: %s' % CF.dtypes[i])
# read data for current model
# options that specify regrid options etc.
themodel._global_configuration = CF
themodel.plot_options = plot_options
themodel.get_data()
# copy current model to a variable named modelXXXX
cmd = 'model' + str(model_cnt).zfill(4) + ' = ' \
+ 'themodel.copy(); del themodel'
exec(cmd) # store copy of cmip5 model in separate variable
# append model to list of models ---
proc_models.append('model' + str(model_cnt).zfill(4))
model_cnt += 1
########################################################################
# MULTIMODEL MEAN
# here we have now all the model and variables read.
# The list of all models is contained in the variable proc_models.
f_mean_model = True
if f_mean_model:
        # calculate climatological mean values: the models already contain
        # climatological information in the variables[] list, so there is
        # no need to take care of the different timesteps here; this
        # should already have been handled in the preprocessing.
# generate instance of MeanModel to store result
MEANMODEL = MeanModel(varmethods, intervals=CF.intervals)
# sum up all models
for i in range(len(proc_models)):
exec('actmodel = ' + proc_models[i] + '.copy()')
MEANMODEL.add_member(actmodel)
del actmodel
# calculate ensemble mean
MEANMODEL.ensmean()
# save mean model to file
# include filename of configuration file
MEANMODEL.save(get_temporary_directory(),
prefix='MEANMODEL_' + file[:-4])
# add mean model to general list of models to process in analysis
proc_models.append('MEANMODEL')
########################################################################
# END MULTIMODEL MEAN
########################################################################
########################################################################
# INIT reporting and plotting and diagnostics
########################################################################
# Gleckler Plot
global_gleckler = GlecklerPlot()
# Report
rep = Report(CF.options['report'],
'pyCMBS report - ' + CF.options['report'],
CF.options['author'],
outdir=outdir,
dpi=300, format=CF.options['report_format'])
cmd = 'cp ' + os.environ['PYCMBSPATH'] + os.sep + \
'logo' + os.sep + 'Phytonlogo5.pdf ' + rep.outdir
os.system(cmd)
########################################################################
########################################################################
########################################################################
# MAIN ANALYSIS LOOP: perform analysis for each model and variable
########################################################################
########################################################################
########################################################################
skeys = scripts.keys()
for variable in variables:
# register current variable in Gleckler Plot
global_gleckler.add_variable(variable)
# call analysis scripts for each variable
for k in range(len(skeys)):
if variable == skeys[k]:
                print('Doing analysis for variable ... %s' % variable)
                print(' ... %s' % scripts[variable])
# model list is reformatted so it can be evaluated properly
model_list = str(proc_models).replace("'", "")
cmd = 'analysis.' + scripts[variable] + '(' + model_list \
+ ',GP=global_gleckler,shift_lon=shift_lon, \
use_basemap=use_basemap,report=rep,\
interval=CF.intervals[variable],\
plot_options=PCFG)'
eval(cmd)
########################################################################
# GLECKLER PLOT finalization ...
########################################################################
# generate Gleckler analysis plot for all variables and models analyzed ///
global_gleckler.plot(vmin=-0.1, vmax=0.1, nclasses=16,
show_value=False, ticks=[-0.1, -0.05, 0., 0.05, 0.1])
oname = outdir + 'gleckler.pkl'
if os.path.exists(oname):
os.remove(oname)
pickle.dump(global_gleckler.models,
open(outdir + 'gleckler_models.pkl', 'w'))
pickle.dump(global_gleckler.variables,
open(outdir + 'gleckler_variables.pkl', 'w'))
pickle.dump(global_gleckler.data,
open(outdir + 'gleckler_data.pkl', 'w'))
pickle.dump(global_gleckler._raw_data,
open(outdir + 'gleckler_rawdata.pkl', 'w'))
rep.section('Summary error statistics')
rep.subsection('Gleckler metric')
rep.figure(global_gleckler.fig,
caption='Gleckler et al. (2008) model performance index',
width='10cm')
global_gleckler.fig.savefig(
outdir + 'portraet_diagram.png', dpi=200, bbox_inches='tight')
global_gleckler.fig.savefig(
outdir + 'portraet_diagram.pdf', dpi=200, bbox_inches='tight')
plt.close(global_gleckler.fig.number)
# generate dictionary with observation labels for each variable
labels_dict = {}
for variable in variables:
if variable not in PCFG.options.keys():
continue
varoptions = PCFG.options[variable]
thelabels = {}
for k in varoptions.keys(): # keys of observational datasets
if k == 'OPTIONS':
continue
else:
# only add observation to legend,
# if option in INI file is set
if varoptions[k]['add_to_report']:
# generate dictionary for GlecklerPLot legend
thelabels.update(
{int(varoptions[k]['gleckler_position']): k})
labels_dict.update({variable: thelabels})
del thelabels
# legend for gleckler plot ///
lcnt = 1
for variable in variables:
if variable not in PCFG.options.keys():
continue
varoptions = PCFG.options[variable]
thelabels = labels_dict[variable]
fl = global_gleckler._draw_legend(thelabels, title=variable.upper())
if fl is not None:
rep.figure(fl, width='8cm', bbox_inches=None)
fl.savefig(outdir + 'legend_portraet_' + str(lcnt)
.zfill(5) + '.png', bbox_inches='tight', dpi=200)
plt.close(fl.number)
del fl
lcnt += 1
# plot model ranking between different observational datasets ///
rep.subsection('Model ranking consistency')
for v in global_gleckler.variables:
rep.subsubsection(v.upper())
tmpfig = global_gleckler.plot_model_ranking(
v, show_text=True, obslabels=labels_dict[v])
if tmpfig is not None:
rep.figure(tmpfig, width='8cm', bbox_inches=None,
caption='Model RANKING for different observational \
datasets: ' + v.upper())
plt.close(tmpfig.number)
del tmpfig
# write a table with model ranking
tmp_filename = outdir + 'ranking_table_' + v + '.tex'
rep.open_table()
global_gleckler.write_ranking_table(
v, tmp_filename, fmt='latex', obslabels=labels_dict[v])
rep.input(tmp_filename)
rep.close_table(caption='Model rankings for variable ' + v.upper())
# plot absolute model error
tmpfig = global_gleckler.plot_model_error(v, obslabels=labels_dict[v])
if tmpfig is not None:
rep.figure(tmpfig, width='8cm', bbox_inches=None,
caption='Model ERROR for different observational \
datasets: ' + v.upper())
plt.close(tmpfig.number)
del tmpfig
########################################################################
# CLEAN up and finish
########################################################################
plt.close('all')
rep.close()
print('##########################################')
    print('#         BENCHMARKING FINISHED!         #')
print('##########################################')
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import json
import pytest
import requests
from functools import partial
from tableschema import Field, exceptions
# Constants
DESCRIPTOR_MIN = {'name': 'id'}
DESCRIPTOR_MAX = {
'name': 'id',
'type': 'integer',
'format': 'default',
'constraints': {'required': True},
}
# Tests [general]
def test_descriptor(apply_defaults):
assert Field(DESCRIPTOR_MIN).descriptor == apply_defaults(DESCRIPTOR_MIN)
def test_name():
assert Field(DESCRIPTOR_MIN).name == 'id'
def test_type():
assert Field(DESCRIPTOR_MIN).type == 'string'
assert Field(DESCRIPTOR_MAX).type == 'integer'
def test_format():
assert Field(DESCRIPTOR_MIN).format == 'default'
assert Field(DESCRIPTOR_MAX).format == 'default'
def test_constraints():
assert Field(DESCRIPTOR_MIN).constraints == {}
assert Field(DESCRIPTOR_MAX).constraints == {'required': True}
def test_required():
assert Field(DESCRIPTOR_MIN).required == False
assert Field(DESCRIPTOR_MAX).required == True
def test_cast_value():
assert Field(DESCRIPTOR_MAX).cast_value('1') == 1
def test_cast_value_constraint_error():
with pytest.raises(exceptions.CastError):
Field(DESCRIPTOR_MAX).cast_value('')
def test_cast_value_constraints_false():
assert Field(DESCRIPTOR_MIN).cast_value('', constraints=False) == None
def test_cast_value_null_with_missing_values():
field = Field({'name': 'name', 'type': 'number'}, missing_values=['null'])
assert field.cast_value('null') == None
def test_test_value():
assert Field(DESCRIPTOR_MAX).test_value('1') == True
assert Field(DESCRIPTOR_MAX).test_value('string') == False
assert Field(DESCRIPTOR_MAX).test_value('') == False
def test_test_value_constraints_false():
assert Field(DESCRIPTOR_MIN).test_value('', constraints=False) == True
# Tests [missingValues]
def test_string_missingValues():
field = Field({
'name': 'name',
'type': 'string',
}, missing_values=['', 'NA', 'N/A'])
cast = field.cast_value
assert cast('') == None
assert cast('NA') == None
assert cast('N/A') == None
def test_number_missingValues():
field = Field({
'name': 'name',
'type': 'number',
}, missing_values=['', 'NA', 'N/A'])
cast = field.cast_value
assert cast('') == None
assert cast('NA') == None
assert cast('N/A') == None
# Tests [constraints]
def test_test_value_required():
field = Field({
'name': 'name',
'type': 'string',
'constraints': {'required': True}
}, missing_values=['', 'NA', 'N/A'])
test = partial(field.test_value, constraints=['required'])
assert test('test') == True
assert test('null') == True
assert test('none') == True
assert test('nil') == True
assert test('nan') == True
assert test('NA') == False
assert test('N/A') == False
assert test('-') == True
assert test('') == False
assert test(None) == False
def test_test_value_pattern():
field = Field({
'name': 'name',
'type': 'string',
'constraints': {'pattern': '3.*'}
})
test = partial(field.test_value, constraints=['pattern'])
assert test('3') == True
assert test('321') == True
assert test('123') == False
def test_test_value_unique():
field = Field({
'name': 'name',
'type': 'integer',
'constraints': {'unique': True}
})
test = partial(field.test_value, constraints=['unique'])
assert test(30000) == True
assert test('bad') == False
def test_test_value_enum():
field = Field({
'name': 'name',
'type': 'integer',
'constraints': {'enum': ['1', '2', '3']}
})
test = partial(field.test_value, constraints=['enum'])
assert test('1') == True
assert test(1) == True
assert test('4') == False
assert test(4) == False
def test_test_value_minimum():
field = Field({
'name': 'name',
'type': 'integer',
'constraints': {'minimum': 1}
})
test = partial(field.test_value, constraints=['minimum'])
assert test('2') == True
assert test(2) == True
assert test('1') == True
assert test(1) == True
assert test('0') == False
assert test(0) == False
def test_test_value_maximum():
field = Field({
'name': 'name',
'type': 'integer',
'constraints': {'maximum': 1}
})
test = partial(field.test_value, constraints=['maximum'])
assert test('0') == True
assert test(0) == True
assert test('1') == True
assert test(1) == True
assert test('2') == False
assert test(2) == False
def test_test_value_minLength():
field = Field({
'name': 'name',
'type': 'string',
'constraints': {'minLength': 1}
})
test = partial(field.test_value, constraints=['minLength'])
assert test('ab') == True
assert test('a') == True
# Null value passes
assert test('') == True
def test_test_value_maxLength():
field = Field({
'name': 'name',
'type': 'string',
'constraints': {'maxLength': 1}
})
test = partial(field.test_value, constraints=['maxLength'])
assert test('') == True
assert test('a') == True
assert test('ab') == False
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Volume drivers for libvirt."""
import hashlib
import os
import time
import urllib2
import urlparse
from oslo.config import cfg
from nova import exception
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
from nova import paths
from nova.storage import linuxscsi
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as virtutils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.IntOpt('num_iscsi_scan_tries',
default=3,
help='number of times to rescan iSCSI target to find volume'),
cfg.StrOpt('rbd_user',
default=None,
help='the RADOS client name for accessing rbd volumes'),
cfg.StrOpt('rbd_secret_uuid',
default=None,
               help='the libvirt uuid of the secret for the rbd_user '
                    'volumes'),
cfg.StrOpt('nfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Dir where the nfs volume is mounted on the compute node'),
cfg.StrOpt('nfs_mount_options',
default=None,
help='Mount options passed to the nfs client. See section '
'of the nfs man page for details'),
cfg.IntOpt('num_aoe_discover_tries',
default=3,
help='number of times to rediscover AoE target to find volume'),
cfg.StrOpt('glusterfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Dir where the glusterfs volume is mounted on the '
'compute node'),
cfg.BoolOpt('libvirt_iscsi_use_multipath',
default=False,
help='use multipath connection of the iSCSI volume'),
cfg.StrOpt('scality_sofs_config',
default=None,
help='Path or URL to Scality SOFS configuration file'),
cfg.StrOpt('scality_sofs_mount_point',
default='$state_path/scality',
help='Base dir where Scality SOFS shall be mounted'),
]
CONF = cfg.CONF
CONF.register_opts(volume_opts)
class LibvirtBaseVolumeDriver(object):
"""Base class for volume drivers."""
def __init__(self, connection, is_block_dev):
self.connection = connection
self.is_block_dev = is_block_dev
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
conf = vconfig.LibvirtConfigGuestDisk()
conf.driver_name = virtutils.pick_disk_driver_name(self.is_block_dev)
conf.device_type = disk_info['type']
conf.driver_format = "raw"
conf.driver_cache = "none"
conf.target_dev = disk_info['dev']
conf.target_bus = disk_info['bus']
conf.serial = connection_info.get('serial')
return conf
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
pass
class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
"""Class for volumes backed by local file."""
def __init__(self, connection):
super(LibvirtVolumeDriver,
self).__init__(connection, is_block_dev=True)
def connect_volume(self, connection_info, disk_info):
"""Connect the volume to a local device."""
conf = super(LibvirtVolumeDriver,
self).connect_volume(connection_info,
disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach fake volumes to libvirt."""
def __init__(self, connection):
super(LibvirtFakeVolumeDriver,
self).__init__(connection, is_block_dev=True)
def connect_volume(self, connection_info, disk_info):
"""Connect the volume to a fake device."""
conf = super(LibvirtFakeVolumeDriver,
self).connect_volume(connection_info,
disk_info)
conf.source_type = "network"
conf.source_protocol = "fake"
conf.source_host = "fake"
return conf
class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtNetVolumeDriver,
self).__init__(connection, is_block_dev=False)
def connect_volume(self, connection_info, disk_info):
conf = super(LibvirtNetVolumeDriver,
self).connect_volume(connection_info,
disk_info)
conf.source_type = "network"
conf.source_protocol = connection_info['driver_volume_type']
conf.source_host = connection_info['data']['name']
netdisk_properties = connection_info['data']
auth_enabled = netdisk_properties.get('auth_enabled')
if (conf.source_protocol == 'rbd' and
CONF.rbd_secret_uuid):
conf.auth_secret_uuid = CONF.rbd_secret_uuid
auth_enabled = True # Force authentication locally
if CONF.rbd_user:
conf.auth_username = CONF.rbd_user
if auth_enabled:
conf.auth_username = (conf.auth_username or
netdisk_properties['auth_username'])
conf.auth_secret_type = netdisk_properties['secret_type']
conf.auth_secret_uuid = (conf.auth_secret_uuid or
netdisk_properties['secret_uuid'])
return conf
class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtISCSIVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = utils.execute('iscsiadm', '-m', 'node', '-T',
iscsi_properties['target_iqn'],
'-p', iscsi_properties['target_portal'],
*iscsi_command, run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
(iscsi_command, out, err))
return (out, err)
def _iscsiadm_update(self, iscsi_properties, property_key, property_value,
**kwargs):
iscsi_command = ('--op', 'update', '-n', property_key,
'-v', property_value)
return self._run_iscsiadm(iscsi_properties, iscsi_command, **kwargs)
def _get_target_portals_from_iscsiadm_output(self, output):
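        # Each line of "iscsiadm -m discovery -t sendtargets" output typically
        # looks like (hypothetical values):
        #   10.0.0.2:3260,1 iqn.2010-10.org.openstack:volume-1
        # and the first whitespace-separated token of every line is returned.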
return [line.split()[0] for line in output.splitlines()]
@lockutils.synchronized('connect_volume', 'nova-')
def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
conf = super(LibvirtISCSIVolumeDriver,
self).connect_volume(connection_info,
disk_info)
iscsi_properties = connection_info['data']
libvirt_iscsi_use_multipath = CONF.libvirt_iscsi_use_multipath
if libvirt_iscsi_use_multipath:
            # multipath is installed, so discover other targets if available;
            # multipath should be configured on the nova-compute node
            # to match the storage vendor's recommendations
out = self._run_iscsiadm_bare(['-m',
'discovery',
'-t',
'sendtargets',
'-p',
iscsi_properties['target_portal']],
check_exit_code=[0, 255])[0] \
or ""
for ip in self._get_target_portals_from_iscsiadm_output(out):
props = iscsi_properties.copy()
props['target_portal'] = ip
self._connect_to_iscsi_portal(props)
self._rescan_iscsi()
else:
self._connect_to_iscsi_portal(iscsi_properties)
host_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" %
(iscsi_properties['target_portal'],
iscsi_properties['target_iqn'],
iscsi_properties.get('target_lun', 0)))
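        # e.g. (hypothetical values substituted into the format string above):
        #   /dev/disk/by-path/ip-10.0.0.2:3260-iscsi-iqn.2010-10.org.openstack:volume-1-lun-1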
# The /dev/disk/by-path/... node is not always present immediately
# TODO(justinsb): This retry-with-delay is a pattern, move to utils?
tries = 0
disk_dev = disk_info['dev']
while not os.path.exists(host_device):
if tries >= CONF.num_iscsi_scan_tries:
raise exception.NovaException(_("iSCSI device not found at %s")
% (host_device))
LOG.warn(_("ISCSI volume not yet found at: %(disk_dev)s. "
"Will rescan & retry. Try number: %(tries)s") %
locals())
# The rescan isn't documented as being necessary(?), but it helps
self._run_iscsiadm(iscsi_properties, ("--rescan",))
tries = tries + 1
if not os.path.exists(host_device):
time.sleep(tries ** 2)
if tries != 0:
LOG.debug(_("Found iSCSI node %(disk_dev)s "
"(after %(tries)s rescans)") %
locals())
if libvirt_iscsi_use_multipath:
#we use the multipath device instead of the single path device
self._rescan_multipath()
multipath_device = self._get_multipath_device_name(host_device)
if multipath_device is not None:
host_device = multipath_device
conf.source_type = "block"
conf.source_path = host_device
return conf
@lockutils.synchronized('connect_volume', 'nova-')
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
iscsi_properties = connection_info['data']
multipath_device = None
if CONF.libvirt_iscsi_use_multipath:
host_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s" %
(iscsi_properties['target_portal'],
iscsi_properties['target_iqn'],
iscsi_properties.get('target_lun', 0)))
multipath_device = self._get_multipath_device_name(host_device)
super(LibvirtISCSIVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
if CONF.libvirt_iscsi_use_multipath and multipath_device:
return self._disconnect_volume_multipath_iscsi(iscsi_properties)
# NOTE(vish): Only disconnect from the target if no luns from the
# target are in use.
device_prefix = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-" %
(iscsi_properties['target_portal'],
iscsi_properties['target_iqn']))
devices = self.connection.get_all_block_devices()
devices = [dev for dev in devices if dev.startswith(device_prefix)]
if not devices:
self._disconnect_from_iscsi_portal(iscsi_properties)
def _disconnect_volume_multipath_iscsi(self, iscsi_properties):
self._rescan_iscsi()
self._rescan_multipath()
block_devices = self.connection.get_all_block_devices()
devices = []
for dev in block_devices:
if "/mapper/" in dev:
devices.append(dev)
else:
mpdev = self._get_multipath_device_name(dev)
if mpdev:
devices.append(mpdev)
if not devices:
# disconnect if no other multipath devices
self._disconnect_mpath(iscsi_properties)
return
other_iqns = [self._get_multipath_iqn(device)
for device in devices]
if iscsi_properties['target_iqn'] not in other_iqns:
# disconnect if no other multipath devices with same iqn
self._disconnect_mpath(iscsi_properties)
return
# else do not disconnect iscsi portals,
# as they are used for other luns
return
def _connect_to_iscsi_portal(self, iscsi_properties):
# NOTE(vish): If we are on the same host as nova volume, the
# discovery makes the target so we don't need to
# run --op new. Therefore, we check to see if the
# target exists, and if we get 255 (Not Found), then
# we run --op new. This will also happen if another
# volume is using the same target.
try:
self._run_iscsiadm(iscsi_properties, ())
except exception.ProcessExecutionError as exc:
# iscsiadm returns 21 for "No records found" after version 2.0-871
if exc.exit_code in [21, 255]:
self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
else:
raise
if iscsi_properties.get('auth_method'):
self._iscsiadm_update(iscsi_properties,
"node.session.auth.authmethod",
iscsi_properties['auth_method'])
self._iscsiadm_update(iscsi_properties,
"node.session.auth.username",
iscsi_properties['auth_username'])
self._iscsiadm_update(iscsi_properties,
"node.session.auth.password",
iscsi_properties['auth_password'])
#duplicate logins crash iscsiadm after load,
#so we scan active sessions to see if the node is logged in.
out = self._run_iscsiadm_bare(["-m", "session"],
run_as_root=True,
check_exit_code=[0, 1, 21])[0] or ""
portals = [{'portal': p.split(" ")[2], 'iqn': p.split(" ")[3]}
for p in out.splitlines() if p.startswith("tcp:")]
stripped_portal = iscsi_properties['target_portal'].split(",")[0]
if len(portals) == 0 or len([s for s in portals
if stripped_portal ==
s['portal'].split(",")[0]
and
s['iqn'] ==
iscsi_properties['target_iqn']]
) == 0:
try:
self._run_iscsiadm(iscsi_properties,
("--login",),
check_exit_code=[0, 255])
except exception.ProcessExecutionError as err:
                    # as this might be one of many paths,
                    # only set successful logins to start up automatically
if err.exit_code in [15]:
self._iscsiadm_update(iscsi_properties,
"node.startup",
"automatic")
return
self._iscsiadm_update(iscsi_properties,
"node.startup",
"automatic")
def _disconnect_from_iscsi_portal(self, iscsi_properties):
self._iscsiadm_update(iscsi_properties, "node.startup", "manual",
check_exit_code=[0, 21, 255])
self._run_iscsiadm(iscsi_properties, ("--logout",),
check_exit_code=[0, 21, 255])
self._run_iscsiadm(iscsi_properties, ('--op', 'delete'),
check_exit_code=[0, 21, 255])
def _get_multipath_device_name(self, single_path_device):
device = os.path.realpath(single_path_device)
out = self._run_multipath(['-ll',
device],
check_exit_code=[0, 1])[0]
mpath_line = [line for line in out.splitlines()
if "scsi_id" not in line] # ignore udev errors
if len(mpath_line) > 0 and len(mpath_line[0]) > 0:
return "/dev/mapper/%s" % mpath_line[0].split(" ")[0]
return None
def _get_iscsi_devices(self):
try:
devices = list(os.walk('/dev/disk/by-path'))[0][-1]
except IndexError:
return []
return [entry for entry in devices if entry.startswith("ip-")]
def _disconnect_mpath(self, iscsi_properties):
entries = self._get_iscsi_devices()
ips = [ip.split("-")[1] for ip in entries
if iscsi_properties['target_iqn'] in ip]
for ip in ips:
props = iscsi_properties.copy()
props['target_portal'] = ip
self._disconnect_from_iscsi_portal(props)
self._rescan_multipath()
def _get_multipath_iqn(self, multipath_device):
entries = self._get_iscsi_devices()
for entry in entries:
entry_real_path = os.path.realpath("/dev/disk/by-path/%s" % entry)
entry_multipath = self._get_multipath_device_name(entry_real_path)
if entry_multipath == multipath_device:
return entry.split("iscsi-")[1].split("-lun")[0]
return None
def _run_iscsiadm_bare(self, iscsi_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = utils.execute('iscsiadm',
*iscsi_command,
run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
(iscsi_command, out, err))
return (out, err)
def _run_multipath(self, multipath_command, **kwargs):
check_exit_code = kwargs.pop('check_exit_code', 0)
(out, err) = utils.execute('multipath',
*multipath_command,
run_as_root=True,
check_exit_code=check_exit_code)
LOG.debug("multipath %s: stdout=%s stderr=%s" %
(multipath_command, out, err))
return (out, err)
def _rescan_iscsi(self):
self._run_iscsiadm_bare(('-m', 'node', '--rescan'),
check_exit_code=[0, 1, 21, 255])
self._run_iscsiadm_bare(('-m', 'session', '--rescan'),
check_exit_code=[0, 1, 21, 255])
def _rescan_multipath(self):
        # pass the flag as a list so utils.execute() receives a single '-r' argument
        self._run_multipath(['-r'], check_exit_code=[0, 1, 21])
class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for NFS."""
def __init__(self, connection):
"""Create back-end to nfs."""
super(LibvirtNFSVolumeDriver,
self).__init__(connection, is_block_dev=False)
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
conf = super(LibvirtNFSVolumeDriver,
self).connect_volume(connection_info,
disk_info)
path = self._ensure_mounted(connection_info['data']['export'])
path = os.path.join(path, connection_info['data']['name'])
conf.source_type = 'file'
conf.source_path = path
return conf
def _ensure_mounted(self, nfs_export):
"""
@type nfs_export: string
"""
mount_path = os.path.join(CONF.nfs_mount_point_base,
self.get_hash_str(nfs_export))
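        # The mount directory name is the md5 hex digest of the export string,
        # e.g. (hypothetical export): <nfs_mount_point_base>/<md5('host:/export')>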
self._mount_nfs(mount_path, nfs_export, ensure=True)
return mount_path
def _mount_nfs(self, mount_path, nfs_share, ensure=False):
"""Mount nfs export to mount path."""
if not self._path_exists(mount_path):
utils.execute('mkdir', '-p', mount_path)
# Construct the NFS mount command.
nfs_cmd = ['mount', '-t', 'nfs']
if CONF.nfs_mount_options is not None:
nfs_cmd.extend(['-o', CONF.nfs_mount_options])
nfs_cmd.extend([nfs_share, mount_path])
try:
utils.execute(*nfs_cmd, run_as_root=True)
except exception.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.message:
LOG.warn(_("%s is already mounted"), nfs_share)
else:
raise
@staticmethod
def get_hash_str(base_str):
"""returns string that represents hash of base_str (in hex format)."""
return hashlib.md5(base_str).hexdigest()
@staticmethod
def _path_exists(path):
"""Check path."""
try:
return utils.execute('stat', path, run_as_root=True)
except exception.ProcessExecutionError:
return False
class LibvirtAOEVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach AoE volumes to libvirt."""
def __init__(self, connection):
super(LibvirtAOEVolumeDriver,
self).__init__(connection, is_block_dev=True)
def _aoe_discover(self):
"""Call aoe-discover (aoe-tools) AoE Discover."""
(out, err) = utils.execute('aoe-discover',
run_as_root=True, check_exit_code=0)
return (out, err)
def _aoe_revalidate(self, aoedev):
"""Revalidate the LUN Geometry (When an AoE ID is reused)."""
(out, err) = utils.execute('aoe-revalidate', aoedev,
run_as_root=True, check_exit_code=0)
return (out, err)
def connect_volume(self, connection_info, mount_device):
shelf = connection_info['data']['target_shelf']
lun = connection_info['data']['target_lun']
aoedev = 'e%s.%s' % (shelf, lun)
aoedevpath = '/dev/etherd/%s' % (aoedev)
if os.path.exists(aoedevpath):
# NOTE(jbr_): If aoedevpath already exists, revalidate the LUN.
self._aoe_revalidate(aoedev)
else:
# NOTE(jbr_): If aoedevpath does not exist, do a discover.
self._aoe_discover()
#NOTE(jbr_): Device path is not always present immediately
def _wait_for_device_discovery(aoedevpath, mount_device):
tries = self.tries
if os.path.exists(aoedevpath):
raise utils.LoopingCallDone()
if self.tries >= CONF.num_aoe_discover_tries:
raise exception.NovaException(_("AoE device not found at %s") %
(aoedevpath))
LOG.warn(_("AoE volume not yet found at: %(aoedevpath)s. "
"Try number: %(tries)s") %
locals())
self._aoe_discover()
self.tries = self.tries + 1
self.tries = 0
timer = utils.FixedIntervalLoopingCall(_wait_for_device_discovery,
aoedevpath, mount_device)
timer.start(interval=2).wait()
tries = self.tries
if tries != 0:
LOG.debug(_("Found AoE device %(aoedevpath)s "
"(after %(tries)s rediscover)") %
locals())
conf = super(LibvirtAOEVolumeDriver,
self).connect_volume(connection_info, mount_device)
conf.source_type = "block"
conf.source_path = aoedevpath
return conf
class LibvirtGlusterfsVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for GlusterFS."""
def __init__(self, connection):
"""Create back-end to glusterfs."""
super(LibvirtGlusterfsVolumeDriver,
self).__init__(connection, is_block_dev=False)
def connect_volume(self, connection_info, mount_device):
"""Connect the volume. Returns xml for libvirt."""
conf = super(LibvirtGlusterfsVolumeDriver,
self).connect_volume(connection_info, mount_device)
path = self._ensure_mounted(connection_info['data']['export'])
path = os.path.join(path, connection_info['data']['name'])
conf.source_type = 'file'
conf.source_path = path
return conf
def _ensure_mounted(self, glusterfs_export):
"""
@type glusterfs_export: string
"""
mount_path = os.path.join(CONF.glusterfs_mount_point_base,
self.get_hash_str(glusterfs_export))
self._mount_glusterfs(mount_path, glusterfs_export, ensure=True)
return mount_path
def _mount_glusterfs(self, mount_path, glusterfs_share, ensure=False):
"""Mount glusterfs export to mount path."""
if not self._path_exists(mount_path):
utils.execute('mkdir', '-p', mount_path)
try:
utils.execute('mount', '-t', 'glusterfs', glusterfs_share,
mount_path,
run_as_root=True)
except exception.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.message:
LOG.warn(_("%s is already mounted"), glusterfs_share)
else:
raise
@staticmethod
def get_hash_str(base_str):
"""returns string that represents hash of base_str (in hex format)."""
return hashlib.md5(base_str).hexdigest()
@staticmethod
def _path_exists(path):
"""Check path."""
try:
return utils.execute('stat', path, run_as_root=True)
except exception.ProcessExecutionError:
return False
class LibvirtFibreChannelVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Fibre Channel Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtFibreChannelVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _get_pci_num(self, hba):
# NOTE(walter-boring)
# device path is in format of
# /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.3/host2/fc_host/host2
# sometimes an extra entry exists before the host2 value
# we always want the value prior to the host2 value
pci_num = None
if hba is not None:
if "device_path" in hba:
index = 0
device_path = hba['device_path'].split('/')
for value in device_path:
if value.startswith('host'):
break
index = index + 1
if index > 0:
pci_num = device_path[index - 1]
return pci_num
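        # For the example device_path in the note above, split('/') yields
        # ['', 'sys', 'devices', 'pci0000:00', '0000:00:03.0', '0000:05:00.3',
        #  'host2', 'fc_host', 'host2'], so the returned pci_num is '0000:05:00.3'.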
@lockutils.synchronized('connect_volume', 'nova-')
def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
fc_properties = connection_info['data']
mount_device = disk_info["dev"]
ports = fc_properties['target_wwn']
wwns = []
# we support a list of wwns or a single wwn
if isinstance(ports, list):
for wwn in ports:
wwns.append(wwn)
elif isinstance(ports, str):
wwns.append(ports)
# We need to look for wwns on every hba
# because we don't know ahead of time
# where they will show up.
hbas = virtutils.get_fc_hbas_info()
host_devices = []
for hba in hbas:
pci_num = self._get_pci_num(hba)
if pci_num is not None:
for wwn in wwns:
target_wwn = "0x%s" % wwn.lower()
host_device = ("/dev/disk/by-path/pci-%s-fc-%s-lun-%s" %
(pci_num,
target_wwn,
fc_properties.get('target_lun', 0)))
host_devices.append(host_device)
if len(host_devices) == 0:
# this is empty because we don't have any FC HBAs
msg = _("We are unable to locate any Fibre Channel devices")
raise exception.NovaException(msg)
# The /dev/disk/by-path/... node is not always present immediately
# We only need to find the first device. Once we see the first device
# multipath will have any others.
def _wait_for_device_discovery(host_devices, mount_device):
tries = self.tries
for device in host_devices:
LOG.debug(_("Looking for Fibre Channel dev %(device)s")
% locals())
if os.path.exists(device):
self.host_device = device
# get the /dev/sdX device. This is used
# to find the multipath device.
self.device_name = os.path.realpath(device)
raise utils.LoopingCallDone()
if self.tries >= CONF.num_iscsi_scan_tries:
msg = _("Fibre Channel device not found.")
raise exception.NovaException(msg)
LOG.warn(_("Fibre volume not yet found at: %(mount_device)s. "
"Will rescan & retry. Try number: %(tries)s") %
locals())
linuxscsi.rescan_hosts(hbas)
self.tries = self.tries + 1
self.host_device = None
self.device_name = None
self.tries = 0
timer = utils.FixedIntervalLoopingCall(_wait_for_device_discovery,
host_devices, mount_device)
timer.start(interval=2).wait()
tries = self.tries
if self.host_device is not None and self.device_name is not None:
LOG.debug(_("Found Fibre Channel volume %(mount_device)s "
"(after %(tries)s rescans)") % locals())
# see if the new drive is part of a multipath
# device. If so, we'll use the multipath device.
mdev_info = linuxscsi.find_multipath_device(self.device_name)
if mdev_info is not None:
LOG.debug(_("Multipath device discovered %(device)s")
% {'device': mdev_info['device']})
device_path = mdev_info['device']
connection_info['data']['devices'] = mdev_info['devices']
else:
# we didn't find a multipath device.
# so we assume the kernel only sees 1 device
device_path = self.host_device
device_info = linuxscsi.get_device_info(self.device_name)
connection_info['data']['devices'] = [device_info]
conf = super(LibvirtFibreChannelVolumeDriver,
self).connect_volume(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = device_path
return conf
@lockutils.synchronized('connect_volume', 'nova-')
def disconnect_volume(self, connection_info, mount_device):
"""Detach the volume from instance_name."""
super(LibvirtFibreChannelVolumeDriver,
self).disconnect_volume(connection_info, mount_device)
devices = connection_info['data']['devices']
# There may have been more than 1 device mounted
# by the kernel for this volume. We have to remove
# all of them
for device in devices:
linuxscsi.remove_device(device)
class LibvirtScalityVolumeDriver(LibvirtBaseVolumeDriver):
"""Scality SOFS Nova driver. Provide hypervisors with access
to sparse files on SOFS. """
def __init__(self, connection):
"""Create back-end to SOFS and check connection."""
super(LibvirtScalityVolumeDriver,
self).__init__(connection, is_block_dev=False)
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
self._check_prerequisites()
self._mount_sofs()
conf = super(LibvirtScalityVolumeDriver,
self).connect_volume(connection_info, disk_info)
path = os.path.join(CONF.scality_sofs_mount_point,
connection_info['data']['sofs_path'])
conf.source_type = 'file'
conf.source_path = path
# The default driver cache policy is 'none', and this causes
# qemu/kvm to open the volume file with O_DIRECT, which is
# rejected by FUSE (on kernels older than 3.3). Scality SOFS
# is FUSE based, so we must provide a more sensible default.
conf.driver_cache = 'writethrough'
return conf
def _check_prerequisites(self):
"""Sanity checks before attempting to mount SOFS."""
# config is mandatory
config = CONF.scality_sofs_config
if not config:
msg = _("Value required for 'scality_sofs_config'")
LOG.warn(msg)
raise exception.NovaException(msg)
# config can be a file path or a URL, check it
if urlparse.urlparse(config).scheme == '':
# turn local path into URL
config = 'file://%s' % config
try:
urllib2.urlopen(config, timeout=5).close()
except urllib2.URLError as e:
msg = _("Cannot access 'scality_sofs_config': %s") % e
LOG.warn(msg)
raise exception.NovaException(msg)
# mount.sofs must be installed
if not os.access('/sbin/mount.sofs', os.X_OK):
msg = _("Cannot execute /sbin/mount.sofs")
LOG.warn(msg)
raise exception.NovaException(msg)
def _mount_sofs(self):
config = CONF.scality_sofs_config
mount_path = CONF.scality_sofs_mount_point
sysdir = os.path.join(mount_path, 'sys')
if not os.path.isdir(mount_path):
utils.execute('mkdir', '-p', mount_path)
if not os.path.isdir(sysdir):
utils.execute('mount', '-t', 'sofs', config, mount_path,
run_as_root=True)
if not os.path.isdir(sysdir):
msg = _("Cannot mount Scality SOFS, check syslog for errors")
LOG.warn(msg)
raise exception.NovaException(msg)
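# Illustrative sketch (not part of Nova): connect_volume() above builds
# "/dev/disk/by-path/pci-<pci_num>-fc-0x<wwn>-lun-<lun>" candidates for every
# HBA/WWN pair and then polls until one of them shows up.  The helper below
# repeats that string assembly in isolation; its name and arguments are
# assumptions made for the example only.
def _example_fc_by_path_nodes(pci_nums, wwns, lun=0):
    """Return the candidate by-path device nodes for the given HBAs/WWNs."""
    nodes = []
    for pci_num in pci_nums:
        for wwn in wwns:
            nodes.append("/dev/disk/by-path/pci-%s-fc-0x%s-lun-%s"
                         % (pci_num, wwn.lower(), lun))
    return nodes
# _example_fc_by_path_nodes(['0000:05:00.3'], ['50014380029B0C6E'])
# -> ['/dev/disk/by-path/pci-0000:05:00.3-fc-0x50014380029b0c6e-lun-0']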
|
|
#
# The Python Imaging Library.
# $Id$
#
# JPEG (JFIF) file handling
#
# See "Digital Compression and Coding of Continous-Tone Still Images,
# Part 1, Requirements and Guidelines" (CCITT T.81 / ISO 10918-1)
#
# History:
# 1995-09-09 fl Created
# 1995-09-13 fl Added full parser
# 1996-03-25 fl Added hack to use the IJG command line utilities
# 1996-05-05 fl Workaround Photoshop 2.5 CMYK polarity bug
# 1996-05-28 fl Added draft support, JFIF version (0.1)
# 1996-12-30 fl Added encoder options, added progression property (0.2)
# 1997-08-27 fl Save mode 1 images as BW (0.3)
# 1998-07-12 fl Added YCbCr to draft and save methods (0.4)
# 1998-10-19 fl Don't hang on files using 16-bit DQT's (0.4.1)
# 2001-04-16 fl Extract DPI settings from JFIF files (0.4.2)
# 2002-07-01 fl Skip pad bytes before markers; identify Exif files (0.4.3)
# 2003-04-25 fl Added experimental EXIF decoder (0.5)
# 2003-06-06 fl Added experimental EXIF GPSinfo decoder
# 2003-09-13 fl Extract COM markers
# 2009-09-06 fl Added icc_profile support (from Florian Hoech)
# 2009-03-06 fl Changed CMYK handling; always use Adobe polarity (0.6)
# 2009-03-08 fl Added subsampling support (from Justin Huff).
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1995-1996 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.6"
import array, struct
from PIL import Image, ImageFile, _binary
from PIL.JpegPresets import presets
from PIL._util import isStringType
i8 = _binary.i8
o8 = _binary.o8
i16 = _binary.i16be
i32 = _binary.i32be
#
# Parser
def Skip(self, marker):
n = i16(self.fp.read(2))-2
ImageFile._safe_read(self.fp, n)
def APP(self, marker):
#
# Application marker. Store these in the APP dictionary.
# Also look for well-known application markers.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
app = "APP%d" % (marker&15)
self.app[app] = s # compatibility
self.applist.append((app, s))
if marker == 0xFFE0 and s[:4] == b"JFIF":
# extract JFIF information
self.info["jfif"] = version = i16(s, 5) # version
self.info["jfif_version"] = divmod(version, 256)
# extract JFIF properties
try:
jfif_unit = i8(s[7])
jfif_density = i16(s, 8), i16(s, 10)
except:
pass
else:
if jfif_unit == 1:
self.info["dpi"] = jfif_density
self.info["jfif_unit"] = jfif_unit
self.info["jfif_density"] = jfif_density
elif marker == 0xFFE1 and s[:5] == b"Exif\0":
# extract Exif information (incomplete)
self.info["exif"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:5] == b"FPXR\0":
# extract FlashPix information (incomplete)
self.info["flashpix"] = s # FIXME: value will change
elif marker == 0xFFE2 and s[:12] == b"ICC_PROFILE\0":
# Since an ICC profile can be larger than the maximum size of
# a JPEG marker (64K), we need provisions to split it into
# multiple markers. The format defined by the ICC specifies
# one or more APP2 markers containing the following data:
# Identifying string ASCII "ICC_PROFILE\0" (12 bytes)
# Marker sequence number 1, 2, etc (1 byte)
# Number of markers Total of APP2's used (1 byte)
# Profile data (remainder of APP2 data)
# Decoders should use the marker sequence numbers to
# reassemble the profile, rather than assuming that the APP2
# markers appear in the correct sequence.
self.icclist.append(s)
elif marker == 0xFFEE and s[:5] == b"Adobe":
self.info["adobe"] = i16(s, 5)
# extract Adobe custom properties
try:
adobe_transform = i8(s[1])
except:
pass
else:
self.info["adobe_transform"] = adobe_transform
def COM(self, marker):
#
# Comment marker. Store these in the APP dictionary.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
self.app["COM"] = s # compatibility
self.applist.append(("COM", s))
def SOF(self, marker):
#
# Start of frame marker. Defines the size and mode of the
# image. JPEG is colour blind, so we use some simple
# heuristics to map the number of layers to an appropriate
# mode. Note that this could be made a bit brighter, by
# looking for JFIF and Adobe APP markers.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
self.size = i16(s[3:]), i16(s[1:])
self.bits = i8(s[0])
if self.bits != 8:
raise SyntaxError("cannot handle %d-bit layers" % self.bits)
self.layers = i8(s[5])
if self.layers == 1:
self.mode = "L"
elif self.layers == 3:
self.mode = "RGB"
elif self.layers == 4:
self.mode = "CMYK"
else:
raise SyntaxError("cannot handle %d-layer images" % self.layers)
if marker in [0xFFC2, 0xFFC6, 0xFFCA, 0xFFCE]:
self.info["progressive"] = self.info["progression"] = 1
if self.icclist:
# fixup icc profile
self.icclist.sort() # sort by sequence number
if i8(self.icclist[0][13]) == len(self.icclist):
profile = []
for p in self.icclist:
profile.append(p[14:])
icc_profile = b"".join(profile)
else:
icc_profile = None # wrong number of fragments
self.info["icc_profile"] = icc_profile
self.icclist = None
for i in range(6, len(s), 3):
t = s[i:i+3]
# 4-tuples: id, vsamp, hsamp, qtable
self.layer.append((t[0], i8(t[1])//16, i8(t[1])&15, i8(t[2])))
def DQT(self, marker):
#
# Define quantization table. Support baseline 8-bit tables
# only. Note that there might be more than one table in
# each marker.
# FIXME: The quantization tables can be used to estimate the
# compression quality.
n = i16(self.fp.read(2))-2
s = ImageFile._safe_read(self.fp, n)
while len(s):
if len(s) < 65:
raise SyntaxError("bad quantization table marker")
v = i8(s[0])
if v//16 == 0:
self.quantization[v&15] = array.array("b", s[1:65])
s = s[65:]
else:
return # FIXME: add code to read 16-bit tables!
# raise SyntaxError, "bad quantization table element size"
#
# JPEG marker table
MARKER = {
0xFFC0: ("SOF0", "Baseline DCT", SOF),
0xFFC1: ("SOF1", "Extended Sequential DCT", SOF),
0xFFC2: ("SOF2", "Progressive DCT", SOF),
0xFFC3: ("SOF3", "Spatial lossless", SOF),
0xFFC4: ("DHT", "Define Huffman table", Skip),
0xFFC5: ("SOF5", "Differential sequential DCT", SOF),
0xFFC6: ("SOF6", "Differential progressive DCT", SOF),
0xFFC7: ("SOF7", "Differential spatial", SOF),
0xFFC8: ("JPG", "Extension", None),
0xFFC9: ("SOF9", "Extended sequential DCT (AC)", SOF),
0xFFCA: ("SOF10", "Progressive DCT (AC)", SOF),
0xFFCB: ("SOF11", "Spatial lossless DCT (AC)", SOF),
0xFFCC: ("DAC", "Define arithmetic coding conditioning", Skip),
0xFFCD: ("SOF13", "Differential sequential DCT (AC)", SOF),
0xFFCE: ("SOF14", "Differential progressive DCT (AC)", SOF),
0xFFCF: ("SOF15", "Differential spatial (AC)", SOF),
0xFFD0: ("RST0", "Restart 0", None),
0xFFD1: ("RST1", "Restart 1", None),
0xFFD2: ("RST2", "Restart 2", None),
0xFFD3: ("RST3", "Restart 3", None),
0xFFD4: ("RST4", "Restart 4", None),
0xFFD5: ("RST5", "Restart 5", None),
0xFFD6: ("RST6", "Restart 6", None),
0xFFD7: ("RST7", "Restart 7", None),
0xFFD8: ("SOI", "Start of image", None),
0xFFD9: ("EOI", "End of image", None),
0xFFDA: ("SOS", "Start of scan", Skip),
0xFFDB: ("DQT", "Define quantization table", DQT),
0xFFDC: ("DNL", "Define number of lines", Skip),
0xFFDD: ("DRI", "Define restart interval", Skip),
0xFFDE: ("DHP", "Define hierarchical progression", SOF),
0xFFDF: ("EXP", "Expand reference component", Skip),
0xFFE0: ("APP0", "Application segment 0", APP),
0xFFE1: ("APP1", "Application segment 1", APP),
0xFFE2: ("APP2", "Application segment 2", APP),
0xFFE3: ("APP3", "Application segment 3", APP),
0xFFE4: ("APP4", "Application segment 4", APP),
0xFFE5: ("APP5", "Application segment 5", APP),
0xFFE6: ("APP6", "Application segment 6", APP),
0xFFE7: ("APP7", "Application segment 7", APP),
0xFFE8: ("APP8", "Application segment 8", APP),
0xFFE9: ("APP9", "Application segment 9", APP),
0xFFEA: ("APP10", "Application segment 10", APP),
0xFFEB: ("APP11", "Application segment 11", APP),
0xFFEC: ("APP12", "Application segment 12", APP),
0xFFED: ("APP13", "Application segment 13", APP),
0xFFEE: ("APP14", "Application segment 14", APP),
0xFFEF: ("APP15", "Application segment 15", APP),
0xFFF0: ("JPG0", "Extension 0", None),
0xFFF1: ("JPG1", "Extension 1", None),
0xFFF2: ("JPG2", "Extension 2", None),
0xFFF3: ("JPG3", "Extension 3", None),
0xFFF4: ("JPG4", "Extension 4", None),
0xFFF5: ("JPG5", "Extension 5", None),
0xFFF6: ("JPG6", "Extension 6", None),
0xFFF7: ("JPG7", "Extension 7", None),
0xFFF8: ("JPG8", "Extension 8", None),
0xFFF9: ("JPG9", "Extension 9", None),
0xFFFA: ("JPG10", "Extension 10", None),
0xFFFB: ("JPG11", "Extension 11", None),
0xFFFC: ("JPG12", "Extension 12", None),
0xFFFD: ("JPG13", "Extension 13", None),
0xFFFE: ("COM", "Comment", COM)
}
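# Illustrative sketch (an addition, not part of the original plugin): the
# MARKER table above is enough to walk a JPEG stream by hand.  Every segment
# after SOI starts with a 0xFFxx marker; segments that carry a payload store
# its length as a big-endian 16-bit value that includes the two length bytes,
# which is why the handlers above read i16(...) - 2 bytes.  This sketch
# ignores the pad bytes and stand-alone markers the real parser copes with.
import struct
def _example_list_markers(fp):
    """Yield (marker, name) pairs for the segments of an open JPEG file."""
    if fp.read(2) != b"\xFF\xD8":          # SOI
        raise SyntaxError("not a JPEG file")
    while True:
        marker, = struct.unpack(">H", fp.read(2))
        yield marker, MARKER.get(marker, ("?", "?", None))[0]
        if marker in (0xFFDA, 0xFFD9):     # SOS / EOI: stop scanning here
            return
        size, = struct.unpack(">H", fp.read(2))
        fp.read(size - 2)                  # skip the segment payload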
def _accept(prefix):
return prefix[0:1] == b"\377"
##
# Image plugin for JPEG and JFIF images.
class JpegImageFile(ImageFile.ImageFile):
format = "JPEG"
format_description = "JPEG (ISO 10918)"
def _open(self):
s = self.fp.read(1)
if i8(s[0]) != 255:
raise SyntaxError("not a JPEG file")
# Create attributes
self.bits = self.layers = 0
# JPEG specifics (internal)
self.layer = []
self.huffman_dc = {}
self.huffman_ac = {}
self.quantization = {}
self.app = {} # compatibility
self.applist = []
self.icclist = []
while True:
s = s + self.fp.read(1)
i = i16(s)
if i in MARKER:
name, description, handler = MARKER[i]
# print hex(i), name, description
if handler is not None:
handler(self, i)
if i == 0xFFDA: # start of scan
rawmode = self.mode
if self.mode == "CMYK":
rawmode = "CMYK;I" # assume adobe conventions
self.tile = [("jpeg", (0,0) + self.size, 0, (rawmode, ""))]
# self.__offset = self.fp.tell()
break
s = self.fp.read(1)
elif i == 0 or i == 65535:
# padded marker or junk; move on
s = "\xff"
else:
raise SyntaxError("no marker found")
def draft(self, mode, size):
if len(self.tile) != 1:
return
d, e, o, a = self.tile[0]
scale = 0
if a[0] == "RGB" and mode in ["L", "YCbCr"]:
self.mode = mode
a = mode, ""
if size:
scale = max(self.size[0] // size[0], self.size[1] // size[1])
for s in [8, 4, 2, 1]:
if scale >= s:
break
e = e[0], e[1], (e[2]-e[0]+s-1)//s+e[0], (e[3]-e[1]+s-1)//s+e[1]
self.size = ((self.size[0]+s-1)//s, (self.size[1]+s-1)//s)
scale = s
self.tile = [(d, e, o, a)]
self.decoderconfig = (scale, 1)
return self
def load_djpeg(self):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities
import tempfile, os
f, path = tempfile.mkstemp()
os.close(f)
if os.path.exists(self.filename):
os.system("djpeg '%s' >'%s'" % (self.filename, path))
else:
raise ValueError("Invalid Filename")
try:
self.im = Image.core.open_ppm(path)
finally:
try: os.unlink(path)
except: pass
self.mode = self.im.mode
self.size = self.im.size
self.tile = []
def _getexif(self):
return _getexif(self)
def _getexif(self):
# Extract EXIF information. This method is highly experimental,
# and is likely to be replaced with something better in a future
# version.
from PIL import TiffImagePlugin
import io
def fixup(value):
if len(value) == 1:
return value[0]
return value
# The EXIF record consists of a TIFF file embedded in a JPEG
# application marker (!).
try:
data = self.info["exif"]
except KeyError:
return None
file = io.BytesIO(data[6:])
head = file.read(8)
exif = {}
# process dictionary
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
for key, value in info.items():
exif[key] = fixup(value)
# get exif extension
try:
file.seek(exif[0x8769])
except KeyError:
pass
else:
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
for key, value in info.items():
exif[key] = fixup(value)
# get gpsinfo extension
try:
file.seek(exif[0x8825])
except KeyError:
pass
else:
info = TiffImagePlugin.ImageFileDirectory(head)
info.load(file)
exif[0x8825] = gps = {}
for key, value in info.items():
gps[key] = fixup(value)
return exif
# --------------------------------------------------------------------
# stuff to save JPEG files
RAWMODE = {
"1": "L",
"L": "L",
"RGB": "RGB",
"RGBA": "RGB",
"RGBX": "RGB",
"CMYK": "CMYK;I", # assume adobe conventions
"YCbCr": "YCbCr",
}
zigzag_index = ( 0, 1, 5, 6, 14, 15, 27, 28,
2, 4, 7, 13, 16, 26, 29, 42,
3, 8, 12, 17, 25, 30, 41, 43,
9, 11, 18, 24, 31, 40, 44, 53,
10, 19, 23, 32, 39, 45, 52, 54,
20, 22, 33, 38, 46, 51, 55, 60,
21, 34, 37, 47, 50, 56, 59, 61,
35, 36, 48, 49, 57, 58, 62, 63)
samplings = {
(1, 1, 1, 1, 1, 1): 0,
(2, 1, 1, 1, 1, 1): 1,
(2, 2, 1, 1, 1, 1): 2,
}
def convert_dict_qtables(qtables):
    qtables = [qtables[key] for key in range(len(qtables)) if key in qtables]
for idx, table in enumerate(qtables):
qtables[idx] = [table[i] for i in zigzag_index]
return qtables
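# Minimal usage sketch for convert_dict_qtables() above; the table contents
# are made-up placeholders rather than a real quantization table.
def _example_zigzag_reorder():
    natural = {0: list(range(64))}             # one table, natural order
    zigzagged = convert_dict_qtables(natural)  # -> list of 64-entry lists
    # The first entries now follow zigzag_index: 0, 1, 5, 6, 14, ...
    return zigzagged[0][:5]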
def get_sampling(im):
sampling = im.layer[0][1:3] + im.layer[1][1:3] + im.layer[2][1:3]
return samplings.get(sampling, -1)
def _save(im, fp, filename):
try:
rawmode = RAWMODE[im.mode]
except KeyError:
raise IOError("cannot write mode %s as JPEG" % im.mode)
info = im.encoderinfo
dpi = info.get("dpi", (0, 0))
quality = info.get("quality", 0)
subsampling = info.get("subsampling", -1)
qtables = info.get("qtables")
if quality == "keep":
quality = 0
subsampling = "keep"
qtables = "keep"
elif quality in presets:
preset = presets[quality]
quality = 0
subsampling = preset.get('subsampling', -1)
qtables = preset.get('quantization')
elif not isinstance(quality, int):
raise ValueError("Invalid quality setting")
else:
if subsampling in presets:
subsampling = presets[subsampling].get('subsampling', -1)
if qtables in presets:
qtables = presets[qtables].get('quantization')
if subsampling == "4:4:4":
subsampling = 0
elif subsampling == "4:2:2":
subsampling = 1
elif subsampling == "4:1:1":
subsampling = 2
elif subsampling == "keep":
if im.format != "JPEG":
raise ValueError("Cannot use 'keep' when original image is not a JPEG")
subsampling = get_sampling(im)
def validate_qtables(qtables):
if qtables is None:
return qtables
if isStringType(qtables):
try:
lines = [int(num) for line in qtables.splitlines()
for num in line.split('#', 1)[0].split()]
except ValueError:
raise ValueError("Invalid quantization table")
else:
                qtables = [lines[s:s + 64] for s in range(0, len(lines), 64)]
if isinstance(qtables, (tuple, list, dict)):
if isinstance(qtables, dict):
qtables = convert_dict_qtables(qtables)
elif isinstance(qtables, tuple):
qtables = list(qtables)
if not (0 < len(qtables) < 5):
raise ValueError("None or too many quantization tables")
for idx, table in enumerate(qtables):
try:
if len(table) != 64:
                        raise TypeError
table = array.array('b', table)
except TypeError:
raise ValueError("Invalid quantization table")
else:
qtables[idx] = list(table)
return qtables
if qtables == "keep":
if im.format != "JPEG":
raise ValueError("Cannot use 'keep' when original image is not a JPEG")
qtables = getattr(im, "quantization", None)
qtables = validate_qtables(qtables)
extra = b""
icc_profile = info.get("icc_profile")
if icc_profile:
ICC_OVERHEAD_LEN = 14
MAX_BYTES_IN_MARKER = 65533
MAX_DATA_BYTES_IN_MARKER = MAX_BYTES_IN_MARKER - ICC_OVERHEAD_LEN
markers = []
while icc_profile:
markers.append(icc_profile[:MAX_DATA_BYTES_IN_MARKER])
icc_profile = icc_profile[MAX_DATA_BYTES_IN_MARKER:]
i = 1
for marker in markers:
size = struct.pack(">H", 2 + ICC_OVERHEAD_LEN + len(marker))
extra = extra + (b"\xFF\xE2" + size + b"ICC_PROFILE\0" + o8(i) + o8(len(markers)) + marker)
i = i + 1
# get keyword arguments
im.encoderconfig = (
quality,
# "progressive" is the official name, but older documentation
# says "progression"
# FIXME: issue a warning if the wrong form is used (post-1.1.7)
"progressive" in info or "progression" in info,
info.get("smooth", 0),
"optimize" in info,
info.get("streamtype", 0),
dpi[0], dpi[1],
subsampling,
qtables,
extra,
info.get("exif", b"")
)
    # if we optimize, libjpeg needs a buffer big enough to hold the whole image in one shot.
    # Guessing on the size, at im.size bytes (raw pixel size is channels*size; this
    # is a value that's been used in a django patch).
# https://github.com/jdriscoll/django-imagekit/issues/50
bufsize=0
if "optimize" in info or "progressive" in info or "progression" in info:
bufsize = im.size[0]*im.size[1]
# The exif info needs to be written as one block, + APP1, + one spare byte.
# Ensure that our buffer is big enough
bufsize = max(ImageFile.MAXBLOCK, bufsize, len(info.get("exif",b"")) + 5 )
ImageFile._save(im, fp, [("jpeg", (0,0)+im.size, 0, rawmode)], bufsize)
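# Illustrative sketch (not part of the plugin): _save() above splits an ICC
# profile into chunks because a JPEG marker payload is capped at 65533 bytes,
# 14 of which are taken by the "ICC_PROFILE\0" tag plus the sequence-number
# and marker-count bytes.  The helper mirrors that arithmetic; joining the
# chunks back together recovers the profile, which is what SOF() does with
# self.icclist when reading.
def _example_split_icc_profile(icc_profile):
    """Split a profile into the payloads that would go into APP2 markers."""
    max_data = 65533 - 14
    chunks = []
    while icc_profile:
        chunks.append(icc_profile[:max_data])
        icc_profile = icc_profile[max_data:]
    return chunks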
def _save_cjpeg(im, fp, filename):
# ALTERNATIVE: handle JPEGs via the IJG command line utilities.
import os
file = im._dump()
os.system("cjpeg %s >%s" % (file, filename))
try: os.unlink(file)
except: pass
# --------------------------------------------------------------------
# Registry stuff
Image.register_open("JPEG", JpegImageFile, _accept)
Image.register_save("JPEG", _save)
Image.register_extension("JPEG", ".jfif")
Image.register_extension("JPEG", ".jpe")
Image.register_extension("JPEG", ".jpg")
Image.register_extension("JPEG", ".jpeg")
Image.register_mime("JPEG", "image/jpeg")
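# Usage sketch (the file names are placeholders, not part of the plugin).
# With the registrations above in place, Image.open() recognises the file
# through _accept() and JpegImageFile._open(), and Image.save() ends up in
# _save(), which understands the encoder options used here.
def _example_roundtrip(src="in.jpg", dst="out.jpg"):
    """Open a JPEG and re-save it with explicit encoder options."""
    im = Image.open(src)
    im.save(dst, quality=85, subsampling="4:2:2",
            progressive=True, optimize=True)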
|
|
import imp
import importlib
import os
import os.path
import shutil
import sys
from test import support
from test.test_importlib import util
import unittest
import warnings
class LockTests(unittest.TestCase):
"""Very basic test of import lock functions."""
def verify_lock_state(self, expected):
self.assertEqual(imp.lock_held(), expected,
"expected imp.lock_held() to be %r" % expected)
def testLock(self):
LOOPS = 50
# The import lock may already be held, e.g. if the test suite is run
# via "import test.autotest".
lock_held_at_start = imp.lock_held()
self.verify_lock_state(lock_held_at_start)
for i in range(LOOPS):
imp.acquire_lock()
self.verify_lock_state(True)
for i in range(LOOPS):
imp.release_lock()
# The original state should be restored now.
self.verify_lock_state(lock_held_at_start)
if not lock_held_at_start:
try:
imp.release_lock()
except RuntimeError:
pass
else:
self.fail("release_lock() without lock should raise "
"RuntimeError")
class ImportTests(unittest.TestCase):
def setUp(self):
mod = importlib.import_module('test.encoded_modules')
self.test_strings = mod.test_strings
self.test_path = mod.__path__
def test_import_encoded_module(self):
for modname, encoding, teststr in self.test_strings:
mod = importlib.import_module('test.encoded_modules.'
'module_' + modname)
self.assertEqual(teststr, mod.test)
def test_find_module_encoding(self):
for mod, encoding, _ in self.test_strings:
with imp.find_module('module_' + mod, self.test_path)[0] as fd:
self.assertEqual(fd.encoding, encoding)
path = [os.path.dirname(__file__)]
with self.assertRaises(SyntaxError):
imp.find_module('badsyntax_pep3120', path)
def test_issue1267(self):
for mod, encoding, _ in self.test_strings:
fp, filename, info = imp.find_module('module_' + mod,
self.test_path)
with fp:
self.assertNotEqual(fp, None)
self.assertEqual(fp.encoding, encoding)
self.assertEqual(fp.tell(), 0)
self.assertEqual(fp.readline(), '# test %s encoding\n'
% encoding)
fp, filename, info = imp.find_module("tokenize")
with fp:
self.assertNotEqual(fp, None)
self.assertEqual(fp.encoding, "utf-8")
self.assertEqual(fp.tell(), 0)
self.assertEqual(fp.readline(),
'"""Tokenization help for Python programs.\n')
def test_issue3594(self):
temp_mod_name = 'test_imp_helper'
sys.path.insert(0, '.')
try:
with open(temp_mod_name + '.py', 'w') as file:
file.write("# coding: cp1252\nu = 'test.test_imp'\n")
file, filename, info = imp.find_module(temp_mod_name)
file.close()
self.assertEqual(file.encoding, 'cp1252')
finally:
del sys.path[0]
support.unlink(temp_mod_name + '.py')
support.unlink(temp_mod_name + '.pyc')
support.unlink(temp_mod_name + '.pyo')
def test_issue5604(self):
# Test cannot cover imp.load_compiled function.
        # Martin von Loewis notes that a shared library cannot have non-ASCII
        # characters because the init_xxx function cannot be compiled, so the
        # issue never happens for dynamic modules.
        # But the sources were modified to follow the generic way of processing paths.
# the return encoding could be uppercase or None
fs_encoding = sys.getfilesystemencoding()
# covers utf-8 and Windows ANSI code pages
# one non-space symbol from every page
# (http://en.wikipedia.org/wiki/Code_page)
known_locales = {
'utf-8' : b'\xc3\xa4',
'cp1250' : b'\x8C',
'cp1251' : b'\xc0',
'cp1252' : b'\xc0',
'cp1253' : b'\xc1',
'cp1254' : b'\xc0',
'cp1255' : b'\xe0',
'cp1256' : b'\xe0',
'cp1257' : b'\xc0',
'cp1258' : b'\xc0',
}
if sys.platform == 'darwin':
self.assertEqual(fs_encoding, 'utf-8')
# Mac OS X uses the Normal Form D decomposition
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
special_char = b'a\xcc\x88'
else:
special_char = known_locales.get(fs_encoding)
if not special_char:
self.skipTest("can't run this test with %s as filesystem encoding"
% fs_encoding)
decoded_char = special_char.decode(fs_encoding)
temp_mod_name = 'test_imp_helper_' + decoded_char
test_package_name = 'test_imp_helper_package_' + decoded_char
init_file_name = os.path.join(test_package_name, '__init__.py')
try:
# if the curdir is not in sys.path the test fails when run with
# ./python ./Lib/test/regrtest.py test_imp
sys.path.insert(0, os.curdir)
with open(temp_mod_name + '.py', 'w') as file:
file.write('a = 1\n')
file, filename, info = imp.find_module(temp_mod_name)
with file:
self.assertIsNotNone(file)
self.assertTrue(filename[:-3].endswith(temp_mod_name))
self.assertEqual(info[0], '.py')
self.assertEqual(info[1], 'U')
self.assertEqual(info[2], imp.PY_SOURCE)
mod = imp.load_module(temp_mod_name, file, filename, info)
self.assertEqual(mod.a, 1)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
mod = imp.load_source(temp_mod_name, temp_mod_name + '.py')
self.assertEqual(mod.a, 1)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if not sys.dont_write_bytecode:
mod = imp.load_compiled(
temp_mod_name,
imp.cache_from_source(temp_mod_name + '.py'))
self.assertEqual(mod.a, 1)
if not os.path.exists(test_package_name):
os.mkdir(test_package_name)
with open(init_file_name, 'w') as file:
file.write('b = 2\n')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
package = imp.load_package(test_package_name, test_package_name)
self.assertEqual(package.b, 2)
finally:
del sys.path[0]
for ext in ('.py', '.pyc', '.pyo'):
support.unlink(temp_mod_name + ext)
support.unlink(init_file_name + ext)
support.rmtree(test_package_name)
def test_issue9319(self):
path = os.path.dirname(__file__)
self.assertRaises(SyntaxError,
imp.find_module, "badsyntax_pep3120", [path])
def test_load_from_source(self):
# Verify that the imp module can correctly load and find .py files
# XXX (ncoghlan): It would be nice to use support.CleanImport
# here, but that breaks because the os module registers some
# handlers in copy_reg on import. Since CleanImport doesn't
# revert that registration, the module is left in a broken
# state after reversion. Reinitialising the module contents
# and just reverting os.environ to its previous state is an OK
# workaround
orig_path = os.path
orig_getenv = os.getenv
with support.EnvironmentVarGuard():
x = imp.find_module("os")
self.addCleanup(x[0].close)
new_os = imp.load_module("os", *x)
self.assertIs(os, new_os)
self.assertIs(orig_path, new_os.path)
self.assertIsNot(orig_getenv, new_os.getenv)
@support.cpython_only
@unittest.skipIf(not hasattr(imp, 'load_dynamic'),
'imp.load_dynamic() required')
def test_issue15828_load_extensions(self):
# Issue 15828 picked up that the adapter between the old imp API
# and importlib couldn't handle C extensions
example = "_heapq"
x = imp.find_module(example)
file_ = x[0]
if file_ is not None:
self.addCleanup(file_.close)
mod = imp.load_module(example, *x)
self.assertEqual(mod.__name__, example)
def test_load_dynamic_ImportError_path(self):
# Issue #1559549 added `name` and `path` attributes to ImportError
# in order to provide better detail. Issue #10854 implemented those
# attributes on import failures of extensions on Windows.
path = 'bogus file path'
name = 'extension'
with self.assertRaises(ImportError) as err:
imp.load_dynamic(name, path)
self.assertIn(path, err.exception.path)
self.assertEqual(name, err.exception.name)
@support.cpython_only
@unittest.skipIf(not hasattr(imp, 'load_dynamic'),
'imp.load_dynamic() required')
def test_load_module_extension_file_is_None(self):
# When loading an extension module and the file is None, open one
# on the behalf of imp.load_dynamic().
# Issue #15902
name = '_heapq'
found = imp.find_module(name)
if found[0] is not None:
found[0].close()
if found[2][2] != imp.C_EXTENSION:
self.skipTest("found module doesn't appear to be a C extension")
imp.load_module(name, None, *found[1:])
def test_multiple_calls_to_get_data(self):
# Issue #18755: make sure multiple calls to get_data() can succeed.
loader = imp._LoadSourceCompatibility('imp', imp.__file__,
open(imp.__file__))
loader.get_data(imp.__file__) # File should be closed
loader.get_data(imp.__file__) # Will need to create a newly opened file
class ReloadTests(unittest.TestCase):
"""Very basic tests to make sure that imp.reload() operates just like
reload()."""
def test_source(self):
# XXX (ncoghlan): It would be nice to use test.support.CleanImport
# here, but that breaks because the os module registers some
# handlers in copy_reg on import. Since CleanImport doesn't
# revert that registration, the module is left in a broken
# state after reversion. Reinitialising the module contents
# and just reverting os.environ to its previous state is an OK
# workaround
with support.EnvironmentVarGuard():
import os
imp.reload(os)
def test_extension(self):
with support.CleanImport('time'):
import time
imp.reload(time)
def test_builtin(self):
with support.CleanImport('marshal'):
import marshal
imp.reload(marshal)
def test_with_deleted_parent(self):
# see #18681
from html import parser
html = sys.modules.pop('html')
def cleanup():
sys.modules['html'] = html
self.addCleanup(cleanup)
with self.assertRaisesRegex(ImportError, 'html'):
imp.reload(parser)
def test_module_replaced(self):
# see #18698
def code():
module = type(sys)('top_level')
module.spam = 3
sys.modules['top_level'] = module
mock = util.mock_modules('top_level',
module_code={'top_level': code})
with mock:
with util.import_state(meta_path=[mock]):
module = importlib.import_module('top_level')
reloaded = imp.reload(module)
actual = sys.modules['top_level']
self.assertEqual(actual.spam, 3)
self.assertEqual(reloaded.spam, 3)
class PEP3147Tests(unittest.TestCase):
"""Tests of PEP 3147."""
tag = imp.get_tag()
@unittest.skipUnless(sys.implementation.cache_tag is not None,
'requires sys.implementation.cache_tag not be None')
def test_cache_from_source(self):
# Given the path to a .py file, return the path to its PEP 3147
# defined .pyc file (i.e. under __pycache__).
path = os.path.join('foo', 'bar', 'baz', 'qux.py')
expect = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, True), expect)
def test_cache_from_source_no_cache_tag(self):
# Non cache tag means NotImplementedError.
with support.swap_attr(sys.implementation, 'cache_tag', None):
with self.assertRaises(NotImplementedError):
imp.cache_from_source('whatever.py')
def test_cache_from_source_no_dot(self):
# Directory with a dot, filename without dot.
path = os.path.join('foo.bar', 'file')
expect = os.path.join('foo.bar', '__pycache__',
'file{}.pyc'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, True), expect)
def test_cache_from_source_optimized(self):
# Given the path to a .py file, return the path to its PEP 3147
# defined .pyo file (i.e. under __pycache__).
path = os.path.join('foo', 'bar', 'baz', 'qux.py')
expect = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyo'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, False), expect)
def test_cache_from_source_cwd(self):
path = 'foo.py'
expect = os.path.join('__pycache__', 'foo.{}.pyc'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, True), expect)
def test_cache_from_source_override(self):
# When debug_override is not None, it can be any true-ish or false-ish
# value.
path = os.path.join('foo', 'bar', 'baz.py')
partial_expect = os.path.join('foo', 'bar', '__pycache__',
'baz.{}.py'.format(self.tag))
self.assertEqual(imp.cache_from_source(path, []), partial_expect + 'o')
self.assertEqual(imp.cache_from_source(path, [17]),
partial_expect + 'c')
# However if the bool-ishness can't be determined, the exception
# propagates.
class Bearish:
def __bool__(self): raise RuntimeError
with self.assertRaises(RuntimeError):
imp.cache_from_source('/foo/bar/baz.py', Bearish())
@unittest.skipUnless(os.sep == '\\' and os.altsep == '/',
'test meaningful only where os.altsep is defined')
def test_sep_altsep_and_sep_cache_from_source(self):
# Windows path and PEP 3147 where sep is right of altsep.
self.assertEqual(
imp.cache_from_source('\\foo\\bar\\baz/qux.py', True),
'\\foo\\bar\\baz\\__pycache__\\qux.{}.pyc'.format(self.tag))
@unittest.skipUnless(sys.implementation.cache_tag is not None,
'requires sys.implementation.cache_tag to not be '
'None')
def test_source_from_cache(self):
# Given the path to a PEP 3147 defined .pyc file, return the path to
# its source. This tests the good path.
path = os.path.join('foo', 'bar', 'baz', '__pycache__',
'qux.{}.pyc'.format(self.tag))
expect = os.path.join('foo', 'bar', 'baz', 'qux.py')
self.assertEqual(imp.source_from_cache(path), expect)
def test_source_from_cache_no_cache_tag(self):
# If sys.implementation.cache_tag is None, raise NotImplementedError.
path = os.path.join('blah', '__pycache__', 'whatever.pyc')
with support.swap_attr(sys.implementation, 'cache_tag', None):
with self.assertRaises(NotImplementedError):
imp.source_from_cache(path)
def test_source_from_cache_bad_path(self):
# When the path to a pyc file is not in PEP 3147 format, a ValueError
# is raised.
self.assertRaises(
ValueError, imp.source_from_cache, '/foo/bar/bazqux.pyc')
def test_source_from_cache_no_slash(self):
# No slashes at all in path -> ValueError
self.assertRaises(
ValueError, imp.source_from_cache, 'foo.cpython-32.pyc')
def test_source_from_cache_too_few_dots(self):
# Too few dots in final path component -> ValueError
self.assertRaises(
ValueError, imp.source_from_cache, '__pycache__/foo.pyc')
def test_source_from_cache_too_many_dots(self):
# Too many dots in final path component -> ValueError
self.assertRaises(
ValueError, imp.source_from_cache,
'__pycache__/foo.cpython-32.foo.pyc')
def test_source_from_cache_no__pycache__(self):
# Another problem with the path -> ValueError
self.assertRaises(
ValueError, imp.source_from_cache,
'/foo/bar/foo.cpython-32.foo.pyc')
def test_package___file__(self):
try:
m = __import__('pep3147')
except ImportError:
pass
else:
self.fail("pep3147 module already exists: %r" % (m,))
# Test that a package's __file__ points to the right source directory.
os.mkdir('pep3147')
sys.path.insert(0, os.curdir)
def cleanup():
if sys.path[0] == os.curdir:
del sys.path[0]
shutil.rmtree('pep3147')
self.addCleanup(cleanup)
# Touch the __init__.py file.
support.create_empty_file('pep3147/__init__.py')
importlib.invalidate_caches()
expected___file__ = os.sep.join(('.', 'pep3147', '__init__.py'))
m = __import__('pep3147')
self.assertEqual(m.__file__, expected___file__, (m.__file__, m.__path__, sys.path, sys.path_importer_cache))
# Ensure we load the pyc file.
support.unload('pep3147')
m = __import__('pep3147')
support.unload('pep3147')
self.assertEqual(m.__file__, expected___file__, (m.__file__, m.__path__, sys.path, sys.path_importer_cache))
class NullImporterTests(unittest.TestCase):
@unittest.skipIf(support.TESTFN_UNENCODABLE is None,
"Need an undecodeable filename")
def test_unencodeable(self):
name = support.TESTFN_UNENCODABLE
os.mkdir(name)
try:
self.assertRaises(ImportError, imp.NullImporter, name)
finally:
os.rmdir(name)
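# Quick illustration (not one of the test cases above) of the PEP 3147 path
# round trip that PEP3147Tests exercises: cache_from_source() maps a source
# path into __pycache__ using the interpreter's cache tag, and
# source_from_cache() inverts the mapping.  'pkg/mod.py' is a made-up path;
# imp is imported at the top of this module.
def _pep3147_roundtrip_example(path='pkg/mod.py'):
    """Return (cache_path, recovered_source_path) for a sample source file."""
    cached = imp.cache_from_source(path, True)   # pkg/__pycache__/mod.<tag>.pyc
    return cached, imp.source_from_cache(cached)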
def test_main():
tests = [
ImportTests,
PEP3147Tests,
ReloadTests,
NullImporterTests,
]
try:
import _thread
except ImportError:
pass
else:
tests.append(LockTests)
support.run_unittest(*tests)
if __name__ == "__main__":
test_main()
|
|
import pytest
from mne import open_docs, grade_to_tris
from mne.epochs import add_channels_epochs
from mne.utils import (copy_function_doc_to_method_doc, copy_doc,
linkcode_resolve, deprecated, deprecated_alias)
import webbrowser
@pytest.mark.parametrize('obj', (grade_to_tris, add_channels_epochs))
def test_doc_filling(obj):
"""Test that docs are filled properly."""
doc = obj.__doc__
assert 'verbose : ' in doc
if obj is add_channels_epochs:
assert 'passed as a keyword' in doc
def test_deprecated_alias():
"""Test deprecated_alias."""
def new_func():
"""Do something."""
pass
deprecated_alias('old_func', new_func)
assert old_func # noqa
assert 'has been deprecated in favor of new_func' in old_func.__doc__ # noqa
assert 'deprecated' not in new_func.__doc__
@deprecated('bad func')
def deprecated_func():
"""Do something."""
pass
@deprecated('bad class')
class deprecated_class(object):
def __init__(self):
pass
@deprecated('bad method')
def bad(self):
pass
def test_deprecated():
"""Test deprecated function."""
assert 'DEPRECATED' in deprecated_func.__doc__
with pytest.deprecated_call(match='bad func'):
deprecated_func()
assert 'DEPRECATED' in deprecated_class.__init__.__doc__
with pytest.deprecated_call(match='bad class'):
dep = deprecated_class()
assert 'DEPRECATED' in deprecated_class.bad.__doc__
assert 'DEPRECATED' in dep.bad.__doc__
with pytest.deprecated_call(match='bad method'):
dep.bad()
def test_copy_doc():
"""Test decorator for copying docstrings."""
class A:
def m1():
"""Docstring for m1."""
pass
class B:
def m1():
pass
class C (A):
@copy_doc(A.m1)
def m1():
pass
assert C.m1.__doc__ == 'Docstring for m1.'
pytest.raises(ValueError, copy_doc(B.m1), C.m1)
def test_copy_function_doc_to_method_doc():
"""Test decorator for re-using function docstring as method docstrings."""
def f1(object, a, b, c):
"""Docstring for f1.
Parameters
----------
object : object
Some object. This description also has
blank lines in it.
a : int
Parameter a
b : int
Parameter b
"""
pass
def f2(object):
"""Docstring for f2.
Parameters
----------
object : object
Only one parameter
Returns
-------
nothing.
"""
pass
def f3(object):
"""Docstring for f3.
Parameters
----------
object : object
Only one parameter
"""
pass
def f4(object):
"""Docstring for f4."""
pass
def f5(object): # noqa: D410, D411, D414
"""Docstring for f5.
Parameters
----------
Returns
-------
nothing.
"""
pass
class A:
@copy_function_doc_to_method_doc(f1)
def method_f1(self, a, b, c):
pass
@copy_function_doc_to_method_doc(f2)
def method_f2(self):
"method_f3 own docstring"
pass
@copy_function_doc_to_method_doc(f3)
def method_f3(self):
pass
assert A.method_f1.__doc__ == """Docstring for f1.
Parameters
----------
a : int
Parameter a
b : int
Parameter b
"""
assert A.method_f2.__doc__ == """Docstring for f2.
Returns
-------
nothing.
method_f3 own docstring"""
assert A.method_f3.__doc__ == 'Docstring for f3.\n\n '
pytest.raises(ValueError, copy_function_doc_to_method_doc(f5), A.method_f1)
def myfun(x):
"""Check url."""
assert 'mne.tools' in x
def test_open_docs():
"""Test doc launching."""
old_tab = webbrowser.open_new_tab
try:
# monkey patch temporarily to prevent tabs from actually spawning
webbrowser.open_new_tab = myfun
open_docs()
open_docs('tutorials', 'dev')
open_docs('examples', 'stable')
pytest.raises(ValueError, open_docs, 'foo')
pytest.raises(ValueError, open_docs, 'api', 'foo')
finally:
webbrowser.open_new_tab = old_tab
def test_linkcode_resolve():
"""Test linkcode resolving."""
ex = '#L'
url = linkcode_resolve('py', dict(module='mne', fullname='Epochs'))
assert '/mne/epochs.py' + ex in url
url = linkcode_resolve('py', dict(module='mne',
fullname='compute_covariance'))
assert '/mne/cov.py' + ex in url
url = linkcode_resolve('py', dict(module='mne',
fullname='convert_forward_solution'))
assert '/mne/forward/forward.py' + ex in url
url = linkcode_resolve('py', dict(module='mne',
fullname='datasets.sample.data_path'))
assert '/mne/datasets/sample/sample.py' + ex in url
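# For reference, a minimal sketch of the behaviour test_copy_doc() checks
# above.  This is an illustration only and not mne.utils.copy_doc itself;
# the error message is an assumption.  The real decorator must also raise
# ValueError when the source function has no docstring, which is what the
# pytest.raises(ValueError, copy_doc(B.m1), C.m1) line relies on.
def _copy_doc_sketch(source):
    def decorator(func):
        if not source.__doc__:
            raise ValueError('copy_doc source has no docstring')
        func.__doc__ = source.__doc__
        return func
    return decorator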
|
|
import sip
sip.setapi('QVariant', 2)
import os
import traceback
from blocks.WorkspaceController import WorkspaceController
from generators.PythonGen import PythonGen
from PyQt4 import QtGui,QtCore, uic
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QDockWidget
class MainWnd(QtGui.QMainWindow):
instance = None
def __init__(self):
super(MainWnd, self).__init__()
self.filename = None
uic.loadUi('main.ui', self)
self.viewGroup = QtGui.QActionGroup(self)
self.viewGroup.addAction(self.actionTestBench)
self.viewGroup.addAction(self.actionBlockEditor)
self.viewGroup.addAction(self.actionDrawersets)
self.actionBlockEditor.setChecked(True)
self.viewGroup.triggered.connect(self.onActionTriggered)
self.actionNew.triggered.connect(self.onNew)
self.actionOpen.triggered.connect(self.onOpen)
self.actionSave.triggered.connect(self.onSave)
self.actionSaveAs.triggered.connect(self.onSaveAs)
self.actionRun.triggered.connect(self.onRun)
self.actionSaveAll.triggered.connect(self.onSaveAll)
self.actionQuit.triggered.connect(self.close)
#self.connect(self.tabWidget,QtCore.SIGNAL("currentChanged(int)"),self.currentChanged)
self.actionStop.setEnabled(False)
# Create a new WorkspaceController
self.wc = WorkspaceController(self.pgBlockEditor)
#self.createBlockFactory("support\\lang_def.xml")
        self.resetWorkspace()
self.InitBlockGenusListWidget()
self.InitBlockDrawerSetsTreeWidget()
self.setActiveWidget(self.pgBlockEditor)
layout = QtGui.QHBoxLayout()
self.wndPreview.setLayout(layout);
self.wndApplyGenus.hide()
self.lwBlockGenus.setMainWnd(self)
#self.blockPreviewWnd.resizeEvent = self.onResize
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
self.show()
def getInstance():
#print(MainWnd.instance)
if(MainWnd.instance == None):
MainWnd.instance = MainWnd()
print()
return MainWnd.instance
def __createDockWindow(self, name):
"""
Private method to create a dock window with common properties.
@param name object name of the new dock window (string)
@return the generated dock window (QDockWindow)
"""
dock = QDockWidget()
dock.setObjectName(name)
dock.setFeatures(
QDockWidget.DockWidgetFeatures(QDockWidget.AllDockWidgetFeatures))
return dock
def __setupDockWindow(self, dock, where, widget, caption):
"""
Private method to configure the dock window created with
__createDockWindow().
@param dock the dock window (QDockWindow)
@param where dock area to be docked to (Qt.DockWidgetArea)
@param widget widget to be shown in the dock window (QWidget)
@param caption caption of the dock window (string)
"""
if caption is None:
caption = ""
self.addDockWidget(where, dock)
dock.setWidget(widget)
dock.setWindowTitle(caption)
dock.show()
def createBlockFactory(self, lang_file):
from FactoryTreeView import FactoryTreeView
self.wc.resetWorkspace();
root = self.wc.setLangDefFilePath(lang_file)
self.wc.loadFreshWorkspace();
self.blocksFactory = self.__createDockWindow("blocksFactory")
self.factoryWidget = QtGui.QWidget()
layout = QtGui.QVBoxLayout()
self.searchTxtBox = QtGui.QTextEdit()
self.searchTxtBox.setMaximumHeight(23)
self.factoryBlockTree = FactoryTreeView(root)
layout.addWidget(self.searchTxtBox)
layout.addWidget(self.factoryBlockTree)
layout.setContentsMargins(0, 0, 0, 0)
self.factoryWidget.setLayout(layout)
self.__setupDockWindow(self.blocksFactory, Qt.LeftDockWidgetArea,
self.factoryWidget, self.tr("Blocks Factory"))
def setActiveWidget(self, widget):
self.stackedWidget.setCurrentWidget(widget)
if widget == self.pgHome:
self.blockGenusWnd.hide()
self.blockPropWnd.hide()
self.drawerSetsWnd.hide()
if widget == self.pgBlockEditor:
self.blockGenusWnd.hide()
self.blockPropWnd.show()
self.drawerSetsWnd.hide()
if widget == self.pgDrawerSets:
self.blockGenusWnd.show()
self.drawerSetsWnd.show()
self.lwBlockGenus.setDragEnabled(True)
self.lwBlockGenus.setDragDropMode(QtGui.QAbstractItemView.DragOnly);
self.blockPropWnd.hide()
def onActionTriggered(self, action):
if action == self.actionTestBench:
widget = self.pgHome
if action == self.actionBlockEditor:
widget = self.pgBlockEditor
if action == self.actionDrawersets:
widget = self.pgDrawerSets
self.setActiveWidget(widget)
def InitBlockGenusListWidget(self):
from blocks.BlockGenus import BlockGenus
self.lwBlockGenus.clear()
for name in sorted(BlockGenus.nameToGenus):
if name == '__previewGenus__': continue
item = QtGui.QListWidgetItem()
item.setText(name)
item.setData(QtCore.Qt.UserRole, BlockGenus.nameToGenus[name])
self.lwBlockGenus.addItem(item)
self.lwBlockGenus.itemSelectionChanged.connect(self.onBlockGenusItemChanged)
def onBlockGenusItemChanged(self):
#print('onBlockGenusItemChanged')
items = self.lwBlockGenus.selectedItems()
if(len(items) != 1): return
item = items[0]
genus = item.data(QtCore.Qt.UserRole)
self.showBlockGenusInfo(genus)
def showBlockGenusInfo(self, genus):
from components.BlockGenusTreeModel import BlockGenusTreeModel
from components.propertyeditor.QVariantDelegate import QVariantDelegate
drawerSetsFile = os.getcwd() + "\\"+ "support\\block_drawersets.xml"
self.genusTreeModel = BlockGenusTreeModel(self.tvBlockGenusView, self, genus, drawerSetsFile)
self.tvBlockGenusView.init()
self.tvBlockGenusView.setModel(self.genusTreeModel)
self.tvBlockGenusView.setItemDelegate(QVariantDelegate(self.tvBlockGenusView));
self.tvBlockGenusView.expandAll()
#isStarterIndex = model.getIndexForNode(model.properties['connectors'])
#self.tvBlockGenusView.setIndexWidget(isStarterIndex, QtGui.QCheckBox())
def InitBlockDrawerSetsTreeWidget(self):
#from components.DrawerSetsTreeView import DrawerSetsTreeView
#langDefLocation = os.getcwd() + "\\"+ "support\\block_genuses.xml"
#print('InitBlockDrawerSetsTreeWidget')
#root = self.wc.setLangDefFilePath("support\\lang_def.xml")
#self.wc.loadFreshWorkspace();
self.tvDrawerSets.init("support\\block_drawersets.jason")
self.tvDrawerSets.selectionModel().selectionChanged.connect(self.onBlockDrawerSetsItemChanged)
#self.drawerSetsModel = DrawerSetsTreeModel(self.tvDrawerSets, self, langDefLocation)
#self.tvDrawerSets.setModel(self.drawerSetsModel)
#self.tvDrawerSets.expandAll()
#factory = self.wc.workspace.factory
#for canvas in reversed(factory.canvas.findChildren(FactoryCanvas)) :
# print(canvas.name)
return
pass
def onBlockDrawerSetsItemChanged(self, selects, deselects):
if(len(selects.indexes()) != 1): return
index = selects.indexes()[0]
item = index.internalPointer()
rb = item.obj
genus = rb.getBlock().getGenus()
for index in range(self.lwBlockGenus.count()):
item = self.lwBlockGenus.item(index)
tmp_genus = item.data(QtCore.Qt.UserRole)
#if(genus.genusName == tmp_genus.genusName):
# pass
if(genus == tmp_genus):
self.lwBlockGenus.setCurrentItem(item)
break
#self.showBlockGenusInfo(genus)
def onBlockClick(self, block):
from components.BlockPropTreeModel import BlockPropTreeModel
from components.propertyeditor.QVariantDelegate import QVariantDelegate
model = BlockPropTreeModel(self, block)
self.tvBlockPropView.init()
self.tvBlockPropView.setModel(model)
self.tvBlockPropView.setItemDelegate(QVariantDelegate(self.tvBlockPropView));
self.tvBlockPropView.expandAll()
pass
def showBlock(self, genus):
#from blocks.BlockGenus import BlockGenus
from blocks.Block import Block
from blocks.FactoryRenderableBlock import FactoryRenderableBlock
if(genus == None): return
block = Block.createBlockFromID(None, genus.genusName)
child_list = self.wndPreview.findChildren(FactoryRenderableBlock)
for i in reversed(range(len(child_list))):
child_list[i].deleteLater()
factoryRB = FactoryRenderableBlock.from_block(None, block)
factoryRB.setParent(self.wndPreview)
factoryRB.show()
factoryRB.move((self.wndPreview.width() - factoryRB.width())/2, (self.wndPreview.height() - factoryRB.height())/2)
pass
def onResize(self, event):
from blocks.FactoryRenderableBlock import FactoryRenderableBlock
print('onResize')
child_list = self.wndPreview.findChildren(FactoryRenderableBlock)
if(len(child_list) != 1): return
factoryRB = child_list[0]
factoryRB.move((self.wndPreview.width() - factoryRB.width())/2, (self.wndPreview.height() - factoryRB.height())/2)
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(self, 'Message',
"Are you sure to quit?", QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
event.accept()
else:
event.ignore()
def onNew(self):
self.wc.resetWorkspace();
def onOpen(self):
if (self.isWorkspaceChanged()):
quit_msg = "There have no saved change, do you want to save it?"
reply = QtGui.QMessageBox.question(self, 'Message',
quit_msg, QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
self.onSave();
self.loadFile();
#this.setTitle(makeFrameTitle());
def onRun(self):
try:
gen = PythonGen(WorkspaceController.workspace)
code = gen.workspaceToCode()
print(code)
exec(code)
except:
print(traceback.format_exc())
#exc_type, exc_obj, exc_tb = sys.exc_info()
#fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
#print(exc_type, fname, exc_tb.tb_lineno,exc_obj)
#pass
def loadFile(self):
filename = QtGui.QFileDialog.getOpenFileName(self, 'Open File', '.', "Block file (*.blks);;All files (*.*)" )
if(filename == ''): return # User cancel load
self.filename = filename
try:
QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
self.loadBlockFile(self.filename);
#context.setWorkspaceChanged(false);
#except:
# print("ERROR!")
# pass
finally:
QtGui.QApplication.restoreOverrideCursor()
def isWorkspaceChanged(self):
return False
def loadBlockFile(self,filename):
if (filename != None):
#self.wc.resetWorkspace();
self.wc.loadProjectFromPath(filename);
def onSave(self):
widget = self.stackedWidget.currentWidget()
if widget == self.pgHome:
pass
if widget == self.pgBlockEditor:
self.onSaveBlockEditor()
if widget == self.pgDrawerSets:
self.onSaveDrawerSets()
def onSaveBlockEditor(self):
import codecs
filename = self.filename
if(filename == None or filename == ''):
filename = QtGui.QFileDialog.getSaveFileName(self, "Save file", "", ".blks(*.blks)")
if(filename == ''): return # User cancel load
block_file = codecs.open(filename, "w",'utf-8')
block_file.write(self.wc.getSaveString())
block_file.close()
def onSaveDrawerSets(self):
import codecs
drawerSetsFileName = 'support\\block_drawersets.jason'
file = codecs.open(drawerSetsFileName, "w",'utf-8')
file.write(self.tvDrawerSets.getSaveString())
file.close()
genusFileName = 'support\\block_genuses.jason'
file = codecs.open(genusFileName, "w",'utf-8')
file.write(self.lwBlockGenus.getSaveString())
file.close()
def onSaveAs(self):
import codecs
filename = QtGui.QFileDialog.getSaveFileName(self, "Save file", "", ".blks(*.blks)")
if(filename == ''): return # User cancel load
block_file = codecs.open(filename, "w",'utf-8')
block_file.write(self.wc.getSaveString())
block_file.close()
def onSaveAll(self):
print('onSaveAll')
    def resetWorkspace(self):
self.wc.resetWorkspace();
self.wc.setLangDefFilePath("support\\lang_def.xml")
self.wc.loadFreshWorkspace();
self.wc.initWorkspacePanel()
self.saveFilePath = None;
self.saveFileName = "untitled";
self.workspaceEmpty = True;
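# Typical entry point for this window (a sketch; the module above does not
# define one).  MainWnd.getInstance() and the 'main.ui' file it loads are
# taken from the code above; the QApplication boilerplate is standard PyQt4.
def _example_launch():
    """Create the QApplication, show the singleton MainWnd and run it."""
    import sys
    app = QtGui.QApplication(sys.argv)
    wnd = MainWnd.getInstance()   # constructs the window; __init__ calls show()
    return app.exec_()            # 'wnd' stays referenced until the loop exits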
|
|
import socket
from typing import Any, BinaryIO, Dict, Iterable, List, Optional, Tuple, Union
__version__ = '2.0.0'
Address = Union[Tuple[str, int], str]
Body = Union[bytes, str]
Stats = Dict[str, Union[str, int]]
DEFAULT_TUBE = 'default'
DEFAULT_PRIORITY = 2**16
DEFAULT_DELAY = 0
DEFAULT_TTR = 60
class Job:
"""A job returned from the server."""
__slots__ = ('id', 'body')
def __init__(self, id: int, body: Body) -> None:
self.id = id
self.body = body
JobOrID = Union[Job, int]
class Error(Exception):
"""Base class for non-connection related exceptions. Connection related
issues use the built-in ``ConnectionError``.
"""
class UnknownResponseError(Error):
"""The server sent a response that this client does not understand."""
def __init__(self, status: bytes, values: List[bytes]) -> None:
self.status = status
self.values = values
class BeanstalkdError(Error):
"""Base class for error messages returned from the server."""
class BadFormatError(BeanstalkdError):
"""The client sent a malformed command."""
class BuriedError(BeanstalkdError):
"""The server ran out of memory trying to grow the priority queue and had to
bury the job.
"""
    def __init__(self, values: Optional[List[bytes]] = None) -> None:
if values:
self.id = int(values[0]) # type: Optional[int]
else:
self.id = None
class DeadlineSoonError(BeanstalkdError):
"""The client has a reserved job timing out within the next second."""
class DrainingError(BeanstalkdError):
"""The client tried to insert a job while the server was in drain mode."""
class ExpectedCrlfError(BeanstalkdError):
"""The client sent a job body without a trailing CRLF."""
class InternalError(BeanstalkdError):
"""The server detected an internal error."""
class JobTooBigError(BeanstalkdError):
"""The client attempted to insert a job larger than ``max-job-size``."""
class NotFoundError(BeanstalkdError):
"""For the delete, release, bury, and kick commands, it means that the job
does not exist or is not reserved by the client.
For the peek commands, it means the requested job does not exist or that
there are no jobs in the requested state.
"""
class NotIgnoredError(BeanstalkdError):
"""The client attempted to ignore the only tube on its watch list."""
class OutOfMemoryError(BeanstalkdError):
"""The server could not allocate enough memory for a job."""
class TimedOutError(BeanstalkdError):
"""A job could not be reserved within the specified timeout."""
class UnknownCommandError(BeanstalkdError):
"""The client sent a command that the server does not understand."""
ERROR_RESPONSES = {
b'BAD_FORMAT': BadFormatError,
b'BURIED': BuriedError,
b'DEADLINE_SOON': DeadlineSoonError,
b'DRAINING': DrainingError,
b'EXPECTED_CRLF': ExpectedCrlfError,
b'INTERNAL_ERROR': InternalError,
b'JOB_TOO_BIG': JobTooBigError,
b'NOT_FOUND': NotFoundError,
b'NOT_IGNORED': NotIgnoredError,
b'OUT_OF_MEMORY': OutOfMemoryError,
b'TIMED_OUT': TimedOutError,
b'UNKNOWN_COMMAND': UnknownCommandError,
}
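# Sketch of how the table above is applied to a reply from the server.  The
# real lookup happens in _parse_response() further down; this stand-alone
# helper is illustrative only.
def _example_raise_for_status(line: bytes) -> None:
    """Raise the mapped exception if the reply line is an error status."""
    status, *values = line.split()
    if status in ERROR_RESPONSES:
        raise ERROR_RESPONSES[status](values)
# _example_raise_for_status(b'NOT_FOUND') would raise NotFoundError([])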
class Client:
"""A client implementing the beanstalk protocol. Upon creation a connection
with beanstalkd is established and tubes are initialized.
:param address: A socket address pair (host, port) or a Unix domain socket path.
:param encoding: The encoding used to encode and decode job bodies.
:param use: The tube to use after connecting.
:param watch: The tubes to watch after connecting. The ``default`` tube will
be ignored if it's not included.
"""
def __init__(self,
address: Address,
encoding: Optional[str] = 'utf-8',
use: str = DEFAULT_TUBE,
watch: Union[str, Iterable[str]] = DEFAULT_TUBE) -> None:
if isinstance(address, str):
self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self._sock.connect(address)
else:
self._sock = socket.create_connection(address)
self._reader = self._sock.makefile('rb') # type: BinaryIO
self.encoding = encoding
if use != DEFAULT_TUBE:
self.use(use)
if isinstance(watch, str):
if watch != DEFAULT_TUBE:
self.watch(watch)
self.ignore(DEFAULT_TUBE)
else:
for tube in watch:
self.watch(tube)
if DEFAULT_TUBE not in watch:
self.ignore(DEFAULT_TUBE)
def __enter__(self) -> 'Client':
return self
def __exit__(self, *args: Any) -> None:
self.close()
def close(self) -> None:
"""Closes the connection to beanstalkd. The client instance should not
be used after calling this method."""
self._reader.close()
self._sock.close()
def _send_cmd(self, cmd: bytes, expected: bytes) -> List[bytes]:
self._sock.sendall(cmd + b'\r\n')
line = self._reader.readline()
return _parse_response(line, expected)
def _read_chunk(self, size: int) -> bytes:
data = self._reader.read(size + 2)
return _parse_chunk(data, size)
def _int_cmd(self, cmd: bytes, expected: bytes) -> int:
n, = self._send_cmd(cmd, expected)
return int(n)
def _job_cmd(self, cmd: bytes, expected: bytes) -> Job:
id, size = (int(n) for n in self._send_cmd(cmd, expected))
chunk = self._read_chunk(size)
if self.encoding is None:
body = chunk # type: Body
else:
body = chunk.decode(self.encoding)
return Job(id, body)
def _peek_cmd(self, cmd: bytes) -> Job:
return self._job_cmd(cmd, b'FOUND')
def _stats_cmd(self, cmd: bytes) -> Stats:
size = self._int_cmd(cmd, b'OK')
chunk = self._read_chunk(size)
return _parse_simple_yaml(chunk)
def _list_cmd(self, cmd: bytes) -> List[str]:
size = self._int_cmd(cmd, b'OK')
chunk = self._read_chunk(size)
return _parse_simple_yaml_list(chunk)
def put(self,
body: Body,
priority: int = DEFAULT_PRIORITY,
delay: int = DEFAULT_DELAY,
ttr: int = DEFAULT_TTR) -> int:
"""Inserts a job into the currently used tube and returns the job ID.
:param body: The data representing the job.
:param priority: An integer between 0 and 4,294,967,295 where 0 is the
most urgent.
:param delay: The number of seconds to delay the job for.
:param ttr: The maximum number of seconds the job can be reserved for
before timing out.
"""
if isinstance(body, str):
if self.encoding is None:
raise TypeError("Unable to encode string with no encoding set")
body = body.encode(self.encoding)
cmd = b'put %d %d %d %d\r\n%b' % (priority, delay, ttr, len(body), body)
return self._int_cmd(cmd, b'INSERTED')
def use(self, tube: str) -> None:
"""Changes the currently used tube.
:param tube: The tube to use.
"""
self._send_cmd(b'use %b' % tube.encode('ascii'), b'USING')
def reserve(self, timeout: Optional[int] = None) -> Job:
"""Reserves a job from a tube on the watch list, giving this client
exclusive access to it for the TTR. Returns the reserved job.
This blocks until a job is reserved unless a ``timeout`` is given,
which will raise a :class:`TimedOutError <greenstalk.TimedOutError>` if
a job cannot be reserved within that time.
:param timeout: The maximum number of seconds to wait.
"""
if timeout is None:
cmd = b'reserve'
else:
cmd = b'reserve-with-timeout %d' % timeout
return self._job_cmd(cmd, b'RESERVED')
def reserve_job(self, id: int) -> Job:
"""Reserves a job by ID, giving this client exclusive access to it for
the TTR. Returns the reserved job.
A :class:`NotFoundError <greenstalk.NotFoundError>` is raised if a job
with the specified ID could not be reserved.
:param id: The ID of the job to reserve.
"""
return self._job_cmd(b'reserve-job %d' % id, b'RESERVED')
def delete(self, job: JobOrID) -> None:
"""Deletes a job.
:param job: The job or job ID to delete.
"""
self._send_cmd(b'delete %d' % _to_id(job), b'DELETED')
def release(self,
job: Job,
priority: int = DEFAULT_PRIORITY,
delay: int = DEFAULT_DELAY) -> None:
"""Releases a reserved job.
:param job: The job to release.
:param priority: An integer between 0 and 4,294,967,295 where 0 is the
most urgent.
:param delay: The number of seconds to delay the job for.
"""
self._send_cmd(b'release %d %d %d' % (job.id, priority, delay), b'RELEASED')
def bury(self, job: Job, priority: int = DEFAULT_PRIORITY) -> None:
"""Buries a reserved job.
:param job: The job to bury.
:param priority: An integer between 0 and 4,294,967,295 where 0 is the
most urgent.
"""
self._send_cmd(b'bury %d %d' % (job.id, priority), b'BURIED')
def touch(self, job: Job) -> None:
"""Refreshes the TTR of a reserved job.
:param job: The job to touch.
"""
self._send_cmd(b'touch %d' % job.id, b'TOUCHED')
def watch(self, tube: str) -> int:
"""Adds a tube to the watch list. Returns the number of tubes this
client is watching.
:param tube: The tube to watch.
"""
return self._int_cmd(b'watch %b' % tube.encode('ascii'), b'WATCHING')
def ignore(self, tube: str) -> int:
"""Removes a tube from the watch list. Returns the number of tubes this
client is watching.
:param tube: The tube to ignore.
"""
return self._int_cmd(b'ignore %b' % tube.encode('ascii'), b'WATCHING')
def peek(self, id: int) -> Job:
"""Returns a job by ID.
:param id: The ID of the job to peek.
"""
return self._peek_cmd(b'peek %d' % id)
def peek_ready(self) -> Job:
"""Returns the next ready job in the currently used tube."""
return self._peek_cmd(b'peek-ready')
def peek_delayed(self) -> Job:
"""Returns the next available delayed job in the currently used tube."""
return self._peek_cmd(b'peek-delayed')
def peek_buried(self) -> Job:
"""Returns the oldest buried job in the currently used tube."""
return self._peek_cmd(b'peek-buried')
def kick(self, bound: int) -> int:
"""Moves delayed and buried jobs into the ready queue and returns the
        number of jobs affected.
Only jobs from the currently used tube are moved.
A kick will only move jobs in a single state. If there are any buried
jobs, only those will be moved. Otherwise delayed jobs will be moved.
:param bound: The maximum number of jobs to kick.
"""
return self._int_cmd(b'kick %d' % bound, b'KICKED')
def kick_job(self, job: JobOrID) -> None:
"""Moves a delayed or buried job into the ready queue.
:param job: The job or job ID to kick.
"""
self._send_cmd(b'kick-job %d' % _to_id(job), b'KICKED')
def stats_job(self, job: JobOrID) -> Stats:
"""Returns job statistics.
:param job: The job or job ID to return statistics for.
"""
return self._stats_cmd(b'stats-job %d' % _to_id(job))
def stats_tube(self, tube: str) -> Stats:
"""Returns tube statistics.
:param tube: The tube to return statistics for.
"""
return self._stats_cmd(b'stats-tube %b' % tube.encode('ascii'))
def stats(self) -> Stats:
"""Returns system statistics."""
return self._stats_cmd(b'stats')
def tubes(self) -> List[str]:
"""Returns a list of all existing tubes."""
return self._list_cmd(b'list-tubes')
def using(self) -> str:
"""Returns the tube currently being used by the client."""
tube, = self._send_cmd(b'list-tube-used', b'USING')
return tube.decode('ascii')
def watching(self) -> List[str]:
"""Returns a list of tubes currently being watched by the client."""
return self._list_cmd(b'list-tubes-watched')
def pause_tube(self, tube: str, delay: int) -> None:
"""Prevents jobs from being reserved from a tube for a period of time.
:param tube: The tube to pause.
:param delay: The number of seconds to pause the tube for.
"""
self._send_cmd(b'pause-tube %b %d' % (tube.encode('ascii'), delay), b'PAUSED')
def _to_id(j: JobOrID) -> int:
return j.id if isinstance(j, Job) else j
def _parse_response(line: bytes, expected: bytes) -> List[bytes]:
if not line:
raise ConnectionError("Unexpected EOF")
assert line[-2:] == b'\r\n'
line = line[:-2]
status, *values = line.split()
if status == expected:
return values
if status in ERROR_RESPONSES:
raise ERROR_RESPONSES[status](values)
raise UnknownResponseError(status, values)
def _parse_chunk(data: bytes, size: int) -> bytes:
assert data[-2:] == b'\r\n'
data = data[:-2]
if len(data) != size:
raise ConnectionError("Unexpected EOF reading chunk")
return data
def _parse_simple_yaml(buf: bytes) -> Stats:
data = buf.decode('ascii')
assert data[:4] == '---\n'
data = data[4:] # strip YAML head
stats = {}
for line in data.splitlines():
key, value = line.split(': ', 1)
try:
v = int(value) # type: Union[int, str]
except ValueError:
v = value
stats[key] = v
return stats
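# Illustrative sketch (not part of the original module): the simple YAML
# parser above turns a beanstalkd stats payload into a flat dict, with
# integer-looking values converted to int and everything else kept as str.
def _example_parse_stats() -> Stats:
    payload = b'---\ncurrent-jobs-ready: 3\nversion: 1.12\n'
    return _parse_simple_yaml(payload)  # {'current-jobs-ready': 3, 'version': '1.12'}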
def _parse_simple_yaml_list(buf: bytes) -> List[str]:
data = buf.decode('ascii')
assert data[:4] == '---\n'
data = data[4:] # strip YAML head
    parsed = []
    for line in data.splitlines():
        assert line.startswith('- ')
        parsed.append(line[2:])
    return parsed
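# Illustrative usage sketch (not part of the original module). Assumes a
# beanstalkd server listening on 127.0.0.1:11300, that the Client constructor
# accepts a (host, port) address tuple, and that the default UTF-8 encoding
# is in effect so job bodies round-trip as str.
def _example_round_trip() -> None:
    with Client(('127.0.0.1', 11300)) as client:
        client.put('hello')              # insert into the currently used tube
        job = client.reserve(timeout=5)  # block for at most 5 seconds
        assert job.body == 'hello'
        client.delete(job)               # acknowledge completion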
|
|
# -*- mode: python; coding: utf-8 -*-
# Copyright 2017 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Handling quality metrics of HERA data."""
from astropy.time import Time
from math import floor
from sqlalchemy import (Column, Integer, BigInteger, Float, ForeignKey,
String)
from sqlalchemy.ext.hybrid import hybrid_property
from . import MCDeclarativeBase, DEFAULT_GPS_TOL
class AntMetrics(MCDeclarativeBase):
"""
Definition of ant_metrics table.
These are metrics, generally generated by
hera_qm, which are keyed to individual antennas. For example, hera_qm.ant_metrics
will flag individual antennas as bad.
Attributes
----------
obsid : BigInteger Column
observation identification number, generally equal to the floor
of the start time in gps seconds (long integer). ForeignKey into hera_obs table.
ant : Integer Column
Antenna number (int >= 0)
pol : String Column
Polarization ('x', 'y', 'n', or 'e')
metric : String Column
Name of metric
mc_time : BigInteger Column
time metric is reported to M&C in floor(gps seconds)
val : Float Column
Value of metric (double)
"""
__tablename__ = 'ant_metrics'
obsid = Column(BigInteger, ForeignKey('hera_obs.obsid'), primary_key=True)
ant = Column(Integer, primary_key=True)
pol = Column(String, primary_key=True)
metric = Column(String, ForeignKey('metric_list.metric'), primary_key=True)
mc_time = Column(BigInteger, nullable=False)
val = Column(Float, nullable=False)
# tolerances set to 1ms
tols = {'mc_time': DEFAULT_GPS_TOL}
@hybrid_property
def antpol(self):
"""
Get antenna number and pol as a tuple.
Returns
-------
tuple
(ant, pol)
"""
return (self.ant, self.pol)
@classmethod
def create(cls, obsid, ant, pol, metric, db_time, val):
"""
        Create a new ant_metrics record, using Astropy to convert the database timestamp to GPS seconds.
Parameters
----------
obsid: long integer
observation identification number.
ant: integer
antenna number
pol: string ('x', 'y', 'n', or 'e')
polarization
metric: string
metric name
db_time: astropy time object
astropy time object based on a timestamp from the database.
Usually generated from MCSession.get_current_db_time()
val: float
value of metric
"""
if not isinstance(obsid, int):
raise ValueError('obsid must be an integer.')
if not isinstance(ant, int):
raise ValueError('antenna must be an integer.')
try:
pol = str(pol)
except ValueError:
raise ValueError('pol must be string "x", "y", "n", or "e".')
pol = pol.lower()
if pol not in ('x', 'y', 'n', 'e'):
raise ValueError('pol must be string "x", "y", "n", or "e".')
if not isinstance(metric, str):
raise ValueError('metric must be string.')
if not isinstance(db_time, Time):
raise ValueError('db_time must be an astropy Time object')
mc_time = floor(db_time.gps)
try:
val = float(val)
except ValueError:
raise ValueError('val must be castable as float.')
return cls(obsid=obsid, ant=ant, pol=pol, metric=metric,
mc_time=mc_time, val=val)
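# Illustrative sketch (not part of the original module): building an
# AntMetrics row. The obsid, antenna, metric name and value below are made
# up for demonstration; in practice db_time comes from
# MCSession.get_current_db_time() and the metric must exist in metric_list.
def _example_ant_metric():
    db_time = Time('2017-07-01 00:00:00', scale='utc')
    return AntMetrics.create(obsid=1183000000, ant=12, pol='x',
                             metric='example_metric', db_time=db_time, val=1.0)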
class ArrayMetrics(MCDeclarativeBase):
"""
Definition of array_metrics table.
These are metrics, generally generated by
hera_qm, which are keyed to the overall array. For example, hera_qm.firstcal_metrics
generates an overall decision whether the firstcal solutions were "good."
Attributes
----------
obsid : BigInteger Column
observation identification number, generally equal to the floor
of the start time in gps seconds (long integer). ForeignKey into hera_obs table.
metric : String Column
Name of metric
mc_time : BigInteger Column
time metric is reported to M&C in floor(gps seconds)
val : Float Column
Value of metric (double)
"""
__tablename__ = 'array_metrics'
obsid = Column(BigInteger, ForeignKey('hera_obs.obsid'), primary_key=True)
metric = Column(String, ForeignKey('metric_list.metric'), primary_key=True)
mc_time = Column(BigInteger, nullable=False)
val = Column(Float, nullable=False)
# tolerances set to 1ms
tols = {'mc_time': DEFAULT_GPS_TOL}
@classmethod
def create(cls, obsid, metric, db_time, val):
"""
        Create a new array_metrics record, using Astropy to convert the database timestamp to GPS seconds.
Parameters
----------
obsid: long integer
observation identification number.
metric: string
metric name
db_time: astropy time object
astropy time object based on a timestamp from the database.
Usually generated from MCSession.get_current_db_time()
val: float
value of metric
"""
if not isinstance(obsid, int):
raise ValueError('obsid must be an integer.')
if not isinstance(metric, str):
raise ValueError('metric must be string.')
if not isinstance(db_time, Time):
raise ValueError('db_time must be an astropy Time object')
mc_time = floor(db_time.gps)
try:
val = float(val)
except ValueError:
raise ValueError('val must be castable as float.')
return cls(obsid=obsid, metric=metric, mc_time=mc_time, val=val)
class MetricList(MCDeclarativeBase):
"""
Definition of metric_list table, which provides descriptions of metrics.
Attributes
----------
metric : String Column
Name of metric
desc : String Column
Description of metric
"""
__tablename__ = 'metric_list'
metric = Column(String, primary_key=True)
desc = Column(String, nullable=False)
@classmethod
def create(cls, metric, desc):
"""
        Create a new metric_list record describing a metric.
Parameters
----------
metric: string
metric name
desc: string
description of metric
"""
if not isinstance(metric, str):
raise ValueError('metric must be string.')
if not isinstance(desc, str):
raise ValueError('metric description must be a string.')
return cls(metric=metric, desc=desc)
|
|
__author__ = 'isparks'
from lxml import etree
MEDI_NS = '{http://www.mdsol.com/ns/odm/metadata}'
ODM_NS = '{http://www.cdisc.org/ns/odm/v1.3}'
XLINK_NS = '{http://www.w3.org/1999/xlink}'
def getEnvironmentFromNameAndProtocol(studyname, protocolname):
"""Extract environment name using studyname and protocolname to guide"""
# StudyName = "TEST (1) (DEV)"
# ProtocolName = "TEST (1)"
# Raw Env = "(DEV)"
raw_env = studyname[len(protocolname):].strip()
if '(' in raw_env:
l_brace_pos = raw_env.rfind('(')
r_brace_pos = raw_env.rfind(')')
return raw_env[l_brace_pos + 1:r_brace_pos]
else:
return raw_env
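# Illustrative sketch (not part of the original module); the study and
# protocol names below are hypothetical.
def _example_environments():
    assert getEnvironmentFromNameAndProtocol("TEST (1) (DEV)", "TEST (1)") == "DEV"
    assert getEnvironmentFromNameAndProtocol("Mediflex", "Mediflex") == ""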
def parseXMLString(xml):
"""Parse XML string, return root"""
# Remove BOM if it exists (different requests seem to have different BOMs)
unichr_captured = ''
if not xml.strip():
return u''
while xml[0] != u'<':
unichr_captured += xml[0]
xml = xml[1:]
parser = etree.XMLParser(ns_clean=True, collect_ids=False, huge_tree=True)
try:
return etree.fromstring(xml.encode('utf-8'), parser=parser)
except etree.XMLSyntaxError:
raise Exception(xml)
class RWSException(Exception):
"""RWS Exception. Usual to attach the error response object"""
def __init__(self, msg, rws_error):
Exception.__init__(self, msg)
self.rws_error = rws_error
class XMLRepr(object):
"""Classes that represent objects passed back from RWS as XML"""
def __init__(self, xml):
self.root = parseXMLString(xml)
def __str__(self):
"""String representation of same"""
return etree.tostring(self.root, encoding='utf-8').decode('utf-8')
class ODMDoc(XMLRepr):
"""A base ODM document"""
def __init__(self, xml):
# Call base class
XMLRepr.__init__(self, xml)
r_get = self.root.get
self.filetype = r_get('FileType')
self.creationdatetime = r_get('CreationDateTime')
self.fileoid = r_get("FileOID")
self.ODMVersion = r_get("ODMVersion")
self.granularity = r_get("Granularity", None)
class RWSError(ODMDoc):
"""
Extends ODMDoc, inheriting attributes like filetype, creationdatetime etc.
Parses XML of the form::
<?xml version="1.0" encoding="utf-8"?>
<ODM xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata"
FileType="Snapshot"
CreationDateTime="2013-04-08T10:28:49.578-00:00"
FileOID="4d13722a-ceb6-4419-a917-b6ad5d0bc30e"
ODMVersion="1.3"
mdsol:ErrorDescription="Incorrect login and password combination. [RWS00008]"
xmlns="http://www.cdisc.org/ns/odm/v1.3" />
"""
def __init__(self, xml):
ODMDoc.__init__(self, xml)
r_get = self.root.get
self.errordescription = r_get(MEDI_NS + "ErrorDescription")
class RWSErrorResponse(XMLRepr):
"""
Parses messages of the form::
<Response
ReferenceNumber="0b47fe86-542f-4070-9e7d-16396a5ef08a"
InboundODMFileOID="Not Supplied"
IsTransactionSuccessful="0"
ReasonCode="RWS00092"
ErrorClientResponseMessage="CRF version not found">
</Response>
"""
def __init__(self, xml):
# Call base class
XMLRepr.__init__(self, xml)
r_get = self.root.get
self.referencenumber = r_get("ReferenceNumber")
self.inboundodmfileoid = r_get('InboundODMFileOID')
self.istransactionsuccessful = r_get('IsTransactionSuccessful') == "1"
self.reasoncode = r_get("ReasonCode")
self.errordescription = r_get("ErrorClientResponseMessage")
class RWSResponse(XMLRepr):
"""
Parses messages of the form::
<Response ReferenceNumber="82e942b0-48e8-4cf4-b299-51e2b6a89a1b"
InboundODMFileOID=""
IsTransactionSuccessful="1"
SuccessStatistics="Rave objects touched: Subjects=0; Folders=0; Forms=0; Fields=0; LogLines=0" NewRecords="">
</Response>
"""
def __init__(self, xml):
# Call base class
XMLRepr.__init__(self, xml)
r_get = self.root.get
self.referencenumber = r_get("ReferenceNumber")
self.inboundodmfileoid = r_get('InboundODMFileOID')
self.istransactionsuccessful = r_get('IsTransactionSuccessful') == "1"
self.subjects_touched = 0
self.folders_touched = 0
self.forms_touched = 0
self.fields_touched = 0
self.loglines_touched = 0
success_stats = r_get('SuccessStatistics', '')
# Clinical data post
if success_stats.startswith('Rave objects touched:'):
success_stats = success_stats[
len('Rave objects touched:') + 1:] # Subjects=0; Folders=0; Forms=0; Fields=0; LogLines=0
parts = success_stats.split(';') # [Subjects=0, Folders=0, Forms=0, Fields=0, LogLines=0]
for part in parts:
name, value = part.strip().split('=')
# if value[-1] == ';':
# value = value[:-1]
if name == 'Subjects':
self.subjects_touched = int(value)
elif name == 'Folders':
self.folders_touched = int(value)
elif name == 'Forms':
self.forms_touched = int(value)
elif name == 'Fields':
self.fields_touched = int(value)
elif name == 'LogLines':
self.loglines_touched = int(value)
else:
raise KeyError('Unknown Rave Object %s in response %s' % (name, success_stats,))
# Note: Metadata post has success_stats == 'N/A'
self.new_records = r_get("NewRecords")
class RWSPostResponse(RWSResponse):
"""
Parses responses from PostODMClinicalData messages with the format::
<Response ReferenceNumber="82e942b0-48e8-4cf4-b299-51e2b6a89a1b"
InboundODMFileOID=""
IsTransactionSuccessful="1"
SuccessStatistics="Rave objects touched: Subjects=0; Folders=0; Forms=0; Fields=0; LogLines=0" NewRecords=""
SubjectNumberInStudy="1103" SubjectNumberInStudySite="55">
</Response>
"""
def __init__(self, xml):
# Call base class
RWSResponse.__init__(self, xml)
r_get = self.root.get
# These counts may only come as a result of a Clinical data POST
snis = r_get('SubjectNumberInStudy')
self.subjects_in_study = int(snis) if snis is not None else None
sniss = r_get('SubjectNumberInStudySite')
self.subjects_in_study_site = int(sniss) if sniss is not None else None
# DraftImported only comes from a MetaData Post
# In which case successStatistics will be SuccessStatistics="N/A"
self.draft_imported = r_get("DraftImported")
class RWSPostErrorResponse(RWSResponse):
"""Responses to Clinical data post messages have additional Attributes to normal RWS Response messages::
<Response
ReferenceNumber="5b1fa9a3-0cf3-46b6-8304-37c2e3b7d04f"
InboundODMFileOID="1"
IsTransactionSuccessful = "0"
ReasonCode="RWS00024"
ErrorOriginLocation="/ODM/ClinicalData[1]/SubjectData[1]"
SuccessStatistics="Rave objects touched: Subjects=0; Folders=0; Forms=0; Fields=0; LogLines=0"
ErrorClientResponseMessage="Subject already exists.">
</Response>
"""
def __init__(self, xml):
RWSResponse.__init__(self, xml)
# Get additional properties
r_get = self.root.get
self.reason_code = r_get('ReasonCode')
self.error_origin_location = r_get('ErrorOriginLocation')
self.error_client_response_message = r_get('ErrorClientResponseMessage')
class RWSStudyListItem(object):
"""An item in the RWS Study List response"""
def __init__(self):
self.oid = None
self.studyname = None
self.protocolname = None
self.environment = None
self.projecttype = None
def isProd(self):
"""Is production if environment is empty"""
return self.environment == '' and self.projecttype != 'GlobalLibraryVolume'
@classmethod
def fromElement(cls, elem):
"""Read properties from an XML Element
<Study OID="Fixitol(Dev)" mdsol:ProjectType="GlobalLibraryVolume">
<GlobalVariables>
<StudyName>Fixitol (Dev)</StudyName>
<StudyDescription/>
<ProtocolName>Fixitol</ProtocolName>
</GlobalVariables>
</Study>
"""
self = cls()
self.oid = elem.get('OID')
# Not all returned documents have a projecttype (GlobalLibraryVolumes do)
self.projecttype = elem.get(MEDI_NS + "ProjectType", "Project")
e_global_variables = elem.find(ODM_NS + 'GlobalVariables')
self.studyname = e_global_variables.find(ODM_NS + 'StudyName').text
self.protocolname = e_global_variables.find(ODM_NS + 'ProtocolName').text
self.environment = getEnvironmentFromNameAndProtocol(self.studyname, self.protocolname)
return self
# I hate multi-inheritance generally but since this is inheriting from a built-in like list I feel
# less bad about it.
class RWSStudies(list, ODMDoc):
"""
Represents a list of studies. Extends the list class and adds a couple of extra properties::
<ODM FileType="Snapshot" FileOID="767a1f8b-7b72-4d12-adbe-37d4d62ba75e"
CreationDateTime="2013-04-08T10:02:17.781-00:00"
ODMVersion="1.3"
xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns="http://www.cdisc.org/ns/odm/v1.3">
<Study OID="Fixitol(Dev)">
<GlobalVariables>
<StudyName>Fixitol (Dev)</StudyName>
<StudyDescription/>
<ProtocolName>Fixitol</ProtocolName>
</GlobalVariables>
</Study>
<Study OID="IANTEST(Prod)">
<GlobalVariables>
<StudyName>IANTEST</StudyName>
<StudyDescription/>
<ProtocolName>IANTEST</ProtocolName>
</GlobalVariables>
</Study>
</ODM>
"""
def __init__(self, xml):
# Get basic properties
ODMDoc.__init__(self, xml)
for estudy in self.root.findall(ODM_NS + 'Study'):
self.append(RWSStudyListItem.fromElement(estudy))
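# Illustrative sketch (not part of the original module): iterating a parsed
# study list. `xml_string` is assumed to hold an ODM study-list response such
# as the one shown in the RWSStudies docstring above.
def _example_list_studies(xml_string):
    studies = RWSStudies(xml_string)
    return [(s.oid, s.environment, s.isProd()) for s in studies]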
class MetaDataVersion(object):
"""
<MetaDataVersion OID="1203" Name="Webservice Outbound"/>
"""
def __init__(self):
self.oid = None
self.name = None
@classmethod
def fromElement(cls, elem):
"""Read properties from an XML Element
<MetaDataVersion OID="1203" Name="Webservice Outbound"/>
"""
self = cls()
self.oid = elem.get('OID')
self.name = elem.get('Name')
return self
class RWSStudyMetadataVersions(list, ODMDoc, RWSStudyListItem):
"""
Parses responses from MetaDataVersions request::
<ODM ODMVersion="1.3" Granularity="Metadata" FileType="Snapshot" FileOID="d26b4d33-376d-4037-9747-684411190179" CreationDateTime=" 2013-04-08T01:29:13 " xmlns="http://www.cdisc.org/ns/odm/v1.3" xmlns:mdsol="http://www.mdsol.com/ns/odm/metadata">
<Study OID="IANTEST">
<GlobalVariables>
<StudyName>IANTEST</StudyName>
<StudyDescription></StudyDescription>
<ProtocolName>IANTEST</ProtocolName>
</GlobalVariables>
<MetaDataVersion OID="1203" Name="Webservice Outbound" />
<MetaDataVersion OID="1195" Name="JC_Demo_Draft1" />
<MetaDataVersion OID="1165" Name="Initial" />
</Study>
</ODM>
"""
def __init__(self, xml):
# Get basic properties
ODMDoc.__init__(self, xml)
root = self.root # from ODMDoc
e_study = root.find(ODM_NS + 'Study')
# Quick way to grab the elements here, nasty though
self.study = RWSStudyListItem.fromElement(e_study)
for e_version in e_study.findall(ODM_NS + 'MetaDataVersion'):
self.append(MetaDataVersion.fromElement(e_version))
class RWSSubjectListItem(object):
"""
Parses response of Subject List request::
<ClinicalData StudyOID="Fixitol(Dev)" MetaDataVersionOID="1111">
<SubjectData SubjectKey="1111">
<SiteRef LocationOID="335566"/>
</SubjectData>
</ClinicalData>
Optionally ClinicalData may include status::
<SubjectData SubjectKey="1111" mdsol:Overdue="No"
mdsol:Touched="Yes"
mdsol:Empty="No"
mdsol:Incomplete="No"
mdsol:NonConformant="No"
mdsol:RequiresSecondPass="No"
mdsol:RequiresReconciliation="No"
mdsol:RequiresVerification="No"
mdsol:Verified="No"
mdsol:Frozen="No"
mdsol:Locked="No"
mdsol:RequiresReview="No"
mdsol:PendingReview="No"
mdsol:Reviewed="No"
mdsol:RequiresAnswerQuery="No"
mdsol:RequiresPendingCloseQuery="No"
mdsol:RequiresCloseQuery="No"
mdsol:StickyPlaced="No"
mdsol:Signed="No"
mdsol:SignatureCurrent="No"
mdsol:RequiresTranslation="No"
mdsol:RequiresCoding="No"
mdsol:RequiresPendingAnswerQuery="No"
mdsol:RequiresSignature="No"
mdsol:ReadyForFreeze="No"
mdsol:ReadyForLock="Yes">
The SubjectKey can be either a Subject ID or a UUID depending on the value of SubjectKeyType::
<ClinicalData StudyOID="Fixitol(Dev)" MetaDataVersionOID="1111">
<SubjectData SubjectKey="EC82F1AB-D463-4930-841D-36FC865E63B2" mdsol:SubjectName="1" mdsol:SubjectKeyType="SubjectUUID">
<SiteRef LocationOID="335566"/>
</SubjectData>
</ClinicalData>
May include links::
<ClinicalData StudyOID="Fixitol(Dev)" MetaDataVersionOID="1111">
<SubjectData SubjectKey="1111">
<SiteRef LocationOID="335566"/>
<mdsol:Link xlink:type="simple" xlink:href="http://innovate.mdsol.com/MedidataRAVE/HandleLink.aspx?page=SubjectPage.aspx?ID=849" />
</SubjectData>
</ClinicalData>
"""
STATUS_PROPERTIES = ["Overdue",
"Touched",
"Empty",
"Incomplete",
"NonConformant",
"RequiresSecondPass",
"RequiresReconciliation",
"RequiresVerification",
"Verified",
"Frozen",
"Locked",
"RequiresReview",
"PendingReview",
"Reviewed",
"RequiresAnswerQuery",
"RequiresPendingCloseQuery",
"RequiresCloseQuery",
"StickyPlaced",
"Signed",
"SignatureCurrent",
"RequiresTranslation",
"RequiresCoding",
"RequiresPendingAnswerQuery",
"RequiresSignature",
"ReadyForFreeze",
"ReadyForLock",
"SubjectKeyType",
"SubjectName"]
def __init__(self):
"""The ODM message has a ClinicalData element with a single SubjectData and SiteRef elements
nested within. I collapse into a single object
"""
self.studyoid = None
self.metadataversionoid = None
self.subjectkey = None
self.subjectkeytype = None
self.locationoid = None
self.active = None # SubjectActive
self.deleted = None # Deleted
self.links = [] # Link if requested
# Optional properties, only if status included
for prop in RWSSubjectListItem.STATUS_PROPERTIES:
setattr(self, prop.lower(), None)
@property
def subject_name(self):
"""
Get the subject name consistently - if the SubjectKeyType is SubjectUUID
then the subject name lives in the mdsol:SubjectName attribute
:return: The Subject ID for the subject
        :rtype: str
"""
if self.subjectkeytype and self.subjectkeytype == "SubjectUUID".lower():
# if the SubjectKeyType is "SubjectUUID", then return the SubjectName
return self.subjectname
else:
return self.subjectkey
@classmethod
def fromElement(cls, elem):
"""
Read properties from an XML Element
"""
self = cls()
self.studyoid = elem.get('StudyOID')
self.metadataversionoid = elem.get('MetaDataVersionOID')
e_subjectdata = elem.findall(ODM_NS + 'SubjectData')[0]
self.subjectkey = e_subjectdata.get('SubjectKey')
e_siteref = e_subjectdata.findall(ODM_NS + 'SiteRef')[0]
self.locationoid = e_siteref.get('LocationOID')
e_links = e_subjectdata.findall(MEDI_NS + 'Link')
for e_link in e_links:
self.links.append(e_link.get(XLINK_NS + 'href'))
decodes = {'yes': True, 'no': False, '': None}
for prop in RWSSubjectListItem.STATUS_PROPERTIES:
val = e_subjectdata.get(MEDI_NS + prop, "").lower()
setattr(self, prop.lower(), decodes.get(val, val))
# By default we only get back active and non-deleted subjects
self.active = decodes[e_subjectdata.get(MEDI_NS + "SubjectActive", "yes").lower()]
self.deleted = decodes[e_subjectdata.get(MEDI_NS + "Deleted", "no").lower()]
return self
class RWSSubjects(list, ODMDoc):
"""
Represents a list of subjects::
<ODM FileType="Snapshot"
FileOID="770f1758-db33-4ab2-af72-38db863734aa"
CreationDateTime="2013-04-08T14:08:06.875-00:00"
ODMVersion="1.3">
<ClinicalData StudyOID="Fixitol(Dev)" MetaDataVersionOID="1111">
<SubjectData SubjectKey="000001">
<SiteRef LocationOID="BP001"/>
<mdsol:Link xlink:type="simple" xlink:href="http://innovate.mdsol.com/MedidataRAVE/HandleLink.aspx?page=SubjectPage.aspx?ID=849" />
</SubjectData>
</ClinicalData>
<ClinicalData StudyOID="Fixitol(Dev)" MetaDataVersionOID="1111">
<SubjectData SubjectKey="1111">
<SiteRef LocationOID="335566"/>
<mdsol:Link xlink:type="simple" xlink:href="http://innovate.mdsol.com/MedidataRAVE/HandleLink.aspx?page=SubjectPage.aspx?ID=849" />
</SubjectData>
</ClinicalData>
</ODM>
"""
def __init__(self, xml):
# Get basic properties
ODMDoc.__init__(self, xml)
root = self.root # from ODMDoc
for e_clindata in root.findall(ODM_NS + 'ClinicalData'):
self.append(RWSSubjectListItem.fromElement(e_clindata))
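# Illustrative sketch (not part of the original module): collapsing a subject
# list response into (subject name, site) pairs. `xml_string` is assumed to
# hold an ODM subject-list response like the one in the RWSSubjects docstring.
def _example_subject_sites(xml_string):
    subjects = RWSSubjects(xml_string)
    return [(s.subject_name, s.locationoid) for s in subjects]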
|
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 TurboCoin
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BIP68 implementation."""
import random
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, ToHex
from test_framework.script import CScript
from test_framework.test_framework import TurbocoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
get_bip9_status,
satoshi_round,
)
SEQUENCE_LOCKTIME_DISABLE_FLAG = (1<<31)
SEQUENCE_LOCKTIME_TYPE_FLAG = (1<<22) # this means use time (0 means height)
SEQUENCE_LOCKTIME_GRANULARITY = 9 # this is a bit-shift
SEQUENCE_LOCKTIME_MASK = 0x0000ffff
# RPC error for non-BIP68 final transactions
NOT_FINAL_ERROR = "non-BIP68-final (code 64)"
class BIP68Test(TurbocoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [[], ["-acceptnonstdtxn=0"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.relayfee = self.nodes[0].getnetworkinfo()["relayfee"]
# Generate some coins
self.nodes[0].generate(110)
self.log.info("Running test disable flag")
self.test_disable_flag()
self.log.info("Running test sequence-lock-confirmed-inputs")
self.test_sequence_lock_confirmed_inputs()
self.log.info("Running test sequence-lock-unconfirmed-inputs")
self.test_sequence_lock_unconfirmed_inputs()
self.log.info("Running test BIP68 not consensus before versionbits activation")
self.test_bip68_not_consensus()
self.log.info("Activating BIP68 (and 112/113)")
self.activateCSV()
self.log.info("Verifying nVersion=2 transactions are standard.")
self.log.info("Note that nVersion=2 transactions are always standard (independent of BIP68 activation status).")
self.test_version2_relay()
self.log.info("Passed")
# Test that BIP68 is not in effect if tx version is 1, or if
# the first sequence bit is set.
def test_disable_flag(self):
# Create some unconfirmed inputs
new_addr = self.nodes[0].getnewaddress()
self.nodes[0].sendtoaddress(new_addr, 2) # send 2 TURBO
utxos = self.nodes[0].listunspent(0, 0)
assert len(utxos) > 0
utxo = utxos[0]
tx1 = CTransaction()
value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
# Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the
# input to mature.
sequence_value = SEQUENCE_LOCKTIME_DISABLE_FLAG | 1
tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)]
tx1.vout = [CTxOut(value, CScript([b'a']))]
tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))["hex"]
tx1_id = self.nodes[0].sendrawtransaction(tx1_signed)
tx1_id = int(tx1_id, 16)
# This transaction will enable sequence-locks, so this transaction should
# fail
tx2 = CTransaction()
tx2.nVersion = 2
sequence_value = sequence_value & 0x7fffffff
tx2.vin = [CTxIn(COutPoint(tx1_id, 0), nSequence=sequence_value)]
tx2.vout = [CTxOut(int(value - self.relayfee * COIN), CScript([b'a' * 35]))]
tx2.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2))
# Setting the version back down to 1 should disable the sequence lock,
# so this should be accepted.
tx2.nVersion = 1
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Calculate the median time past of a prior block ("confirmations" before
# the current tip).
def get_median_time_past(self, confirmations):
block_hash = self.nodes[0].getblockhash(self.nodes[0].getblockcount()-confirmations)
return self.nodes[0].getblockheader(block_hash)["mediantime"]
# Test that sequence locks are respected for transactions spending confirmed inputs.
def test_sequence_lock_confirmed_inputs(self):
# Create lots of confirmed utxos, and use them to generate lots of random
# transactions.
max_outputs = 50
addresses = []
while len(addresses) < max_outputs:
addresses.append(self.nodes[0].getnewaddress())
        while len(self.nodes[0].listunspent()) < 200:
            random.shuffle(addresses)
num_outputs = random.randint(1, max_outputs)
outputs = {}
for i in range(num_outputs):
outputs[addresses[i]] = random.randint(1, 20)*0.01
self.nodes[0].sendmany("", outputs)
self.nodes[0].generate(1)
utxos = self.nodes[0].listunspent()
# Try creating a lot of random transactions.
# Each time, choose a random number of inputs, and randomly set
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
for i in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
# Track whether any sequence locks used should fail
should_pass = True
# Track whether this transaction was built with sequence locks
using_sequence_locks = False
tx = CTransaction()
tx.nVersion = 2
value = 0
for j in range(num_inputs):
sequence_value = 0xfffffffe # this disables sequence locks
# 50% chance we enable sequence locks
if random.randint(0,1):
using_sequence_locks = True
# 10% of the time, make the input sequence value pass
input_will_pass = (random.randint(1,10) == 1)
sequence_value = utxos[j]["confirmations"]
if not input_will_pass:
sequence_value += 1
should_pass = False
# Figure out what the median-time-past was for the confirmed input
# Note that if an input has N confirmations, we're going back N blocks
# from the tip so that we're looking up MTP of the block
# PRIOR to the one the input appears in, as per the BIP68 spec.
orig_time = self.get_median_time_past(utxos[j]["confirmations"])
cur_time = self.get_median_time_past(0) # MTP of the tip
# can only timelock this input if it's not too old -- otherwise use height
can_time_lock = True
if ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY) >= SEQUENCE_LOCKTIME_MASK:
can_time_lock = False
# if time-lockable, then 50% chance we make this a time lock
if random.randint(0,1) and can_time_lock:
# Find first time-lock value that fails, or latest one that succeeds
time_delta = sequence_value << SEQUENCE_LOCKTIME_GRANULARITY
if input_will_pass and time_delta > cur_time - orig_time:
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)
elif (not input_will_pass and time_delta <= cur_time - orig_time):
sequence_value = ((cur_time - orig_time) >> SEQUENCE_LOCKTIME_GRANULARITY)+1
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value))
value += utxos[j]["amount"]*COIN
# Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output
tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50
tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), CScript([b'a'])))
rawtx = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))["hex"]
if (using_sequence_locks and not should_pass):
# This transaction should be rejected
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, rawtx)
else:
# This raw transaction should be accepted
self.nodes[0].sendrawtransaction(rawtx)
utxos = self.nodes[0].listunspent()
# Test that sequence locks on unconfirmed inputs must have nSequence
# height or time of 0 to be accepted.
# Then test that BIP68-invalid transactions are removed from the mempool
# after a reorg.
def test_sequence_lock_unconfirmed_inputs(self):
# Store height so we can easily reset the chain at the end of the test
cur_height = self.nodes[0].getblockcount()
# Create a mempool tx.
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Anyone-can-spend mempool tx.
# Sequence lock of 0 should pass.
tx2 = CTransaction()
tx2.nVersion = 2
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(tx2_raw)
# Create a spend of the 0th output of orig_tx with a sequence lock
# of 1, and test what happens when submitting.
# orig_tx.vout[0] must be an anyone-can-spend output
def test_nonzero_locks(orig_tx, node, relayfee, use_height_lock):
sequence_value = 1
if not use_height_lock:
sequence_value |= SEQUENCE_LOCKTIME_TYPE_FLAG
tx = CTransaction()
tx.nVersion = 2
tx.vin = [CTxIn(COutPoint(orig_tx.sha256, 0), nSequence=sequence_value)]
tx.vout = [CTxOut(int(orig_tx.vout[0].nValue - relayfee * COIN), CScript([b'a' * 35]))]
tx.rehash()
if (orig_tx.hash in node.getrawmempool()):
# sendrawtransaction should fail if the tx is in the mempool
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx))
else:
# sendrawtransaction should succeed if the tx is not in the mempool
node.sendrawtransaction(ToHex(tx))
return tx
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Now mine some blocks, but make sure tx2 doesn't get mined.
# Use prioritisetransaction to lower the effective feerate to 0
self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(-self.relayfee*COIN))
cur_time = int(time.time())
for i in range(10):
self.nodes[0].setmocktime(cur_time + 600)
self.nodes[0].generate(1)
cur_time += 600
assert tx2.hash in self.nodes[0].getrawmempool()
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=True)
test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
# Mine tx2, and then try again
self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(self.relayfee*COIN))
# Advance the time on the node so that we can test timelocks
self.nodes[0].setmocktime(cur_time+600)
self.nodes[0].generate(1)
assert tx2.hash not in self.nodes[0].getrawmempool()
# Now that tx2 is not in the mempool, a sequence locked spend should
# succeed
tx3 = test_nonzero_locks(tx2, self.nodes[0], self.relayfee, use_height_lock=False)
assert tx3.hash in self.nodes[0].getrawmempool()
self.nodes[0].generate(1)
assert tx3.hash not in self.nodes[0].getrawmempool()
# One more test, this time using height locks
tx4 = test_nonzero_locks(tx3, self.nodes[0], self.relayfee, use_height_lock=True)
assert tx4.hash in self.nodes[0].getrawmempool()
# Now try combining confirmed and unconfirmed inputs
tx5 = test_nonzero_locks(tx4, self.nodes[0], self.relayfee, use_height_lock=True)
assert tx5.hash not in self.nodes[0].getrawmempool()
utxos = self.nodes[0].listunspent()
tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1))
tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN)
raw_tx5 = self.nodes[0].signrawtransactionwithwallet(ToHex(tx5))["hex"]
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5)
# Test mempool-BIP68 consistency after reorg
#
# State of the transactions in the last blocks:
# ... -> [ tx2 ] -> [ tx3 ]
# tip-1 tip
# And currently tx4 is in the mempool.
#
# If we invalidate the tip, tx3 should get added to the mempool, causing
# tx4 to be removed (fails sequence-lock).
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
assert tx4.hash not in self.nodes[0].getrawmempool()
assert tx3.hash in self.nodes[0].getrawmempool()
# Now mine 2 empty blocks to reorg out the current tip (labeled tip-1 in
# diagram above).
# This would cause tx2 to be added back to the mempool, which in turn causes
# tx3 to be removed.
tip = int(self.nodes[0].getblockhash(self.nodes[0].getblockcount()-1), 16)
height = self.nodes[0].getblockcount()
for i in range(2):
block = create_block(tip, create_coinbase(height), cur_time)
block.nVersion = 3
block.rehash()
block.solve()
tip = block.sha256
height += 1
self.nodes[0].submitblock(ToHex(block))
cur_time += 1
mempool = self.nodes[0].getrawmempool()
assert tx3.hash not in mempool
assert tx2.hash in mempool
# Reset the chain and get rid of the mocktimed-blocks
self.nodes[0].setmocktime(0)
self.nodes[0].invalidateblock(self.nodes[0].getblockhash(cur_height+1))
self.nodes[0].generate(10)
# Make sure that BIP68 isn't being used to validate blocks, prior to
# versionbits activation. If more blocks are mined prior to this test
# being run, then it's possible the test has activated the soft fork, and
# this test should be moved to run earlier, or deleted.
def test_bip68_not_consensus(self):
assert get_bip9_status(self.nodes[0], 'csv')['status'] != 'active'
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2)
tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid))
tx1.rehash()
# Make an anyone-can-spend transaction
tx2 = CTransaction()
tx2.nVersion = 1
tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)]
tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), CScript([b'a']))]
# sign tx2
tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"]
tx2 = FromHex(tx2, tx2_raw)
tx2.rehash()
self.nodes[0].sendrawtransaction(ToHex(tx2))
# Now make an invalid spend of tx2 according to BIP68
sequence_value = 100 # 100 block relative locktime
tx3 = CTransaction()
tx3.nVersion = 2
tx3.vin = [CTxIn(COutPoint(tx2.sha256, 0), nSequence=sequence_value)]
tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee * COIN), CScript([b'a' * 35]))]
tx3.rehash()
assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3))
# make a block that violates bip68; ensure that the tip updates
tip = int(self.nodes[0].getbestblockhash(), 16)
block = create_block(tip, create_coinbase(self.nodes[0].getblockcount()+1))
block.nVersion = 3
block.vtx.extend([tx1, tx2, tx3])
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
add_witness_commitment(block)
block.solve()
self.nodes[0].submitblock(block.serialize(True).hex())
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
def activateCSV(self):
# activation should happen at block height 432 (3 periods)
# getblockchaininfo will show CSV as active at block 431 (144 * 3 -1) since it's returning whether CSV is active for the next block.
min_activation_height = 432
height = self.nodes[0].getblockcount()
assert_greater_than(min_activation_height - height, 2)
self.nodes[0].generate(min_activation_height - height - 2)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "locked_in")
self.nodes[0].generate(1)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], "active")
self.sync_blocks()
# Use self.nodes[1] to test that version 2 transactions are standard.
def test_version2_relay(self):
inputs = [ ]
outputs = { self.nodes[1].getnewaddress() : 1.0 }
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex']
tx = FromHex(CTransaction(), rawtxfund)
tx.nVersion = 2
tx_signed = self.nodes[1].signrawtransactionwithwallet(ToHex(tx))["hex"]
self.nodes[1].sendrawtransaction(tx_signed)
if __name__ == '__main__':
BIP68Test().main()
|
|
"""
promise: bytecode optimisation using staticness assertions.
This is a module for applying some simple optimisations to function bytecode.
By promising that a function doesn't do certain things at run-time, it's
possible to apply optimisations that are not legal in the general case.
As a simple example, it's possible to promise that a function doesn't modify
(or care if anyone else modifies) any builtin functions by decorating it thus:
@promise.constant(__builtins__)
def function():
...
Such a promise will allow the builtins to be stored as direct object references
in the function bytecode, avoiding name lookups during function execution.
As another example, it's possible to promise that a function is pure; i.e. that
it's a simple algorithm for mapping input values to an output value:
@promise.pure()
def calculate(a,b):
return 2*a*a + 3*b + 7
If a pure function is then used by another function as a constant, it can be
directly inlined into the bytecode to avoid the overhead of a function call:
@promise.constant(("calculate",))
def aggregate(pairs):
# calculate() is a pure constant, so it will be inlined here.
return sum(calculate(a,b) for (a,b) in pairs)
The currently available promises are:
* invariant(names): promise that variables having the given names will
not change value during execution of the function.
* constant(names): promise that variables having the given names will
always refer to the same object, across all calls
to the function.
* pure(): promise that the function is a transparent mapping from inputs
               to outputs; this opens up the possibility of inlining it directly
into other functions.
* sensible(): promise that the function is "sensibly behaved". All
builtins and module-level functions are considered
constant; all other module-level names are considered
invariant.
Promise is built on Noam Raphael's fantastic "byteplay" module. It used to
be bundled as part of promise because it needed some patches to work with
newer versions of Python; now it's just bundled for your convenience.
"""
__ver_major__ = 0
__ver_minor__ = 2
__ver_patch__ = 2
__ver_sub__ = ""
__version__ = "%d.%d.%d%s" % (__ver_major__,__ver_minor__,
__ver_patch__,__ver_sub__)
import types
from promise.byteplay import *
class BrokenPromiseError(Exception):
"""Exception raised when you make a promise that is provably broken."""
pass
def _ids():
"""Generator producing unique ids."""
i = 0
while True:
i += 1
yield i
_ids = _ids()
def new_name(name=None):
"""Generate a new unique variable name
If the given name is not None, it is included in the generated name for
ease of reference in e.g. tracebacks or bytecode inspection.
"""
if name is None:
return "_promise_var%s" % (_ids.next(),)
else:
return "_promise_var%s_%s" % (_ids.next(),name,)
def apply_deferred_promises(func):
"""Apply any deferred promises attached to a function."""
# Get the code object before checking for deferred promises.
# This prevents race conditions if several threads apply them at once.
c = Code.from_code(func.func_code)
try:
deferred = func._promise_deferred
except AttributeError:
pass
else:
del func._promise_deferred
# Remove the bootstrapping code inserted by Promise.defer()
idx = c.code.index((POP_TOP,None))
del c.code[:idx+1]
# Apply each promise in turn
for p in deferred:
p.apply(func,c)
# Use the transformed bytecode in subsequent calls to func
func.func_code = c.to_code()
pass
class Promise(object):
"""Base class for promises.
A "Promise" represents a transformation that can be applied to a function's
bytecode, given that the user promises to only use the function in certain
restricted ways. They are intended for use as function- or class-level
decorators. The following methods should be provided by subclasses:
* decorate(func): mark the given function as having this promise
applied; this may directly modify the function's
bytecode or defer the modification until call time.
* apply(func,code): actually transform the function's bytecode
                            to take advantage of the promised behaviour.
Subclasses may find the following method useful:
* defer(func): defer the application of this promise until the
given function is called for the first time.
* apply_or_defer(func): immediately apply this promise if the given
function has no deferred promises; otherwise
defer it until after the existing promises.
"""
def __init__(self):
pass
def __call__(self,*args):
"""Apply this promise to a function, module, dict, etc.
Calling a promise arranges for it to be applied to any functions
found in the given arguments. Each argument can be a raw function,
or a class, module or iterable of functions.
"""
if not args:
return None
for arg in args:
if isinstance(arg,types.FunctionType):
self.decorate(arg)
else:
try:
subargs = arg.itervalues()
except (AttributeError,TypeError):
subargs = (getattr(arg,nm) for nm in dir(arg))
for subarg in subargs:
if isinstance(subarg,types.FunctionType):
self(subarg)
return args[0]
def decorate(self,func):
"""Decorate the given function to apply this promise.
This can either directly apply the promise, or defer its application
until the function is first executed. The return value is ignored;
in practice this means that decorate() must directly modify the given
function rather than the standard practice of creating a wrapper.
"""
pass
def apply(self,func,code):
"""Apply this promise to the given function.
The argument 'func' is the function to which the promise is being
applied, and 'code' is a byteplay code object representing its code.
The code object should be modified in-place.
"""
pass
def defer(self,func):
"""Defer the application of this promise func is first executed."""
# Try to be thread-safe by using setdefault(), which is implemented
# in C and is therefore non-interruptible.
default = []
deferred = func.__dict__.setdefault("_promise_deferred",default)
deferred.append(self)
if deferred is default:
# Add code to apply the promise when func is first executed.
# These opcodes are removed by apply_deferred_promises()
c = Code.from_code(func.func_code)
c.code.insert(0,(LOAD_CONST,apply_deferred_promises))
c.code.insert(1,(LOAD_CONST,func))
c.code.insert(2,(CALL_FUNCTION,1))
c.code.insert(3,(POP_TOP,None))
func.func_code = c.to_code()
def apply_or_defer(self,func):
"""Apply this promise, or defer it if others are already deferred.
It's generally a good idea to use this instead of directly applying
a promise, since it ensures that individual promises will be applied
in the order in which they appear in code.
"""
try:
deferred = func._promise_deferred
except AttributeError:
code = Code.from_code(func.func_code)
self.apply(func,code)
func.func_code = code.to_code()
else:
deferred.append(self)
class invariant(Promise):
"""Promise that the given names are invariant during the function call.
This promise allows the names to be loaded once, at the beginning of the
function, and accessed through local variables from there on out. Instead
of doing this:
myvar = SomeGlobalObject()
def myfunc():
l_myvar = myvar # store locally for faster access
...do stuff with l_myvar...
You can now do this:
myvar = SomeGlobalObject()
@promise.invariant(("myvar",))
def myfunc():
...do stuff directly with myvar...
"""
def __init__(self,names):
self.names = names
super(invariant,self).__init__()
def decorate(self,func):
self.apply_or_defer(func)
def apply(self,func,code):
local_names = {}
load_ops = []
for (i,(op,arg)) in enumerate(code.code):
# Replace any LOADs of invariant names with a LOAD_FAST
if op in (LOAD_GLOBAL,LOAD_NAME,LOAD_DEREF):
if arg in self.names:
if arg not in local_names:
local_names[arg] = new_name(arg)
load_ops.append((op,arg))
load_ops.append((STORE_FAST,local_names[arg]))
code.code[i] = (LOAD_FAST,local_names[arg])
            # Quick check that invariant names aren't munged
elif op in (STORE_NAME,STORE_GLOBAL,STORE_FAST,STORE_DEREF):
if arg in self.names:
msg = "name '%s' was promised invariant, but assigned to"
raise BrokenPromiseError(msg % (arg,))
elif op in (DELETE_NAME,DELETE_GLOBAL,DELETE_FAST):
if arg in self.names:
msg = "name '%s' was promised invariant, but deleted"
raise BrokenPromiseError(msg % (arg,))
# Insert code to load the names in local vars at start of function
for i,op in enumerate(load_ops):
code.code.insert(i,op)
class constant(Promise):
"""Promise that the given names are constant
This promise allows the objects referred to by the names to be stored
directly in the code as constants, eliminating name lookups. We try
to resolve all constants at decoration time, but any that are missing
will be deferred until the function first executes.
Instead of doing this:
# this trick makes range() a local constant
def yield_lots_of_ranges(range=range):
for i in range(10):
yield range(i)
You can now do this:
@promise.constant(("range",))
        def yield_lots_of_ranges():
for i in range(10):
yield range(i)
"""
def __init__(self,names,exclude=[]):
self.names = names
self.exclude = exclude
super(constant,self).__init__()
def _load_name(self,func,name,op=None):
"""Look up the given name in the scope of the given function.
This is an attempt to replicate the name lookup rules of LOAD_NAME,
LOAD_GLOBAL and friends. If a specific bytecode op is specified,
only the rules for that operation are applied.
If the name cannot be found, NameError is raised.
"""
if op in (None,LOAD_NAME,LOAD_DEREF):
try:
return self._load_name_deref(func,name)
except NameError:
pass
if op in (None,LOAD_NAME,LOAD_GLOBAL):
try:
return self._load_name_global(func,name)
except NameError:
pass
raise NameError(name)
def _load_name_deref(self,func,name):
"""Simulate (LOAD_DEREF,name) on the given function."""
# Determine index of cell matching given name
try:
idx = func.func_code.co_cellvars.index(name)
except ValueError:
try:
idx = func.func_code.co_freevars.index(name)
idx -= len(func.func_code.co_cellvars)
except ValueError:
raise NameError(name)
return func.func_closure[idx].cell_contents
def _load_name_global(self,func,name):
"""Simulate (LOAD_GLOBAL,name) on the given function."""
try:
try:
return func.func_globals[name]
except KeyError:
return __builtins__[name]
except KeyError:
raise NameError(name)
def decorate(self,func):
try:
self.apply_or_defer(func)
except NameError:
self.defer(func)
def apply(self,func,code):
new_constants = {}
old_constants = set()
missing_names = []
for (i,(op,arg)) in enumerate(code.code):
# Replace LOADs of matching names with LOAD_CONST
if op in (LOAD_GLOBAL,LOAD_DEREF,LOAD_NAME):
if arg in self.names:
if arg in self.exclude or arg in missing_names:
continue
try:
val = new_constants[arg]
except KeyError:
try:
val = self._load_name(func,arg,op)
except NameError:
missing_names.append(arg)
else:
new_constants[arg] = val
code.code[i] = (LOAD_CONST,val)
else:
code.code[i] = (LOAD_CONST,val)
# Quick check that locals haven't been promised constant
elif op == LOAD_FAST:
if arg in self.names:
raise BrokenPromiseError("local names can't be constant: '%s'" % (arg,))
            # Quick check that constant names aren't munged
elif op in (STORE_NAME,STORE_GLOBAL,STORE_FAST,STORE_DEREF):
if arg in self.names:
msg = "name '%s' was promised constant, but assigned to"
raise BrokenPromiseError(msg % (arg,))
elif op in (DELETE_NAME,DELETE_GLOBAL,DELETE_FAST):
if arg in self.names:
msg = "name '%s' was promised constant, but deleted"
raise BrokenPromiseError(msg % (arg,))
# Track any existing constants for use in the next step
elif op == LOAD_CONST:
if arg not in old_constants:
old_constants.add(arg)
# Recursively apply promise to any inner functions.
# TODO: how can we do deferred promises on inner functions?
if i+1 < len(code.code):
(nextop,nextarg) = code.code[i+1]
if nextop in (MAKE_FUNCTION,MAKE_CLOSURE):
exclude = arg.to_code().co_varnames
p = self.__class__(names=self.names,exclude=exclude)
try:
p.apply(func,arg)
except NameError:
pass
# If any constants define a '_promise_fold_constant' method,
# let them have a crack at the bytecode as well.
for const in new_constants.itervalues():
try:
fold = const._promise_fold_constant
except AttributeError:
pass
else:
fold(func,code)
for const in old_constants:
try:
fold = const._promise_fold_constant
except AttributeError:
pass
else:
fold(func,code)
# Re-raise a NameError if any occurred
if missing_names:
raise NameError(",".join(missing_names))
class pure(Promise):
"""Promise that a function is pure.
A pure function has no side-effects or internal state; it is simply
a mapping from input values to output values.
Currently the only optimisation this enables is inlining of constant
pure functions; other optimisations may be added in the future. For
example, in this code the calculate() function will be inlined as if
it were a macro:
@promise.pure()
def calculate(a,b):
            return a*a + 3*b + 7
@promise.constant(("calculate",))
def aggregate(pairs):
return sum(calculate(a,b) for (a,b) in pairs)
"""
def decorate(self,func):
c = Code.from_code(func.func_code)
if c.varargs:
raise TypeError("pure functions currently don't support varargs")
if c.varkwargs:
raise TypeError("pure functions currently don't support varkwds")
func._promise_fold_constant = self._make_fold_method(func)
# Since I'm pure, my globals must all be constant
global_names = set()
for (op,arg) in Code.from_code(func.func_code).code:
if op == LOAD_GLOBAL:
global_names.add(arg)
elif op in (STORE_GLOBAL,DELETE_GLOBAL):
msg = "pure functions must not modify their globals: '%s'"
raise BrokenPromiseError(msg % (arg,))
constant(global_names).decorate(func)
def _make_fold_method(self,source_func):
"""Make _promise_fold_constant method for the given pure function."""
def fold(dest_func,dest_code):
"""Inline the code of source_func into the given bytecode."""
# Apply any deferred promises to source_func.
# Since it's pure, we can simply call it to force this.
if hasattr(source_func,"_promise_deferred"):
try:
source_func(*([None]*source_func.func_code.co_argcount))
except Exception:
pass
# Inline the function at every callsite
toinline = self._find_inlinable_call(source_func,dest_code)
while toinline is not None:
(loadsite,callsite) = toinline
# Give new names to the locals in the source bytecode
source_code = Code.from_code(source_func.func_code)
name_map = self._rename_local_vars(source_code)
# Remove any setlineno ops from the source bytecode
new_code = [c for c in source_code.code if c[0] != SetLineno]
source_code.code[:] = new_code
# Pop the function arguments directly from the stack.
# Keyword args are currently not supported.
numargs = dest_code.code[callsite][1] & 0xFF
for i in xrange(numargs):
argname = source_func.func_code.co_varnames[i]
source_code.code.insert(0,(STORE_FAST,name_map[argname]))
# Fill in any missing args from the function defaults
numreqd = source_func.func_code.co_argcount
for i in xrange(numargs,numreqd):
argname = source_func.func_code.co_varnames[i]
defidx = i - numreqd + len(source_func.func_defaults)
defval = source_func.func_defaults[defidx]
source_code.code.insert(0,(STORE_FAST,name_map[argname]))
source_code.code.insert(0,(LOAD_CONST,defval))
# Munge the source bytecode to leave return value on stack
end = Label()
source_code.code.append((end,None))
for (i,(op,arg)) in enumerate(source_code.code):
if op == RETURN_VALUE:
source_code.code[i] = (JUMP_ABSOLUTE,end)
# Replace the callsite with the inlined code
dest_code.code[callsite:callsite+1] = source_code.code
del dest_code.code[loadsite]
# Rinse and repeat
toinline = self._find_inlinable_call(source_func,dest_code)
return fold
def _find_inlinable_call(self,func,code):
"""Find an inlinable call to func in the given code.
If such a call is found, a tuple (loadsite,callsite) is returned
giving the position of the LOAD_CONST on the function and the matching
CALL_FUNCTION. If no inlinable call is found, returns None.
"""
for (i,(op,arg)) in enumerate(code.code):
if op == LOAD_CONST and arg == func:
loadsite = i
callsite = self._find_callsite(loadsite,code.code)
if callsite is not None:
(op,arg) = code.code[callsite]
# Can't currently inline kwdargs
if arg == (arg & 0xFF):
return (loadsite,callsite)
return None
def _find_callsite(self,idx,code):
"""Find index of the opcode calling the value pushed at opcode idx.
This method finds the position of the opcode that calls a function
pushed onto the stack by opcode 'idx'. If we cannot reliably find
such an opcode (due to weird branching etc) then None is returned.
"""
try:
callsite = idx + 1
try:
(curop,curarg) = code[callsite]
except IndexError:
return None
(pop,push) = getse(curop,curarg)
curstack = push - pop
while curstack > 0 or curop != CALL_FUNCTION:
callsite += 1
try:
(curop,curarg) = code[callsite]
except IndexError:
return None
(pop,push) = getse(curop,curarg)
curstack = curstack + push - pop
if curstack == 0:
return callsite
else:
return None
except ValueError:
return None
def _rename_local_vars(self,code):
"""Rename the local variables in the given code to new unique names.
Returns a dictionary mapping old names to new names.
"""
name_map = {}
for nm in code.to_code().co_varnames:
name_map[nm] = new_name(nm)
for (i,(op,arg)) in enumerate(code.code):
if op in (LOAD_FAST,STORE_FAST,DELETE_FAST):
try:
newarg = name_map[arg]
except KeyError:
newarg = new_name(arg)
name_map[arg] = newarg
code.code[i] = (op,newarg)
return name_map
class sensible(Promise):
"""Promise that a function is sensibly behaved. Basically:
* all builtins are constant
* all global functions are constant
* all other globals are invariant
The semantics of this promise will probably change as more types of promise
are added to the module.
"""
def decorate(self,func):
self.defer(func)
def apply(self,func,code):
callable_globals = set()
other_globals = set()
for (nm,obj) in func.func_globals.iteritems():
if callable(obj):
callable_globals.add(nm)
else:
other_globals.add(nm)
constant(__builtins__).apply(func,code)
constant(callable_globals).apply(func,code)
invariant(other_globals).apply(func,code)
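# Usage sketch (illustrative only): promises can be applied explicitly via
# .decorate()/.apply(), mirroring how _make_fold_method applies
# constant(...).decorate(func) above.  For example:
#
#     def distance(x, y):
#         return abs(x - y)
#     sensible().decorate(distance)   # builtins / global callables treated as constant
#
# Decorator-style use (e.g. @sensible()) relies on machinery provided by the
# Promise base class used above.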
|
|
# Copyright 2015 One Zero Capital
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import random
import numpy
import json
import theano
import theano.tensor as T
from theano.tensor.nnet import sigmoid
import time
import bisect
def floatX(x):
return numpy.asarray(x, dtype=theano.config.floatX)
srng = theano.tensor.shared_randomstreams.RandomStreams()
def get_splits(headers, data, bins, linear=False):
values = [[v[header] for v in data if header in v] for header in headers]
splits = []
for j in xrange(len(headers)):
values_j_unique = sorted(set(values[j]))
lo, hi = numpy.percentile(values_j_unique, 1.0), numpy.percentile(values_j_unique, 99.0)
print '%100s %11.2f %11.2f %5.2f%%' % (headers[j], lo, hi, 100. * len(values[j]) / len(data))
j_splits = []
if linear:
for bin in xrange(bins):
j_splits.append(lo + (bin + 1) * (hi - lo) * 1. / bins)
else:
for bin in xrange(bins):
j_splits.append(numpy.percentile(values_j_unique, 100.0 * (bin+1) / (bins+1)))
splits += [(j, x_split) for x_split in j_splits]
return splits
def get_row(headers, K, data_row, splits, headers_keep=None):
# V: values
V_row = numpy.zeros(K, dtype=theano.config.floatX)
# M: what values are missing
M_row = numpy.zeros(K, dtype=theano.config.floatX)
# Q: what values to predict
Q_row = numpy.zeros(K, dtype=theano.config.floatX)
for k, split in enumerate(splits):
j, x_split = split
if headers[j] not in data_row:
M_row[k] = 1
continue
x = data_row[headers[j]]
if x < x_split:
V_row[k] = 1
if headers_keep is not None:
if headers[j] not in headers_keep:
Q_row[k] = 1
return V_row, M_row, Q_row
def build_matrices(headers, data, D, K, splits, batch_size=200):
batch_size = min(len(data), batch_size)
V = numpy.zeros((batch_size, K), dtype=theano.config.floatX)
M = numpy.zeros((batch_size, K), dtype=theano.config.floatX)
Q = numpy.zeros((batch_size, K), dtype=theano.config.floatX)
k = numpy.zeros((batch_size, ), dtype=theano.config.floatX)
for i, data_row in enumerate(random.sample(data, batch_size)):
# How many header should we remove
n_headers_keep = random.randint(0, len(headers))
headers_keep = set(random.sample(headers, n_headers_keep))
V[i], M[i], Q[i] = get_row(headers, K, data_row, splits, headers_keep)
f = len([h for h in headers if h in headers_keep and h in data_row])
k[i] = f > 0 and f ** -0.5 or 0.0
return V, M, Q, k
def W_values(n_in, n_out):
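    # Glorot/Xavier uniform initialization: weights drawn from
    # +/- sqrt(6 / (fan_in + fan_out)), which keeps activation variance stable.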
return numpy.random.uniform(
low=-numpy.sqrt(6. / (n_in + n_out)),
high=numpy.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out))
def get_parameters(K, n_hidden_layers=4, n_hidden_units=128):
# Train an autoencoder to reconstruct the rows of the V matrices
Ws, bs = [], []
for l in xrange(n_hidden_layers + 1):
n_in, n_out = n_hidden_units, n_hidden_units
if l == 0:
n_in = K
elif l == n_hidden_layers:
n_out = K
Ws.append(theano.shared(W_values(n_in, n_out)))
gamma = 0.1 # initialize it to slightly positive so the derivative exists
bs.append(theano.shared(numpy.ones(n_out) * gamma))
return Ws, bs
def get_model(Ws, bs, dropout=False):
v = T.matrix('input')
m = T.matrix('missing')
q = T.matrix('target')
k = T.vector('normalization factor')
# Set all missing/target values to 0.5
keep_mask = (1-m) * (1-q)
h = keep_mask * (v * 2 - 1) # Convert to +1, -1
# Normalize layer 0
h *= k.dimshuffle(0, 'x')
for l in xrange(len(Ws)):
h = T.dot(h, Ws[l]) + bs[l]
if l < len(Ws) - 1:
h = h * (h > 0) # relu
if dropout:
mask = srng.binomial(n=1, p=0.5, size=h.shape)
h = h * mask * 2
output = sigmoid(h)
LL = v * T.log(output) + (1 - v) * T.log(1 - output)
# loss = -(q * LL).sum() / q.sum()
loss = -((1 - m) * LL).sum() / (1 - m).sum()
return v, m, q, k, output, loss
def nesterov_updates(loss, all_params, learn_rate, momentum, weight_decay):
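    # Nesterov-style momentum with L2 weight decay folded into the gradient:
    #   g'  = g + learn_rate * weight_decay * w
    #   v'  = momentum * v - learn_rate * g'
    #   w'  = w + momentum * v' - learn_rate * g'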
updates = []
all_grads = T.grad(loss, all_params)
for param_i, grad_i in zip(all_params, all_grads):
# generate a momentum parameter
mparam_i = theano.shared(numpy.array(param_i.get_value()*0.))
full_grad_i = grad_i + learn_rate * weight_decay * param_i
v = momentum * mparam_i - learn_rate * full_grad_i
w = param_i + momentum * v - learn_rate * full_grad_i
updates.append((param_i, w))
updates.append((mparam_i, v))
return updates
def get_train_f(Ws, bs):
learning_rate = T.scalar('learning rate')
v, m, q, k, output, loss = get_model(Ws, bs, dropout=False)
updates = nesterov_updates(loss, Ws + bs, learning_rate, 0.9, 1e-6)
return theano.function([v, m, q, k, learning_rate], loss, updates=updates)
def get_pred_f(Ws, bs):
v, m, q, k, output, loss = get_model(Ws, bs, dropout=False)
return theano.function([v, m, q, k], output)
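# Usage sketch: both compiled functions take the matrices produced by
# build_matrices(), e.g.
#     loss = train_f(V, M, Q, k, learning_rate)   # one SGD step, returns the scalar loss
#     probs = pred_f(V, M, Q, k)                  # probs[i, j] ~ P(binary feature j is 1) for row i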
def train(headers, data, n_hidden_layers=4, n_hidden_units=128, bins=40):
D = len(data)
K = bins * len(headers)
print D, 'data points', K, 'random splits', bins, 'bins', K, 'features'
splits = get_splits(headers, data, bins)
Ws, bs = get_parameters(K, n_hidden_layers, n_hidden_units)
train_f = get_train_f(Ws, bs)
pred_f = get_pred_f(Ws, bs)
learning_rate = 1.0
n_iters_patience = 1000
avg_decay = 1.0 - 1.0 / n_iters_patience
loss_sum = 0.0
weight_sum = 0.0
best_loss_smoothed = float('inf')
best_iter = 0
for iter in xrange(1000000):
V, M, Q, k = build_matrices(headers, data, D, K, splits)
loss = train_f(V, M, Q, k, learning_rate)
loss_sum = loss_sum * avg_decay + loss
weight_sum = weight_sum * avg_decay + 1.0
loss_smoothed = loss_sum / weight_sum
print '%12.9f %12.9f %5d %5d %12.9f' % (loss_smoothed, loss, iter, iter-best_iter, learning_rate)
if loss_smoothed < best_loss_smoothed:
best_iter = iter
best_loss_smoothed = loss_smoothed
if iter > best_iter + n_iters_patience:
print 'lower learning rate'
learning_rate *= 0.3
best_loss_smoothed = float('inf')
if learning_rate < 1e-4:
break
if (iter + 1) % 10 == 0:
yield {'K': K, 'bins': bins, 'splits': splits, 'headers': headers,
'Ws': [W.get_value().tolist() for W in Ws],
'bs': [b.get_value().tolist() for b in bs]}
if __name__ == '__main__':
data = json.load(open('stock-data.json'))
headers = sorted(list(set([key for v in data.values() for key in v.keys()])))
    # train() is a generator that yields model snapshots, so it has to be
    # iterated for training to run, and it expects the row dicts rather than
    # the outer id->row mapping.  The output filename here is an assumption.
    for model in train(headers, list(data.values())):
        json.dump(model, open('model.json', 'w'))
|
|
#!/usr/bin/env python
# Copyright 2015 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tempest import clients
from tempest import config
from tempest import test
LOG = logging.getLogger(__name__)
CONF = config.CONF
CONF_FLAVORS = None
CONF_IMAGES = None
CONF_NETWORKS = []
CONF_PRIV_NETWORK_NAME = None
CONF_PUB_NETWORK = None
CONF_PUB_ROUTER = None
CONF_TENANTS = None
CONF_USERS = None
IS_CEILOMETER = None
IS_CINDER = None
IS_GLANCE = None
IS_HEAT = None
IS_NEUTRON = None
IS_NOVA = None
def init_conf():
global CONF_FLAVORS
global CONF_IMAGES
global CONF_NETWORKS
global CONF_PRIV_NETWORK
global CONF_PRIV_NETWORK_NAME
global CONF_PUB_NETWORK
global CONF_PUB_ROUTER
global CONF_TENANTS
global CONF_USERS
global IS_CEILOMETER
global IS_CINDER
global IS_GLANCE
global IS_HEAT
global IS_NEUTRON
global IS_NOVA
IS_CEILOMETER = CONF.service_available.ceilometer
IS_CINDER = CONF.service_available.cinder
IS_GLANCE = CONF.service_available.glance
IS_HEAT = CONF.service_available.heat
IS_NEUTRON = CONF.service_available.neutron
IS_NOVA = CONF.service_available.nova
CONF_FLAVORS = [CONF.compute.flavor_ref, CONF.compute.flavor_ref_alt]
CONF_IMAGES = [CONF.compute.image_ref, CONF.compute.image_ref_alt]
CONF_PRIV_NETWORK_NAME = CONF.compute.fixed_network_name
CONF_PUB_NETWORK = CONF.network.public_network_id
CONF_PUB_ROUTER = CONF.network.public_router_id
CONF_TENANTS = [CONF.identity.admin_tenant_name,
CONF.identity.tenant_name,
CONF.identity.alt_tenant_name]
CONF_USERS = [CONF.identity.admin_username, CONF.identity.username,
CONF.identity.alt_username]
if IS_NEUTRON:
CONF_PRIV_NETWORK = _get_network_id(CONF.compute.fixed_network_name,
CONF.identity.tenant_name)
CONF_NETWORKS = [CONF_PUB_NETWORK, CONF_PRIV_NETWORK]
def _get_network_id(net_name, tenant_name):
am = clients.AdminManager()
net_cl = am.network_client
id_cl = am.identity_client
networks = net_cl.list_networks()
tenant = id_cl.get_tenant_by_name(tenant_name)
t_id = tenant['id']
n_id = None
for net in networks['networks']:
if (net['tenant_id'] == t_id and net['name'] == net_name):
n_id = net['id']
break
return n_id
class BaseService(object):
def __init__(self, kwargs):
self.client = None
for key, value in kwargs.items():
setattr(self, key, value)
self.tenant_filter = {}
if hasattr(self, 'tenant_id'):
self.tenant_filter['tenant_id'] = self.tenant_id
def _filter_by_tenant_id(self, item_list):
if (item_list is None
or len(item_list) == 0
or not hasattr(self, 'tenant_id')
or self.tenant_id is None
or 'tenant_id' not in item_list[0]):
return item_list
return [item for item in item_list
if item['tenant_id'] == self.tenant_id]
def list(self):
pass
def delete(self):
pass
def dry_run(self):
pass
def save_state(self):
pass
def run(self):
if self.is_dry_run:
self.dry_run()
elif self.is_save_state:
self.save_state()
else:
self.delete()
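    # Subclasses expect the following keys in **kwargs (set as attributes by
    # __init__ above): data, is_dry_run, is_save_state, is_preserve,
    # saved_state_json and, for tenant-scoped services, tenant_id.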
class SnapshotService(BaseService):
def __init__(self, manager, **kwargs):
super(SnapshotService, self).__init__(kwargs)
self.client = manager.snapshots_client
def list(self):
client = self.client
snaps = client.list_snapshots()
LOG.debug("List count, %s Snapshots" % len(snaps))
return snaps
def delete(self):
snaps = self.list()
client = self.client
for snap in snaps:
try:
client.delete_snapshot(snap['id'])
except Exception:
LOG.exception("Delete Snapshot exception.")
def dry_run(self):
snaps = self.list()
self.data['snapshots'] = snaps
class ServerService(BaseService):
def __init__(self, manager, **kwargs):
super(ServerService, self).__init__(kwargs)
self.client = manager.servers_client
def list(self):
client = self.client
servers_body = client.list_servers()
servers = servers_body['servers']
LOG.debug("List count, %s Servers" % len(servers))
return servers
def delete(self):
client = self.client
servers = self.list()
for server in servers:
try:
client.delete_server(server['id'])
except Exception:
LOG.exception("Delete Server exception.")
def dry_run(self):
servers = self.list()
self.data['servers'] = servers
class ServerGroupService(ServerService):
def list(self):
client = self.client
sgs = client.list_server_groups()['server_groups']
LOG.debug("List count, %s Server Groups" % len(sgs))
return sgs
def delete(self):
client = self.client
sgs = self.list()
for sg in sgs:
try:
client.delete_server_group(sg['id'])
except Exception:
LOG.exception("Delete Server Group exception.")
def dry_run(self):
sgs = self.list()
self.data['server_groups'] = sgs
class StackService(BaseService):
def __init__(self, manager, **kwargs):
super(StackService, self).__init__(kwargs)
self.client = manager.orchestration_client
def list(self):
client = self.client
stacks = client.list_stacks()['stacks']
LOG.debug("List count, %s Stacks" % len(stacks))
return stacks
def delete(self):
client = self.client
stacks = self.list()
for stack in stacks:
try:
client.delete_stack(stack['id'])
except Exception:
LOG.exception("Delete Stack exception.")
def dry_run(self):
stacks = self.list()
self.data['stacks'] = stacks
class KeyPairService(BaseService):
def __init__(self, manager, **kwargs):
super(KeyPairService, self).__init__(kwargs)
self.client = manager.keypairs_client
def list(self):
client = self.client
keypairs = client.list_keypairs()['keypairs']
LOG.debug("List count, %s Keypairs" % len(keypairs))
return keypairs
def delete(self):
client = self.client
keypairs = self.list()
for k in keypairs:
try:
name = k['keypair']['name']
client.delete_keypair(name)
except Exception:
LOG.exception("Delete Keypairs exception.")
def dry_run(self):
keypairs = self.list()
self.data['keypairs'] = keypairs
class SecurityGroupService(BaseService):
def __init__(self, manager, **kwargs):
super(SecurityGroupService, self).__init__(kwargs)
self.client = manager.security_groups_client
def list(self):
client = self.client
secgrps = client.list_security_groups()['security_groups']
secgrp_del = [grp for grp in secgrps if grp['name'] != 'default']
LOG.debug("List count, %s Security Groups" % len(secgrp_del))
return secgrp_del
def delete(self):
client = self.client
secgrp_del = self.list()
for g in secgrp_del:
try:
client.delete_security_group(g['id'])
except Exception:
LOG.exception("Delete Security Groups exception.")
def dry_run(self):
secgrp_del = self.list()
self.data['security_groups'] = secgrp_del
class FloatingIpService(BaseService):
def __init__(self, manager, **kwargs):
super(FloatingIpService, self).__init__(kwargs)
self.client = manager.floating_ips_client
def list(self):
client = self.client
floating_ips = client.list_floating_ips()['floating_ips']
LOG.debug("List count, %s Floating IPs" % len(floating_ips))
return floating_ips
def delete(self):
client = self.client
floating_ips = self.list()
for f in floating_ips:
try:
client.delete_floating_ip(f['id'])
except Exception:
LOG.exception("Delete Floating IPs exception.")
def dry_run(self):
floating_ips = self.list()
self.data['floating_ips'] = floating_ips
class VolumeService(BaseService):
def __init__(self, manager, **kwargs):
super(VolumeService, self).__init__(kwargs)
self.client = manager.volumes_client
def list(self):
client = self.client
vols = client.list_volumes()
LOG.debug("List count, %s Volumes" % len(vols))
return vols
def delete(self):
client = self.client
vols = self.list()
for v in vols:
try:
client.delete_volume(v['id'])
except Exception:
LOG.exception("Delete Volume exception.")
def dry_run(self):
vols = self.list()
self.data['volumes'] = vols
class VolumeQuotaService(BaseService):
def __init__(self, manager, **kwargs):
super(VolumeQuotaService, self).__init__(kwargs)
self.client = manager.volume_quotas_client
def delete(self):
client = self.client
try:
client.delete_quota_set(self.tenant_id)
except Exception:
LOG.exception("Delete Volume Quotas exception.")
def dry_run(self):
quotas = self.client.show_quota_usage(self.tenant_id)
self.data['volume_quotas'] = quotas
class NovaQuotaService(BaseService):
def __init__(self, manager, **kwargs):
super(NovaQuotaService, self).__init__(kwargs)
self.client = manager.quotas_client
self.limits_client = manager.limits_client
def delete(self):
client = self.client
try:
client.delete_quota_set(self.tenant_id)
except Exception:
LOG.exception("Delete Quotas exception.")
def dry_run(self):
client = self.limits_client
quotas = client.show_limits()['limits']
self.data['compute_quotas'] = quotas['absolute']
# Begin network service classes
class NetworkService(BaseService):
def __init__(self, manager, **kwargs):
super(NetworkService, self).__init__(kwargs)
self.client = manager.network_client
def _filter_by_conf_networks(self, item_list):
if not item_list or not all(('network_id' in i for i in item_list)):
return item_list
return [item for item in item_list if item['network_id']
not in CONF_NETWORKS]
def list(self):
client = self.client
networks = client.list_networks(**self.tenant_filter)
networks = networks['networks']
# filter out networks declared in tempest.conf
if self.is_preserve:
networks = [network for network in networks
if network['id'] not in CONF_NETWORKS]
        LOG.debug("List count, %s Networks" % len(networks))
return networks
def delete(self):
client = self.client
networks = self.list()
for n in networks:
try:
client.delete_network(n['id'])
except Exception:
LOG.exception("Delete Network exception.")
def dry_run(self):
networks = self.list()
self.data['networks'] = networks
class NetworkFloatingIpService(NetworkService):
def list(self):
client = self.client
flips = client.list_floatingips(**self.tenant_filter)
flips = flips['floatingips']
LOG.debug("List count, %s Network Floating IPs" % len(flips))
return flips
def delete(self):
client = self.client
flips = self.list()
for flip in flips:
try:
client.delete_floatingip(flip['id'])
except Exception:
LOG.exception("Delete Network Floating IP exception.")
def dry_run(self):
flips = self.list()
self.data['floating_ips'] = flips
class NetworkRouterService(NetworkService):
def list(self):
client = self.client
routers = client.list_routers(**self.tenant_filter)
routers = routers['routers']
if self.is_preserve:
routers = [router for router in routers
if router['id'] != CONF_PUB_ROUTER]
LOG.debug("List count, %s Routers" % len(routers))
return routers
def delete(self):
client = self.client
routers = self.list()
for router in routers:
try:
rid = router['id']
ports = [port for port
in client.list_router_interfaces(rid)['ports']
if port["device_owner"] == "network:router_interface"]
for port in ports:
client.remove_router_interface_with_port_id(rid,
port['id'])
client.delete_router(rid)
except Exception:
LOG.exception("Delete Router exception.")
def dry_run(self):
routers = self.list()
self.data['routers'] = routers
class NetworkHealthMonitorService(NetworkService):
def list(self):
client = self.client
hms = client.list_health_monitors()
hms = hms['health_monitors']
hms = self._filter_by_tenant_id(hms)
LOG.debug("List count, %s Health Monitors" % len(hms))
return hms
def delete(self):
client = self.client
hms = self.list()
for hm in hms:
try:
client.delete_health_monitor(hm['id'])
except Exception:
LOG.exception("Delete Health Monitor exception.")
def dry_run(self):
hms = self.list()
self.data['health_monitors'] = hms
class NetworkMemberService(NetworkService):
def list(self):
client = self.client
members = client.list_members()
members = members['members']
members = self._filter_by_tenant_id(members)
LOG.debug("List count, %s Members" % len(members))
return members
def delete(self):
client = self.client
members = self.list()
for member in members:
try:
client.delete_member(member['id'])
except Exception:
LOG.exception("Delete Member exception.")
def dry_run(self):
members = self.list()
self.data['members'] = members
class NetworkVipService(NetworkService):
def list(self):
client = self.client
vips = client.list_vips()
vips = vips['vips']
vips = self._filter_by_tenant_id(vips)
LOG.debug("List count, %s VIPs" % len(vips))
return vips
def delete(self):
client = self.client
vips = self.list()
for vip in vips:
try:
client.delete_vip(vip['id'])
except Exception:
LOG.exception("Delete VIP exception.")
def dry_run(self):
vips = self.list()
self.data['vips'] = vips
class NetworkPoolService(NetworkService):
def list(self):
client = self.client
pools = client.list_pools()
pools = pools['pools']
pools = self._filter_by_tenant_id(pools)
LOG.debug("List count, %s Pools" % len(pools))
return pools
def delete(self):
client = self.client
pools = self.list()
for pool in pools:
try:
client.delete_pool(pool['id'])
except Exception:
LOG.exception("Delete Pool exception.")
def dry_run(self):
pools = self.list()
self.data['pools'] = pools
class NetworkMeteringLabelRuleService(NetworkService):
def list(self):
client = self.client
rules = client.list_metering_label_rules()
rules = rules['metering_label_rules']
rules = self._filter_by_tenant_id(rules)
LOG.debug("List count, %s Metering Label Rules" % len(rules))
return rules
def delete(self):
client = self.client
rules = self.list()
for rule in rules:
try:
client.delete_metering_label_rule(rule['id'])
except Exception:
LOG.exception("Delete Metering Label Rule exception.")
def dry_run(self):
rules = self.list()
self.data['rules'] = rules
class NetworkMeteringLabelService(NetworkService):
def list(self):
client = self.client
labels = client.list_metering_labels()
labels = labels['metering_labels']
labels = self._filter_by_tenant_id(labels)
LOG.debug("List count, %s Metering Labels" % len(labels))
return labels
def delete(self):
client = self.client
labels = self.list()
for label in labels:
try:
client.delete_metering_label(label['id'])
except Exception:
LOG.exception("Delete Metering Label exception.")
def dry_run(self):
labels = self.list()
self.data['labels'] = labels
class NetworkPortService(NetworkService):
def list(self):
client = self.client
ports = [port for port in
client.list_ports(**self.tenant_filter)['ports']
if port["device_owner"] == "" or
port["device_owner"].startswith("compute:")]
if self.is_preserve:
ports = self._filter_by_conf_networks(ports)
LOG.debug("List count, %s Ports" % len(ports))
return ports
def delete(self):
client = self.client
ports = self.list()
for port in ports:
try:
client.delete_port(port['id'])
except Exception:
LOG.exception("Delete Port exception.")
def dry_run(self):
ports = self.list()
self.data['ports'] = ports
class NetworkSecGroupService(NetworkService):
def list(self):
client = self.client
filter = self.tenant_filter
# cannot delete default sec group so never show it.
secgroups = [secgroup for secgroup in
client.list_security_groups(**filter)['security_groups']
if secgroup['name'] != 'default']
if self.is_preserve:
secgroups = self._filter_by_conf_networks(secgroups)
        LOG.debug("List count, %s security_groups" % len(secgroups))
return secgroups
def delete(self):
client = self.client
secgroups = self.list()
for secgroup in secgroups:
try:
                client.delete_security_group(secgroup['id'])
except Exception:
LOG.exception("Delete security_group exception.")
def dry_run(self):
secgroups = self.list()
self.data['secgroups'] = secgroups
class NetworkSubnetService(NetworkService):
def list(self):
client = self.client
subnets = client.list_subnets(**self.tenant_filter)
subnets = subnets['subnets']
if self.is_preserve:
subnets = self._filter_by_conf_networks(subnets)
LOG.debug("List count, %s Subnets" % len(subnets))
return subnets
def delete(self):
client = self.client
subnets = self.list()
for subnet in subnets:
try:
client.delete_subnet(subnet['id'])
except Exception:
LOG.exception("Delete Subnet exception.")
def dry_run(self):
subnets = self.list()
self.data['subnets'] = subnets
# Telemetry services
class TelemetryAlarmService(BaseService):
def __init__(self, manager, **kwargs):
super(TelemetryAlarmService, self).__init__(kwargs)
self.client = manager.telemetry_client
def list(self):
client = self.client
alarms = client.list_alarms()
LOG.debug("List count, %s Alarms" % len(alarms))
return alarms
def delete(self):
client = self.client
alarms = self.list()
for alarm in alarms:
try:
client.delete_alarm(alarm['id'])
except Exception:
LOG.exception("Delete Alarms exception.")
def dry_run(self):
alarms = self.list()
self.data['alarms'] = alarms
# begin global services
class FlavorService(BaseService):
def __init__(self, manager, **kwargs):
super(FlavorService, self).__init__(kwargs)
self.client = manager.flavors_client
def list(self):
client = self.client
flavors = client.list_flavors({"is_public": None})['flavors']
if not self.is_save_state:
# recreate list removing saved flavors
flavors = [flavor for flavor in flavors if flavor['id']
not in self.saved_state_json['flavors'].keys()]
if self.is_preserve:
flavors = [flavor for flavor in flavors
if flavor['id'] not in CONF_FLAVORS]
LOG.debug("List count, %s Flavors after reconcile" % len(flavors))
return flavors
def delete(self):
client = self.client
flavors = self.list()
for flavor in flavors:
try:
client.delete_flavor(flavor['id'])
except Exception:
LOG.exception("Delete Flavor exception.")
def dry_run(self):
flavors = self.list()
self.data['flavors'] = flavors
def save_state(self):
flavors = self.list()
self.data['flavors'] = {}
for flavor in flavors:
self.data['flavors'][flavor['id']] = flavor['name']
class ImageService(BaseService):
def __init__(self, manager, **kwargs):
super(ImageService, self).__init__(kwargs)
self.client = manager.images_client
def list(self):
client = self.client
images = client.list_images({"all_tenants": True})['images']
if not self.is_save_state:
images = [image for image in images if image['id']
not in self.saved_state_json['images'].keys()]
if self.is_preserve:
images = [image for image in images
if image['id'] not in CONF_IMAGES]
LOG.debug("List count, %s Images after reconcile" % len(images))
return images
def delete(self):
client = self.client
images = self.list()
for image in images:
try:
client.delete_image(image['id'])
except Exception:
LOG.exception("Delete Image exception.")
def dry_run(self):
images = self.list()
self.data['images'] = images
def save_state(self):
self.data['images'] = {}
images = self.list()
for image in images:
self.data['images'][image['id']] = image['name']
class IdentityService(BaseService):
def __init__(self, manager, **kwargs):
super(IdentityService, self).__init__(kwargs)
self.client = manager.identity_client
class UserService(IdentityService):
def list(self):
client = self.client
users = client.get_users()
if not self.is_save_state:
users = [user for user in users if user['id']
not in self.saved_state_json['users'].keys()]
if self.is_preserve:
users = [user for user in users if user['name']
not in CONF_USERS]
elif not self.is_save_state: # Never delete admin user
users = [user for user in users if user['name'] !=
CONF.identity.admin_username]
LOG.debug("List count, %s Users after reconcile" % len(users))
return users
def delete(self):
client = self.client
users = self.list()
for user in users:
try:
client.delete_user(user['id'])
except Exception:
LOG.exception("Delete User exception.")
def dry_run(self):
users = self.list()
self.data['users'] = users
def save_state(self):
users = self.list()
self.data['users'] = {}
for user in users:
self.data['users'][user['id']] = user['name']
class RoleService(IdentityService):
def list(self):
client = self.client
try:
roles = client.list_roles()
# reconcile roles with saved state and never list admin role
if not self.is_save_state:
roles = [role for role in roles if
(role['id'] not in
self.saved_state_json['roles'].keys()
and role['name'] != CONF.identity.admin_role)]
LOG.debug("List count, %s Roles after reconcile" % len(roles))
return roles
except Exception:
LOG.exception("Cannot retrieve Roles.")
return []
def delete(self):
client = self.client
roles = self.list()
for role in roles:
try:
client.delete_role(role['id'])
except Exception:
LOG.exception("Delete Role exception.")
def dry_run(self):
roles = self.list()
self.data['roles'] = roles
def save_state(self):
roles = self.list()
self.data['roles'] = {}
for role in roles:
self.data['roles'][role['id']] = role['name']
class TenantService(IdentityService):
def list(self):
client = self.client
tenants = client.list_tenants()['tenants']
if not self.is_save_state:
tenants = [tenant for tenant in tenants if (tenant['id']
not in self.saved_state_json['tenants'].keys()
and tenant['name'] != CONF.identity.admin_tenant_name)]
if self.is_preserve:
tenants = [tenant for tenant in tenants if tenant['name']
not in CONF_TENANTS]
LOG.debug("List count, %s Tenants after reconcile" % len(tenants))
return tenants
def delete(self):
client = self.client
tenants = self.list()
for tenant in tenants:
try:
client.delete_tenant(tenant['id'])
except Exception:
LOG.exception("Delete Tenant exception.")
def dry_run(self):
tenants = self.list()
self.data['tenants'] = tenants
def save_state(self):
tenants = self.list()
self.data['tenants'] = {}
for tenant in tenants:
self.data['tenants'][tenant['id']] = tenant['name']
class DomainService(BaseService):
def __init__(self, manager, **kwargs):
super(DomainService, self).__init__(kwargs)
self.client = manager.identity_v3_client
def list(self):
client = self.client
domains = client.list_domains()['domains']
if not self.is_save_state:
domains = [domain for domain in domains if domain['id']
not in self.saved_state_json['domains'].keys()]
LOG.debug("List count, %s Domains after reconcile" % len(domains))
return domains
def delete(self):
client = self.client
domains = self.list()
for domain in domains:
try:
client.update_domain(domain['id'], enabled=False)
client.delete_domain(domain['id'])
except Exception:
LOG.exception("Delete Domain exception.")
def dry_run(self):
domains = self.list()
self.data['domains'] = domains
def save_state(self):
domains = self.list()
self.data['domains'] = {}
for domain in domains:
self.data['domains'][domain['id']] = domain['name']
def get_tenant_cleanup_services():
tenant_services = []
if IS_CEILOMETER:
tenant_services.append(TelemetryAlarmService)
if IS_NOVA:
tenant_services.append(ServerService)
tenant_services.append(KeyPairService)
tenant_services.append(SecurityGroupService)
tenant_services.append(ServerGroupService)
if not IS_NEUTRON:
tenant_services.append(FloatingIpService)
tenant_services.append(NovaQuotaService)
if IS_HEAT:
tenant_services.append(StackService)
if IS_NEUTRON:
tenant_services.append(NetworkFloatingIpService)
if test.is_extension_enabled('metering', 'network'):
tenant_services.append(NetworkMeteringLabelRuleService)
tenant_services.append(NetworkMeteringLabelService)
tenant_services.append(NetworkRouterService)
tenant_services.append(NetworkPortService)
tenant_services.append(NetworkSubnetService)
tenant_services.append(NetworkService)
tenant_services.append(NetworkSecGroupService)
if IS_CINDER:
tenant_services.append(SnapshotService)
tenant_services.append(VolumeService)
tenant_services.append(VolumeQuotaService)
return tenant_services
def get_global_cleanup_services():
global_services = []
if IS_NOVA:
global_services.append(FlavorService)
if IS_GLANCE:
global_services.append(ImageService)
global_services.append(UserService)
global_services.append(TenantService)
global_services.append(DomainService)
global_services.append(RoleService)
return global_services
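# Usage sketch (an assumption -- the real driver is tempest's cleanup tool,
# which owns credentials and saved state).  A save-state pass over the global
# services might look like:
#
#     init_conf()
#     kwargs = {'data': {}, 'is_dry_run': False, 'is_save_state': True,
#               'is_preserve': False, 'saved_state_json': {}}
#     manager = clients.AdminManager()
#     for svc in get_global_cleanup_services():
#         svc(manager, **kwargs).run()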
|
|
# -*- coding: utf-8 -*-
from time import time, mktime
from datetime import datetime, timedelta
from retrying import retry
from iso8601 import parse_date
from socket import setdefaulttimeout
from openprocurement.search.source import BaseSource, TendersClient
from openprocurement.search.utils import restkit_error
from logging import getLogger
logger = getLogger(__name__)
class AssetSource(BaseSource):
"""Asset Source
"""
__doc_type__ = 'asset'
config = {
'asset_api_key': '',
'asset_api_url': "",
'asset_api_version': '0',
'asset_resource': 'assets',
'asset_api_mode': '',
'asset_skip_until': None,
'asset_skip_after': None,
'asset_limit': 1000,
'asset_preload': 10000,
'asset_reseteach': 3,
'asset_resethour': 23,
'asset_user_agent': '',
'asset_file_cache': '',
'asset_cache_allow': 'complete,cancelled,unsuccessful',
'asset_cache_minage': 15,
'timeout': 30,
}
def __init__(self, config={}, use_cache=False):
if config:
self.config.update(config)
self.config['asset_limit'] = int(self.config['asset_limit'] or 100)
self.config['asset_preload'] = int(self.config['asset_preload'] or 100)
self.config['asset_reseteach'] = int(self.config['asset_reseteach'] or 3)
self.config['asset_resethour'] = int(self.config['asset_resethour'] or 0)
self.client_user_agent += " (assets) " + self.config['asset_user_agent']
if use_cache:
self.cache_setpath(self.config['asset_file_cache'], self.config['asset_api_url'],
self.config['asset_api_version'], 'assets')
if self.cache_path:
self.cache_allow_status = self.config['asset_cache_allow'].split(',')
logger.info("[asset] Cache allow status %s", self.cache_allow_status)
self.client = None
def procuring_entity(self, item):
try:
return item.data.get('assetCustodian', None)
except (KeyError, AttributeError):
return None
def patch_version(self, item):
"""Convert dateModified to long version
"""
item['doc_type'] = self.__doc_type__
dt = parse_date(item['dateModified'])
version = 1e6 * mktime(dt.timetuple()) + dt.microsecond
item['version'] = long(version)
return item
def patch_asset(self, asset):
return asset
def need_reset(self):
if self.should_reset:
return True
if time() - self.last_reset_time > 3600 * int(self.config['asset_reseteach']):
return True
if time() - self.last_reset_time > 3600:
return datetime.now().hour == int(self.config['asset_resethour'])
@retry(stop_max_attempt_number=5, wait_fixed=5000)
def reset(self):
logger.info("Reset assets, asset_skip_until=%s asset_skip_after=%s",
self.config['asset_skip_until'], self.config['asset_skip_after'])
self.stat_resets += 1
if self.config.get('timeout', None):
setdefaulttimeout(float(self.config['timeout']))
params = {}
if self.config['asset_api_mode']:
params['mode'] = self.config['asset_api_mode']
if self.config['asset_limit']:
params['limit'] = self.config['asset_limit']
self.client = TendersClient(
key=self.config['asset_api_key'],
host_url=self.config['asset_api_url'],
api_version=self.config['asset_api_version'],
resource=self.config['asset_resource'],
params=params,
timeout=float(self.config['timeout']),
user_agent=self.client_user_agent)
if self.config['asset_file_cache'] and self.cache_path:
cache_minage = int(self.config['asset_cache_minage'])
cache_date = datetime.now() - timedelta(days=cache_minage)
self.cache_allow_dateModified = cache_date.isoformat()
logger.info("[asset] Cache allow dateModified before %s",
self.cache_allow_dateModified)
logger.info("AssetClient %s", self.client.headers)
self.skip_until = self.config.get('asset_skip_until', None)
if self.skip_until and self.skip_until[:2] != '20':
self.skip_until = None
self.skip_after = self.config.get('asset_skip_after', None)
if self.skip_after and self.skip_after[:2] != '20':
self.skip_after = None
self.last_reset_time = time()
self.should_reset = False
def preload(self):
preload_items = []
retry_count = 0
while True:
if retry_count > 3 or self.should_exit:
break
try:
items = self.client.get_tenders()
self.stat_queries += 1
except Exception as e:
retry_count += 1
logger.error("GET %s retry %d count %d error %s", self.client.prefix_path,
retry_count, len(preload_items), restkit_error(e, self.client))
self.sleep(5 * retry_count)
self.reset()
continue
if not items:
break
preload_items.extend(items)
if len(preload_items) >= 100:
logger.info("Preload %d assets, last %s",
len(preload_items), items[-1]['dateModified'])
if len(items) < 10:
break
if len(preload_items) >= self.config['asset_preload']:
break
return preload_items
def items(self):
if not self.client:
self.reset()
self.last_skipped = None
for asset in self.preload():
if self.should_exit:
raise StopIteration()
if self.skip_until and self.skip_until > asset['dateModified']:
self.last_skipped = asset['dateModified']
self.stat_skipped += 1
continue
if self.skip_after and self.skip_after < asset['dateModified']:
self.last_skipped = asset['dateModified']
self.stat_skipped += 1
continue
self.stat_fetched += 1
yield self.patch_version(asset)
def cache_allow(self, data):
if data and data['data']['status'] in self.cache_allow_status:
return data['data']['dateModified'] < self.cache_allow_dateModified
return False
def get(self, item):
asset = {}
retry_count = 0
if self.cache_path:
asset = self.cache_get(item)
while not asset:
if self.should_exit:
break
try:
asset = self.client.get_tender(item['id'])
assert asset['data']['id'] == item['id'], "asset.id"
assert asset['data']['dateModified'] >= item['dateModified'], "asset.dateModified"
except Exception as e:
if retry_count > 3:
raise e
retry_count += 1
logger.error("GET %s/%s retry %d error %s", self.client.prefix_path,
str(item['id']), retry_count, restkit_error(e, self.client))
self.sleep(5 * retry_count)
if retry_count > 1:
self.reset()
asset = {}
# save to cache
if asset and self.cache_path:
self.cache_put(asset)
if item['dateModified'] != asset['data']['dateModified']:
logger.debug("Asset dateModified mismatch %s %s %s",
item['id'], item['dateModified'],
asset['data']['dateModified'])
item['dateModified'] = asset['data']['dateModified']
item = self.patch_version(item)
asset['meta'] = item
self.stat_getitem += 1
return self.patch_asset(asset)
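# Usage sketch (an illustration -- the real driver is the openprocurement.search
# indexing engine, which supplies the config and iterates sources; api_url and
# api_key below are placeholders):
#
#     source = AssetSource({'asset_api_url': api_url, 'asset_api_key': api_key})
#     source.reset()
#     for meta in source.items():   # lightweight listing entries
#         doc = source.get(meta)    # full asset body with 'meta' attached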
|
|
"""
XML serializer.
"""
from collections import OrderedDict
from xml.dom import pulldom
from xml.sax import handler
from xml.sax.expatreader import ExpatParser as _ExpatParser
from django.apps import apps
from django.conf import settings
from django.core.serializers import base
from django.db import DEFAULT_DB_ALIAS, models
from django.utils.xmlutils import (
SimplerXMLGenerator, UnserializableContentError,
)
class Serializer(base.Serializer):
"""Serialize a QuerySet to XML."""
def indent(self, level):
if self.options.get('indent') is not None:
self.xml.ignorableWhitespace('\n' + ' ' * self.options.get('indent') * level)
def start_serialization(self):
"""
Start serialization -- open the XML document and the root element.
"""
self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
self.xml.startDocument()
self.xml.startElement("django-objects", {"version": "1.0"})
def end_serialization(self):
"""
End serialization -- end the document.
"""
self.indent(0)
self.xml.endElement("django-objects")
self.xml.endDocument()
def start_object(self, obj):
"""
Called as each object is handled.
"""
if not hasattr(obj, "_meta"):
raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
self.indent(1)
attrs = OrderedDict([("model", str(obj._meta))])
if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
obj_pk = obj._get_pk_val()
if obj_pk is not None:
attrs['pk'] = str(obj_pk)
self.xml.startElement("object", attrs)
def end_object(self, obj):
"""
Called after handling all fields for an object.
"""
self.indent(1)
self.xml.endElement("object")
def handle_field(self, obj, field):
"""
Handle each field on an object (except for ForeignKeys and
ManyToManyFields).
"""
self.indent(2)
self.xml.startElement("field", OrderedDict([
("name", field.name),
("type", field.get_internal_type()),
]))
# Get a "string version" of the object's data.
if getattr(obj, field.name) is not None:
try:
self.xml.characters(field.value_to_string(obj))
except UnserializableContentError:
raise ValueError("%s.%s (pk:%s) contains unserializable characters" % (
obj.__class__.__name__, field.name, obj._get_pk_val()))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_fk_field(self, obj, field):
"""
Handle a ForeignKey (they need to be treated slightly
differently from regular fields).
"""
self._start_relational_field(field)
related_att = getattr(obj, field.get_attname())
if related_att is not None:
if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
related = getattr(obj, field.name)
# If related object has a natural key, use it
related = related.natural_key()
# Iterable natural keys are rolled out as subelements
for key_value in related:
self.xml.startElement("natural", {})
self.xml.characters(str(key_value))
self.xml.endElement("natural")
else:
self.xml.characters(str(related_att))
else:
self.xml.addQuickElement("None")
self.xml.endElement("field")
def handle_m2m_field(self, obj, field):
"""
Handle a ManyToManyField. Related objects are only serialized as
references to the object's PK (i.e. the related *data* is not dumped,
just the relation).
"""
if field.remote_field.through._meta.auto_created:
self._start_relational_field(field)
if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
# If the objects in the m2m have a natural key, use it
def handle_m2m(value):
natural = value.natural_key()
# Iterable natural keys are rolled out as subelements
self.xml.startElement("object", {})
for key_value in natural:
self.xml.startElement("natural", {})
self.xml.characters(str(key_value))
self.xml.endElement("natural")
self.xml.endElement("object")
else:
def handle_m2m(value):
self.xml.addQuickElement("object", attrs={
'pk': str(value._get_pk_val())
})
for relobj in getattr(obj, field.name).iterator():
handle_m2m(relobj)
self.xml.endElement("field")
def _start_relational_field(self, field):
"""Output the <field> element for relational fields."""
self.indent(2)
self.xml.startElement("field", OrderedDict([
("name", field.name),
("rel", field.remote_field.__class__.__name__),
("to", str(field.remote_field.model._meta)),
]))
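# This class is normally reached through the framework-level API rather than
# instantiated directly, e.g.:
#
#     from django.core import serializers
#     xml_data = serializers.serialize("xml", SomeModel.objects.all())
#     objects = list(serializers.deserialize("xml", xml_data))
#
# (SomeModel is a placeholder for any installed model.)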
class Deserializer(base.Deserializer):
"""Deserialize XML."""
def __init__(self, stream_or_string, *, using=DEFAULT_DB_ALIAS, ignorenonexistent=False, **options):
super().__init__(stream_or_string, **options)
self.event_stream = pulldom.parse(self.stream, self._make_parser())
self.db = using
self.ignore = ignorenonexistent
def _make_parser(self):
"""Create a hardened XML parser (no custom/external entities)."""
return DefusedExpatParser()
def __next__(self):
for event, node in self.event_stream:
if event == "START_ELEMENT" and node.nodeName == "object":
self.event_stream.expandNode(node)
return self._handle_object(node)
raise StopIteration
def _handle_object(self, node):
"""Convert an <object> node to a DeserializedObject."""
# Look up the model using the model loading mechanism. If this fails,
# bail.
Model = self._get_model_from_node(node, "model")
# Start building a data dictionary from the object.
data = {}
if node.hasAttribute('pk'):
data[Model._meta.pk.attname] = Model._meta.pk.to_python(
node.getAttribute('pk'))
# Also start building a dict of m2m data (this is saved as
# {m2m_accessor_attribute : [list_of_related_objects]})
m2m_data = {}
field_names = {f.name for f in Model._meta.get_fields()}
# Deserialize each field.
for field_node in node.getElementsByTagName("field"):
# If the field is missing the name attribute, bail (are you
# sensing a pattern here?)
field_name = field_node.getAttribute("name")
if not field_name:
raise base.DeserializationError("<field> node is missing the 'name' attribute")
# Get the field from the Model. This will raise a
# FieldDoesNotExist if, well, the field doesn't exist, which will
# be propagated correctly unless ignorenonexistent=True is used.
if self.ignore and field_name not in field_names:
continue
field = Model._meta.get_field(field_name)
# As is usually the case, relation fields get the special treatment.
if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):
m2m_data[field.name] = self._handle_m2m_field_node(field_node, field)
elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel):
data[field.attname] = self._handle_fk_field_node(field_node, field)
else:
if field_node.getElementsByTagName('None'):
value = None
else:
value = field.to_python(getInnerText(field_node).strip())
data[field.name] = value
obj = base.build_instance(Model, data, self.db)
# Return a DeserializedObject so that the m2m data has a place to live.
return base.DeserializedObject(obj, m2m_data)
def _handle_fk_field_node(self, node, field):
"""
Handle a <field> node for a ForeignKey
"""
# Check if there is a child node named 'None', returning None if so.
if node.getElementsByTagName('None'):
return None
else:
model = field.remote_field.model
if hasattr(model._default_manager, 'get_by_natural_key'):
keys = node.getElementsByTagName('natural')
if keys:
# If there are 'natural' subelements, it must be a natural key
field_value = [getInnerText(k).strip() for k in keys]
obj = model._default_manager.db_manager(self.db).get_by_natural_key(*field_value)
obj_pk = getattr(obj, field.remote_field.field_name)
# If this is a natural foreign key to an object that
# has a FK/O2O as the foreign key, use the FK value
if field.remote_field.model._meta.pk.remote_field:
obj_pk = obj_pk.pk
else:
# Otherwise, treat like a normal PK
field_value = getInnerText(node).strip()
obj_pk = model._meta.get_field(field.remote_field.field_name).to_python(field_value)
return obj_pk
else:
field_value = getInnerText(node).strip()
return model._meta.get_field(field.remote_field.field_name).to_python(field_value)
def _handle_m2m_field_node(self, node, field):
"""
Handle a <field> node for a ManyToManyField.
"""
model = field.remote_field.model
default_manager = model._default_manager
if hasattr(default_manager, 'get_by_natural_key'):
def m2m_convert(n):
keys = n.getElementsByTagName('natural')
if keys:
# If there are 'natural' subelements, it must be a natural key
field_value = [getInnerText(k).strip() for k in keys]
obj_pk = default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk
else:
# Otherwise, treat like a normal PK value.
obj_pk = model._meta.pk.to_python(n.getAttribute('pk'))
return obj_pk
else:
def m2m_convert(n):
return model._meta.pk.to_python(n.getAttribute('pk'))
return [m2m_convert(c) for c in node.getElementsByTagName("object")]
def _get_model_from_node(self, node, attr):
"""
Look up a model from a <object model=...> or a <field rel=... to=...>
node.
"""
model_identifier = node.getAttribute(attr)
if not model_identifier:
raise base.DeserializationError(
"<%s> node is missing the required '%s' attribute"
% (node.nodeName, attr))
try:
return apps.get_model(model_identifier)
except (LookupError, TypeError):
raise base.DeserializationError(
"<%s> node has invalid model identifier: '%s'"
% (node.nodeName, model_identifier))
def getInnerText(node):
"""Get all the inner text of a DOM node (recursively)."""
# inspired by http://mail.python.org/pipermail/xml-sig/2005-March/011022.html
inner_text = []
for child in node.childNodes:
if child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE:
inner_text.append(child.data)
elif child.nodeType == child.ELEMENT_NODE:
inner_text.extend(getInnerText(child))
else:
pass
return "".join(inner_text)
# Below code based on Christian Heimes' defusedxml
class DefusedExpatParser(_ExpatParser):
"""
An expat parser hardened against XML bomb attacks.
Forbid DTDs, external entity references
"""
def __init__(self, *args, **kwargs):
_ExpatParser.__init__(self, *args, **kwargs)
self.setFeature(handler.feature_external_ges, False)
self.setFeature(handler.feature_external_pes, False)
def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
raise DTDForbidden(name, sysid, pubid)
def entity_decl(self, name, is_parameter_entity, value, base,
sysid, pubid, notation_name):
raise EntitiesForbidden(name, value, base, sysid, pubid, notation_name)
def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
# expat 1.2
raise EntitiesForbidden(name, None, base, sysid, pubid, notation_name)
def external_entity_ref_handler(self, context, base, sysid, pubid):
raise ExternalReferenceForbidden(context, base, sysid, pubid)
def reset(self):
_ExpatParser.reset(self)
parser = self._parser
parser.StartDoctypeDeclHandler = self.start_doctype_decl
parser.EntityDeclHandler = self.entity_decl
parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
parser.ExternalEntityRefHandler = self.external_entity_ref_handler
class DefusedXmlException(ValueError):
"""Base exception."""
def __repr__(self):
return str(self)
class DTDForbidden(DefusedXmlException):
"""Document type definition is forbidden."""
def __init__(self, name, sysid, pubid):
super().__init__()
self.name = name
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "DTDForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
class EntitiesForbidden(DefusedXmlException):
"""Entity definition is forbidden."""
def __init__(self, name, value, base, sysid, pubid, notation_name):
super().__init__()
self.name = name
self.value = value
self.base = base
self.sysid = sysid
self.pubid = pubid
self.notation_name = notation_name
def __str__(self):
tpl = "EntitiesForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
class ExternalReferenceForbidden(DefusedXmlException):
"""Resolving an external reference is forbidden."""
def __init__(self, context, base, sysid, pubid):
super().__init__()
self.context = context
self.base = base
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "ExternalReferenceForbidden(system_id='{}', public_id={})"
return tpl.format(self.sysid, self.pubid)
|
|
import math
import sys
import types
import fishlib
import cli
import flags
if sys.version_info < (2, 6):
try:
import simplejson as json
except ImportError:
from django.utils import simplejson as json
else:
import json
class PermissionsError(Exception):
pass
class WisdomFailure(Exception):
pass
class RemoveError(Exception):
pass
class PermissionDesc:
def __init__(self, entity, path, actions, names):
self.entity = entity
self.path = path
self.actions = actions
self.names = names
def action(self, name):
return self.actions[self.names.index(name)]
RAW_PERMS = {
u'abstract-tag': PermissionDesc(u'abstract-tag', u'tags',
[u'update', u'delete', u'control'],
[u'metadata', u'delete', u'acontrol']),
u'tag': PermissionDesc(u'tag', u'tag-values',
[u'create', u'read', u'delete', u'control'],
[u'tag', u'read', u'untag', 'tcontrol']),
u'namespace': PermissionDesc(u'namespace', u'namespaces',
[u'create', u'update', u'delete', u'list',
u'control'],
[u'create', u'metadata', u'delete', u'read',
u'control'])
}
RAW_PERM_ENTITIES = RAW_PERMS.keys()
READ_NAMES = [u'read']
WRITE_NAMES = [u'create', u'metadata', u'tag', u'untag', u'delete']
CONTROL_NAMES = [u'acontrol', u'tcontrol', u'control']
ALL_NAMES = READ_NAMES + WRITE_NAMES + CONTROL_NAMES
class UnixPerm:
def __init__(self, s):
n = ord(s) - ord(u'0')
if not 0 <= n <= 7:
raise PermissionsError(u'Permissions string must have form ddd '
u'with each d in range 0-7')
self.read = n & 4 == 4
self.write = n & 2 == 2
self.control = n & 1 == 1
def __unicode__(self):
return u'%s%s%s' % (u'r' if self.read else u'-',
u'w' if self.write else u'-',
u'c' if self.control else u'-')
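    # Each spec character is read like a Unix octal permission digit, with
    # 'c' (control) in place of execute: e.g. UnixPerm(u'6') renders as 'rw-'
    # and UnixPerm(u'4') as 'r--'.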
class UnixPerms:
def __init__(self, spec, user, isTag=None):
self.user = user
self.owner = UnixPerm(spec[0])
self.group = UnixPerm(spec[1])
self.world = UnixPerm(spec[2])
self.read = self.fi_perm(self.world.read, self.owner.read, isTag)
self.write = self.fi_perm(self.world.write, self.owner.write, isTag)
self.control = self.fi_perm(self.world.control, self.owner.control,
isTag)
def fi_perm(self, w, o, isTag):
policy = u'open' if w else u'closed'
if policy == u'closed':
exceptions = [self.user] if o else []
else:
exceptions = [] if o else [self.user]
return FluidinfoPerm(self.user, policy, exceptions, isTag)
def check_owner_control_ok(self):
if not self.owner.control:
            raise WisdomFailure(u'I\'m reluctant to allow you to remove your '
u'own control permission')
def __unicode__(self):
return u'%s%s%s' % (unicode(self.owner), unicode(self.group),
unicode(self.world))
class FluidinfoPerm:
def __init__(self, owner, policy=None, exceptions=None, hash=None,
name=None, action=None, isTag=None, isPolicy=False):
self.owner = owner
self.name = name
self.action = action
self.isTag = isTag
self.isPolicy = isPolicy
if hash:
self.policy=hash[u'policy']
self.exceptions=hash[u'exceptions']
else:
self.policy = policy
self.exceptions = exceptions
def accessible(self, code, user):
return (code if self.policy == u'open' or user in self.exceptions
else u'-')
def isOpen(self, code):
return code if self.policy == u'open' else u'-'
def __unicode__(self):
return u'policy: %s; exceptions = [%s]' % (self.policy,
u', '.join(self.exceptions))
class FluidinfoPerms:
def __init__(self, db, path, isTag, getFromFI=True, isPolicy=False):
self.isTag = isTag
self.isPolicy = isPolicy
self.path = path
self.valid = True
self.entities = [u'abstract-tag', u'tag'] if isTag else [u'namespace']
self.compressIfPoss = True # used by ls -L / -G
self.owner = path.split(u'/')[1]
for entity in self.entities:
desc = RAW_PERMS[entity]
for (action, name) in zip(desc.actions, desc.names):
if getFromFI:
if isPolicy:
p = db.get_raw_policy(entity, path[1:], action)
else:
p = db.get_raw_perm(entity, path[1:], action, isTag)
self.__dict__[name] = p
if type(p) in (int, long):
                        self.valid = False
else:
# figure out defaults etc.
policy = u'open' if name in READ_NAMES else u'closed'
exceptions = [] if name in READ_NAMES else [self.owner]
self.__dict__[name] = FluidinfoPerm(self.owner, policy,
exceptions, None,
name, action, isTag,
isPolicy)
def set_to_private(self):
for name in ALL_NAMES:
if name in self.__dict__:
self.__dict__[name].policy = u'closed'
self.__dict__[name].exceptions = [self.owner]
def set_to_default(self):
for name in ALL_NAMES:
if name in self.__dict__:
if name in READ_NAMES:
self.__dict__[name].policy = u'open'
self.__dict__[name].exceptions = []
else:
self.__dict__[name].policy = u'closed'
self.__dict__[name].exceptions = [self.owner]
def set_group_writable(self, group):
fullgroup = group + ([] if self.owner in group else [self.owner])
for name in WRITE_NAMES:
if hasattr(self, name):
self.__dict__[name].policy = u'closed'
self.__dict__[name].exceptions = fullgroup
def set_group_readable(self, group):
fullgroup = group + ([] if self.owner in group else [self.owner])
self.__dict__[u'read'].policy = u'closed'
self.__dict__[u'read'].exceptions = fullgroup
def lock(self):
for name in WRITE_NAMES:
if hasattr(self, name):
self.__dict__[name].policy = u'closed'
self.__dict__[name].exceptions = []
def unlock(self):
for name in WRITE_NAMES:
if hasattr(self, name):
self.__dict__[name].policy = u'closed'
self.__dict__[name].exceptions = [self.owner]
def check_owner_control(self, p):
if ((p.policy == 'open' and self.owner in p.exceptions)
or (p.policy == 'closed' and not self.owner in p.exceptions)):
raise PermissionsError(u'You are attempting to set permissions '
u'that do not include owner control.\n'
u' If you are sure you know what you are '
u'doing, you can force this with -f.')
def update_fluidinfo(self, db, permsToSet=None, force=False,
restrict_to=None, verbose=False):
permsToSet = permsToSet or ALL_NAMES
# check owner has control permissions
if not force:
for name in CONTROL_NAMES:
if hasattr(self, name):
self.check_owner_control(self.__dict__[name])
entities = (u'abstract-tag', u'tag') if self.isTag else (u'namespace',)
for entity in entities:
for name in RAW_PERMS[entity].names:
if name in permsToSet and (not restrict_to
or name in restrict_to):
action = RAW_PERMS[entity].action(name)
if verbose:
db.Print(u'Setting %s %s\'s %s permission to %s '
u'except %s' % (entity, self.path[1:],
action,
self.__dict__[name].policy,
unicode(self.__dict__[name].exceptions)))
err = db.set_raw_perm(entity, self.path[1:], action,
self.__dict__[name].policy,
self.__dict__[name].exceptions)
if err:
db.warning(cli.error_code(err))
def fi_tag_desc(self):
s = [u'']
writes = (u'metadata', u'delete', u'tag', u'untag')
controls = (self.acontrol, self.tcontrol)
wPerms = []
for kind in (u'ABSTRACT TAG', u'TAG'):
desc = RAW_PERMS[u'tag' if kind == u'TAG' else u'abstract-tag']
L = u't' if kind == u'TAG' else u'a'
s.append(kind[-3:] + u' (/%s)' % desc.path)
if kind == u'TAG':
s.append(u' Read')
s.append(u' %-19s %s' % (u'read (read):',
unicode(self.read)))
s.append(u' Write')
for (fi, fdb) in zip(desc.actions, desc.names):
if fdb in writes:
w = unicode(self.__dict__[fdb])
wPerms.append(w)
s.append(u' %-19s %s' % (u'%s (%s):' % (fi, fdb), w))
s.extend([u' Control'])
c = self.tcontrol if kind == u'TAG' else self.acontrol
s.append(u' %-19s %s' % (u'control (%scontrol):' % L,
unicode(c)))
s.append(u'')
if self.compressIfPoss:
if (all(w == wPerms[0] for w in wPerms[1:])
and unicode(self.acontrol) == unicode(self.tcontrol)):
s = [u' read: %s' % unicode(self.read),
u' write: %s' % unicode(wPerms[0]),
u' control: %s' % unicode(self.acontrol),
u'']
return u'\n'.join(s)
def fi_ns_desc(self):
s = [u'']
writes = (u'create', u'metadata', u'delete')
desc = RAW_PERMS[u'namespace']
s.append(u'NAMESPACE (/%s)' % desc.path)
s.append(u' Read')
s.append(u' %-18s %s' % (u'list (read):', unicode(self.read)))
s.append(u' Write')
wPerms = []
for (fi, fdb) in zip(desc.actions, desc.names):
if fdb in writes:
w = unicode(self.__dict__[fdb])
wPerms.append(w)
s.append(u' %-18s %s' % (u'%s (%s):' % (fi, fdb), w))
s.append(u' Control')
s.append(u' %-18s %s' % (u'control (control):', self.control))
if self.compressIfPoss and all(w == wPerms[0] for w in wPerms[1:]):
s = [u' read: %s' % unicode(self.read),
u' write: %s' % unicode(wPerms[0]),
u' control: %s' % unicode(self.control)]
s.append(u'')
return u'\n'.join(s)
def __unicode__(self):
if self.isTag is None:
raise Exception
if not self.valid:
return ' (denied) '
return self.fi_tag_desc() if self.isTag else self.fi_ns_desc()
def string_format(n):
return u'%%-%ds' % n
def len_exc(exceptions, owner):
return len(exceptions) - 1 if owner in exceptions else len(exceptions)
def combined_status(list_):
L = list_[0]
return L if all(ell == L for ell in list_) else u'/'
def to_string_grid(items, pageWidth=78, maxCols=9):
if items == []:
return u''
widest = max([len(s) for s in items])
nCols = min(max(pageWidth / (widest + 1), 1), maxCols)
colWidth = int(math.ceil(float(pageWidth) / nCols)) - 1
fmt = string_format(colWidth)
nItems = len(items)
nRows = int(math.ceil(float(nItems) / nCols))
return u'\n'.join([u' '.join([fmt % items[col * nRows + row]
for col in range(nCols)
if col * nRows + row < nItems])
for row in range(nRows)])
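# Note: to_string_grid fills column-major: consecutive items run down a
# column (items[col * nRows + row]), fitting as many columns as pageWidth
# allows given the widest item, capped at maxCols.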
class ExtendedFluidinfo(fishlib.Fluidinfo):
def __init__(self, credentials=None, host=None, debug=False,
encoding=fishlib.DEFAULT_ENCODING, unixStylePaths=None,
saveOut=False, cache=None, webapp=False):
fishlib.Fluidinfo.__init__(self, credentials, host, debug,
encoding, unixStylePaths, saveOut,
cache, webapp)
def list_namespace(self, ns, returnDescription=True,
returnNamespaces=True, returnTags=True):
status, content = self.call(u'GET', u'/namespaces/%s' % ns, None,
returnDescription=returnDescription,
returnNamespaces=returnNamespaces,
returnTags=returnTags)
return content if status == fishlib.STATUS.OK else status
def list_r_namespace(self, rootns):
L = self.list_namespace(rootns, returnDescription=False)
if L == fishlib.STATUS.NOT_FOUND:
return fishlib.STATUS.NOT_FOUND
if type(L) in (int, long):
return {u'namespaceNames': [], u'tagNames': [], u'failures': True}
failures = False
namespaces = [u'%s/%s' % (rootns, space)
for space in L[u'namespaceNames']]
tags = [u'%s/%s' % (rootns, tag) for tag in L[u'tagNames']]
subns = []
subtags = []
for s in namespaces:
L = self.list_namespace(s, returnDescription=False)
if type(L) in (int, long):
failures = True
else:
subns.extend([u'%s/%s' % (s, space)
for space in L[u'namespaceNames']])
subtags.extend([u'%s/%s' % (s, space)
for space in L[u'tagNames']])
namespaces.extend(subns)
tags.extend(subtags)
return {u'namespaceNames': namespaces, u'tagNames': tags,
u'failures': failures}
def rm_r(self, rootns):
L = self.list_r_namespace(rootns)
if L == fishlib.STATUS.NOT_FOUND:
return L
elif L[u'failures']:
return 1 # non-zero failure code
# First delete all the tags & sub-namespaces
for items, fn in ((L[u'tagNames'], self.delete_abstract_tag),
(L[u'namespaceNames'], self.delete_namespace)):
si = [(i.split(u'/'), i) for i in items]
z = [(len(s), i) for (s, i) in si]
z.sort()
z.reverse()
failed = 0
for (n, i) in z:
r = fn(u'/%s' % i)
failed = failed or (r if r != fishlib.STATUS.NO_CONTENT
else 0)
if failed:
return failed
return self.delete_namespace(u'/' + rootns)
    def rm_all_tag_values(self, tag):
        # Untag every object currently carrying this tag.
        path = tag[1:] if tag.startswith(u'/') else tag
        self.untag_by_query(u'has %s' % path, [path])
def list_sorted_ns(self, ns, long_=False, columns=True, recurse=False,
prnt=False, longer=False, longest=False):
h = self.list_namespace(ns)
return self.list_sorted_nshash(h, ns, long_, columns, recurse,
prnt=prnt, longer=longer,
longest=longest)
def list_sorted_nshash(self, h, ns, long_=False, columns=True,
recurse=False, prnt=False, longer=False,
longest=False):
if type(h) in (int, long):
if h == fishlib.STATUS.UNAUTHORIZED:
return u'Permission denied.'
else:
return u'Error status %s' % h
tagNames = h[u'tagNames']
tagNames.sort()
spaces = h[u'namespaceNames']
spaces.sort()
items = tagNames[:] + [u'%s/' % space for space in spaces]
items.sort()
if items:
fmt = string_format(max([len(item) for item in items]))
if recurse:
self.Print(u'\n%s:' % ns)
if long_ or longer or longest:
res = []
for item in items:
r = self.full_perms(ns + u'/' + fmt % item, longer,
longest=longest)
res.append(r)
if prnt:
self.Print(r)
result = u'\n'.join(res)
elif columns == False:
result = u'\n'.join(items)
if prnt:
self.Print(result)
else:
result = to_string_grid(items)
if prnt:
self.Print(result)
if recurse:
others = u'\n'.join([self.list_sorted_ns(u'%s/%s' % (ns, space),
long_, columns, recurse,
prnt=prnt, longer=longer,
longest=longest)
for space in spaces])
return u'%s:\n%s\n\n%s' % (ns, result, others)
else:
return result
def get_raw_perm(self, entity, name, action, isTag):
assert entity in RAW_PERM_ENTITIES
owner = entity.split(u'/')[0]
perm = RAW_PERMS[entity]
assert action in perm.actions
path = u'/permissions/%s/%s' % (perm.path, name)
status, content = self.call(u'GET', path, None, action=action)
return (FluidinfoPerm(owner, hash=content, name=name, action=action,
isTag=isTag)
if status == fishlib.STATUS.OK else status)
def get_raw_policy(self, entity, owner, action):
assert entity in RAW_PERM_ENTITIES
perm = RAW_PERMS[entity]
assert action in perm.actions
path = u'/policies/%s/%s/%s' % (owner, perm.path, action)
status, content = self.call(u'GET', path, None, action=action)
return (FluidinfoPerm(owner, hash=content, name=owner, action=action,
isPolicy=True)
if status == fishlib.STATUS.OK else status)
def get_tag_perms_hash(self, tag):
h = {}
for entity in [u'abstract-tag', u'tag']:
desc = RAW_PERMS[entity]
for (action, name) in zip(desc.actions, desc.names):
h[name] = self.get_raw_perm(entity, tag, action, True)
return h
def get_ns_perms_hash(self, ns):
h = {}
for entity in [u'namespace']:
desc = RAW_PERMS[entity]
for (action, name) in zip(desc.actions, desc.names):
h[name] = self.get_raw_perm(entity, ns, action, False)
return h
def tag_perms_string(self, tag, group=False):
h = self.get_tag_perms_hash(tag)
s = []
owner = tag.split(u'/')[0]
r = h[u'read']
a, t = h[u'acontrol'], h[u'tcontrol']
writes = (h[u'metadata'], h[u'delete'], h[u'tag'], h[u'untag'])
ownerRead = r.accessible(u'r', owner)
ownerWrites = [w.accessible(u'w', owner) for w in writes]
ownerControl = combined_status((t.accessible(u'c', owner),
a.accessible(u'c', owner)))
worldControlT = t.isOpen(u'c')
worldControlA = a.isOpen(u'c')
worldControl = (worldControlT if worldControlT == worldControlA
else u'/')
worldRead = r.isOpen(u'r')
worldWrites = [w.isOpen(u'w') for w in writes]
ownerWrite = write_status(ownerWrites)
worldWrite = write_status(worldWrites)
groupRead = self.group_perm(u'r', r, owner)
groupWrite = combined_status([self.group_perm(u'w', w, owner)
for w in writes])
groupControl = combined_status([self.group_perm(u'c', c, owner)
for c in (a, t)])
ps = u't%s%s%s%s%s%s%s%s%s' % (ownerRead, ownerWrite, ownerControl,
groupRead, groupWrite, groupControl,
worldRead, worldWrite, worldControl)
if group:
return ps + u' ' + self.group_members(r, writes, owner)
else:
return ps
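    # The string built above (and its namespace counterpart in
    # ns_perms_string) is a ten-character summary: a leading 't' (or 'n' for
    # namespaces) followed by read/write/control flags for the owner, then
    # for the group, then for the world, in that order.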
def group_perm(self, code, perm, owner):
n = len_exc(perm.exceptions, owner)
if perm.policy == u'closed':
return code if n > 0 else u'-'
else:
return u'-' if n > 0 else code
def fi_perm_desc(self, perm):
return (u'policy: %s; exceptions [%s]'
% (perm.policy,
u', '.join(u for u in perm.exceptions)))
def ns_perms_string(self, ns, group=False):
h = self.get_ns_perms_hash(ns)
for k in h:
if type(h[k]) in (int, long):
return ' (denied) '
s = []
owner = ns.split(u'/')[0]
r = h[u'read']
control = h[u'control']
writes = (h[u'create'], h[u'metadata'], h[u'delete'])
ownerRead = r.accessible(u'r', owner)
ownerWrites = [w.accessible(u'w', owner) for w in writes]
worldRead = r.isOpen(u'r')
worldWrites = [w.isOpen(u'w') for w in writes]
ownerWrite = write_status(ownerWrites)
worldWrite = write_status(worldWrites)
ownerControl = control.accessible(u'c', owner)
worldControl = control.isOpen(u'c')
groupRead = self.group_perm(u'r', r, owner)
groupWrite = combined_status([self.group_perm(u'w', w, owner)
for w in writes])
groupControl = self.group_perm(u'c', control, owner)
ps = u'n%s%s%s%s%s%s%s%s%s' % (ownerRead, ownerWrite, ownerControl,
groupRead, groupWrite, groupControl,
worldRead, worldWrite, worldControl)
if group:
return ps + u' ' + self.group_members(r, writes, owner)
else:
return ps
def group_members(self, r, writes, owner):
if r.isOpen(u'r') == u'r':
gr = u''.join((u'-%s' % e) for e in r.exceptions
if not e == owner)
else:
gr = u'+'.join(e for e in r.exceptions if not e == owner)
gw = []
for w in writes:
if w.isOpen(u'w') == u'w':
g = u''.join((u'-%s' % e) for e in w.exceptions
if not e == owner)
else:
g = u'+'.join(e for e in w.exceptions if not e == owner)
gw.append(g)
gw = gw[0] if all(w == gw[0] for w in gw) else u'<variable>'
if gw == u'':
gw = u'(world)'
if gr == u'':
gr = u'(world)'
if gw == gr:
gs = gr
else:
gs = u'r:%s w:%s' % (gr, gw)
return gs
def perms_string(self, tagOrNS, longer=False, group=False, longest=False):
tagOrNS = tagOrNS.strip()
if tagOrNS.endswith(u'/'):
if longer or longest:
p = FluidinfoPerms(self, u'/' + tagOrNS[:-1], isTag=False)
p.compressIfPoss = not longest
return unicode(p)
else:
return self.ns_perms_string(tagOrNS[:-1], group)
else:
if longer or longest:
p = FluidinfoPerms(self, u'/' + tagOrNS, isTag=True)
p.compressIfPoss = not longest
return unicode(p)
else:
try:
return self.tag_perms_string(tagOrNS, group)
except AttributeError:
raise PermissionsError(u'Forbidden (you don\'t appear to'
u' have control permission for %s)' % tagOrNS)
def full_perms(self, tagOrNS, longer, group=False, longest=False):
perms = self.perms_string(tagOrNS, longer, group, longest)
if longer or longest:
return u'\n%s:\n%s' % (tagOrNS, perms)
else:
return u'%s %s' % (perms, tagOrNS)
def set_raw_perm(self, entity, path, action, policy, exceptions):
assert entity in RAW_PERM_ENTITIES
perm = RAW_PERMS[entity]
assert action in perm.actions
assert policy in (u'open', u'closed')
assert not type(exceptions) in types.StringTypes # forgot the []
body = json.dumps({u'policy': policy, u'exceptions': exceptions})
path = u'/permissions/%s/%s' % (perm.path, path)
if self.debug:
self.Print(u'%s %s %s' % (path, body, action))
status, content = self.call(u'PUT', path, body, action=action)
return 0 if status == fishlib.STATUS.NO_CONTENT else status
def write_status(writes):
if all(w == u'w' for w in writes):
return u'w'
else:
return u'-' if all(w == u'-' for w in writes) else u'/'
def execute_ls_command(db, objs, tags, options, credentials, unixPaths=None):
long_ = options.long or options.group or options.longest
if len(tags) == 0:
tags = [(u'/' if db.unixStyle else u'') + db.credentials.username]
for tag in tags:
fulltag = db.abs_tag_path(tag, inPref=True)
if options.namespace or options.ns:
if db.ns_exists(fulltag):
if long_ or options.longer or options.longest:
nsResult = db.full_perms(fulltag[1:] + u'/',
options.longer, options.group,
options.longest)
else:
nsResult = fulltag
db.Print(nsResult)
else:
nsResult = u'Error status 404'
else:
nsResult = db.list_sorted_ns(fulltag[1:], long_=long_,
recurse=options.recurse, prnt=True,
longer=options.longer,
longest=options.longest)
tagExists = db.tag_exists(fulltag)
if nsResult == 'Error status 404':
if not tagExists:
db.Print(u'%s not found' % fulltag)
if tagExists:
if long_ or options.longer or options.longest:
db.Print(db.full_perms(fulltag[1:], options.longer,
options.group, options.longest))
else:
db.Print(tag)
def execute_rm_command(db, objs, tags, options, credentials, unixPaths=None):
if len(tags) == 0:
raise RemoveError('Remove what?')
failures = []
for tag in tags:
nsDone = tagDone = False
fulltag = db.abs_tag_path(tag, inPref=True)
if db.ns_exists(fulltag):
if options.recurse:
nsResult = db.rm_r(fulltag[1:])
else:
nsResult = db.delete_namespace(fulltag)
if nsResult:
raise RemoveError(fishlib.human_status(nsResult,
db.abs_tag_path(tag, True, True),
{412: '(probably not empty)'}))
nsDone = True
if db.tag_exists(fulltag):
if options.force or options.recurse:
db.untag_by_query(u'has %s' % fulltag[1:],
[fulltag[1:]])
else:
ids = db.query(u'has %s' % fulltag[1:])
if type(ids) in (int, long):
raise RemoveError(fishlib.human_status(ids))
elif len(ids) > 0:
raise RemoveError(u'%s tagged with %s:\n%s'
% (flags.Plural(len(ids),
'object').decode('UTF-8'),
db.abs_tag_path(tag, True, True),
u' Use -f to force removal'))
tagResult = db.delete_abstract_tag(fulltag)
if tagResult:
raise RemoveError(fishlib.human_status(tagResult,
db.abs_tag_path(tag, True, True)))
tagDone = True
if not nsDone and not tagDone:
failures.append(tag)
if failures:
if not options.force:
raise RemoveError(u'No tag or namespace found for: %s'
% u' '.join(failures))
def execute_chmod_command(db, objs, args, options, credentials, unixPaths=None):
db.warning('Use the perms command to change permissions.')
return
def execute_perms_command(db, objs, args, options, credentials, unixPaths=None):
if len(args) < 2:
db.Print(u'Form: perms SPEC list of tags and namespaces')
return
spec = args[0]
primitives = [u'write', u'read', u'control']
    valid_specs = [u'private', u'default', u'group', u'group-write',
                   u'group-read', u'lock', u'unlock'] + primitives
    if spec not in valid_specs:
        raise PermissionsError(u'perms must be followed by one of:\n%s'
                               % u', '.join(valid_specs))
isGroup = spec.startswith(u'group')
if isGroup:
group = args[1].split(u'+')
if len(args) < 3:
raise PermissionsError(
u'Group form: perms %s list+of+group+members list of tags '
u'and namespaces' % spec)
if spec in primitives:
        errorText = (u'%s form: perms %s [open|closed] '
                     u'[except list+of+group+members] '
                     u'list of tags and namespaces' % (spec, spec))
if len(args) < 3 or not args[1] in (u'open', u'closed'):
raise PermissionsError(errorText)
policy = args[1]
exceptions = args[3].split(u'+') if args[2] == u'except' else []
fullpaths = (db.abs_tag_path(t, inPref=True)
for t in args[2 + 2 * bool(exceptions):])
if options.extravals:
for v in options.extravals:
assert v in ALL_NAMES
else:
fullpaths = (db.abs_tag_path(t, inPref=True)
for t in args[1 + isGroup:])
for path in fullpaths:
done = False
owner = path.split(u'/')[1]
permsToSet = ALL_NAMES
for (exists, isTag) in ((db.tag_exists, True), (db.ns_exists, False)):
if exists(path):
inPerms = FluidinfoPerms(db, path, isTag=isTag,
getFromFI=False)
if spec == u'private':
inPerms.set_to_private()
elif spec == u'default':
inPerms.set_to_default()
elif spec == u'lock':
inPerms = FluidinfoPerms(db, path, isTag=isTag,
getFromFI=True)
inPerms.lock()
elif spec == u'unlock':
inPerms = FluidinfoPerms(db, path, isTag=isTag,
getFromFI=True)
inPerms.unlock()
elif isGroup:
inPerms = FluidinfoPerms(db, path, isTag=isTag,
getFromFI=True)
if spec in (u'group', u'group-read'):
inPerms.set_group_readable(group)
if spec in (u'group', u'group-write'):
inPerms.set_group_writable(group)
else: # if spec in primitives:
if spec == u'control':
permsToSet = CONTROL_NAMES
elif spec == u'write':
permsToSet = WRITE_NAMES
else:
permsToSet = READ_NAMES
for name in permsToSet:
if hasattr(inPerms, name):
inPerms.__dict__[name].policy = policy
inPerms.__dict__[name].exceptions = exceptions
inPerms.update_fluidinfo(db, permsToSet, options.force,
restrict_to=options.extravals,
verbose=options.verbose)
done = True
if not done:
db.Print('No tag or namespace %s found' % db.abs_tag_path(path,
outPref=True))
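# For reference, the invocation forms accepted above (restating the error
# messages in this function, not adding behaviour):
#   perms private|default|lock|unlock  <tags and namespaces>
#   perms group|group-read|group-write <members joined by '+'>  <tags and namespaces>
#   perms read|write|control open|closed [except <members joined by '+'>]  <tags and namespaces>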
|
|
# -*- coding: utf-8 -*-
import sys
from prody import *
import traceback
__name__ = "Models"
class Serializer():
def to_json(self,o):
return str(self)
class Journal(Serializer):
def __init__(self):
self.publisher = ""
self.reference = ""
self.title = ""
self.issn = ""
self.editors = []
self.authors = []
self.pmid = ""
def __repr__(self):
return "{'publisher':%r, 'reference':%r, 'title':%r, 'issn':%r, 'pmid':%r, 'editors':[%s], 'authors':[%s]}" % (self.publisher, self.reference, self.title, self.issn, self.pmid, ','.join(str(a) for a in self.editors), ','.join(str(a) for a in self.authors))
class PdbChemical(Serializer):
def __init__(self):
self.residueName = ""
self.chain = ""
self.residueNumber = 0
self.insertionCode = ""
self.numberOfAtoms = 0
self.description = ""
self.name = ""
self.synonyms = []
self.formula = ""
self.pdbentry = ""
def __repr__(self):
        return "{'residueName':%r, 'chain':%r, 'residueNumber':%d, 'insertionCode':%r, 'numberOfAtoms':%d, 'description':%r, 'name':%r, 'pdbentry':%r, 'formula':%r, 'synonyms':[%s]}" % (self.residueName, self.chain, self.residueNumber, self.insertionCode, self.numberOfAtoms, self.description, self.name, self.pdbentry, self.formula, ','.join(str(a) for a in self.synonyms))
class Compound(Serializer):
def __init__(self):
self.name = ""
self.fragment = ""
self.engineered = False
self.mutation = False
self.chid = ""
self.synonyms = []
self.ec = []
self.seqres = ""
self.pdbentry = ""
self.dbrefs = []
self.seqmod = []
def __repr__(self):
return "{'name':%r, 'fragment':%r, 'engineered':%s, 'mutation':%s, 'chid':%r, 'seqres':%r, 'pdbentry': %r, 'synonyms':[%s], 'ec':[%s], 'dbrefs':[%s], 'seqmod':[%s]}" % (self.name, self.fragment, self.engineered, self.mutation, self.chid, self.seqres, self.pdbentry, ','.join(str(a) for a in self.synonyms),','.join(str(a) for a in self.ec),','.join(str(a) for a in self.dbrefs),','.join(str(a) for a in self.seqmod))
class DbRef(Serializer):
def __init__(self):
self.accession = ""
self.database = ""
self.dbabbr = ""
self.idcode = ""
self.first = ()
self.last = ()
self.diff = []
def __repr__(self):
return "{'accession':%r, 'database':%r, 'dbabbr':%r, 'idcode':%r, 'first':(%s), 'last':(%s), 'diff':[%s]}" % (self.accession, self.database, self.dbabbr, self.idcode, ','.join(str(a) for a in self.first),','.join(str(a) for a in self.last),','.join(str(a) for a in self.diff))
class PdbAtom(Serializer):
def __init__(self):
self.serial = 0
self.name = ""
self.altloc = ""
self.resname = ""
self.chid = ""
self.resnum = 0
self.icode = ""
self.coords = []
self.occupancy = 0
self.beta = 0
self.element = ""
self.charge = ""
self.rectype = ""
self.acsindex = 0
def __repr__(self):
return "{'serial':%d, 'name':%r, 'altloc':%r, 'resname':%r, 'chid':%r, 'resnum':%d, 'icode':%r, 'occupancy': %d, 'beta':%d, 'element':%r, 'charge':%r, 'rectype':%r, 'acsindex':%d, 'coords':[%s]}" % (self.serial, self.name, self.altloc, self.resname, self.chid, self.resnum, self.icode, self.occupancy, self.beta, self.element, self.charge, self.rectype, self.acsindex, ','.join(str(a) for a in self.coords))
class PdbInfo(Serializer):
def __init__(self):
self.status = "Created"
self.identifier = ""
self.version = ""
self.classification = ""
self.deposition_date = ""
self.title = ""
self.resolution = 0
self.experiment = ""
self.space_group = ""
self.authors = []
self.journal = Journal()
self.chemicals = []
self.compounds = []
self.atomcoords = []
def __repr__(self):
return "{'identifier':%r, 'version':%r, 'classification':%r, 'deposition_date':%r, 'title':%r, 'resolution':%d, 'experiment':%r, 'space_group': %r, 'journal':%s, 'authors':[%s], 'chemicals':[%s], 'compounds':[%s], 'atomcoords':[%s]}" % (self.identifier, self.version, self.classification, self.deposition_date, self.title, self.resolution, self.experiment, self.space_group, self.journal, ','.join(str(a) for a in self.authors),','.join(str(a) for a in self.chemicals),','.join(str(a) for a in self.compounds),','.join(str(a) for a in self.atomcoords))
def isCreated(self):
return (self.status == "Created")
def importData(self, atoms, header):
#self.status = 'Not Imported'
try:
if self.isCreated() and 'identifier' in header:
self.identifier = header["identifier"]
self.version = header["version"]
self.classification = header["classification"]
self.deposition_date = header["deposition_date"]
self.title = header["title"]
self.resolution = header["resolution"]
self.experiment = header["experiment"]
self.space_group = header["space_group"]
if header["authors"]:
self.authors.extend(header["authors"])
self.journal.publisher = header["reference"]["publisher"]
self.journal.reference = header["reference"]["reference"]
self.journal.title = header["reference"]["title"]
if header["reference"]["editors"]:
self.journal.editors.extend(header["reference"]["editors"])
if header["reference"]["authors"]:
self.journal.authors.extend(header["reference"]["authors"])
if 'issn' in header["reference"]:
self.journal.issn = header["reference"]["issn"]
if "pmid" in header["reference"]:
self.journal.pmid = header["reference"]["pmid"]
if header["chemicals"]:
for chem_item in header["chemicals"]:
item = PdbChemical()
item.residueName = chem_item.resname
item.residueNumber = chem_item.resnum
item.name = chem_item.name
item.chain = chem_item.chain
item.insertionCode = chem_item.icode
item.numberOfAtoms = chem_item.natoms
item.description = chem_item.description
if chem_item.synonyms:
item.synonyms.extend(chem_item.synonyms)
item.formula = chem_item.formula
item.pdbentry = chem_item.pdbentry
self.chemicals.append(item)
if header["polymers"]:
for pol_item in header["polymers"]:
itemC = Compound()
itemC.chid = pol_item.chid
itemC.fragment = pol_item.fragment
itemC.name = pol_item.name
itemC.pdbentry = pol_item.pdbentry
itemC.seqres = pol_item.sequence
if pol_item.synonyms:
itemC.synonyms.extend(pol_item.synonyms)
if pol_item.modified:
itemC.seqmod.extend(pol_item.modified)
itemC.engineered = pol_item.engineered
itemC.mutation = pol_item.mutation
if pol_item.ec:
itemC.ec.extend(pol_item.ec)
if pol_item.dbrefs:
for db_item in pol_item.dbrefs:
itemD = DbRef()
itemD.accession = db_item.accession
itemD.database = db_item.database
itemD.dbabbr = db_item.dbabbr
itemD.idcode = db_item.idcode
itemD.first = db_item.first
itemD.last = db_item.last
if db_item.diff:
itemD.diff.extend(db_item.diff)
itemC.dbrefs.append(itemD)
self.compounds.append(itemC)
if atoms:
for itemA in iter(atoms):
at = PdbAtom()
at.acsindex = itemA.getACSIndex()
at.serial = itemA.getSerial()
at.name = itemA.getName()
at.altloc = itemA.getAltloc()
at.resname = itemA.getResname()
at.chid = itemA.getChid()
at.resnum = itemA.getResnum()
at.icode = itemA.getIcode()
at.coords = itemA.getCoords()
at.occupancy = itemA.getOccupancy()
at.beta = itemA.getBeta()
at.element = itemA.getElement()
at.charge = itemA.getCharge()
if at.resname in ('NAD','HOH'):
at.rectype = 'HETATM'
else:
at.rectype = 'ATOM'
self.atomcoords.append(at)
self.status = "Imported"
        except Exception:
self.status = 'Not Imported'
print(traceback.format_exc())
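# Illustrative usage sketch (an assumption, not part of the original module):
# the 'header' dict consumed by PdbInfo.importData() has the shape produced
# by prody.parsePDB(..., header=True), so a typical driver would look like:
#
#   atoms, header = parsePDB('1ubi', header=True)
#   info = PdbInfo()
#   info.importData(atoms, header)
#   print(info.status)   # "Imported" on success, "Not Imported" on error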
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
These tests assume that the quickstart database exists.
To create it run /opt/nuodb/run-quickstart or use the web console.
"""
import unittest
import decimal
from .nuodb_base import NuoBase
class NuoDBStatementManagementTest(NuoBase):
def test_stable_statement(self):
con = self._connect()
cursor = con.cursor()
init_handle = extract_statement_handle(cursor)
cursor.execute("drop table typetest if exists")
try:
cursor.execute("create table typetest (id integer GENERATED ALWAYS AS IDENTITY, smallint_col smallint, "
"integer_col integer, bigint_col bigint, numeric_col numeric(10, 2), "
"decimal_col decimal(10, 2), number_col number, double_col double precision)")
cursor.execute("insert into typetest (smallint_col, integer_col, bigint_col, numeric_col, decimal_col, "
"number_col, double_col) values (0, 0, 0, 0, 0, 0, 0)")
con.commit()
cursor.execute("select * from typetest order by id desc limit 1")
row = cursor.fetchone()
for i in range(1, len(row)):
self.assertEqual(row[i], 0)
current_handle = extract_statement_handle(cursor)
self.assertEqual(init_handle, current_handle)
finally:
try:
cursor.execute("drop table typetest if exists")
finally:
con.close()
def test_statement_per_cursor(self):
con = self._connect()
try:
cursor1 = con.cursor()
cursor2 = con.cursor()
cursor3 = con.cursor()
self.assertNotEqual(extract_statement_handle(cursor1), extract_statement_handle(cursor2))
self.assertNotEqual(extract_statement_handle(cursor2), extract_statement_handle(cursor3))
self.assertNotEqual(extract_statement_handle(cursor1), extract_statement_handle(cursor3))
finally:
con.close()
def test_prepared_statement_cache(self):
con = self._connect()
cursor = con.cursor()
cursor.execute("drop table typetest if exists")
try:
cursor.execute("create table typetest (id integer GENERATED ALWAYS AS IDENTITY, smallint_col smallint, "
"integer_col integer, bigint_col bigint, numeric_col numeric(10, 2), "
"decimal_col decimal(10, 2), number_col number, double_col double)")
test_vals = (3424, 23453464, 45453453454545, decimal.Decimal('234355.33'), decimal.Decimal('976.2'),
decimal.Decimal('34524584057.3434234'), 10000.999)
query = "insert into typetest (smallint_col, integer_col, bigint_col, numeric_col, decimal_col, " \
"number_col, double_col) values (?, ?, ?, ?, ?, ?, ?)"
cursor.execute(query, test_vals)
cursor.execute("select * from typetest order by id desc limit 1")
row = cursor.fetchone()
for i in range(1, len(row)):
self.assertEqual(row[i], test_vals[i - 1])
ps_cache = extract_prepared_statement_dict(cursor)
self.assertEqual(1, len(ps_cache))
self.assertIn(query, ps_cache)
finally:
try:
cursor.execute("drop table typetest if exists")
finally:
con.close()
def test_prepared_statement_cache_should_not_grow(self):
con = self._connect()
cursor = con.cursor()
cursor.execute("drop table typetest if exists")
try:
cursor.execute("create table typetest (id integer GENERATED ALWAYS AS IDENTITY, smallint_col smallint, "
"integer_col integer, bigint_col bigint, numeric_col numeric(10, 2), "
"decimal_col decimal(10, 2), number_col number, double_col double)")
test_vals = (3424, 23453464, 45453453454545, decimal.Decimal('234355.33'), decimal.Decimal('976.2'),
decimal.Decimal('34524584057.3434234'), 10000.999)
query = "insert into typetest (smallint_col, integer_col, bigint_col, numeric_col, decimal_col, " \
"number_col, double_col) values (?, ?, ?, ?, ?, ?, ?)"
for _ in range(0, 20):
cursor.execute(query, test_vals)
cursor.execute("select * from typetest order by id desc limit 1")
row = cursor.fetchone()
for i in range(1, len(row)):
self.assertEqual(row[i], test_vals[i - 1])
ps_cache = extract_prepared_statement_dict(cursor)
self.assertEqual(1, len(ps_cache))
self.assertIn(query, ps_cache)
finally:
try:
cursor.execute("drop table typetest if exists")
finally:
con.close()
def test_prepared_statement_cache_stable(self):
con = self._connect()
cursor = con.cursor()
cursor.execute("drop table typetest if exists")
try:
cursor.execute("create table typetest (id integer GENERATED ALWAYS AS IDENTITY, smallint_col smallint, "
"integer_col integer, bigint_col bigint, numeric_col numeric(10, 2), "
"decimal_col decimal(10, 2), number_col number, double_col double)")
test_vals = (3424, 23453464, 45453453454545, decimal.Decimal('234355.33'), decimal.Decimal('976.2'),
decimal.Decimal('34524584057.3434234'), 10000.999)
query = "insert into typetest (smallint_col, integer_col, bigint_col, numeric_col, decimal_col, " \
"number_col, double_col) values (?, ?, ?, ?, ?, ?, ?)"
handle = None
for _ in range(0, 20):
cursor.execute(query, test_vals)
cursor.execute("select * from typetest order by id desc limit 1")
row = cursor.fetchone()
for i in range(1, len(row)):
self.assertEqual(row[i], test_vals[i - 1])
ps_cache = extract_prepared_statement_dict(cursor)
self.assertEqual(1, len(ps_cache))
self.assertIn(query, ps_cache)
if handle is None:
handle = ps_cache[query].handle
else:
self.assertEqual(handle, ps_cache[query].handle)
finally:
try:
cursor.execute("drop table typetest if exists")
finally:
con.close()
def test_prepared_statement_cache_should_grow(self):
con = self._connect()
cursor = con.cursor()
cursor.execute("drop table typetest if exists")
try:
cursor.execute("create table typetest (id integer GENERATED ALWAYS AS IDENTITY, smallint_col smallint, "
"integer_col integer, bigint_col bigint, numeric_col numeric(10, 2), "
"decimal_col decimal(10, 2), number_col number, double_col double)")
test_vals = (3424, 23453464, 45453453454545, decimal.Decimal('234355.33'), decimal.Decimal('976.2'),
decimal.Decimal('34524584057.3434234'), 10000.999)
queries = ["insert into typetest (smallint_col, integer_col, bigint_col, numeric_col, decimal_col, "
"number_col, double_col) values (?, ?, ?, ?, ?, ?, ?)",
"insert into typetest (smallint_col, integer_col, bigint_col, numeric_col, decimal_col, "
"number_col) values (?, ?, ?, ?, ?, ?)",
"insert into typetest (smallint_col, integer_col, bigint_col, numeric_col, decimal_col) values "
"(?, ?, ?, ?, ?)",
"insert into typetest (smallint_col, integer_col, bigint_col, numeric_col) values (?, ?, ?, ?)",
"insert into typetest (smallint_col, integer_col, bigint_col) values (?, ?, ?)",
"insert into typetest (smallint_col, integer_col) values (?, ?)",
"insert into typetest (smallint_col) values (?)"]
for _ in range(0, 10):
for i in range(0, len(queries)):
cursor.execute(queries[i], test_vals[0:len(queries) - i])
ps_cache = extract_prepared_statement_dict(cursor)
self.assertEqual(len(queries), len(ps_cache))
for query in queries:
self.assertIn(query, ps_cache)
finally:
try:
cursor.execute("drop table typetest if exists")
finally:
con.close()
def test_prepared_statement_cache_eviction(self):
con = self._connect()
cache_size = 5
cursor = con.cursor(prepared_statement_cache_size=cache_size)
cursor.execute("drop table typetest if exists")
try:
cursor.execute("create table typetest (id integer GENERATED ALWAYS AS IDENTITY, smallint_col smallint, "
"integer_col integer, bigint_col bigint, numeric_col numeric(10, 2), "
"decimal_col decimal(10, 2), number_col number, double_col double)")
test_vals = (3424, 23453464, 45453453454545, decimal.Decimal('234355.33'), decimal.Decimal('976.2'),
decimal.Decimal('34524584057.3434234'), 10000.999)
queries = ["insert into typetest (smallint_col, integer_col, bigint_col, numeric_col, decimal_col, "
"number_col, double_col) values (?, ?, ?, ?, ?, ?, ?)",
"insert into typetest (smallint_col, integer_col, bigint_col, numeric_col, decimal_col, "
"number_col) values (?, ?, ?, ?, ?, ?)",
"insert into typetest (smallint_col, integer_col, bigint_col, numeric_col, decimal_col) values "
"(?, ?, ?, ?, ?)",
"insert into typetest (smallint_col, integer_col, bigint_col, numeric_col) values (?, ?, ?, ?)",
"insert into typetest (smallint_col, integer_col, bigint_col) values (?, ?, ?)",
"insert into typetest (smallint_col, integer_col) values (?, ?)",
"insert into typetest (smallint_col) values (?)"]
for i in range(0, len(queries)):
cursor.execute(queries[i], test_vals[0:len(queries) - i])
ps_cache = extract_prepared_statement_dict(cursor)
self.assertEqual(cache_size, len(ps_cache))
for query in queries[len(queries) - cache_size:]:
self.assertIn(query, ps_cache)
finally:
try:
cursor.execute("drop table typetest if exists")
finally:
con.close()
def test_prepared_statement_cache_eviction_lru(self):
con = self._connect()
cache_size = 5
cursor = con.cursor(prepared_statement_cache_size=cache_size)
cursor.execute("drop table typetest if exists")
try:
cursor.execute("create table typetest (id integer GENERATED ALWAYS AS IDENTITY, smallint_col smallint, "
"integer_col integer, bigint_col bigint, numeric_col numeric(10, 2), "
"decimal_col decimal(10, 2), number_col number, double_col double)")
test_vals = (3424, 23453464, 45453453454545, decimal.Decimal('234355.33'), decimal.Decimal('976.2'),
decimal.Decimal('34524584057.3434234'), 10000.999)
queries = ["insert into typetest (smallint_col, integer_col, bigint_col, numeric_col, decimal_col, "
"number_col, double_col) values (?, ?, ?, ?, ?, ?, ?)",
"insert into typetest (smallint_col, integer_col, bigint_col, numeric_col, decimal_col, "
"number_col) values (?, ?, ?, ?, ?, ?)",
"insert into typetest (smallint_col, integer_col, bigint_col, numeric_col, decimal_col) values "
"(?, ?, ?, ?, ?)",
"insert into typetest (smallint_col, integer_col, bigint_col, numeric_col) values (?, ?, ?, ?)",
"insert into typetest (smallint_col, integer_col, bigint_col) values (?, ?, ?)",
"insert into typetest (smallint_col, integer_col) values (?, ?)",
"insert into typetest (smallint_col) values (?)"]
query_order = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 5, 5, 3, 1, 4, 6, 1, 5, 4, 5, 2, 3, 1, 5, 6, 4, 3,
6, 1, 5, 6, 1, 6, 3, 1, 2, 1, 1]
for i in query_order:
cursor.execute(queries[i], test_vals[0:len(queries) - i])
ps_cache = extract_prepared_statement_dict(cursor)
self.assertEqual(cache_size, len(ps_cache))
for query in [queries[1], queries[2], queries[3], queries[5], queries[6]]:
self.assertIn(query, ps_cache)
for query in [queries[0], queries[4]]:
self.assertNotIn(query, ps_cache)
finally:
try:
cursor.execute("drop table typetest if exists")
finally:
con.close()
def extract_statement_handle(cursor):
return cursor._statement_cache._statement.handle
def extract_prepared_statement_dict(cursor):
return cursor._statement_cache._ps_cache
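# Note: the two helpers above deliberately reach into the driver's private
# cursor internals (cursor._statement_cache) so the tests can make white-box
# assertions about statement reuse and prepared-statement caching; they are
# not part of the public DB-API surface.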
if __name__ == '__main__':
unittest.main()
|
|
# Word2Vec: Skipgram Model
#---------------------------------------
#
# In this example, we will download and preprocess the movie
# review data.
#
# From this data set we will compute/fit the skipgram model of
# the Word2Vec Algorithm
#
# Skipgram: based on predicting the surrounding words from a single
#   center word.
# Ex sentence "the cat in the hat"
#   center word: ["hat"]
#   surrounding words: ["the", "cat", "in", "the"]
#   (center, surrounding) pairs:
#   ("hat", "the"), ("hat", "cat"), ("hat", "in"), ("hat", "the")
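# A minimal, dependency-free sketch (illustration only, separate from the
# model code below): for the example sentence and a window of 2, this is how
# the (center, surrounding) pairs listed above come about.
_example_words = "the cat in the hat".split()
_example_window = 2
_example_pairs = []
for _ix in range(len(_example_words)):
    _surrounding = (_example_words[max(_ix - _example_window, 0):_ix] +
                    _example_words[_ix + 1:_ix + _example_window + 1])
    _example_pairs.extend((_example_words[_ix], _w) for _w in _surrounding)
# For the last word this yields ("hat", "in") and ("hat", "the").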
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
import random
import os
import string
import requests
import collections
import io
import tarfile
import urllib.request
from nltk.corpus import stopwords
from tensorflow.python.framework import ops
ops.reset_default_graph()
os.chdir(os.path.dirname(os.path.realpath(__file__)))
# Start a graph session
sess = tf.Session()
# Declare model parameters
batch_size = 50
embedding_size = 200
vocabulary_size = 10000
generations = 50000
print_loss_every = 500
num_sampled = int(batch_size/2) # Number of negative examples to sample.
window_size = 2 # How many words to consider left and right.
# Declare stop words
stops = stopwords.words('english')
# We pick five test words. We are expecting synonyms to appear
print_valid_every = 2000
valid_words = ['cliche', 'love', 'hate', 'silly', 'sad']
# Later we will have to transform these into indices
# Load the movie review data
# Check if data was downloaded, otherwise download it and save for future use
def load_movie_data():
save_folder_name = 'temp'
pos_file = os.path.join(save_folder_name, 'rt-polarity.pos')
neg_file = os.path.join(save_folder_name, 'rt-polarity.neg')
# Check if files are already downloaded
if os.path.exists(save_folder_name):
pos_data = []
with open(pos_file, 'r') as temp_pos_file:
for row in temp_pos_file:
pos_data.append(row)
neg_data = []
with open(neg_file, 'r') as temp_neg_file:
for row in temp_neg_file:
neg_data.append(row)
else: # If not downloaded, download and save
movie_data_url = 'http://www.cs.cornell.edu/people/pabo/movie-review-data/rt-polaritydata.tar.gz'
stream_data = urllib.request.urlopen(movie_data_url)
tmp = io.BytesIO()
while True:
s = stream_data.read(16384)
if not s:
break
tmp.write(s)
stream_data.close()
tmp.seek(0)
tar_file = tarfile.open(fileobj=tmp, mode="r:gz")
pos = tar_file.extractfile('rt-polaritydata/rt-polarity.pos')
neg = tar_file.extractfile('rt-polaritydata/rt-polarity.neg')
# Save pos/neg reviews
pos_data = []
for line in pos:
pos_data.append(line.decode('ISO-8859-1').encode('ascii',errors='ignore').decode())
neg_data = []
for line in neg:
neg_data.append(line.decode('ISO-8859-1').encode('ascii',errors='ignore').decode())
tar_file.close()
# Write to file
if not os.path.exists(save_folder_name):
os.makedirs(save_folder_name)
# Save files
with open(pos_file, "w") as pos_file_handler:
pos_file_handler.write(''.join(pos_data))
with open(neg_file, "w") as neg_file_handler:
neg_file_handler.write(''.join(neg_data))
texts = pos_data + neg_data
target = [1]*len(pos_data) + [0]*len(neg_data)
return(texts, target)
texts, target = load_movie_data()
# Normalize text
def normalize_text(texts, stops):
# Lower case
texts = [x.lower() for x in texts]
# Remove punctuation
texts = [''.join(c for c in x if c not in string.punctuation) for x in texts]
# Remove numbers
texts = [''.join(c for c in x if c not in '0123456789') for x in texts]
# Remove stopwords
texts = [' '.join([word for word in x.split() if word not in (stops)]) for x in texts]
# Trim extra whitespace
texts = [' '.join(x.split()) for x in texts]
return(texts)
texts = normalize_text(texts, stops)
# Texts must contain at least 3 words
target = [target[ix] for ix, x in enumerate(texts) if len(x.split()) > 2]
texts = [x for x in texts if len(x.split()) > 2]
# Build dictionary of words
def build_dictionary(sentences, vocabulary_size):
# Turn sentences (list of strings) into lists of words
split_sentences = [s.split() for s in sentences]
words = [x for sublist in split_sentences for x in sublist]
# Initialize list of [word, word_count] for each word, starting with unknown
count = [['RARE', -1]]
# Now add most frequent words, limited to the N-most frequent (N=vocabulary size)
count.extend(collections.Counter(words).most_common(vocabulary_size-1))
# Now create the dictionary
word_dict = {}
    # For each word that we want in the dictionary, add it and assign it the
    # next index (the current dictionary length)
for word, word_count in count:
word_dict[word] = len(word_dict)
return(word_dict)
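# Note: because the count list starts with ['RARE', -1], the unknown-word
# bucket always receives index 0, and the most frequent corpus words get the
# smallest remaining indices, capped at vocabulary_size entries in total.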
# Turn text data into lists of integers from dictionary
def text_to_numbers(sentences, word_dict):
# Initialize the returned data
data = []
for sentence in sentences:
sentence_data = []
        # For each word, either use the selected index or the rare-word index
        for word in sentence.split():
if word in word_dict:
word_ix = word_dict[word]
else:
word_ix = 0
sentence_data.append(word_ix)
data.append(sentence_data)
return(data)
# Build our data set and dictionaries
word_dictionary = build_dictionary(texts, vocabulary_size)
word_dictionary_rev = dict(zip(word_dictionary.values(), word_dictionary.keys()))
text_data = text_to_numbers(texts, word_dictionary)
# Get validation word keys
valid_examples = [word_dictionary[x] for x in valid_words]
# Generate data randomly (N words behind, target, N words ahead)
def generate_batch_data(sentences, batch_size, window_size, method='skip_gram'):
# Fill up data batch
batch_data = []
label_data = []
while len(batch_data) < batch_size:
# select random sentence to start
rand_sentence = np.random.choice(sentences)
# Generate consecutive windows to look at
window_sequences = [rand_sentence[max((ix-window_size),0):(ix+window_size+1)] for ix, x in enumerate(rand_sentence)]
# Denote which element of each window is the center word of interest
label_indices = [ix if ix<window_size else window_size for ix,x in enumerate(window_sequences)]
# Pull out center word of interest for each window and create a tuple for each window
if method=='skip_gram':
batch_and_labels = [(x[y], x[:y] + x[(y+1):]) for x,y in zip(window_sequences, label_indices)]
            # Make it into a big list of tuples (target word, surrounding word)
tuple_data = [(x, y_) for x,y in batch_and_labels for y_ in y]
elif method=='cbow':
batch_and_labels = [(x[:y] + x[(y+1):], x[y]) for x,y in zip(window_sequences, label_indices)]
            # Make it into a big list of tuples (surrounding word, target word)
tuple_data = [(x_, y) for x,y in batch_and_labels for x_ in x]
else:
            raise ValueError('Method {} not implemented yet.'.format(method))
# extract batch and labels
batch, labels = [list(x) for x in zip(*tuple_data)]
batch_data.extend(batch[:batch_size])
label_data.extend(labels[:batch_size])
# Trim batch and label at the end
batch_data = batch_data[:batch_size]
label_data = label_data[:batch_size]
# Convert to numpy array
batch_data = np.array(batch_data)
label_data = np.transpose(np.array([label_data]))
return(batch_data, label_data)
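# Shape note: batch_data comes back as a 1-D array of center-word indices
# with shape (batch_size,), and label_data as a column vector of surrounding-
# word indices with shape (batch_size, 1), matching the x_inputs and y_target
# placeholders declared below.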
# Define Embeddings:
embeddings = tf.Variable(tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
# NCE loss parameters
nce_weights = tf.Variable(tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / np.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Create data/target placeholders
x_inputs = tf.placeholder(tf.int32, shape=[batch_size])
y_target = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Lookup the word embedding:
embed = tf.nn.embedding_lookup(embeddings, x_inputs)
# Get loss from prediction
loss = tf.reduce_mean(tf.nn.nce_loss(nce_weights, nce_biases, embed, y_target,
num_sampled, vocabulary_size))
# Create optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0).minimize(loss)
# Cosine similarity between words
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
#Add variable initializer.
init = tf.initialize_all_variables()
sess.run(init)
# Run the skip gram model.
loss_vec = []
loss_x_vec = []
for i in range(generations):
batch_inputs, batch_labels = generate_batch_data(text_data, batch_size, window_size)
feed_dict = {x_inputs : batch_inputs, y_target : batch_labels}
# Run the train step
sess.run(optimizer, feed_dict=feed_dict)
# Return the loss
if (i+1) % print_loss_every == 0:
loss_val = sess.run(loss, feed_dict=feed_dict)
loss_vec.append(loss_val)
loss_x_vec.append(i+1)
print("Loss at step {} : {}".format(i+1, loss_val))
    # Validation: print the validation words and their top 5 nearest words
if (i+1) % print_valid_every == 0:
sim = sess.run(similarity, feed_dict=feed_dict)
for j in range(len(valid_words)):
valid_word = word_dictionary_rev[valid_examples[j]]
top_k = 5 # number of nearest neighbors
nearest = (-sim[j, :]).argsort()[1:top_k+1]
log_str = "Nearest to {}:".format(valid_word)
for k in range(top_k):
close_word = word_dictionary_rev[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
|
|
"""
<Program Name>
test_signercli.py
<Author>
Konstantin Andrianov
<Started>
September 20, 2012
<Copyright>
See LICENSE for licensing information.
<Purpose>
  test_signercli.py provides a collection of methods that try to test all the
  units (methods) of the module under test.
  The unittest_toolbox module was created to provide additional testing tools
  for tuf's modules. For more info see unittest_toolbox.py.
<Methodology>
  Unit tests must follow a specific structure, i.e. independent methods should
  be tested prior to dependent methods. More accurately: less dependent
  methods are tested before more dependent methods. There is no reason to
  rewrite or construct other methods that replicate already-tested methods
  solely for testing purposes. This is possible because the
  'unittest.TestCase' class guarantees the order of unit tests, so that
  'test_something_A' is tested before 'test_something_B'. To enforce this
  structure, a number is placed after 'test' and before the method's name,
  like so: 'test_1_check_directory'. The number is a measure of dependence,
  where 1 is less dependent than 2.
"""
import os
import tuf.util
import tuf.repo.keystore as keystore
import tuf.repo.signerlib as signerlib
# Module to test: signercli.py
import tuf.repo.signercli as signercli
# Helper module unittest_toolbox.py
import tuf.tests.unittest_toolbox as unittest_toolbox
# Populating 'rsa_keystore' and 'rsa_passwords' dictionaries.
# We will need them in creating keystore directory.
unittest_toolbox.Modified_TestCase.bind_keys_to_roles()
class TestSignercli(unittest_toolbox.Modified_TestCase):
# HELPER METHODS.
# Generic patch for signerlib._prompt().
def mock_prompt(self, output):
# Method to patch signercli._prompt().
def _mock_prompt(junk1, junk2, ret=output):
return ret
# Patch signercli._prompt()
signercli._prompt = _mock_prompt
# Patch signercli._get_metadata_directory()
def mock_get_metadata_directory(self, directory=None):
# Create method to patch signercli._get_metadata_directory()
def _mock_get_meta_dir(directory=directory):
# If directory was specified, return that directory.
if directory:
return directory
# Else create a temporary directory and return it.
else:
return self.make_temp_directory()
# Patch signercli._get_metadata_directory()
signercli._get_metadata_directory = _mock_get_meta_dir
  # This method patches the signercli._prompt() calls that are made from the
  # make_role_metadata methods (e.g., tuf.signercli.make_root_metadata()).
def make_metadata_mock_prompts(self, targ_dir, conf_path):
def _mock_prompt(msg, junk):
if msg.startswith('\nEnter the directory containing the target'):
return targ_dir
elif msg.startswith('\nEnter the configuration file path'):
return conf_path
else:
error_msg = ('Prompt: '+'\''+msg[1:]+'\''+
' did not match any predefined mock prompts.')
self.fail(error_msg)
# Patch signercli._prompt().
signercli._prompt = _mock_prompt
  # This mock method can easily be modified by altering unittest_toolbox's
  # dictionaries.  For instance, if you want to modify the password for a
  # certain keyid, just save the existing 'self.rsa_passwords[keyid]', set it
  # to some other value like self.random_string(), and after the test reassign
  # the saved value back to 'self.rsa_passwords[keyid]'.
def get_passwords(self):
# Mock '_get_password' method.
def _mock_get_password(msg):
for role in self.role_list:
if msg.startswith('\nEnter the password for the '+role):
for keyid in self.semi_roledict[role]['keyids']:
if msg.endswith(keyid+'): '):
return self.rsa_passwords[keyid]
error_msg = ('Prompt: '+'\''+msg+'\''+
' did not match any predefined mock prompts.')
raise tuf.Error(error_msg)
# Monkey patch '_prompt'.
signercli._get_password = _mock_get_password
# UNIT TESTS.
  # If a unit test name starts with 'test_#' followed by two underscores
  # (test_#__method), it tests an internal method of signercli.
  # For instance, the unit test for signercli._get_password() looks like
  # this: test_1__get_password, whereas the unit test for
  # signercli.change_password() looks like this: test_3_change_password().
def test_1__check_directory(self):
# SETUP
directory = self.make_temp_directory()
no_such_dir = self.random_path()
# TESTS
# Test: normal case.
self.assertEqual(signercli._check_directory(directory), directory)
# Test: invalid directory.
self.assertRaises(tuf.RepositoryError, signercli._check_directory,
no_such_dir)
# Test: invalid directory type.
self.assertRaises(tuf.RepositoryError, signercli._check_directory,
[no_such_dir])
self.assertRaises(tuf.RepositoryError, signercli._check_directory,
1234)
self.assertRaises(tuf.RepositoryError, signercli._check_directory,
{'directory':no_such_dir})
def test_1__get_password(self):
# SETUP
password = self.random_string()
def _mock_getpass(junk1, junk2, pw=password):
return pw
# Patch getpass.getpass().
signercli.getpass.getpass = _mock_getpass
# Test: normal case.
self.assertEqual(signercli._get_password(), password)
def test_2__get_metadata_directory(self):
# SETUP
meta_directory = self.make_temp_directory()
self.mock_prompt(meta_directory)
# TESTS
self.assertEqual(signercli._get_metadata_directory(), meta_directory)
self.assertTrue(os.path.exists(signercli._get_metadata_directory()))
self.mock_prompt(self.random_string())
self.assertRaises(tuf.RepositoryError, signercli._get_metadata_directory)
self.mock_prompt([self.random_string()])
self.assertRaises(tuf.RepositoryError, signercli._get_metadata_directory)
def test_1__list_keyids(self):
# SETUP
keystore_dir = self.create_temp_keystore_directory()
# TESTS
# Test: normal case.
try:
signercli._list_keyids(keystore_dir)
except Exception, e:
self.fail(str(e))
def test_2__get_keyids(self):
# SETUP
# Create a temp keystore directory.
keystore_dir = self.create_temp_keystore_directory()
# List of keyids including keyword 'quit'.
keyids = ['quit'] + self.rsa_keyids
# Patching signercli._prompt()
def _mock_prompt(msg, junk):
      # Pop 'keyids' every time signercli._prompt() is called.
keyid = keyids.pop()
if keyid != 'quit':
get_password(keyid)
return keyid
signercli._prompt = _mock_prompt
    # Patching signercli._get_password().
def get_password(keyid):
password = self.rsa_passwords[keyid]
def _mock_get_password(msg):
return password
signercli._get_password = _mock_get_password
# TESTS
# Test: normal case.
try:
loaded_keyids = signercli._get_keyids(keystore_dir)
    except Exception, e:
      self.fail(str(e))
    # Check if all the keyids were loaded.
for keyid in self.rsa_keyids:
if keyid not in loaded_keyids:
msg = '\nCould not load the keyid: '+repr(keyid)
self.fail(msg)
# Test: invalid password.
keyids = ['quit', self.rsa_keyids[0]]
    saved_pw = self.rsa_passwords[self.rsa_keyids[0]]
# Invalid password
self.rsa_passwords[self.rsa_keyids[0]] = self.random_string()
self.assertEqual(signercli._get_keyids(keystore_dir), [])
# Restore the password.
self.rsa_passwords[self.rsa_keyids[0]] = saved_pw
# Test: invalid keyid.
keyid = self.random_string()
keyids = ['quit', keyid]
# Create an entry in the passwords dictionary.
self.rsa_passwords[keyid] = self.random_string()
self.assertEqual(signercli._get_keyids(keystore_dir), [])
# Restore passwords dictionary.
del self.rsa_passwords[keyid]
def test_2__get_all_config_keyids(self):
# SETUP
# Create temp directory for config file.
config_dir = self.make_temp_directory()
# Build config file.
config_filepath = signerlib.build_config_file(config_dir, 365,
self.semi_roledict)
# Create a temp keystore directory.
keystore_dir = self.create_temp_keystore_directory()
# 'sample_keyid' used to test invalid keyid.
sample_keyid = self.rsa_keyids[0]
# Patch signercli._get_password()
self.get_passwords()
# TESTS
# Test: an incorrect password.
saved_pw = self.rsa_passwords[sample_keyid]
self.rsa_passwords[sample_keyid] = self.random_string()
self.assertRaises(tuf.Error, signercli._get_all_config_keyids,
config_filepath, keystore_dir)
# Restore the password.
self.rsa_passwords[sample_keyid] = saved_pw
# Test: missing top-level role in the config file.
# Clear keystore's dictionaries.
keystore.clear_keystore()
# Remove a role from 'semi_roledict' which is used to construct
# config file.
self.semi_roledict['targets_holder'] = self.semi_roledict['targets']
del self.semi_roledict['targets']
# Build config file without 'targets' role.
config_filepath = signerlib.build_config_file(config_dir, 365,
self.semi_roledict)
self.assertRaises(tuf.Error, signercli._get_all_config_keyids,
config_filepath, keystore_dir)
# Rebuild config file and 'semi_roledict'.
self.semi_roledict['targets'] = self.semi_roledict['targets_holder']
del self.semi_roledict['targets_holder']
config_filepath = signerlib.build_config_file(config_dir, 365,
self.semi_roledict)
# Test: non-existing config file path.
keystore.clear_keystore()
self.assertRaises(tuf.Error, signercli._get_all_config_keyids,
self.random_path(), keystore_dir)
# Test: normal case.
keystore.clear_keystore()
try:
signercli._get_all_config_keyids(config_filepath, keystore_dir)
except Exception, e:
self.fail(str(e))
def test_2__get_role_config_keyids(self):
# SETUP
# Create temp directory for config file.
config_dir = self.make_temp_directory()
# Build a config file.
config_filepath = signerlib.build_config_file(config_dir, 365,
self.semi_roledict)
# Create a temp keystore directory.
keystore_dir = self.create_temp_keystore_directory()
# Patch '_get_password' method.
self.get_passwords()
# TESTS
for role in self.role_list:
# Test: normal cases.
keystore.clear_keystore()
try:
signercli._get_role_config_keyids(config_filepath,
keystore_dir, role)
except Exception, e:
self.fail(str(e))
# Test: incorrect passwords.
keystore.clear_keystore()
role_keyids = self.semi_roledict[role]['keyids']
for keyid in role_keyids:
saved_pw = self.rsa_passwords[keyid]
self.rsa_passwords[keyid] = self.random_string()
self.assertRaises(tuf.Error, signercli._get_role_config_keyids,
config_filepath, keystore_dir, role)
# Restore the password.
self.rsa_passwords[keyid] = saved_pw
# Test: non-existing config file path.
keystore.clear_keystore()
self.assertRaises(tuf.Error, signercli._get_role_config_keyids,
self.random_path(), keystore_dir, 'release')
# Test: non-existing role.
keystore.clear_keystore()
self.assertRaises(tuf.Error, signercli._get_role_config_keyids,
config_filepath, keystore_dir, 'no_such_role')
def test_1__sign_and_write_metadata(self):
# SETUP
# Role to test.
role = 'root'
# Create temp directory.
temp_dir = self.make_temp_directory()
# File name.
filename = os.path.join(temp_dir, role+'.txt')
# Role's keyids.
keyids = self.semi_roledict[role]['keyids']
# Create a temp keystore directory.
keystore_dir =\
self.create_temp_keystore_directory(keystore_dicts=True)
# Create temp directory for config file.
config_dir = self.make_temp_directory()
# Build config file.
config_filepath = signerlib.build_config_file(config_dir, 365,
self.semi_roledict)
# Create role's metadata.
signable_meta = signerlib.generate_root_metadata(config_filepath)
# TESTS
# Test: normal case.
try:
signercli._sign_and_write_metadata(signable_meta, keyids, filename)
except Exception, e:
self.fail(str(e))
# Verify that the root meta file was created.
self.assertTrue(os.path.exists(filename))
Errors = (tuf.Error, tuf.FormatError)
# Test: invalid metadata.
self.assertRaises(Errors, signercli._sign_and_write_metadata,
self.random_string(), keyids, filename)
# Test: invalid keyids
invalid_keyids = self.random_string()
self.assertRaises(Errors, signercli._sign_and_write_metadata,
signable_meta, invalid_keyids, filename)
# Test: invalid filename
self.assertRaises(Errors, signercli._sign_and_write_metadata,
signable_meta, invalid_keyids, True)
def test_2_change_password(self):
# SETUP
test_keyid = self.rsa_keyids[0]
self.mock_prompt(test_keyid)
# Create keystore directory.
keystore_dir = self.create_temp_keystore_directory()
# Specify old password and create a new password.
old_password = self.rsa_passwords[test_keyid]
new_password = self.random_string()
# Mock method for signercli._get_password()
def _mock_get_password(msg, confirm=False, old_pw=old_password,
new_pw=new_password):
if msg.startswith('\nEnter the old password for the keyid: '):
return old_pw
else:
return new_pw
# Patch signercli._get_password.
signercli._get_password = _mock_get_password
# TESTS
# Test: normal case.
try:
signercli.change_password(keystore_dir)
except Exception, e:
self.fail(str(e))
# Verify password change.
self.assertEqual(keystore._key_passwords[test_keyid], new_password)
# Test: non-existing keyid.
keystore.clear_keystore()
self.mock_prompt(self.random_string(15))
self.assertRaises(tuf.RepositoryError, signercli.change_password,
keystore_dir)
# Restore the prompt input to existing keyid.
self.mock_prompt(test_keyid)
# Test: non-existing old password.
keystore.clear_keystore()
old_password = self.random_string()
self.assertRaises(tuf.RepositoryError, signercli.change_password,
keystore_dir)
def test_2_generate_rsa_key(self):
# SETUP
# Method to patch signercli._get_password()
def _mock_get_password(junk, confirm=False):
return self.random_string()
# Patch signercli._get_password()
signercli._get_password = _mock_get_password
# Create a temp keystore directory.
keystore_dir = self.make_temp_directory()
# TESTS
# Test: invalid rsa bits.
self.mock_prompt(1024)
self.assertRaises(tuf.RepositoryError, signercli.generate_rsa_key,
keystore_dir)
# Input appropriate number of rsa bits.
self.mock_prompt(3072)
# Test: normal case.
try:
signercli.generate_rsa_key(keystore_dir)
except Exception, e:
self.fail(str(e))
# Was the key file added to the directory?
self.assertTrue(os.listdir(keystore_dir))
# This method just prints keyids.
def test_3_list_signing_keys(self):
pass
def test_2_dump_key(self):
# SETUP
keyid = self.rsa_keyids[0]
password = self.rsa_passwords[keyid]
show_priv = 'private'
# Mock method for signercli._get_password().
def _mock_get_password(msg):
return password
# Mock method for signercli._prompt().
def _mock_prompt(msg, junk):
if msg.startswith('\nEnter the keyid'):
return keyid
else:
return show_priv
# Patch signercli._get_password().
signercli._get_password = _mock_get_password
# Patch signercli._prompt().
signercli._prompt = _mock_prompt
# Create keystore directory.
keystore_dir = self.create_temp_keystore_directory()
# TESTS
# Test: normal case.
try:
signercli.dump_key(keystore_dir)
except Exception, e:
self.fail(str(e))
# Test: incorrect password.
saved_pw = password
password = self.random_string()
self.assertRaises(tuf.RepositoryError, signercli.dump_key,
keystore_dir)
# Restore the correct password.
password = saved_pw
# Test: non-existing keyid.
keyid = self.random_string()
self.assertRaises(tuf.RepositoryError, signercli.dump_key,
keystore_dir)
keyid = self.rsa_keyids[0]
def test_3_make_root_metadata(self):
# SETUP
# Create temp directory for config file.
config_dir = self.make_temp_directory()
# Build a config file.
config_filepath = signerlib.build_config_file(config_dir, 365,
self.semi_roledict)
# Create a temp metadata directory.
meta_dir = self.make_temp_directory()
# Patch signercli._get_metadata_directory().
self.mock_get_metadata_directory(directory=meta_dir)
# Patch signercli._prompt().
self.mock_prompt(config_filepath)
# Patch signercli._get_password().
self.get_passwords()
# Create keystore directory.
keystore_dir = self.create_temp_keystore_directory()
# TESTS
# Test: normal case.
try:
signercli.make_root_metadata(keystore_dir)
except Exception, e:
self.fail(str(e))
# Verify that the root metadata path was created.
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'root.txt')))
# Test: invalid config path.
# Clear keystore's dictionaries.
keystore.clear_keystore()
# Supply a non-existing path to signercli._prompt().
self.mock_prompt(self.random_path())
self.assertRaises(tuf.RepositoryError, signercli.make_root_metadata,
keystore_dir)
# Re-patch signercli._prompt() with valid config path.
self.mock_prompt(config_filepath)
# Test: incorrect 'root' passwords.
# Clear keystore's dictionaries.
keystore.clear_keystore()
keyids = self.semi_roledict['root']['keyids']
for keyid in keyids:
saved_pw = self.rsa_passwords[keyid]
self.rsa_passwords[keyid] = self.random_string()
self.assertRaises(tuf.RepositoryError, signercli.make_root_metadata,
keystore_dir)
self.rsa_passwords[keyid] = saved_pw
def test_3_make_targets_metadata(self):
# SETUP
# Create a temp repository and metadata directories.
repo_dir = self.make_temp_directory()
meta_dir = self.make_temp_directory(directory=repo_dir)
# Create a directory containing target files.
targets_dir, targets_paths =\
self.make_temp_directory_with_data_files(directory=repo_dir)
# Create temp directory for config file.
config_dir = self.make_temp_directory()
# Build a config file.
config_filepath = signerlib.build_config_file(config_dir, 365,
self.semi_roledict)
# Patch signercli._get_metadata_directory()
self.mock_get_metadata_directory(directory=meta_dir)
# Patch signercli._get_password(). Used in _get_role_config_keyids()
self.get_passwords()
# Create keystore directory.
keystore_dir = self.create_temp_keystore_directory()
# Mock method for signercli._prompt().
self.make_metadata_mock_prompts(targ_dir=targets_dir,
conf_path=config_filepath)
# TESTS
# Test: normal case.
try:
signercli.make_targets_metadata(keystore_dir)
except Exception, e:
self.fail(str(e))
# Verify that targets metadata file was created.
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'targets.txt')))
# Test: invalid targets path.
# Clear keystore's dictionaries.
keystore.clear_keystore()
# Supply a non-existing targets directory.
self.make_metadata_mock_prompts(targ_dir=self.random_path(),
conf_path=config_filepath)
self.assertRaises(tuf.RepositoryError, signercli.make_targets_metadata,
keystore_dir)
# Restore the targets directory.
self.make_metadata_mock_prompts(targ_dir=targets_dir,
conf_path=config_filepath)
# Test: invalid config path.
# Clear keystore's dictionaries.
keystore.clear_keystore()
# Supply a non-existing config path.
self.make_metadata_mock_prompts(targ_dir=targets_dir,
conf_path=self.random_path())
self.assertRaises(tuf.RepositoryError, signercli.make_targets_metadata,
keystore_dir)
# Restore the config file path.
self.make_metadata_mock_prompts(targ_dir=targets_dir,
conf_path=config_filepath)
# Test: incorrect 'targets' passwords.
# Clear keystore's dictionaries.
keystore.clear_keystore()
keyids = self.semi_roledict['targets']['keyids']
for keyid in keyids:
saved_pw = self.rsa_passwords[keyid]
self.rsa_passwords[keyid] = self.random_string()
self.assertRaises(tuf.RepositoryError, signercli.make_targets_metadata,
keystore_dir)
self.rsa_passwords[keyid] = saved_pw
def test_4_make_release_metadata(self):
# In order to build release metadata file (release.txt),
# root and targets metadata files (root.txt, targets.txt)
# must exist in the metadata directory.
# SETUP
# Create temp directory for config file.
config_dir = self.make_temp_directory()
# Build a config file.
config_filepath = signerlib.build_config_file(config_dir, 365,
self.semi_roledict)
# Create a temp repository and metadata directories.
repo_dir = self.make_temp_directory()
meta_dir = self.make_temp_directory(repo_dir)
# Create a directory containing target files.
targets_dir, targets_paths = \
self.make_temp_directory_with_data_files(directory=repo_dir)
# Patch signercli._get_metadata_directory().
self.mock_get_metadata_directory(directory=meta_dir)
# Patch signercli._get_password(). Used in _get_role_config_keyids().
self.get_passwords()
# Create keystore directory.
keystore_dir = self.create_temp_keystore_directory()
# Mock method for signercli._prompt().
self.make_metadata_mock_prompts(targ_dir=targets_dir,
conf_path=config_filepath)
# TESTS
# Test: no root.txt in the metadata dir.
try:
signercli.make_targets_metadata(keystore_dir)
except Exception, e:
self.fail(str(e))
# Verify that 'tuf.RepositoryError' is raised due to a missing root.txt.
keystore.clear_keystore()
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'targets.txt')))
self.assertRaises(tuf.RepositoryError, signercli.make_release_metadata,
keystore_dir)
os.remove(os.path.join(meta_dir,'targets.txt'))
keystore.clear_keystore()
    # Test: no targets.txt in the metadata dir.
try:
signercli.make_root_metadata(keystore_dir)
keystore.clear_keystore()
except Exception, e:
self.fail(str(e))
# Verify that 'tuf.RepositoryError' is raised due to a missing targets.txt.
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'root.txt')))
self.assertRaises(tuf.RepositoryError, signercli.make_release_metadata,
keystore_dir)
os.remove(os.path.join(meta_dir,'root.txt'))
keystore.clear_keystore()
# Test: normal case.
try:
signercli.make_root_metadata(keystore_dir)
keystore.clear_keystore()
signercli.make_targets_metadata(keystore_dir)
keystore.clear_keystore()
signercli.make_release_metadata(keystore_dir)
keystore.clear_keystore()
except Exception, e:
self.fail(str(e))
    # Verify that the root, targets and release metadata files were created.
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'root.txt')))
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'targets.txt')))
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'release.txt')))
# Test: invalid config path.
# Supply a non-existing config file path.
self.make_metadata_mock_prompts(targ_dir=targets_dir,
conf_path=self.random_path())
self.assertRaises(tuf.RepositoryError, signercli.make_release_metadata,
keystore_dir)
# Restore the config file path.
self.make_metadata_mock_prompts(targ_dir=targets_dir,
conf_path=config_filepath)
# Test: incorrect 'release' passwords.
# Clear keystore's dictionaries.
keystore.clear_keystore()
keyids = self.semi_roledict['release']['keyids']
for keyid in keyids:
saved_pw = self.rsa_passwords[keyid]
self.rsa_passwords[keyid] = self.random_string()
self.assertRaises(tuf.RepositoryError, signercli.make_release_metadata,
keystore_dir)
self.rsa_passwords[keyid] = saved_pw
def test_5_make_timestamp_metadata(self):
# In order to build timestamp metadata file (timestamp.txt),
# root, targets and release metadata files (root.txt, targets.txt
# release.txt) must exist in the metadata directory.
# SETUP
# Create temp directory for config file.
config_dir = self.make_temp_directory()
# Build a config file.
config_filepath = signerlib.build_config_file(config_dir, 365,
self.semi_roledict)
# Create a temp repository and metadata directories.
repo_dir = self.make_temp_directory()
meta_dir = self.make_temp_directory(repo_dir)
# Create a directory containing target files.
targets_dir, targets_paths = \
self.make_temp_directory_with_data_files(directory=repo_dir)
# Patch signercli._get_metadata_directory().
self.mock_get_metadata_directory(directory=meta_dir)
# Patch signercli._get_password(). Used in _get_role_config_keyids().
self.get_passwords()
# Create keystore directory.
keystore_dir = self.create_temp_keystore_directory()
# Mock method for signercli._prompt().
self.make_metadata_mock_prompts(targ_dir=targets_dir,
conf_path=config_filepath)
# TESTS
# Test: no root.txt in the metadata dir.
try:
signercli.make_targets_metadata(keystore_dir)
except Exception, e:
self.fail(str(e))
    # Verify that the targets metadata file was created.
keystore.clear_keystore()
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'targets.txt')))
self.assertRaises(tuf.RepositoryError, signercli.make_timestamp_metadata,
keystore_dir)
os.remove(os.path.join(meta_dir,'targets.txt'))
keystore.clear_keystore()
    # Test: no targets.txt in the metadata dir.
try:
signercli.make_root_metadata(keystore_dir)
except Exception, e:
self.fail(str(e))
    # Verify that the root metadata file was created.
keystore.clear_keystore()
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'root.txt')))
self.assertRaises(tuf.RepositoryError, signercli.make_timestamp_metadata,
keystore_dir)
os.remove(os.path.join(meta_dir,'root.txt'))
keystore.clear_keystore()
    # Test: no release.txt in the metadata dir.
try:
signercli.make_root_metadata(keystore_dir)
keystore.clear_keystore()
signercli.make_targets_metadata(keystore_dir)
keystore.clear_keystore()
except Exception, e:
self.fail(str(e))
    # Verify that 'tuf.RepositoryError' is raised due to a missing release.txt.
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'root.txt')))
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'targets.txt')))
self.assertRaises(tuf.RepositoryError, signercli.make_timestamp_metadata,
keystore_dir)
os.remove(os.path.join(meta_dir,'root.txt'))
os.remove(os.path.join(meta_dir,'targets.txt'))
keystore.clear_keystore()
# Test: normal case.
try:
signercli.make_root_metadata(keystore_dir)
keystore.clear_keystore()
signercli.make_targets_metadata(keystore_dir)
keystore.clear_keystore()
signercli.make_release_metadata(keystore_dir)
keystore.clear_keystore()
signercli.make_timestamp_metadata(keystore_dir)
keystore.clear_keystore()
except Exception, e:
self.fail(str(e))
    # Verify that the root, targets, release and timestamp metadata files were created.
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'root.txt')))
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'targets.txt')))
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'release.txt')))
self.assertTrue(os.path.exists(os.path.join(meta_dir, 'timestamp.txt')))
# Test: invalid config path.
# Supply a non-existing config file path.
self.make_metadata_mock_prompts(targ_dir=targets_dir,
conf_path=self.random_path())
    self.assertRaises(tuf.RepositoryError,
                      signercli.make_timestamp_metadata, keystore_dir)
# Restore the config file path.
self.make_metadata_mock_prompts(targ_dir=targets_dir,
conf_path=config_filepath)
    # Test: incorrect 'timestamp' passwords.
# Clear keystore's dictionaries.
keystore.clear_keystore()
    keyids = self.semi_roledict['timestamp']['keyids']
for keyid in keyids:
saved_pw = self.rsa_passwords[keyid]
self.rsa_passwords[keyid] = self.random_string()
      self.assertRaises(tuf.RepositoryError,
                        signercli.make_timestamp_metadata, keystore_dir)
self.rsa_passwords[keyid] = saved_pw
def test_6_sign_metadata_file(self):
# To test this method, an RSA key will be created with
# a password in addition to the existing RSA keys.
# SETUP
# Create temp directory for config file.
config_dir = self.make_temp_directory()
# Build a config file.
config_filepath = signerlib.build_config_file(config_dir, 365,
self.semi_roledict)
# Create a temp repository and metadata directories.
repo_dir = self.make_temp_directory()
meta_dir = self.make_temp_directory(repo_dir)
# Create a directory containing target files.
targets_dir, targets_paths = \
self.make_temp_directory_with_data_files(directory=repo_dir)
# Patch signercli._get_metadata_directory().
self.mock_get_metadata_directory(directory=meta_dir)
# Patch signercli._get_password(). Used in _get_role_config_keyids().
self.get_passwords()
# Create keystore directory.
keystore_dir = self.create_temp_keystore_directory()
# Mock method for signercli._prompt().
self.make_metadata_mock_prompts(targ_dir=targets_dir,
conf_path=config_filepath)
# Create metadata files.
try:
signercli.make_root_metadata(keystore_dir)
keystore.clear_keystore()
signercli.make_targets_metadata(keystore_dir)
keystore.clear_keystore()
signercli.make_release_metadata(keystore_dir)
keystore.clear_keystore()
signercli.make_timestamp_metadata(keystore_dir)
keystore.clear_keystore()
except Exception, e:
self.fail(str(e))
    # Verify that the root, targets, release and timestamp metadata files were created.
root_meta_filepath = os.path.join(meta_dir, 'root.txt')
targets_meta_filepath = os.path.join(meta_dir, 'targets.txt')
release_meta_filepath = os.path.join(meta_dir, 'release.txt')
timestamp_meta_filepath = os.path.join(meta_dir, 'timestamp.txt')
self.assertTrue(os.path.exists(root_meta_filepath))
self.assertTrue(os.path.exists(targets_meta_filepath))
self.assertTrue(os.path.exists(release_meta_filepath))
self.assertTrue(os.path.exists(timestamp_meta_filepath))
# Create a new RSA key, indicate metadata filename.
new_keyid = self.generate_rsakey()
meta_filename = targets_meta_filepath
# Create keystore directory. New key is untouched.
keystore_dir = self.create_temp_keystore_directory(keystore_dicts=True)
# List of keyids to be returned by _get_keyids()
signing_keyids = []
# Method to patch signercli._get_keyids()
def _mock_get_keyids(junk):
return signing_keyids
# Method to patch signercli._prompt().
def _mock_prompt(msg, junk):
return meta_filename
# Patch signercli._get_keyids()
signercli._get_keyids = _mock_get_keyids
# Patch signercli._prompt().
signercli._prompt = _mock_prompt
# TESTS
# Test: no loaded keyids.
self.assertRaises(tuf.RepositoryError,
signercli.sign_metadata_file, keystore_dir)
# Load new keyid.
signing_keyids = [new_keyid]
# Test: normal case.
try:
signercli.sign_metadata_file(keystore_dir)
except Exception, e:
self.fail(str(e))
# Verify the change.
self.assertTrue(os.path.exists(targets_meta_filepath))
# Load targets metadata from the file ('targets.txt').
targets_metadata = tuf.util.load_json_file(targets_meta_filepath)
keyid_exists = False
for signature in targets_metadata['signatures']:
if new_keyid == signature['keyid']:
keyid_exists = True
break
self.assertTrue(keyid_exists)
def test_7_make_delegation(self):
# SETUP
# Create a temp repository and metadata directories.
repo_dir = self.make_temp_directory()
meta_dir = self.make_temp_directory(directory=repo_dir)
# Create targets directories.
targets_dir, targets_paths =\
self.make_temp_directory_with_data_files(directory=repo_dir)
delegated_targets_dir = os.path.join(targets_dir,'targets',
'delegated_level1')
# Assign parent role and name of the delegated role.
parent_role = 'targets'
delegated_role = 'delegated_role_1'
# Create couple new RSA keys for delegation levels 1 and 2.
new_keyid_1 = self.generate_rsakey()
new_keyid_2 = self.generate_rsakey()
# Create temp directory for config file.
config_dir = self.make_temp_directory()
# Build a config file.
config_filepath = signerlib.build_config_file(config_dir, 365,
self.semi_roledict)
# Patch signercli._get_metadata_directory().
self.mock_get_metadata_directory(directory=meta_dir)
# Patch signercli._get_password(). Get passwords for parent's keyids.
self.get_passwords()
# Create keystore directory.
keystore_dir = self.create_temp_keystore_directory()
# Mock method for signercli._prompt() to generate targets.txt file.
self.make_metadata_mock_prompts(targ_dir=targets_dir,
conf_path=config_filepath)
# List of keyids to be returned by _get_keyids()
signing_keyids = [new_keyid_1]
# Load keystore.
load_keystore = keystore.load_keystore_from_keyfiles
# Build targets metadata file (targets.txt).
try:
signercli.make_targets_metadata(keystore_dir)
except Exception, e:
self.fail(str(e))
    # Clear keystore's dictionaries.
keystore.clear_keystore()
# Mock method for signercli._prompt().
def _mock_prompt(msg, junk):
if msg.startswith('\nNOTE: The directory entered'):
return delegated_targets_dir
elif msg.startswith('\nChoose and enter the parent'):
return parent_role
elif msg.endswith('\nEnter the delegated role\'s name: '):
return delegated_role
else:
error_msg = ('Prompt: '+'\''+msg+'\''+
' did not match any predefined mock prompts.')
self.fail(error_msg)
# Mock method for signercli._get_password().
def _mock_get_password(msg):
for keyid in self.rsa_keyids:
if msg.endswith('('+keyid+'): '):
return self.rsa_passwords[keyid]
# Method to patch signercli._get_keyids()
def _mock_get_keyids(junk):
if signing_keyids:
for keyid in signing_keyids:
password = self.rsa_passwords[keyid]
# Load the keyfile.
load_keystore(keystore_dir, [keyid], [password])
return signing_keyids
# Patch signercli._prompt().
signercli._prompt = _mock_prompt
# Patch signercli._get_password().
signercli._get_password = _mock_get_password
# Patch signercli._get_keyids().
signercli._get_keyids = _mock_get_keyids
# TESTS
# Test: invalid parent role.
# Assign a non-existing parent role.
parent_role = self.random_string()
self.assertRaises(tuf.RepositoryError, signercli.make_delegation,
keystore_dir)
# Restore parent role.
parent_role = 'targets'
# Test: invalid password(s) for parent's keyids.
keystore.clear_keystore()
parent_keyids = self.semi_roledict[parent_role]['keyids']
for keyid in parent_keyids:
saved_pw = self.rsa_passwords[keyid]
self.rsa_passwords[keyid] = self.random_string()
self.assertRaises(tuf.RepositoryError, signercli.make_delegation,
keystore_dir)
self.rsa_passwords[keyid] = saved_pw
# Test: delegated_keyids > 1 or (== 0).
keystore.clear_keystore()
# Load keyids ( > 1).
    # 'signing_keyids' already contains 'new_keyid_1', add more.
for keyid in self.semi_roledict['release']['keyids']:
signing_keyids.append(keyid)
self.assertRaises(tuf.RepositoryError, signercli.make_delegation,
keystore_dir)
keystore.clear_keystore()
# Load 0 keyids (== 0).
signing_keyids = []
self.assertRaises(tuf.RepositoryError, signercli.make_delegation,
keystore_dir)
keystore.clear_keystore()
# Restore signing_keyids (== 1).
signing_keyids = [new_keyid_1]
# Test: normal case 1.
# Testing first level delegation.
try:
signercli.make_delegation(keystore_dir)
except Exception, e:
self.fail(str(e))
# Verify delegated metadata file exists.
delegated_meta_file = os.path.join(meta_dir, parent_role,
delegated_role+'.txt')
self.assertTrue(os.path.exists(delegated_meta_file))
# Test: normal case 2.
# Testing second level delegation.
keystore.clear_keystore()
# Make necessary adjustments for the test.
signing_keyids = [new_keyid_2]
delegated_targets_dir = os.path.join(delegated_targets_dir,
'delegated_level2')
parent_role = os.path.join(parent_role, delegated_role)
delegated_role = 'delegated_role_2'
try:
signercli.make_delegation(keystore_dir)
except Exception, e:
self.fail(str(e))
# Verify delegated metadata file exists.
delegated_meta_file = os.path.join(meta_dir, parent_role,
delegated_role+'.txt')
self.assertTrue(os.path.exists(delegated_meta_file))
# Run unit tests.
loader = unittest_toolbox.unittest.TestLoader
suite = loader().loadTestsFromTestCase(TestSignercli)
unittest_toolbox.unittest.TextTestRunner(verbosity=2).run(suite)
|
|
from typing import Dict, List, Optional, Sequence, Iterable
import itertools
import logging
import warnings
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader, PathOrStr
from allennlp.data.dataset_readers.dataset_utils import to_bioul
from allennlp.data.fields import TextField, SequenceLabelField, Field, MetadataField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import TokenIndexer, SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
logger = logging.getLogger(__name__)
def _is_divider(line: str) -> bool:
empty_line = line.strip() == ""
if empty_line:
return True
else:
first_token = line.split()[0]
if first_token == "-DOCSTART-":
return True
else:
return False
@DatasetReader.register("conll2003")
class Conll2003DatasetReader(DatasetReader):
"""
Reads instances from a pretokenised file where each line is in the following format:
```
WORD POS-TAG CHUNK-TAG NER-TAG
```
with a blank line indicating the end of each sentence
and `-DOCSTART- -X- -X- O` indicating the end of each article,
and converts it into a `Dataset` suitable for sequence tagging.
Each `Instance` contains the words in the `"tokens"` `TextField`.
The values corresponding to the `tag_label`
values will get loaded into the `"tags"` `SequenceLabelField`.
And if you specify any `feature_labels` (you probably shouldn't),
the corresponding values will get loaded into their own `SequenceLabelField` s.
This dataset reader ignores the "article" divisions and simply treats
each sentence as an independent `Instance`. (Technically the reader splits sentences
on any combination of blank lines and "DOCSTART" tags; in particular, it does the right
thing on well formed inputs.)
Registered as a `DatasetReader` with name "conll2003".
# Parameters
token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
tag_label : `str`, optional (default=`ner`)
Specify `ner`, `pos`, or `chunk` to have that tag loaded into the instance field `tag`.
feature_labels : `Sequence[str]`, optional (default=`()`)
These labels will be loaded as features into the corresponding instance fields:
`pos` -> `pos_tags`, `chunk` -> `chunk_tags`, `ner` -> `ner_tags`
Each will have its own namespace : `pos_tags`, `chunk_tags`, `ner_tags`.
If you want to use one of the tags as a `feature` in your model, it should be
specified here.
convert_to_coding_scheme : `str`, optional (default=`None`)
Specifies the coding scheme for `ner_labels` and `chunk_labels`.
`Conll2003DatasetReader` assumes a coding scheme of input data is `IOB1`.
Valid options are `None` and `BIOUL`. The `None` default maintains
the original IOB1 scheme in the CoNLL 2003 NER data.
In the IOB1 scheme, I is a token inside a span, O is a token outside
a span and B is the beginning of span immediately following another
span of the same type.
coding_scheme : `str`, optional (default=`IOB1`)
This parameter is deprecated. If you specify `coding_scheme` to
`IOB1`, consider simply removing it or specifying `convert_to_coding_scheme`
to `None`. If you want to specify `BIOUL` for `coding_scheme`,
replace it with `convert_to_coding_scheme`.
label_namespace : `str`, optional (default=`labels`)
Specifies the namespace for the chosen `tag_label`.
"""
_VALID_LABELS = {"ner", "pos", "chunk"}
def __init__(
self,
        token_indexers: Optional[Dict[str, TokenIndexer]] = None,
tag_label: str = "ner",
feature_labels: Sequence[str] = (),
convert_to_coding_scheme: Optional[str] = None,
label_namespace: str = "labels",
**kwargs,
) -> None:
if "coding_scheme" in kwargs:
warnings.warn("`coding_scheme` is deprecated.", DeprecationWarning)
coding_scheme = kwargs.pop("coding_scheme")
if coding_scheme not in ("IOB1", "BIOUL"):
raise ConfigurationError("unknown coding_scheme: {}".format(coding_scheme))
if coding_scheme == "IOB1":
convert_to_coding_scheme = None
else:
convert_to_coding_scheme = coding_scheme
super().__init__(
manual_distributed_sharding=True, manual_multiprocess_sharding=True, **kwargs
)
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
if tag_label is not None and tag_label not in self._VALID_LABELS:
raise ConfigurationError("unknown tag label type: {}".format(tag_label))
for label in feature_labels:
if label not in self._VALID_LABELS:
raise ConfigurationError("unknown feature label type: {}".format(label))
if convert_to_coding_scheme not in (None, "BIOUL"):
raise ConfigurationError(
"unknown convert_to_coding_scheme: {}".format(convert_to_coding_scheme)
)
self.tag_label = tag_label
self.feature_labels = set(feature_labels)
self.convert_to_coding_scheme = convert_to_coding_scheme
self.label_namespace = label_namespace
self._original_coding_scheme = "IOB1"
def _read(self, file_path: PathOrStr) -> Iterable[Instance]:
# if `file_path` is a URL, redirect to the cache
file_path = cached_path(file_path)
with open(file_path, "r") as data_file:
logger.info("Reading instances from lines in file at: %s", file_path)
# Group lines into sentence chunks based on the divider.
line_chunks = (
lines
for is_divider, lines in itertools.groupby(data_file, _is_divider)
# Ignore the divider chunks, so that `lines` corresponds to the words
# of a single sentence.
if not is_divider
)
for lines in self.shard_iterable(line_chunks):
fields = [line.strip().split() for line in lines]
# unzipping trick returns tuples, but our Fields need lists
fields = [list(field) for field in zip(*fields)]
tokens_, pos_tags, chunk_tags, ner_tags = fields
# TextField requires `Token` objects
tokens = [Token(token) for token in tokens_]
yield self.text_to_instance(tokens, pos_tags, chunk_tags, ner_tags)
def text_to_instance( # type: ignore
self,
tokens: List[Token],
        pos_tags: Optional[List[str]] = None,
        chunk_tags: Optional[List[str]] = None,
        ner_tags: Optional[List[str]] = None,
) -> Instance:
"""
We take `pre-tokenized` input here, because we don't have a tokenizer in this class.
"""
sequence = TextField(tokens)
instance_fields: Dict[str, Field] = {"tokens": sequence}
instance_fields["metadata"] = MetadataField({"words": [x.text for x in tokens]})
# Recode the labels if necessary.
if self.convert_to_coding_scheme == "BIOUL":
coded_chunks = (
to_bioul(chunk_tags, encoding=self._original_coding_scheme)
if chunk_tags is not None
else None
)
coded_ner = (
to_bioul(ner_tags, encoding=self._original_coding_scheme)
if ner_tags is not None
else None
)
else:
# the default IOB1
coded_chunks = chunk_tags
coded_ner = ner_tags
# Add "feature labels" to instance
if "pos" in self.feature_labels:
if pos_tags is None:
raise ConfigurationError(
"Dataset reader was specified to use pos_tags as "
"features. Pass them to text_to_instance."
)
instance_fields["pos_tags"] = SequenceLabelField(pos_tags, sequence, "pos_tags")
if "chunk" in self.feature_labels:
if coded_chunks is None:
raise ConfigurationError(
"Dataset reader was specified to use chunk tags as "
"features. Pass them to text_to_instance."
)
instance_fields["chunk_tags"] = SequenceLabelField(coded_chunks, sequence, "chunk_tags")
if "ner" in self.feature_labels:
if coded_ner is None:
raise ConfigurationError(
"Dataset reader was specified to use NER tags as "
" features. Pass them to text_to_instance."
)
instance_fields["ner_tags"] = SequenceLabelField(coded_ner, sequence, "ner_tags")
# Add "tag label" to instance
if self.tag_label == "ner" and coded_ner is not None:
instance_fields["tags"] = SequenceLabelField(coded_ner, sequence, self.label_namespace)
elif self.tag_label == "pos" and pos_tags is not None:
instance_fields["tags"] = SequenceLabelField(pos_tags, sequence, self.label_namespace)
elif self.tag_label == "chunk" and coded_chunks is not None:
instance_fields["tags"] = SequenceLabelField(
coded_chunks, sequence, self.label_namespace
)
return Instance(instance_fields)
def apply_token_indexers(self, instance: Instance) -> None:
instance.fields["tokens"]._token_indexers = self._token_indexers # type: ignore
|
|
"""
Machine arithmetics - determine the parameters of the
floating-point arithmetic system
Author: Pearu Peterson, September 2003
"""
from __future__ import division, absolute_import, print_function
__all__ = ['MachAr']
from numpy.core.fromnumeric import any
from numpy.core.numeric import errstate
# Need to speed this up...especially for longfloat
class MachAr(object):
"""
Diagnosing machine parameters.
Attributes
----------
ibeta : int
Radix in which numbers are represented.
it : int
Number of base-`ibeta` digits in the floating point mantissa M.
machep : int
Exponent of the smallest (most negative) power of `ibeta` that,
added to 1.0, gives something different from 1.0
eps : float
Floating-point number ``beta**machep`` (floating point precision)
negep : int
        Exponent of the smallest power of `ibeta` that, subtracted
from 1.0, gives something different from 1.0.
epsneg : float
Floating-point number ``beta**negep``.
iexp : int
Number of bits in the exponent (including its sign and bias).
minexp : int
Smallest (most negative) power of `ibeta` consistent with there
being no leading zeros in the mantissa.
xmin : float
Floating point number ``beta**minexp`` (the smallest [in
magnitude] usable floating value).
maxexp : int
Smallest (positive) power of `ibeta` that causes overflow.
xmax : float
``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
usable floating value).
irnd : int
In ``range(6)``, information on what kind of rounding is done
in addition, and on how underflow is handled.
ngrd : int
Number of 'guard digits' used when truncating the product
of two mantissas to fit the representation.
epsilon : float
Same as `eps`.
tiny : float
Same as `xmin`.
huge : float
Same as `xmax`.
    precision : int
        ``int(-log10(eps))``
    resolution : float
        ``10**(-precision)``
Parameters
----------
float_conv : function, optional
Function that converts an integer or integer array to a float
or float array. Default is `float`.
int_conv : function, optional
Function that converts a float or float array to an integer or
integer array. Default is `int`.
float_to_float : function, optional
Function that converts a float array to float. Default is `float`.
Note that this does not seem to do anything useful in the current
implementation.
float_to_str : function, optional
Function that converts a single float to a string. Default is
``lambda v:'%24.16e' %v``.
title : str, optional
Title that is printed in the string representation of `MachAr`.
See Also
--------
finfo : Machine limits for floating point types.
iinfo : Machine limits for integer types.
References
----------
.. [1] Press, Teukolsky, Vetterling and Flannery,
"Numerical Recipes in C++," 2nd ed,
Cambridge University Press, 2002, p. 31.
"""
def __init__(self, float_conv=float, int_conv=int,
float_to_float=float,
float_to_str=lambda v: '%24.16e' % v,
title='Python floating point number'):
"""
float_conv - convert integer to float (array)
int_conv - convert float (array) to integer
float_to_float - convert float array to float
float_to_str - convert array float to str
title - description of used floating point numbers
"""
# We ignore all errors here because we are purposely triggering
        # underflow to detect the properties of the running arch.
with errstate(under='ignore'):
self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
max_iterN = 10000
msg = "Did not converge after %d tries with %s"
one = float_conv(1)
two = one + one
zero = one - one
# Do we really need to do this? Aren't they 2 and 2.0?
# Determine ibeta and beta
a = one
for _ in range(max_iterN):
a = a + a
temp = a + one
temp1 = temp - a
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
b = one
for _ in range(max_iterN):
b = b + b
temp = a + b
itemp = int_conv(temp - a)
if any(itemp != 0):
break
else:
raise RuntimeError(msg % (_, one.dtype))
ibeta = itemp
beta = float_conv(ibeta)
# Determine it and irnd
it = -1
b = one
for _ in range(max_iterN):
it = it + 1
b = b * beta
temp = b + one
temp1 = temp - b
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
betah = beta / two
a = one
for _ in range(max_iterN):
a = a + a
temp = a + one
temp1 = temp - a
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
temp = a + betah
irnd = 0
if any(temp - a != zero):
irnd = 1
tempa = a + beta
temp = tempa + betah
if irnd == 0 and any(temp - tempa != zero):
irnd = 2
# Determine negep and epsneg
negep = it + 3
betain = one / beta
a = one
for i in range(negep):
a = a * betain
b = a
for _ in range(max_iterN):
temp = one - a
if any(temp - one != zero):
break
a = a * beta
negep = negep - 1
# Prevent infinite loop on PPC with gcc 4.0:
if negep < 0:
raise RuntimeError("could not determine machine tolerance "
"for 'negep', locals() -> %s" % (locals()))
else:
raise RuntimeError(msg % (_, one.dtype))
negep = -negep
epsneg = a
# Determine machep and eps
machep = - it - 3
a = b
for _ in range(max_iterN):
temp = one + a
if any(temp - one != zero):
break
a = a * beta
machep = machep + 1
else:
raise RuntimeError(msg % (_, one.dtype))
eps = a
# Determine ngrd
ngrd = 0
temp = one + eps
if irnd == 0 and any(temp * one - one != zero):
ngrd = 1
# Determine iexp
i = 0
k = 1
z = betain
t = one + eps
nxres = 0
for _ in range(max_iterN):
y = z
z = y * y
a = z * one # Check here for underflow
temp = z * t
if any(a + a == zero) or any(abs(z) >= y):
break
temp1 = temp * betain
if any(temp1 * beta == z):
break
i = i + 1
k = k + k
else:
raise RuntimeError(msg % (_, one.dtype))
if ibeta != 10:
iexp = i + 1
mx = k + k
else:
iexp = 2
iz = ibeta
while k >= iz:
iz = iz * ibeta
iexp = iexp + 1
mx = iz + iz - 1
# Determine minexp and xmin
for _ in range(max_iterN):
xmin = y
y = y * betain
a = y * one
temp = y * t
if any((a + a) != zero) and any(abs(y) < xmin):
k = k + 1
temp1 = temp * betain
if any(temp1 * beta == y) and any(temp != y):
nxres = 3
xmin = y
break
else:
break
else:
raise RuntimeError(msg % (_, one.dtype))
minexp = -k
# Determine maxexp, xmax
if mx <= k + k - 3 and ibeta != 10:
mx = mx + mx
iexp = iexp + 1
maxexp = mx + minexp
irnd = irnd + nxres
if irnd >= 2:
maxexp = maxexp - 2
i = maxexp + minexp
if ibeta == 2 and not i:
maxexp = maxexp - 1
if i > 20:
maxexp = maxexp - 1
if any(a != y):
maxexp = maxexp - 2
xmax = one - epsneg
if any(xmax * one != xmax):
xmax = one - beta * epsneg
xmax = xmax / (xmin * beta * beta * beta)
i = maxexp + minexp + 3
for j in range(i):
if ibeta == 2:
xmax = xmax + xmax
else:
xmax = xmax * beta
self.ibeta = ibeta
self.it = it
self.negep = negep
self.epsneg = float_to_float(epsneg)
self._str_epsneg = float_to_str(epsneg)
self.machep = machep
self.eps = float_to_float(eps)
self._str_eps = float_to_str(eps)
self.ngrd = ngrd
self.iexp = iexp
self.minexp = minexp
self.xmin = float_to_float(xmin)
self._str_xmin = float_to_str(xmin)
self.maxexp = maxexp
self.xmax = float_to_float(xmax)
self._str_xmax = float_to_str(xmax)
self.irnd = irnd
self.title = title
# Commonly used parameters
self.epsilon = self.eps
self.tiny = self.xmin
self.huge = self.xmax
import math
self.precision = int(-math.log10(float_to_float(self.eps)))
ten = two + two + two + two + two
resolution = ten ** (-self.precision)
self.resolution = float_to_float(resolution)
self._str_resolution = float_to_str(resolution)
def __str__(self):
fmt = (
'Machine parameters for %(title)s\n'
'---------------------------------------------------------------------\n'
'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
'---------------------------------------------------------------------\n'
)
return fmt % self.__dict__
if __name__ == '__main__':
print(MachAr())
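    # A small follow-on sketch: the attributes documented in the class
    # docstring can also be read directly off the instance instead of
    # printing the full report.
    machar = MachAr()
    print("eps =", machar.eps, "machep =", machar.machep)
    print("xmin =", machar.xmin, "xmax =", machar.xmax)
    print("precision =", machar.precision, "resolution =", machar.resolution)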
|
|
# version code ef5291f09f60+
# pass = ZzBcTmfdMK
coursera = 1
# Please fill out this stencil and submit using the provided submission script.
## 1: (Task 1) Minutes in a Week
minutes_in_week = 7*24*60
## 2: (Task 2) Remainder
#For this task, your expression must use //
remainder_without_mod = 2304811 - (2304811 // 47) * 47
## 3: (Task 3) Divisibility
divisible_by_3 = (673 + 909) % 3 == 0
## 4: (Task 4) Conditional Expression
x = -9
y = 1/2
expression_val = 1
## 5: (Task 5) Squares Set Comprehension
first_five_squares = { x*x for x in {1,2,3,4,5} }
## 6: (Task 6) Powers-of-2 Set Comprehension
first_five_pows_two = { 2**x for x in {0,1,2,3,4} }
## 7: (Task 7) Double comprehension evaluating to nine-element set
# Assign three-element sets to X1 and Y1 so that
# {x*y for x in X1 for y in Y1} evaluates to a nine-element set.
X1 = { 1, 2, 3 }
Y1 = { 5, 7, 11 }
## 8: (Task 8) Double comprehension evaluating to five-element set
# Assign disjoint three-element sets to X1 and Y1 so that
# {x*y for x in X1 for y in Y1} evaluates to a five-element set.
X1 = { 1, 2, 4 }
Y1 = { 8, 16, 32 }
## 9: (Task 9) Set intersection as a comprehension
S = {1, 2, 3, 4}
T = {3, 4, 5, 6}
# Replace { ... } with a one-line set comprehension that evaluates to the intersection of S and T
S_intersect_T = { i for i in S if i in T }
## 10: (Task 10) Average
list_of_numbers = [20, 10, 15, 75]
# Replace ... with a one-line expression that evaluates to the average of list_of_numbers.
# Your expression should refer to the variable list_of_numbers, and should work
# for a list of any length greater than zero.
list_average = sum(list_of_numbers)/len(list_of_numbers)
## 11: (Task 11) Cartesian-product comprehension
# Replace ... with a double list comprehension over ['A','B','C'] and [1,2,3]
cartesian_product = [[x, y] for x in ['A','B','C'] for y in [1,2,3]]
## 12: (Task 12) Sum of numbers in list of list of numbers
LofL = [[.25, .75, .1], [-1, 0], [4, 4, 4, 4]]
# Replace ... with a one-line expression of the form sum([sum(...) ... ]) that
# includes a comprehension and evaluates to the sum of all numbers in all the lists.
LofL_sum = sum([sum(lst) for lst in LofL])
## 13: (Task 13) Three-element tuples summing to zero
S = {-4, -2, 1, 2, 5, 0}
# Replace [ ... ] with a one-line list comprehension in which S appears
zero_sum_list = [ (x,y,z) for x in S for y in S for z in S if (x+y+z)==0 ]
## 14: (Task 14) Nontrivial three-element tuples summing to zero
S = {-4, -2, 1, 2, 5, 0}
# Replace [ ... ] with a one-line list comprehension in which S appears
exclude_zero_list = [ (x,y,z) for x in S for y in S for z in S if (x+y+z)!=0 ]
## 15: (Task 15) One nontrivial three-element tuple summing to zero
S = {-4, -2, 1, 2, 5, 0}
# Replace ... with a one-line expression that uses a list comprehension in which S appears
first_of_tuples_list = [ (x,y,z) for x in S for y in S for z in S if (x+y+z)==0 ][0]
## 16: (Task 16) List and set differ
# Assign to example_L a list such that len(example_L) != len(list(set(example_L)))
example_L = ["Peter","Peter","Thong"]
## 17: (Task 17) Odd numbers
# Replace {...} with a one-line set comprehension over a range of the form range(n)
odd_num_list_range = {2*x + 1 for x in range(50)}
## 18: (Task 18) Using range and zip
# In the line below, replace ... with an expression that does not include a comprehension.
# Instead, it should use zip and range.
# Note: zip() does not return a list. It returns an 'iterator of tuples'
range_and_zip = list(zip(range(5), ['A','B','C','D','E']))
## 19: (Task 19) Using zip to find elementwise sums
A = [10, 25, 40]
B = [1, 15, 20]
# Replace [...] with a one-line comprehension that uses zip together with the variables A and B.
# The comprehension should evaluate to a list whose ith element is the ith element of
# A plus the ith element of B.
list_sum_zip = [x + y for (x, y) in zip(A, B)]
## 20: (Task 20) Extracting the value corresponding to key k from each dictionary in a list
dlist = [{'James':'Sean', 'director':'Terence'}, {'James':'Roger', 'director':'Lewis'}, {'James':'Pierce', 'director':'Roger'}]
k = 'James'
# Replace [...] with a one-line comprehension that uses dlist and k
# and that evaluates to ['Sean','Roger','Pierce']
value_list = [d[k] for d in dlist]
## 21: (Task 21) Extracting the value corresponding to k when it exists
dlist = [{'Bilbo':'Ian','Frodo':'Elijah'},{'Bilbo':'Martin','Thorin':'Richard'}]
k = 'Bilbo'
#Replace [...] with a one-line comprehension
value_list_modified_1 = [d[k] if k in d else 'NOT PRESENT' for d in dlist] # <-- Use the same expression here
k = 'Frodo'
value_list_modified_2 = [d[k] if k in d else 'NOT PRESENT' for d in dlist] # <-- as you do here
## 22: (Task 22) A dictionary mapping integers to their squares
# Replace {...} with a one-line dictionary comprehension
square_dict = {k:k**2 for k in range(100)}
## 23: (Task 23) Making the identity function
D = {'red','white','blue'}
# Replace {...} with a one-line dictionary comprehension
identity_dict = {i:i for i in D}
## 24: (Task 24) Mapping integers to their representation over a given base
base = 10
digits = set(range(base))
# Replace { ... } with a one-line dictionary comprehension
# Your comprehension should use the variables 'base' and 'digits' so it will work correctly if these
# are assigned different values (e.g. base = 2 and digits = {0,1})
representation_dict = {x*base**2 + y*base + z: (x, y, z) for x in digits for y in digits for z in digits}
## 25: (Task 25) A dictionary mapping names to salaries
id2salary = {0:1000.0, 1:1200.50, 2:990}
names = ['Larry', 'Curly', 'Moe']
# Replace { ... } with a one-line dictionary comprehension that uses id2salary and names.
listdict2dict = { names[i]:id2salary[i] for i in range(len(names)) }
## 26: (Task 26) Procedure nextInts
# Complete the procedure definition by replacing [ ... ] with a one-line list comprehension
def nextInts(L): return [x+1 for x in L]
## 27: (Task 27) Procedure cubes
# Complete the procedure definition by replacing [ ... ] with a one-line list comprehension
def cubes(L): return [x**3 for x in L]
## 28: (Task 28) Procedure dict2list
# Input: a dictionary dct and a list keylist consisting of the keys of dct
# Output: the list L such that L[i] is the value associated in dct with keylist[i]
# Example: dict2list({'a':'A', 'b':'B', 'c':'C'},['b','c','a']) should equal ['B','C','A']
# Complete the procedure definition by replacing [ ... ] with a one-line list comprehension
def dict2list(dct, keylist): return [dct[x] for x in keylist]
## 29: (Task 29) Procedure list2dict
# Input: a list L and a list keylist of the same length
# Output: the dictionary that maps keylist[i] to L[i] for i=0,1,...len(L)-1
# Example: list2dict(['A','B','C'],['a','b','c']) should equal {'a':'A', 'b':'B', 'c':'C'}
# Complete the procedure definition by replacing { ... } with a one-line dictionary comprehension
def list2dict(L, keylist): return {x:y for x,y in zip(keylist,L)}
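# A small self-check sketch (not part of the stencil): running this file
# directly spot-checks a few of the answers above. The expected values are
# recomputed independently here, so the asserts are only illustrative.
if __name__ == '__main__':
    assert remainder_without_mod == 2304811 % 47
    assert first_five_squares == {1, 4, 9, 16, 25}
    assert len({x*y for x in X1 for y in Y1}) == 5   # Task 8 sets stay five-element
    assert list_sum_zip == [11, 40, 60]
    assert representation_dict[947] == (9, 4, 7)
    assert dict2list({'a':'A', 'b':'B', 'c':'C'}, ['b','c','a']) == ['B','C','A']
    assert list2dict(['A','B','C'], ['a','b','c']) == {'a':'A', 'b':'B', 'c':'C'}
    print("All spot checks passed.")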
|
|
import os
import subprocess
from scripts.lib.zulip_tools import run, ENDC, WARNING
from scripts.lib.hash_reqs import expand_reqs
ZULIP_PATH = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
VENV_CACHE_PATH = "/srv/zulip-venv-cache"
if 'TRAVIS' in os.environ:
# In Travis CI, we don't have root access
VENV_CACHE_PATH = "/home/travis/zulip-venv-cache"
if False:
# Don't add a runtime dependency on typing
from typing import List, Optional, Tuple, Set
VENV_DEPENDENCIES = [
"build-essential",
"libffi-dev",
"libfreetype6-dev", # Needed for image types with Pillow
"libz-dev", # Needed to handle compressed PNGs with Pillow
"libjpeg-dev", # Needed to handle JPEGs with Pillow
"libldap2-dev",
"libmemcached-dev",
"python3-dev", # Needed to install typed-ast dependency of mypy
"python-dev",
"python3-pip",
"python-pip",
"python-virtualenv", # Trusty lacks `python3-virtualenv`.
# Fortunately we don't need the library,
# only the command, and this suffices.
"python3-six",
"python-six",
"libxml2-dev", # Used for installing talon
"libxslt1-dev", # Used for installing talon
"libpq-dev", # Needed by psycopg2
]
def install_venv_deps(requirements_file):
# type: (str) -> None
pip_requirements = os.path.join(ZULIP_PATH, "requirements", "pip.txt")
run(["pip", "install", "-U", "--requirement", pip_requirements])
run(["pip", "install", "--no-deps", "--requirement", requirements_file])
def get_index_filename(venv_path):
# type: (str) -> str
return os.path.join(venv_path, 'package_index')
def get_package_names(requirements_file):
# type: (str) -> List[str]
packages = expand_reqs(requirements_file)
cleaned = []
operators = ['~=', '==', '!=', '<', '>']
for package in packages:
if package.startswith("git+https://") and '#egg=' in package:
split_package = package.split("#egg=")
if len(split_package) != 2:
raise Exception("Unexpected duplicate #egg in package %s" % (package,))
# Extract the package name from Git requirements entries
package = split_package[1]
for operator in operators:
if operator in package:
package = package.split(operator)[0]
package = package.strip()
if package:
cleaned.append(package.lower())
return sorted(cleaned)
def create_requirements_index_file(venv_path, requirements_file):
# type: (str, str) -> str
"""
Creates a file, called package_index, in the virtual environment
directory that contains all the PIP packages installed in the
virtual environment. This file is used to determine the packages
that can be copied to a new virtual environment.
"""
index_filename = get_index_filename(venv_path)
packages = get_package_names(requirements_file)
with open(index_filename, 'w') as writer:
writer.write('\n'.join(packages))
writer.write('\n')
return index_filename
def get_venv_packages(venv_path):
# type: (str) -> Set[str]
"""
Returns the packages installed in the virtual environment using the
package index file.
"""
with open(get_index_filename(venv_path)) as reader:
return set(p.strip() for p in reader.read().split('\n') if p.strip())
def try_to_copy_venv(venv_path, new_packages):
# type: (str, Set[str]) -> bool
"""
Tries to copy packages from an old virtual environment in the cache
to the new virtual environment. The algorithm works as follows:
1. Find a virtual environment, v, from the cache that has the
highest overlap with the new requirements such that:
a. The new requirements only add to the packages of v.
b. The new requirements only upgrade packages of v.
2. Copy the contents of v to the new virtual environment using
virtualenv-clone.
3. Delete all .pyc files in the new virtual environment.
"""
if not os.path.exists(VENV_CACHE_PATH):
return False
venv_name = os.path.basename(venv_path)
overlaps = [] # type: List[Tuple[int, str, Set[str]]]
old_packages = set() # type: Set[str]
for sha1sum in os.listdir(VENV_CACHE_PATH):
curr_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, venv_name)
if (curr_venv_path == venv_path or
not os.path.exists(get_index_filename(curr_venv_path))):
continue
old_packages = get_venv_packages(curr_venv_path)
        # We only consider using old virtualenvs that only
# contain packages that we want in our new virtualenv.
if not (old_packages - new_packages):
overlap = new_packages & old_packages
overlaps.append((len(overlap), curr_venv_path, overlap))
target_log = get_logfile_name(venv_path)
source_venv_path = None
if overlaps:
# Here, we select the old virtualenv with the largest overlap
overlaps = sorted(overlaps)
_, source_venv_path, copied_packages = overlaps[-1]
print('Copying packages from {}'.format(source_venv_path))
clone_ve = "{}/bin/virtualenv-clone".format(source_venv_path)
cmd = "sudo {exe} {source} {target}".format(exe=clone_ve,
source=source_venv_path,
target=venv_path).split()
try:
run(cmd)
except Exception:
# Virtualenv-clone is not installed. Install it and try running
# the command again.
try:
run("{}/bin/pip install --no-deps virtualenv-clone".format(
source_venv_path).split())
run(cmd)
except Exception:
# virtualenv-clone isn't working, so just make a new venv
return False
run(["sudo", "chown", "-R",
"{}:{}".format(os.getuid(), os.getgid()), venv_path])
source_log = get_logfile_name(source_venv_path)
copy_parent_log(source_log, target_log)
create_log_entry(target_log, source_venv_path, copied_packages,
new_packages - copied_packages)
return True
return False
def get_logfile_name(venv_path):
# type: (str) -> str
return "{}/setup-venv.log".format(venv_path)
def create_log_entry(target_log, parent, copied_packages, new_packages):
# type: (str, str, Set[str], Set[str]) -> None
venv_path = os.path.dirname(target_log)
with open(target_log, 'a') as writer:
writer.write("{}\n".format(venv_path))
if copied_packages:
writer.write(
"Copied from {}:\n".format(parent))
writer.write("\n".join('- {}'.format(p) for p in sorted(copied_packages)))
writer.write("\n")
writer.write("New packages:\n")
writer.write("\n".join('- {}'.format(p) for p in sorted(new_packages)))
writer.write("\n\n")
def copy_parent_log(source_log, target_log):
# type: (str, str) -> None
if os.path.exists(source_log):
run('cp {} {}'.format(source_log, target_log).split())
def do_patch_activate_script(venv_path):
# type: (str) -> None
"""
Patches the bin/activate script so that the value of the environment variable VIRTUAL_ENV
is set to venv_path during the script's execution whenever it is sourced.
"""
# venv_path should be what we want to have in VIRTUAL_ENV after patching
script_path = os.path.join(venv_path, "bin", "activate")
    with open(script_path) as file_obj:
        lines = file_obj.readlines()
    for i, line in enumerate(lines):
        if line.startswith('VIRTUAL_ENV='):
            lines[i] = 'VIRTUAL_ENV="%s"\n' % (venv_path,)
    with open(script_path, 'w') as file_obj:
        file_obj.write("".join(lines))
def setup_virtualenv(target_venv_path, requirements_file, virtualenv_args=None, patch_activate_script=False):
# type: (Optional[str], str, Optional[List[str]], bool) -> str
# Check if a cached version already exists
path = os.path.join(ZULIP_PATH, 'scripts', 'lib', 'hash_reqs.py')
output = subprocess.check_output([path, requirements_file], universal_newlines=True)
sha1sum = output.split()[0]
if target_venv_path is None:
cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, 'venv')
else:
cached_venv_path = os.path.join(VENV_CACHE_PATH, sha1sum, os.path.basename(target_venv_path))
success_stamp = os.path.join(cached_venv_path, "success-stamp")
if not os.path.exists(success_stamp):
do_setup_virtualenv(cached_venv_path, requirements_file, virtualenv_args or [])
run(["touch", success_stamp])
print("Using cached Python venv from %s" % (cached_venv_path,))
if target_venv_path is not None:
run(["sudo", "ln", "-nsf", cached_venv_path, target_venv_path])
if patch_activate_script:
do_patch_activate_script(target_venv_path)
activate_this = os.path.join(cached_venv_path, "bin", "activate_this.py")
exec(open(activate_this).read(), {}, dict(__file__=activate_this))
return cached_venv_path
def do_setup_virtualenv(venv_path, requirements_file, virtualenv_args):
# type: (str, str, List[str]) -> None
# Setup Python virtualenv
new_packages = set(get_package_names(requirements_file))
run(["sudo", "rm", "-rf", venv_path])
if not try_to_copy_venv(venv_path, new_packages):
# Create new virtualenv.
run(["sudo", "mkdir", "-p", venv_path])
run(["sudo", "virtualenv"] + virtualenv_args + [venv_path])
run(["sudo", "chown", "-R",
"{}:{}".format(os.getuid(), os.getgid()), venv_path])
create_log_entry(get_logfile_name(venv_path), "", set(), new_packages)
create_requirements_index_file(venv_path, requirements_file)
# Switch current Python context to the virtualenv.
activate_this = os.path.join(venv_path, "bin", "activate_this.py")
exec(open(activate_this).read(), {}, dict(__file__=activate_this))
try:
install_venv_deps(requirements_file)
except subprocess.CalledProcessError:
# Might be a failure due to network connection issues. Retrying...
print(WARNING + "`pip install` failed; retrying..." + ENDC)
install_venv_deps(requirements_file)
run(["sudo", "chmod", "-R", "a+rX", venv_path])
|
|
from math import *
# function [alpha, xmin, L]=plfit(x, varargin)
# PLFIT fits a power-law distributional model to data.
# Source: http://www.santafe.edu/~aaronc/powerlaws/
#
# PLFIT(x) estimates x_min and alpha according to the goodness-of-fit
# based method described in Clauset, Shalizi, Newman (2007). x is a
# vector of observations of some quantity to which we wish to fit the
# power-law distribution p(x) ~ x^-alpha for x >= xmin.
# PLFIT automatically detects whether x is composed of real or integer
# values, and applies the appropriate method. For discrete data, if
# min(x) > 1000, PLFIT uses the continuous approximation, which is
# reliable in this regime.
#
# The fitting procedure works as follows:
# 1) For each possible choice of x_min, we estimate alpha via the
# method of maximum likelihood, and calculate the Kolmogorov-Smirnov
# goodness-of-fit statistic D.
# 2) We then select as our estimate of x_min, the value that gives the
# minimum value D over all values of x_min.
#
# Note that this procedure gives no estimate of the uncertainty of the
# fitted parameters, nor of the validity of the fit.
#
# Example:
# x = [500,150,90,81,75,75,70,65,60,58,49,47,40]
# [alpha, xmin, L] = plfit(x)
# or a = plfit(x)
#
# The output 'alpha' is the maximum likelihood estimate of the scaling
# exponent, 'xmin' is the estimate of the lower bound of the power-law
# behavior, and L is the log-likelihood of the data x>=xmin under the
# fitted power law.
#
# For more information, try 'type plfit'
#
# See also PLVAR, PLPVA
# Version 1.0.10 (2010 January)
# Copyright (C) 2008-2011 Aaron Clauset (Santa Fe Institute)
# Ported to Python by Joel Ornstein (2011 July)
# ([email protected])
# Distributed under GPL 2.0
# http://www.gnu.org/copyleft/gpl.html
# PLFIT comes with ABSOLUTELY NO WARRANTY
#
#
# The 'zeta' helper function is modified from the open-source library 'mpmath'
# mpmath: a Python library for arbitrary-precision floating-point arithmetic
# http://code.google.com/p/mpmath/
# version 0.17 (February 2011) by Fredrik Johansson and others
#
# Notes:
#
# 1. In order to implement the integer-based methods in Matlab, the numeric
# maximization of the log-likelihood function was used. This requires
# that we specify the range of scaling parameters considered. We set
# this range to be 1.50 to 3.50 at 0.01 intervals by default.
# This range can be set by the user like so,
#
# a = plfit(x,'range',[1.50,3.50,0.01])
#
# 2. PLFIT can be told to limit the range of values considered as estimates
# for xmin in three ways. First, it can be instructed to sample these
# possible values like so,
#
# a = plfit(x,'sample',100)
#
# which uses 100 uniformly distributed values on the sorted list of
# unique values in the data set. Second, it can simply omit all
# candidates above a hard limit, like so
#
# a = plfit(x,'limit',3.4)
#
# Finally, it can be forced to use a fixed value, like so
#
# a = plfit(x,'xmin',3.4)
#
# In the case of discrete data, it rounds the limit to the nearest
# integer.
#
# 3. When the input sample size is small (e.g., < 100), the continuous
# estimator is slightly biased (toward larger values of alpha). To
# explicitly use an experimental finite-size correction, call PLFIT like
# so
#
# a = plfit(x,'finite')
#
# which does a small-size correction to alpha.
#
# 4. For continuous data, PLFIT can return erroneously large estimates of
# alpha when xmin is so large that the number of obs x >= xmin is very
# small. To prevent this, we can truncate the search over xmin values
# before the finite-size bias becomes significant by calling PLFIT as
#
# a = plfit(x,'nosmall')
#
# which skips values xmin with finite size bias > 0.1.
def plfit(x, *varargin):
vec = []
sample = []
xminx = []
limit = []
finite = False
nosmall = False
nowarn = False
# parse command-line parameters trap for bad input
i=0
while i<len(varargin):
argok = 1
if type(varargin[i])==str:
if varargin[i]=='range':
Range = varargin[i+1]
if Range[1]>Range[0]:
argok=0
vec=[]
try:
vec=map(lambda X:X*float(Range[2])+Range[0],\
range(int((Range[1]-Range[0])/Range[2])))
except:
argok=0
vec=[]
if Range[0]>=Range[1]:
argok=0
vec=[]
i-=1
i+=1
elif varargin[i]== 'sample':
sample = varargin[i+1]
i = i + 1
elif varargin[i]== 'limit':
limit = varargin[i+1]
i = i + 1
elif varargin[i]== 'xmin':
xminx = varargin[i+1]
i = i + 1
elif varargin[i]== 'finite': finite = True
elif varargin[i]== 'nowarn': nowarn = True
elif varargin[i]== 'nosmall': nosmall = True
else: argok=0
if not argok:
print '(PLFIT) Ignoring invalid argument #',i+1
i = i+1
if vec!=[] and (type(vec)!=list or min(vec)<=1):
        print '(PLFIT) Error: ''range'' argument must contain a valid vector with minimum > 1. Using default.\n'
vec = []
if sample!=[] and sample<2:
print'(PLFIT) Error: ''sample'' argument must be a positive integer > 1. using default.\n'
sample = []
if limit!=[] and limit<min(x):
print'(PLFIT) Error: ''limit'' argument must be a positive value >= 1. using default.\n'
limit = []
if xminx!=[] and xminx>=max(x):
print'(PLFIT) Error: ''xmin'' argument must be a positive value < max(x). using default behavior.\n'
xminx = []
# select method (discrete or continuous) for fitting
if reduce(lambda X,Y:X==True and floor(Y)==float(Y),x,True): f_dattype = 'INTS'
elif reduce(lambda X,Y:X==True and (type(Y)==int or type(Y)==float or type(Y)==long),x,True): f_dattype = 'REAL'
else: f_dattype = 'UNKN'
if f_dattype=='INTS' and min(x) > 1000 and len(x)>100:
f_dattype = 'REAL'
# estimate xmin and alpha, accordingly
if f_dattype== 'REAL':
xmins = unique(x)
xmins.sort()
xmins = xmins[0:-1]
if xminx!=[]:
xmins = [min(filter(lambda X: X>=xminx,xmins))]
if limit!=[]:
xmins=filter(lambda X: X<=limit,xmins)
if xmins==[]: xmins = [min(x)]
if sample!=[]:
step = float(len(xmins))/(sample-1)
index_curr=0
new_xmins=[]
for i in range (0,sample):
if round(index_curr)==len(xmins): index_curr-=1
new_xmins.append(xmins[int(round(index_curr))])
index_curr+=step
xmins = unique(new_xmins)
xmins.sort()
dat = []
z = sorted(x)
for xm in range(0,len(xmins)):
xmin = xmins[xm]
z = filter(lambda X:X>=xmin,z)
n = len(z)
# estimate alpha using direct MLE
a = float(n) / sum(map(lambda X: log(float(X)/xmin),z))
if nosmall:
if (a-1)/sqrt(n) > 0.1 and dat!=[]:
xm = len(xmins)+1
break
# compute KS statistic
#cx = map(lambda X:float(X)/n,range(0,n))
cf = map(lambda X:1-pow((float(xmin)/X),a),z)
dat.append( max( map(lambda X: abs(cf[X]-float(X)/n),range(0,n))))
D = min(dat)
xmin = xmins[dat.index(D)]
z = filter(lambda X:X>=xmin,x)
z.sort()
n = len(z)
alpha = 1 + n / sum(map(lambda X: log(float(X)/xmin),z))
if finite: alpha = alpha*float(n-1)/n+1./n # finite-size correction
if n < 50 and not finite and not nowarn:
print '(PLFIT) Warning: finite-size bias may be present.\n'
L = n*log((alpha-1)/xmin) - alpha*sum(map(lambda X: log(float(X)/xmin),z))
elif f_dattype== 'INTS':
x=map(int,x)
if vec==[]:
for X in range(150,351):
vec.append(X/100.) # covers range of most practical
# scaling parameters
zvec = map(zeta, vec)
xmins = unique(x)
xmins.sort()
xmins = xmins[0:-1]
if xminx!=[]:
xmins = [min(filter(lambda X: X>=xminx,xmins))]
if limit!=[]:
limit = round(limit)
xmins=filter(lambda X: X<=limit,xmins)
if xmins==[]: xmins = [min(x)]
if sample!=[]:
step = float(len(xmins))/(sample-1)
index_curr=0
new_xmins=[]
for i in range (0,sample):
                if round(index_curr)==len(xmins): index_curr-=1
                new_xmins.append(xmins[int(round(index_curr))])
                index_curr+=step
            xmins = unique(new_xmins)
            xmins.sort()
# helper functions (unique and zeta)
def unique(seq):
# not order preserving
set = {}
map(set.__setitem__, seq, [])
return set.keys()
def _polyval(coeffs, x):
p = coeffs[0]
for c in coeffs[1:]:
p = c + x*p
return p
_zeta_int = [\
-0.5,
0.0,
1.6449340668482264365,1.2020569031595942854,1.0823232337111381915,
1.0369277551433699263,1.0173430619844491397,1.0083492773819228268,
1.0040773561979443394,1.0020083928260822144,1.0009945751278180853,
1.0004941886041194646,1.0002460865533080483,1.0001227133475784891,
1.0000612481350587048,1.0000305882363070205,1.0000152822594086519,
1.0000076371976378998,1.0000038172932649998,1.0000019082127165539,
1.0000009539620338728,1.0000004769329867878,1.0000002384505027277,
1.0000001192199259653,1.0000000596081890513,1.0000000298035035147,
1.0000000149015548284]
_zeta_P = [-3.50000000087575873, -0.701274355654678147,
-0.0672313458590012612, -0.00398731457954257841,
-0.000160948723019303141, -4.67633010038383371e-6,
-1.02078104417700585e-7, -1.68030037095896287e-9,
-1.85231868742346722e-11][::-1]
_zeta_Q = [1.00000000000000000, -0.936552848762465319,
-0.0588835413263763741, -0.00441498861482948666,
-0.000143416758067432622, -5.10691659585090782e-6,
-9.58813053268913799e-8, -1.72963791443181972e-9,
-1.83527919681474132e-11][::-1]
_zeta_1 = [3.03768838606128127e-10, -1.21924525236601262e-8,
2.01201845887608893e-7, -1.53917240683468381e-6,
-5.09890411005967954e-7, 0.000122464707271619326,
-0.000905721539353130232, -0.00239315326074843037,
0.084239750013159168, 0.418938517907442414, 0.500000001921884009]
_zeta_0 = [-3.46092485016748794e-10, -6.42610089468292485e-9,
1.76409071536679773e-7, -1.47141263991560698e-6, -6.38880222546167613e-7,
0.000122641099800668209, -0.000905894913516772796, -0.00239303348507992713,
0.0842396947501199816, 0.418938533204660256, 0.500000000000000052]
def zeta(s):
"""
Riemann zeta function, real argument
"""
if not isinstance(s, (float, int)):
try:
s = float(s)
except (ValueError, TypeError):
try:
s = complex(s)
if not s.imag:
return complex(zeta(s.real))
except (ValueError, TypeError):
pass
raise NotImplementedError
if s == 1:
raise ValueError("zeta(1) pole")
if s >= 27:
return 1.0 + 2.0**(-s) + 3.0**(-s)
n = int(s)
if n == s:
if n >= 0:
return _zeta_int[n]
if not (n % 2):
return 0.0
if s <= 0.0:
return 0
if s <= 2.0:
if s <= 1.0:
return _polyval(_zeta_0,s)/(s-1)
return _polyval(_zeta_1,s)/(s-1)
z = _polyval(_zeta_P,s) / _polyval(_zeta_Q,s)
return 1.0 + 2.0**(-s) + 3.0**(-s) + 4.0**(-s)*z
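# Illustrative sketch (not part of the original mpmath-derived helpers): quick
# sanity checks of zeta() against known values, e.g. zeta(2) = pi**2/6 ~ 1.6449,
# which matches the tabulated _zeta_int[2] entry above.
def _example_zeta():
    return zeta(2), zeta(3.5)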
data = [28,17,12,10,8,6,5,4,2,2,1,1,1,1,1,1]
print plfit(data, 'finite')
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/static-routes/static/next-hops/next-hop/interface-ref/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configured reference to interface / subinterface
"""
__slots__ = ("_path_helper", "_extmethods", "__interface", "__subinterface")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"static-routes",
"static",
"next-hops",
"next-hop",
"interface-ref",
"config",
]
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/config/interface (leafref)
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
return self.__interface
def _set_interface(self, v, load=False):
"""
Setter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/config/interface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface() directly.
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """interface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__interface = t
if hasattr(self, "_set"):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
def _get_subinterface(self):
"""
Getter method for subinterface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/config/subinterface (leafref)
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
    is required, this leaf should not be set.
"""
return self.__subinterface
def _set_subinterface(self, v, load=False):
"""
Setter method for subinterface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/config/subinterface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_subinterface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subinterface() directly.
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
    is required, this leaf should not be set.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subinterface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__subinterface = t
if hasattr(self, "_set"):
self._set()
def _unset_subinterface(self):
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
interface = __builtin__.property(_get_interface, _set_interface)
subinterface = __builtin__.property(_get_subinterface, _set_subinterface)
_pyangbind_elements = OrderedDict(
[("interface", interface), ("subinterface", subinterface)]
)
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/static-routes/static/next-hops/next-hop/interface-ref/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configured reference to interface / subinterface
"""
__slots__ = ("_path_helper", "_extmethods", "__interface", "__subinterface")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"static-routes",
"static",
"next-hops",
"next-hop",
"interface-ref",
"config",
]
def _get_interface(self):
"""
Getter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/config/interface (leafref)
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
return self.__interface
def _set_interface(self, v, load=False):
"""
Setter method for interface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/config/interface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_interface() directly.
YANG Description: Reference to a base interface. If a reference to a
subinterface is required, this leaf must be specified
to indicate the base interface.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """interface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__interface = t
if hasattr(self, "_set"):
self._set()
def _unset_interface(self):
self.__interface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="interface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
def _get_subinterface(self):
"""
Getter method for subinterface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/config/subinterface (leafref)
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
    is required, this leaf should not be set.
"""
return self.__subinterface
def _set_subinterface(self, v, load=False):
"""
Setter method for subinterface, mapped from YANG variable /network_instances/network_instance/protocols/protocol/static_routes/static/next_hops/next_hop/interface_ref/config/subinterface (leafref)
If this variable is read-only (config: false) in the
source YANG file, then _set_subinterface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_subinterface() directly.
YANG Description: Reference to a subinterface -- this requires the base
interface to be specified using the interface leaf in
this container. If only a reference to a base interface
    is required, this leaf should not be set.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """subinterface must be of a type compatible with leafref""",
"defined-type": "leafref",
"generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="subinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=True)""",
}
)
self.__subinterface = t
if hasattr(self, "_set"):
self._set()
def _unset_subinterface(self):
self.__subinterface = YANGDynClass(
base=six.text_type,
is_leaf=True,
yang_name="subinterface",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="leafref",
is_config=True,
)
interface = __builtin__.property(_get_interface, _set_interface)
subinterface = __builtin__.property(_get_subinterface, _set_subinterface)
_pyangbind_elements = OrderedDict(
[("interface", interface), ("subinterface", subinterface)]
)
|
|
from __future__ import absolute_import, unicode_literals
import warnings
from django.conf import settings
from django.utils.importlib import import_module
from django.utils import six
# Always import this module as follows:
# from debug_toolbar import settings [as dt_settings]
# Don't import directly CONFIG or PANELs, or you will miss changes performed
# with override_settings in tests.
CONFIG_DEFAULTS = {
# Toolbar options
'DISABLE_PANELS': set(['debug_toolbar.panels.redirects.RedirectsPanel']),
'INSERT_BEFORE': '</body>',
'RENDER_PANELS': None,
    'RESULTS_CACHE_SIZE': 10,
'ROOT_TAG_EXTRA_ATTRS': '',
'SHOW_COLLAPSED': False,
'SHOW_TOOLBAR_CALLBACK': 'debug_toolbar.middleware.show_toolbar',
# Panel options
'EXTRA_SIGNALS': [],
'ENABLE_STACKTRACES': True,
'HIDE_IN_STACKTRACES': (
'socketserver' if six.PY3 else 'SocketServer',
'threading',
'wsgiref',
'debug_toolbar',
'django',
),
'PROFILER_MAX_DEPTH': 10,
'SHOW_TEMPLATE_CONTEXT': True,
'SQL_WARNING_THRESHOLD': 500, # milliseconds
}
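# Illustrative example (hypothetical project settings, not part of this module):
# any subset of the keys above can be overridden via DEBUG_TOOLBAR_CONFIG in
# settings.py; keys left out simply fall back to CONFIG_DEFAULTS.
#
#   DEBUG_TOOLBAR_CONFIG = {
#       'SHOW_COLLAPSED': True,
#       'SQL_WARNING_THRESHOLD': 200,  # milliseconds
#   }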
USER_CONFIG = getattr(settings, 'DEBUG_TOOLBAR_CONFIG', {})
# Backward-compatibility for 1.0, remove in 2.0.
_RENAMED_CONFIG = {
'RESULTS_STORE_SIZE': 'RESULTS_CACHE_SIZE',
'ROOT_TAG_ATTRS': 'ROOT_TAG_EXTRA_ATTRS',
'HIDDEN_STACKTRACE_MODULES': 'HIDE_IN_STACKTRACES'
}
for old_name, new_name in _RENAMED_CONFIG.items():
if old_name in USER_CONFIG:
warnings.warn(
"%r was renamed to %r. Update your DEBUG_TOOLBAR_CONFIG "
"setting." % (old_name, new_name), DeprecationWarning)
USER_CONFIG[new_name] = USER_CONFIG.pop(old_name)
if 'HIDE_DJANGO_SQL' in USER_CONFIG:
warnings.warn(
"HIDE_DJANGO_SQL was removed. Update your "
"DEBUG_TOOLBAR_CONFIG setting.", DeprecationWarning)
USER_CONFIG.pop('HIDE_DJANGO_SQL')
if 'TAG' in USER_CONFIG:
warnings.warn(
"TAG was replaced by INSERT_BEFORE. Update your "
"DEBUG_TOOLBAR_CONFIG setting.", DeprecationWarning)
USER_CONFIG['INSERT_BEFORE'] = '</%s>' % USER_CONFIG.pop('TAG')
CONFIG = CONFIG_DEFAULTS.copy()
CONFIG.update(USER_CONFIG)
if not isinstance(CONFIG['SHOW_TOOLBAR_CALLBACK'], six.string_types):
warnings.warn(
"SHOW_TOOLBAR_CALLBACK is now a dotted path. Update your "
"DEBUG_TOOLBAR_CONFIG setting.", DeprecationWarning)
PANELS_DEFAULTS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
try:
PANELS = list(settings.DEBUG_TOOLBAR_PANELS)
except AttributeError:
PANELS = PANELS_DEFAULTS
else:
# Backward-compatibility for 1.0, remove in 2.0.
_RENAMED_PANELS = {
'debug_toolbar.panels.version.VersionDebugPanel':
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerDebugPanel':
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings_vars.SettingsDebugPanel':
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeaderDebugPanel':
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request_vars.RequestVarsDebugPanel':
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLDebugPanel':
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.template.TemplateDebugPanel':
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.cache.CacheDebugPanel':
'debug_toolbar.panels.cache.CachePanel',
'debug_toolbar.panels.signals.SignalDebugPanel':
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logger.LoggingDebugPanel':
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.InterceptRedirectsDebugPanel':
'debug_toolbar.panels.redirects.RedirectsPanel',
'debug_toolbar.panels.profiling.ProfilingDebugPanel':
'debug_toolbar.panels.profiling.ProfilingPanel',
}
for index, old_panel in enumerate(PANELS):
new_panel = _RENAMED_PANELS.get(old_panel)
if new_panel is not None:
warnings.warn(
"%r was renamed to %r. Update your DEBUG_TOOLBAR_PANELS "
"setting." % (old_panel, new_panel), DeprecationWarning)
PANELS[index] = new_panel
if 'INTERCEPT_REDIRECTS' in USER_CONFIG:
warnings.warn(
"INTERCEPT_REDIRECTS is deprecated. Please use the "
"DISABLE_PANELS config in the"
"DEBUG_TOOLBAR_CONFIG setting.", DeprecationWarning)
if USER_CONFIG['INTERCEPT_REDIRECTS']:
if 'debug_toolbar.panels.redirects.RedirectsPanel' \
in CONFIG['DISABLE_PANELS']:
# RedirectsPanel should be enabled
try:
CONFIG['DISABLE_PANELS'].remove(
'debug_toolbar.panels.redirects.RedirectsPanel'
)
except KeyError:
# We wanted to remove it, but it didn't exist. This is fine
pass
elif 'debug_toolbar.panels.redirects.RedirectsPanel' \
not in CONFIG['DISABLE_PANELS']:
# RedirectsPanel should be disabled
CONFIG['DISABLE_PANELS'].add(
'debug_toolbar.panels.redirects.RedirectsPanel'
)
PATCH_SETTINGS = getattr(settings, 'DEBUG_TOOLBAR_PATCH_SETTINGS', settings.DEBUG)
# The following functions can monkey-patch settings automatically. Several
# imports are placed inside functions to make it safe to import this module.
def is_toolbar_middleware(middleware_path):
from debug_toolbar.middleware import DebugToolbarMiddleware
# This could be replaced by import_by_path in Django >= 1.6.
try:
mod_path, cls_name = middleware_path.rsplit('.', 1)
mod = import_module(mod_path)
middleware_cls = getattr(mod, cls_name)
except (AttributeError, ImportError, ValueError):
return
return issubclass(middleware_cls, DebugToolbarMiddleware)
def is_toolbar_middleware_installed():
return any(is_toolbar_middleware(middleware)
for middleware in settings.MIDDLEWARE_CLASSES)
def prepend_to_setting(setting_name, value):
"""Insert value at the beginning of a list or tuple setting."""
values = getattr(settings, setting_name)
# Make a list [value] or tuple (value,)
value = type(values)((value,))
setattr(settings, setting_name, value + values)
def patch_internal_ips():
if not settings.INTERNAL_IPS:
prepend_to_setting('INTERNAL_IPS', '127.0.0.1')
prepend_to_setting('INTERNAL_IPS', '::1')
def patch_middleware_classes():
if not is_toolbar_middleware_installed():
prepend_to_setting('MIDDLEWARE_CLASSES',
'debug_toolbar.middleware.DebugToolbarMiddleware')
def patch_root_urlconf():
from django.conf.urls import include, patterns, url
from django.core.urlresolvers import clear_url_caches, reverse, NoReverseMatch
import debug_toolbar
try:
reverse('djdt:render_panel')
except NoReverseMatch:
urlconf_module = import_module(settings.ROOT_URLCONF)
urlconf_module.urlpatterns = patterns('', # noqa
url(r'^__debug__/', include(debug_toolbar.urls)),
) + urlconf_module.urlpatterns
clear_url_caches()
def patch_all():
patch_internal_ips()
patch_middleware_classes()
patch_root_urlconf()
|
|
from binascii import unhexlify
from time import time
from django import forms
from django.conf import settings
from django.forms import Form, ModelForm
from django.utils.translation import gettext_lazy as _
from django_otp.forms import OTPAuthenticationFormMixin
from django_otp.oath import totp
from django_otp.plugins.otp_totp.models import TOTPDevice
from .models import (
PhoneDevice, get_available_methods, get_available_phone_methods,
)
from .utils import totp_digits
from .validators import validate_international_phonenumber
try:
from otp_yubikey.models import RemoteYubikeyDevice, YubikeyDevice
except ImportError:
RemoteYubikeyDevice = YubikeyDevice = None
class MethodForm(forms.Form):
method = forms.ChoiceField(label=_("Method"),
initial='generator',
widget=forms.RadioSelect)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.fields['method'].choices = get_available_methods()
class PhoneNumberMethodForm(ModelForm):
number = forms.CharField(label=_("Phone Number"),
validators=[validate_international_phonenumber])
method = forms.ChoiceField(widget=forms.RadioSelect, label=_('Method'))
class Meta:
model = PhoneDevice
fields = 'number', 'method',
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.fields['method'].choices = get_available_phone_methods()
class PhoneNumberForm(ModelForm):
# Cannot use PhoneNumberField, as it produces a PhoneNumber object, which cannot be serialized.
number = forms.CharField(label=_("Phone Number"),
validators=[validate_international_phonenumber])
class Meta:
model = PhoneDevice
fields = 'number',
class DeviceValidationForm(forms.Form):
token = forms.IntegerField(label=_("Token"), min_value=1, max_value=int('9' * totp_digits()))
token.widget.attrs.update({'autofocus': 'autofocus',
'inputmode': 'numeric',
'autocomplete': 'one-time-code'})
error_messages = {
'invalid_token': _('Entered token is not valid.'),
}
def __init__(self, device, **args):
super().__init__(**args)
self.device = device
def clean_token(self):
token = self.cleaned_data['token']
if not self.device.verify_token(token):
raise forms.ValidationError(self.error_messages['invalid_token'])
return token
class YubiKeyDeviceForm(DeviceValidationForm):
token = forms.CharField(label=_("YubiKey"), widget=forms.PasswordInput())
error_messages = {
'invalid_token': _("The YubiKey could not be verified."),
}
def clean_token(self):
self.device.public_id = self.cleaned_data['token'][:-32]
return super().clean_token()
class TOTPDeviceForm(forms.Form):
token = forms.IntegerField(label=_("Token"), min_value=0, max_value=int('9' * totp_digits()))
token.widget.attrs.update({'autofocus': 'autofocus',
'inputmode': 'numeric',
'autocomplete': 'one-time-code'})
error_messages = {
'invalid_token': _('Entered token is not valid.'),
}
def __init__(self, key, user, metadata=None, **kwargs):
super().__init__(**kwargs)
self.key = key
self.tolerance = 1
self.t0 = 0
self.step = 30
self.drift = 0
self.digits = totp_digits()
self.user = user
self.metadata = metadata or {}
@property
def bin_key(self):
"""
The secret key as a binary string.
"""
return unhexlify(self.key.encode())
def clean_token(self):
token = self.cleaned_data.get('token')
validated = False
t0s = [self.t0]
key = self.bin_key
if 'valid_t0' in self.metadata:
t0s.append(int(time()) - self.metadata['valid_t0'])
for t0 in t0s:
for offset in range(-self.tolerance, self.tolerance):
if totp(key, self.step, t0, self.digits, self.drift + offset) == token:
self.drift = offset
self.metadata['valid_t0'] = int(time()) - t0
validated = True
if not validated:
raise forms.ValidationError(self.error_messages['invalid_token'])
return token
def save(self):
return TOTPDevice.objects.create(user=self.user, key=self.key,
tolerance=self.tolerance, t0=self.t0,
step=self.step, drift=self.drift,
digits=self.digits,
name='default')
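# Illustrative sketch (not part of two_factor itself): how a token accepted by
# TOTPDeviceForm.clean_token() can be produced with django_otp's totp() using
# the same step/t0/digits parameters the form uses; the hex key below is a
# hypothetical test value, not anything generated by this module.
def _example_totp_token(key_hex='3132333435363738393031323334353637383930'):
    return totp(unhexlify(key_hex), step=30, t0=0, digits=totp_digits(), drift=0)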
class DisableForm(forms.Form):
understand = forms.BooleanField(label=_("Yes, I am sure"))
class AuthenticationTokenForm(OTPAuthenticationFormMixin, Form):
otp_token = forms.IntegerField(label=_("Token"), min_value=1,
max_value=int('9' * totp_digits()))
otp_token.widget.attrs.update({'autofocus': 'autofocus',
'inputmode': 'numeric',
'autocomplete': 'one-time-code'})
# Our authentication form has an additional submit button to go to the
# backup token form. When the `required` attribute is set on an input
# field, that button cannot be used on browsers that implement html5
# validation. For now we'll use this workaround, but an even nicer
# solution would be to move the button outside the `<form>` and into
# its own `<form>`.
use_required_attribute = False
def __init__(self, user, initial_device, **kwargs):
"""
`initial_device` is either the user's default device, or the backup
device when the user chooses to enter a backup token. The token will
be verified against all devices, it is not limited to the given
device.
"""
super().__init__(**kwargs)
self.user = user
# YubiKey generates a OTP of 44 characters (not digits). So if the
# user's primary device is a YubiKey, replace the otp_token
# IntegerField with a CharField.
if RemoteYubikeyDevice and YubikeyDevice and \
isinstance(initial_device, (RemoteYubikeyDevice, YubikeyDevice)):
self.fields['otp_token'] = forms.CharField(label=_('YubiKey'), widget=forms.PasswordInput())
        # Add a field to remember this browser.
if getattr(settings, 'TWO_FACTOR_REMEMBER_COOKIE_AGE', None):
if settings.TWO_FACTOR_REMEMBER_COOKIE_AGE < 3600:
minutes = int(settings.TWO_FACTOR_REMEMBER_COOKIE_AGE / 60)
label = _("Don't ask again on this device for %(minutes)i minutes") % {'minutes': minutes}
elif settings.TWO_FACTOR_REMEMBER_COOKIE_AGE < 3600 * 24:
hours = int(settings.TWO_FACTOR_REMEMBER_COOKIE_AGE / 3600)
label = _("Don't ask again on this device for %(hours)i hours") % {'hours': hours}
else:
days = int(settings.TWO_FACTOR_REMEMBER_COOKIE_AGE / 3600 / 24)
label = _("Don't ask again on this device for %(days)i days") % {'days': days}
self.fields['remember'] = forms.BooleanField(
required=False,
initial=True,
label=label
)
def clean(self):
self.clean_otp(self.user)
return self.cleaned_data
class BackupTokenForm(AuthenticationTokenForm):
otp_token = forms.CharField(label=_("Token"))
|
|
##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import inspect
import sys
import imath
import IECore
import Gaffer
import GafferTest
import GafferDispatch
import GafferDispatchTest
@unittest.skipIf( not IECore.SearchPath( sys.path ).find( "tractor" ), "Tractor not available" )
class TractorDispatcherTest( GafferTest.TestCase ) :
def __dispatcher( self ) :
import GafferTractor
dispatcher = GafferTractor.TractorDispatcher()
dispatcher["jobsDirectory"].setValue( self.temporaryDirectory() + "/testJobDirectory" )
return dispatcher
def __job( self, nodes, dispatcher = None ) :
import GafferTractor
jobs = []
def f( dispatcher, job ) :
jobs.append( job )
c = GafferTractor.TractorDispatcher.preSpoolSignal().connect( f )
if dispatcher is None :
dispatcher = self.__dispatcher()
dispatcher.dispatch( nodes )
return jobs[0]
def testPreSpoolSignal( self ) :
import GafferTractor
s = Gaffer.ScriptNode()
s["n"] = GafferDispatchTest.LoggingTaskNode()
spooled = []
def f( dispatcher, job ) :
spooled.append( ( dispatcher, job ) )
c = GafferTractor.TractorDispatcher.preSpoolSignal().connect( f )
dispatcher = self.__dispatcher()
dispatcher.dispatch( [ s["n"] ] )
self.assertEqual( len( spooled ), 1 )
self.assertTrue( spooled[0][0] is dispatcher )
def testJobScript( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferDispatchTest.LoggingTaskNode()
dispatcher = self.__dispatcher()
dispatcher.dispatch( [ s["n"] ] )
self.assertTrue( os.path.isfile( dispatcher.jobDirectory() + "/job.alf" ) )
def testJobAttributes( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferDispatchTest.LoggingTaskNode()
dispatcher = self.__dispatcher()
dispatcher["jobName"].setValue( "Test Job" )
dispatcher["service"].setValue( "myService" )
dispatcher["envKey"].setValue( "myEnvKey" )
job = self.__job( [ s["n" ] ], dispatcher )
self.assertEqual( job.title, "Test Job" )
self.assertEqual( job.service, "myService" )
self.assertEqual( job.envkey, [ "myEnvKey" ] )
def testTaskAttributes( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferDispatchTest.LoggingTaskNode()
s["n"]["frame"] = Gaffer.StringPlug( defaultValue = "${frame}", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n"]["dispatcher"]["batchSize"].setValue( 10 )
s["n"]["dispatcher"]["tractor"]["tags"].setValue( "myTag1 ${myTagContext2}" )
s["expression"] = Gaffer.Expression()
s["expression"].setExpression( """parent["n"]["dispatcher"]["tractor"]["service"] = context.get("service", "")""" )
# add context variables: myTagContext2 and service
s["context"] = Gaffer.ContextVariables()
s["context"].setup( GafferDispatch.TaskNode.TaskPlug() )
s["context"]["in"].setInput( s["n"]["task"] )
variable = s["context"]["variables"].addMember( "tag", Gaffer.StringPlug() )
variable["name"].setValue("myTagContext2")
variable["value"].setValue("myTag2")
variable = s["context"]["variables"].addMember( "service", Gaffer.StringPlug() )
variable["name"].setValue("service")
variable["value"].setValue("myService")
s["job"] = Gaffer.TaskList()
s["job"]["preTasks"][0].setInput( s["context"]["out"] )
dispatcher = self.__dispatcher()
dispatcher["framesMode"].setValue( dispatcher.FramesMode.CustomRange )
dispatcher["frameRange"].setValue( "1-10" )
job = self.__job( [ s["job"] ], dispatcher )
self.assertEqual( len( job.subtasks ), 10 )
task = job.subtasks[0].subtasks[0]
self.assertEqual( task.title, "n 1-10" )
self.assertEqual( len( task.cmds ), 1 )
command = task.cmds[0]
self.assertEqual( command.service, "myService", "context variables were not expanded correctly" )
self.assertEqual( command.tags, [ "myTag1", "myTag2" ] )
def testPreTasks( self ) :
# n1
# |
# n2 n3
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.LoggingTaskNode()
s["n2"] = GafferDispatchTest.LoggingTaskNode()
s["n3"] = GafferDispatchTest.LoggingTaskNode()
s["n2"]["preTasks"][0].setInput( s["n1"]["task" ] )
job = self.__job( [ s["n2"], s["n3"] ] )
self.assertEqual( len( job.subtasks ), 2 )
self.assertEqual( job.subtasks[0].title, "n2 1" )
self.assertEqual( job.subtasks[1].title, "n3 1" )
self.assertEqual( len( job.subtasks[0].subtasks ), 1 )
self.assertEqual( job.subtasks[0].subtasks[0].title, "n1 1" )
self.assertEqual( len( job.subtasks[1].subtasks ), 0 )
def testSharedPreTasks( self ) :
import tractor.api.author as author
# n1
# / \
# i1 i2
# \ /
# n2
s = Gaffer.ScriptNode()
log = []
s["n1"] = GafferDispatchTest.LoggingTaskNode()
s["i1"] = GafferDispatchTest.LoggingTaskNode()
s["i1"]["preTasks"][0].setInput( s["n1"]["task"] )
s["i2"] = GafferDispatchTest.LoggingTaskNode()
s["i2"]["preTasks"][0].setInput( s["n1"]["task"] )
s["n2"] = GafferDispatchTest.LoggingTaskNode()
s["n2"]["preTasks"][0].setInput( s["i1"]["task"] )
s["n2"]["preTasks"][1].setInput( s["i2"]["task"] )
job = self.__job( [ s["n2" ] ] )
self.assertEqual( len( job.subtasks ), 1 )
self.assertEqual( job.subtasks[0].title, "n2 1" )
self.assertEqual( len( job.subtasks[0].subtasks ), 2 )
self.assertEqual( job.subtasks[0].subtasks[0].title, "i1 1" )
self.assertEqual( job.subtasks[0].subtasks[1].title, "i2 1" )
self.assertEqual( len( job.subtasks[0].subtasks[0].subtasks ), 1 )
self.assertEqual( len( job.subtasks[0].subtasks[1].subtasks ), 1 )
self.assertEqual( job.subtasks[0].subtasks[0].subtasks[0].title, "n1 1" )
self.assertEqual( job.subtasks[0].subtasks[1].subtasks[0].title, "n1 1" )
self.assertTrue( isinstance( job.subtasks[0].subtasks[1].subtasks[0], author.Instance ) )
def testTaskPlugs( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferDispatchTest.LoggingTaskNode()
self.assertTrue( "tractor" in [ x.getName() for x in s["n"]["dispatcher"].children() ] )
self.assertTrue( "tractor1" not in [ x.getName() for x in s["n"]["dispatcher"].children() ] )
s["n"]["dispatcher"]["tractor"]["service"].setValue( "myService" )
s["n"]["dispatcher"]["tractor"]["tags"].setValue( "myTag1 myTag2" )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertTrue( "tractor" in [ x.getName() for x in s2["n"]["dispatcher"].children() ] )
self.assertTrue( "tractor1" not in [ x.getName() for x in s2["n"]["dispatcher"].children() ] )
self.assertEqual( s2["n"]["dispatcher"]["tractor"]["service"].getValue(), "myService" )
self.assertEqual( s2["n"]["dispatcher"]["tractor"]["tags"].getValue(), "myTag1 myTag2" )
def testTypeNamePrefixes( self ) :
import GafferTractor
self.assertTypeNamesArePrefixed( GafferTractor )
def testDefaultNames( self ) :
import GafferTractor
self.assertDefaultNamesAreCorrect( GafferTractor )
def testTasksWithoutCommands( self ) :
s = Gaffer.ScriptNode()
s["systemCommand"] = GafferDispatch.SystemCommand()
s["systemCommand"]["command"].setValue( "ls" )
s["taskList"] = GafferDispatch.TaskList()
s["taskList"]["preTasks"][0].setInput( s["systemCommand"]["task"] )
job = self.__job( [ s["taskList" ] ] )
taskListTask = job.subtasks[0]
self.assertEqual( taskListTask.title, "taskList" )
self.assertEqual( len( taskListTask.cmds ), 0 )
systemCommandTask = taskListTask.subtasks[0]
self.assertEqual( systemCommandTask.title, "systemCommand 1" )
self.assertEqual( len( systemCommandTask.cmds ), 1 )
def testImathContextVariable( self ) :
s = Gaffer.ScriptNode()
s["t"] = GafferDispatchTest.TextWriter()
s["t"]["fileName"].setValue( self.temporaryDirectory() + "/test.txt" )
s["e"] = Gaffer.Expression()
s["e"].setExpression( inspect.cleandoc(
"""
c = context["c"]
parent["t"]["text"] = "{0} {1} {2}".format( *c )
"""
) )
s["v"] = GafferDispatch.TaskContextVariables()
s["v"]["variables"].addChild( Gaffer.NameValuePlug( "c", imath.Color3f( 0, 1, 2 ) ) )
s["v"]["preTasks"][0].setInput( s["t"]["task"] )
job = self.__job( [ s["v" ] ] )
task = job.subtasks[0].subtasks[0]
self.assertIn(
"imath.Color3f( 0, 1, 2 )",
task.cmds[0].argv
)
if __name__ == "__main__":
unittest.main()
|
|
import numpy as np
import re
import os
from pathlib import Path
def get_spill(text):
"""
Extracts spillover matrix from FCS text entry.
:param text: Text value from the $SPILL or $SPILLOVER metadata keyword in an FCS file
    :return: A tuple containing: (spillover matrix as a NumPy array, list of marker labels)
"""
spill = text.split(',')
n = int(spill[0])
markers = spill[1:(n + 1)]
markers = [item.strip().replace('\n', '') for item in markers]
items = [item.strip().replace('\n', '') for item in spill[n + 1:]]
new_spill = np.reshape(list(map(float, items)), (n, n))
return new_spill, markers
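# Illustrative sketch (hypothetical $SPILL value, not taken from a real FCS
# file): parsing a two-detector spillover entry with get_spill(); the text is
# the detector count, the detector names, then the 2x2 values in row-major order.
def _example_get_spill():
    text = '2,FL1-A,FL2-A,1.0,0.02,0.05,1.0'
    return get_spill(text)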
def _parse_multiline_matrix(matrix_text, fluoro_labels):
# first, we must find a valid header line and we will require that the matrix
# follows on the next lines, ignoring any additional lines before or after
    # the header contains labels matching the PnN value (FCS text field)
# and may be tab or comma delimited
# (spaces can't be delimiters b/c they are allowed in the PnN value)
header = None
header_line_index = None
for i, line in enumerate(matrix_text):
# header may begin with a '#' and some white space
match = re.search('^#\\s*', line)
if match is not None:
line = line[match.end():]
line_values = re.split('[\t,]', line)
label_diff = set(fluoro_labels).symmetric_difference(line_values)
if len(label_diff) != 0:
# if any labels are missing or extra ones found, then not a valid header row
continue
else:
header = line_values
header_line_index = i
break
    if header is None:
        raise ValueError("Unable to find a valid header row in the compensation matrix text")
    matrix_start = header_line_index + 1
matrix_end = matrix_start + len(fluoro_labels)
if len(matrix_text) < matrix_end:
raise ValueError("Too few rows in compensation")
matrix_text = matrix_text[matrix_start:matrix_end]
matrix_array = []
# convert the matrix text to numpy array
for line in matrix_text:
line_values = re.split('[\t,]', line)
line_values = [float(value) for value in line_values]
if len(line_values) > len(fluoro_labels):
raise ValueError("Too many values in line: %s" % line)
elif len(line_values) < len(fluoro_labels):
raise ValueError("Too few values in line: %s" % line)
else:
matrix_array.append(line_values)
matrix_array = np.array(matrix_array)
return matrix_array, header
def _convert_matrix_text_to_array(matrix_text, fluoro_labels, fluoro_indices):
"""
Converts a compensation matrix from a CSV text string to a NumPy array
:param matrix_text: CSV text string where header contains the fluoro labels
:param fluoro_labels: text labels of the FCS fluorescent channels
:param fluoro_indices: channel indices of the fluorescent channels
:return: NumPy array of the compensation matrix values
"""
# parse the matrix text and validate the number of params match
# the number of fluoro params and that the matrix
# values are numbers (can be exp notation)
matrix_text = matrix_text.splitlines()
if len(matrix_text) == 0:
raise ValueError("matrix text appears to be empty")
elif len(matrix_text) == 1:
# probably a single-line CSV from FCS metadata
matrix, header = get_spill(matrix_text[0])
else:
matrix, header = _parse_multiline_matrix(matrix_text, fluoro_labels)
label_diff = set(fluoro_labels).symmetric_difference(header)
# re-order matrix according to provided fluoro label order
idx_order = [header.index(fluoro_label) for fluoro_label in fluoro_labels]
matrix = matrix[idx_order, :][:, idx_order]
if len(label_diff) > 0:
in_fcs_not_comp = []
in_comp_not_fcs = []
for label in label_diff:
if label in fluoro_labels:
in_fcs_not_comp.append(label)
else:
in_comp_not_fcs.append(label)
error_message = "Matrix labels do not match given fluorescent labels"
if len(in_fcs_not_comp) > 0:
error_message = "\n".join(
[
error_message,
"",
"Labels in FCS file not found in comp matrix (null channels?):",
", ".join(in_fcs_not_comp),
"",
"Null channels can be specified when creating a Sample instance"
]
)
if len(in_comp_not_fcs) > 0:
error_message = "\n".join(
[
error_message,
"",
"Labels in comp matrix not found in FCS file (wrong matrix chosen?):",
", ".join(in_comp_not_fcs)
]
)
raise ValueError(error_message)
header_channel_numbers = []
for h in header:
# check if label is in given fluoro labels, will raise a ValueError if missing
# Note: this will give us the index of the given fluoro labels, which is not the
# index of the fluoro channel in the original FCS file because of scatter channels,
# etc. We look up the 'true' index from given fluoro_indices. Also, we increment
# to match the original PnN numbers that index from 1 (not 0).
# We store the channel number in the first row of the numpy array, as it is more
# reliable to identify parameters than some concatenation of parameter attributes.
fluoro_index = fluoro_labels.index(h)
true_fluoro_index = fluoro_indices[fluoro_index]
header_channel_numbers.append(true_fluoro_index + 1)
matrix_array = np.vstack([header_channel_numbers, matrix])
return matrix_array
def parse_compensation_matrix(compensation, channel_labels, null_channels=None):
"""
Returns a NumPy array with the compensation matrix, where the first row contains
the indices of the fluorescent channels
:param compensation: Compensation matrix: may be a NumPy array, a CSV file
path, a pathlib Path object to a CSV or TSV file or a string of CSV
text. If a string, both multi-line, traditional CSV, and the single
line FCS spill formats are supported. If a NumPy array, we assume the
columns are in the same order as the channel labels
:param channel_labels: Channel labels from the FCS file's PnN fields, must be in
the same order as they appear in the FCS file
:param null_channels: Specify any empty channels that were collected and
present in the channel_labels argument. These will be ignored when
validating and creating the compensation matrix
:return: Compensation matrix as NumPy array where header contains the
channel numbers (not indices!)
"""
non_fluoro_channels = [
'FSC-A',
'FSC-H',
'FSC-W',
'SSC-A',
'SSC-H',
'SSC-W',
'Time'
]
fluoro_indices = []
fluoro_labels = []
if null_channels is not None:
non_fluoro_channels.extend(null_channels)
for i, label in enumerate(channel_labels):
if label not in non_fluoro_channels:
fluoro_indices.append(i)
fluoro_labels.append(label)
# Determine compensation object type
if isinstance(compensation, str):
# if a string, may be a file path or CSV text, we'll first test if file path exists
# and if so try and open as CSV. If this fails, we'll test splitting the string by
# new lines and commas. If this fails, raise a ValueError
if os.path.isfile(compensation):
fh = open(compensation)
matrix_text = fh.read()
fh.close()
else:
# may be a CSV string
matrix_text = compensation
matrix = _convert_matrix_text_to_array(matrix_text, fluoro_labels, fluoro_indices)
elif isinstance(compensation, Path):
fh = compensation.open('r')
matrix_text = fh.read()
fh.close()
matrix = _convert_matrix_text_to_array(matrix_text, fluoro_labels, fluoro_indices)
elif isinstance(compensation, np.ndarray):
matrix = compensation
else:
raise ValueError("Compensation given is not a string or a NumPy array")
# Make sure we have the correct number of rows (the header of matrix adds a row)
if matrix.shape[0] > len(fluoro_labels) + 1:
raise ValueError("Too many rows in compensation matrix")
elif matrix.shape[0] < len(fluoro_labels) + 1:
raise ValueError("Too few rows in compensation matrix")
return matrix
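# Illustrative sketch (hypothetical channel labels and CSV text, not from a
# real file): calling parse_compensation_matrix() with a multi-line CSV string;
# the first row of the returned array holds the 1-based channel numbers of the
# two fluorescent channels (FL1-A and FL2-A).
def _example_parse_compensation_matrix():
    channel_labels = ['FSC-A', 'SSC-A', 'FL1-A', 'FL2-A', 'Time']
    csv_text = "FL1-A,FL2-A\n1.0,0.02\n0.05,1.0"
    return parse_compensation_matrix(csv_text, channel_labels)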
def compensate(event_data, spill_matrix, fluoro_indices=None):
"""
    Compensate NumPy event data given a spillover matrix and the
    indices of the fluorescent channels to compensate.
:param event_data: NumPy array of the event data
:param spill_matrix: Compensation matrix as a NumPy array (without headers)
:param fluoro_indices: Optional list of indices of the fluorescent channels (only
these will be extracted & compensated). If None (default), all columns
will be compensated.
:return: NumPy array of compensated event data. If fluoro_indices were given,
the data is returned in the column order given, with the non-fluorescent
columns unmodified.
"""
data = event_data.copy()
    if fluoro_indices:
comp_data = data[:, fluoro_indices]
else:
comp_data = data
# this does the actual compensation
comp_data = np.linalg.solve(spill_matrix.T, comp_data.T).T
# Re-insert compensated data columns
    if fluoro_indices:
data[:, fluoro_indices] = comp_data
else:
data = comp_data
return data
def inverse_compensate(event_data, spill_matrix, fluoro_indices=None):
"""
    Inverse the compensation on NumPy event data given a spillover matrix
    and the indices of the fluorescent channels to "un-compensate".
:param event_data: NumPy array of the event data
:param spill_matrix: Compensation matrix as a NumPy array (without headers)
:param fluoro_indices: Optional list of indices of the fluorescent channels (only
these will be extracted & un-compensated). If None (default), all columns
will be un-compensated.
:return: NumPy array of un-compensated event data. If fluoro_indices were given,
the data is returned in the column order given, with the non-fluorescent
columns unmodified.
"""
data = event_data.copy()
    if fluoro_indices:
inv_comp_data = data[:, fluoro_indices]
else:
inv_comp_data = data
inv_comp_data = np.dot(inv_comp_data, spill_matrix)
# Re-insert compensated data columns
    if fluoro_indices:
data[:, fluoro_indices] = inv_comp_data
else:
data = inv_comp_data
return data
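# Illustrative sketch (made-up numbers): compensating two fluorescence columns
# with a 2x2 spill matrix and undoing it with inverse_compensate(); the round
# trip should recover the original events to within floating-point error.
def _example_compensate_roundtrip():
    events = np.array([[100.0, 50.0], [200.0, 80.0]])
    spill = np.array([[1.0, 0.02], [0.05, 1.0]])
    comp = compensate(events, spill, fluoro_indices=[0, 1])
    restored = inverse_compensate(comp, spill, fluoro_indices=[0, 1])
    return np.allclose(events, restored)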
def gen_spill_matrix(npy, stain_index):
"""
Generates spillover matrix for one FCS file (presumably from beads)
:param npy: the numpy array of the bead data
:param stain_index: index of the stained channel
:return: Compensation matrix as a NumPy array (without headers)
"""
# get the median for all unstained columns, zero for stained index
spill = list()
    for i, column in enumerate(npy.T):
if i == stain_index:
spill.append(0.0)
continue
else:
spill.append(np.median(column))
return spill
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Attributes(msrest.serialization.Model):
"""The object attributes managed by the KeyVault service.
Variables are only populated by the server, and will be ignored when sending a request.
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param not_before: Not before date in UTC.
:type not_before: ~datetime.datetime
:param expires: Expiry date in UTC.
:type expires: ~datetime.datetime
:ivar created: Creation time in UTC.
:vartype created: ~datetime.datetime
:ivar updated: Last updated time in UTC.
:vartype updated: ~datetime.datetime
"""
_validation = {
'created': {'readonly': True},
'updated': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'not_before': {'key': 'nbf', 'type': 'unix-time'},
'expires': {'key': 'exp', 'type': 'unix-time'},
'created': {'key': 'created', 'type': 'unix-time'},
'updated': {'key': 'updated', 'type': 'unix-time'},
}
def __init__(
self,
**kwargs
):
super(Attributes, self).__init__(**kwargs)
self.enabled = kwargs.get('enabled', None)
self.not_before = kwargs.get('not_before', None)
self.expires = kwargs.get('expires', None)
self.created = None
self.updated = None
class BackupSecretResult(msrest.serialization.Model):
"""The backup secret result, containing the backup blob.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The backup blob containing the backed up secret.
:vartype value: bytes
"""
_validation = {
'value': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'base64'},
}
def __init__(
self,
**kwargs
):
super(BackupSecretResult, self).__init__(**kwargs)
self.value = None
class SecretBundle(msrest.serialization.Model):
"""A secret consisting of a value, id and its attributes.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The secret value.
:type value: str
:param id: The secret id.
:type id: str
:param content_type: The content type of the secret.
:type content_type: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_1.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:ivar kid: If this is a secret backing a KV certificate, then this field specifies the
corresponding key backing the KV certificate.
:vartype kid: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a secret
backing a certificate, then managed will be true.
:vartype managed: bool
"""
_validation = {
'kid': {'readonly': True},
'managed': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'content_type': {'key': 'contentType', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'kid': {'key': 'kid', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(SecretBundle, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.id = kwargs.get('id', None)
self.content_type = kwargs.get('content_type', None)
self.attributes = kwargs.get('attributes', None)
self.tags = kwargs.get('tags', None)
self.kid = None
self.managed = None
class DeletedSecretBundle(SecretBundle):
"""A Deleted Secret consisting of its previous id, attributes and its tags, as well as information on when it will be purged.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The secret value.
:type value: str
:param id: The secret id.
:type id: str
:param content_type: The content type of the secret.
:type content_type: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_1.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:ivar kid: If this is a secret backing a KV certificate, then this field specifies the
corresponding key backing the KV certificate.
:vartype kid: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a secret
backing a certificate, then managed will be true.
:vartype managed: bool
:param recovery_id: The url of the recovery object, used to identify and recover the deleted
secret.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the secret is scheduled to be purged, in UTC.
:vartype scheduled_purge_date: ~datetime.datetime
:ivar deleted_date: The time when the secret was deleted, in UTC.
:vartype deleted_date: ~datetime.datetime
"""
_validation = {
'kid': {'readonly': True},
'managed': {'readonly': True},
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'content_type': {'key': 'contentType', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'kid': {'key': 'kid', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(
self,
**kwargs
):
super(DeletedSecretBundle, self).__init__(**kwargs)
self.recovery_id = kwargs.get('recovery_id', None)
self.scheduled_purge_date = None
self.deleted_date = None
class SecretItem(msrest.serialization.Model):
"""The secret item containing secret metadata.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Secret identifier.
:type id: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_1.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param content_type: Type of the secret value such as a password.
:type content_type: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a key backing
a certificate, then managed will be true.
:vartype managed: bool
"""
_validation = {
'managed': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'content_type': {'key': 'contentType', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(SecretItem, self).__init__(**kwargs)
self.id = kwargs.get('id', None)
self.attributes = kwargs.get('attributes', None)
self.tags = kwargs.get('tags', None)
self.content_type = kwargs.get('content_type', None)
self.managed = None
class DeletedSecretItem(SecretItem):
"""The deleted secret item containing metadata about the deleted secret.
Variables are only populated by the server, and will be ignored when sending a request.
:param id: Secret identifier.
:type id: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v7_1.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param content_type: Type of the secret value such as a password.
:type content_type: str
:ivar managed: True if the secret's lifetime is managed by key vault. If this is a key backing
a certificate, then managed will be true.
:vartype managed: bool
:param recovery_id: The url of the recovery object, used to identify and recover the deleted
secret.
:type recovery_id: str
:ivar scheduled_purge_date: The time when the secret is scheduled to be purged, in UTC.
:vartype scheduled_purge_date: ~datetime.datetime
:ivar deleted_date: The time when the secret was deleted, in UTC.
:vartype deleted_date: ~datetime.datetime
"""
_validation = {
'managed': {'readonly': True},
'scheduled_purge_date': {'readonly': True},
'deleted_date': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'content_type': {'key': 'contentType', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
'recovery_id': {'key': 'recoveryId', 'type': 'str'},
'scheduled_purge_date': {'key': 'scheduledPurgeDate', 'type': 'unix-time'},
'deleted_date': {'key': 'deletedDate', 'type': 'unix-time'},
}
def __init__(
self,
**kwargs
):
super(DeletedSecretItem, self).__init__(**kwargs)
self.recovery_id = kwargs.get('recovery_id', None)
self.scheduled_purge_date = None
self.deleted_date = None
class DeletedSecretListResult(msrest.serialization.Model):
"""The deleted secret list result.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: A response message containing a list of the deleted secrets in the vault along
with a link to the next page of deleted secrets.
:vartype value: list[~azure.keyvault.v7_1.models.DeletedSecretItem]
:ivar next_link: The URL to get the next set of deleted secrets.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[DeletedSecretItem]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(DeletedSecretListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class Error(msrest.serialization.Model):
"""The key vault server error.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar inner_error: The key vault server error.
:vartype inner_error: ~azure.keyvault.v7_1.models.Error
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'inner_error': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'inner_error': {'key': 'innererror', 'type': 'Error'},
}
def __init__(
self,
**kwargs
):
super(Error, self).__init__(**kwargs)
self.code = None
self.message = None
self.inner_error = None
class KeyVaultError(msrest.serialization.Model):
"""The key vault error exception.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar error: The key vault server error.
:vartype error: ~azure.keyvault.v7_1.models.Error
"""
_validation = {
'error': {'readonly': True},
}
_attribute_map = {
'error': {'key': 'error', 'type': 'Error'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultError, self).__init__(**kwargs)
self.error = None
class SecretAttributes(Attributes):
"""The secret management attributes.
Variables are only populated by the server, and will be ignored when sending a request.
:param enabled: Determines whether the object is enabled.
:type enabled: bool
:param not_before: Not before date in UTC.
:type not_before: ~datetime.datetime
:param expires: Expiry date in UTC.
:type expires: ~datetime.datetime
:ivar created: Creation time in UTC.
:vartype created: ~datetime.datetime
:ivar updated: Last updated time in UTC.
:vartype updated: ~datetime.datetime
:ivar recoverable_days: softDelete data retention days. Value should be >=7 and <=90 when
softDelete enabled, otherwise 0.
:vartype recoverable_days: int
:ivar recovery_level: Reflects the deletion recovery level currently in effect for secrets in
the current vault. If it contains 'Purgeable', the secret can be permanently deleted by a
privileged user; otherwise, only the system can purge the secret, at the end of the retention
interval. Possible values include: "Purgeable", "Recoverable+Purgeable", "Recoverable",
"Recoverable+ProtectedSubscription", "CustomizedRecoverable+Purgeable",
"CustomizedRecoverable", "CustomizedRecoverable+ProtectedSubscription".
:vartype recovery_level: str or ~azure.keyvault.v7_1.models.DeletionRecoveryLevel
"""
_validation = {
'created': {'readonly': True},
'updated': {'readonly': True},
'recoverable_days': {'readonly': True},
'recovery_level': {'readonly': True},
}
_attribute_map = {
'enabled': {'key': 'enabled', 'type': 'bool'},
'not_before': {'key': 'nbf', 'type': 'unix-time'},
'expires': {'key': 'exp', 'type': 'unix-time'},
'created': {'key': 'created', 'type': 'unix-time'},
'updated': {'key': 'updated', 'type': 'unix-time'},
'recoverable_days': {'key': 'recoverableDays', 'type': 'int'},
'recovery_level': {'key': 'recoveryLevel', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SecretAttributes, self).__init__(**kwargs)
self.recoverable_days = None
self.recovery_level = None
class SecretListResult(msrest.serialization.Model):
"""The secret list result.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: A response message containing a list of secrets in the key vault along with a link
to the next page of secrets.
:vartype value: list[~azure.keyvault.v7_1.models.SecretItem]
:ivar next_link: The URL to get the next set of secrets.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[SecretItem]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SecretListResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class SecretProperties(msrest.serialization.Model):
"""Properties of the key backing a certificate.
:param content_type: The media type (MIME type).
:type content_type: str
"""
_attribute_map = {
'content_type': {'key': 'contentType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SecretProperties, self).__init__(**kwargs)
self.content_type = kwargs.get('content_type', None)
class SecretRestoreParameters(msrest.serialization.Model):
"""The secret restore parameters.
All required parameters must be populated in order to send to Azure.
:param secret_bundle_backup: Required. The backup blob associated with a secret bundle.
:type secret_bundle_backup: bytes
"""
_validation = {
'secret_bundle_backup': {'required': True},
}
_attribute_map = {
'secret_bundle_backup': {'key': 'value', 'type': 'base64'},
}
def __init__(
self,
**kwargs
):
super(SecretRestoreParameters, self).__init__(**kwargs)
self.secret_bundle_backup = kwargs['secret_bundle_backup']
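# Note (editorial, not part of the generated SDK): the base64 blob returned in
# BackupSecretResult.value is exactly what a later restore expects as
# SecretRestoreParameters.secret_bundle_backup, e.g. (hypothetical round trip):
#   backup = BackupSecretResult.deserialize(backup_response)
#   restore = SecretRestoreParameters(secret_bundle_backup=backup.value)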
class SecretSetParameters(msrest.serialization.Model):
"""The secret set parameters.
All required parameters must be populated in order to send to Azure.
:param value: Required. The value of the secret.
:type value: str
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:param content_type: Type of the secret value such as a password.
:type content_type: str
:param secret_attributes: The secret management attributes.
:type secret_attributes: ~azure.keyvault.v7_1.models.SecretAttributes
"""
_validation = {
'value': {'required': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'content_type': {'key': 'contentType', 'type': 'str'},
'secret_attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
}
def __init__(
self,
**kwargs
):
super(SecretSetParameters, self).__init__(**kwargs)
self.value = kwargs['value']
self.tags = kwargs.get('tags', None)
self.content_type = kwargs.get('content_type', None)
self.secret_attributes = kwargs.get('secret_attributes', None)
class SecretUpdateParameters(msrest.serialization.Model):
"""The secret update parameters.
:param content_type: Type of the secret value such as a password.
:type content_type: str
:param secret_attributes: The secret management attributes.
:type secret_attributes: ~azure.keyvault.v7_1.models.SecretAttributes
:param tags: A set of tags. Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
"""
_attribute_map = {
'content_type': {'key': 'contentType', 'type': 'str'},
'secret_attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
**kwargs
):
super(SecretUpdateParameters, self).__init__(**kwargs)
self.content_type = kwargs.get('content_type', None)
self.secret_attributes = kwargs.get('secret_attributes', None)
self.tags = kwargs.get('tags', None)
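# Minimal usage sketch (editorial addition, not part of the generated SDK):
# construct a set-secret payload from the models defined above. serialize()
# comes from msrest.serialization.Model and is assumed to be available here.
if __name__ == '__main__':
    attributes = SecretAttributes(enabled=True)
    params = SecretSetParameters(value='s3cr3t', content_type='password',
                                 tags={'env': 'dev'}, secret_attributes=attributes)
    # serialize() maps attribute names through _attribute_map, so the payload
    # uses the wire names ('value', 'contentType', 'tags', 'attributes').
    print(params.serialize())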
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
from math import floor, sqrt
import numpy as np
import tensorflow as tf
from tensorboard.plugins.beholder import im_util
from tensorboard.plugins.beholder.shared_config import SECTION_HEIGHT,\
IMAGE_WIDTH, DEFAULT_CONFIG, SECTION_INFO_FILENAME
from tensorboard.plugins.beholder.file_system_tools import write_pickle
MIN_SQUARE_SIZE = 3
class Visualizer(object):
def __init__(self, logdir):
self.logdir = logdir
self.sections_over_time = deque([], DEFAULT_CONFIG['window_size'])
self.config = dict(DEFAULT_CONFIG)
self.old_config = dict(DEFAULT_CONFIG)
def _reshape_conv_array(self, array, section_height, image_width):
'''Reshape a rank 4 array to be rank 2, where each column of block_width is
a filter, and each row of block height is an input channel. For example:
[[[[ 11, 21, 31, 41],
[ 51, 61, 71, 81],
[ 91, 101, 111, 121]],
[[ 12, 22, 32, 42],
[ 52, 62, 72, 82],
[ 92, 102, 112, 122]],
[[ 13, 23, 33, 43],
[ 53, 63, 73, 83],
[ 93, 103, 113, 123]]],
[[[ 14, 24, 34, 44],
[ 54, 64, 74, 84],
[ 94, 104, 114, 124]],
[[ 15, 25, 35, 45],
[ 55, 65, 75, 85],
[ 95, 105, 115, 125]],
[[ 16, 26, 36, 46],
[ 56, 66, 76, 86],
[ 96, 106, 116, 126]]],
[[[ 17, 27, 37, 47],
[ 57, 67, 77, 87],
[ 97, 107, 117, 127]],
[[ 18, 28, 38, 48],
[ 58, 68, 78, 88],
[ 98, 108, 118, 128]],
[[ 19, 29, 39, 49],
[ 59, 69, 79, 89],
[ 99, 109, 119, 129]]]]
should be reshaped to:
[[ 11, 12, 13, 21, 22, 23, 31, 32, 33, 41, 42, 43],
[ 14, 15, 16, 24, 25, 26, 34, 35, 36, 44, 45, 46],
[ 17, 18, 19, 27, 28, 29, 37, 38, 39, 47, 48, 49],
[ 51, 52, 53, 61, 62, 63, 71, 72, 73, 81, 82, 83],
[ 54, 55, 56, 64, 65, 66, 74, 75, 76, 84, 85, 86],
[ 57, 58, 59, 67, 68, 69, 77, 78, 79, 87, 88, 89],
[ 91, 92, 93, 101, 102, 103, 111, 112, 113, 121, 122, 123],
[ 94, 95, 96, 104, 105, 106, 114, 115, 116, 124, 125, 126],
[ 97, 98, 99, 107, 108, 109, 117, 118, 119, 127, 128, 129]]
'''
# E.g. [100, 24, 24, 10]: this shouldn't be reshaped like normal.
if array.shape[1] == array.shape[2] and array.shape[0] != array.shape[1]:
array = np.rollaxis(np.rollaxis(array, 2), 2)
block_height, block_width, in_channels = array.shape[:3]
rows = []
max_element_count = section_height * int(image_width / MIN_SQUARE_SIZE)
element_count = 0
for i in range(in_channels):
rows.append(array[:, :, i, :].reshape(block_height, -1, order='F'))
      # Keep this check after the append so the section gets one extra row before breaking.
if element_count >= max_element_count and not self.config['show_all']:
break
element_count += block_height * in_channels * block_width
return np.vstack(rows)
def _reshape_irregular_array(self, array, section_height, image_width):
'''Reshapes arrays of ranks not in {1, 2, 4}
'''
section_area = section_height * image_width
flattened_array = np.ravel(array)
if not self.config['show_all']:
flattened_array = flattened_array[:int(section_area/MIN_SQUARE_SIZE)]
cell_count = np.prod(flattened_array.shape)
cell_area = section_area / cell_count
cell_side_length = max(1, floor(sqrt(cell_area)))
row_count = max(1, int(section_height / cell_side_length))
col_count = int(cell_count / row_count)
# Reshape the truncated array so that it has the same aspect ratio as
# the section.
# Truncate whatever remaining values there are that don't fit. Hopefully
# it doesn't matter that the last few (< section count) aren't there.
section = np.reshape(flattened_array[:row_count * col_count],
(row_count, col_count))
return section
def _determine_image_width(self, arrays, show_all):
final_width = IMAGE_WIDTH
if show_all:
for array in arrays:
rank = len(array.shape)
if rank == 1:
width = len(array)
elif rank == 2:
width = array.shape[1]
elif rank == 4:
width = array.shape[1] * array.shape[3]
else:
width = IMAGE_WIDTH
if width > final_width:
final_width = width
return final_width
def _determine_section_height(self, array, show_all):
rank = len(array.shape)
height = SECTION_HEIGHT
if show_all:
if rank == 1:
height = SECTION_HEIGHT
if rank == 2:
height = max(SECTION_HEIGHT, array.shape[0])
elif rank == 4:
height = max(SECTION_HEIGHT, array.shape[0] * array.shape[2])
else:
height = max(SECTION_HEIGHT, np.prod(array.shape) // IMAGE_WIDTH)
return height
def _arrays_to_sections(self, arrays):
'''
input: unprocessed numpy arrays.
returns: columns of the size that they will appear in the image, not scaled
for display. That needs to wait until after variance is computed.
'''
sections = []
sections_to_resize_later = {}
show_all = self.config['show_all']
image_width = self._determine_image_width(arrays, show_all)
for array_number, array in enumerate(arrays):
rank = len(array.shape)
section_height = self._determine_section_height(array, show_all)
if rank == 1:
section = np.atleast_2d(array)
elif rank == 2:
section = array
elif rank == 4:
section = self._reshape_conv_array(array, section_height, image_width)
else:
section = self._reshape_irregular_array(array,
section_height,
image_width)
# Only calculate variance for what we have to. In some cases (biases),
# the section is larger than the array, so we don't want to calculate
# variance for the same value over and over - better to resize later.
# About a 6-7x speedup for a big network with a big variance window.
section_size = section_height * image_width
array_size = np.prod(array.shape)
if section_size > array_size:
sections.append(section)
sections_to_resize_later[array_number] = section_height
else:
sections.append(im_util.resize(section, section_height, image_width))
self.sections_over_time.append(sections)
if self.config['mode'] == 'variance':
sections = self._sections_to_variance_sections(self.sections_over_time)
for array_number, height in sections_to_resize_later.items():
sections[array_number] = im_util.resize(sections[array_number],
height,
image_width)
return sections
def _sections_to_variance_sections(self, sections_over_time):
'''Computes the variance of corresponding sections over time.
Returns:
a list of np arrays.
'''
variance_sections = []
for i in range(len(sections_over_time[0])):
time_sections = [sections[i] for sections in sections_over_time]
variance = np.var(time_sections, axis=0)
variance_sections.append(variance)
return variance_sections
def _sections_to_image(self, sections):
padding_size = 5
sections = im_util.scale_sections(sections, self.config['scaling'])
final_stack = [sections[0]]
padding = np.zeros((padding_size, sections[0].shape[1]))
for section in sections[1:]:
final_stack.append(padding)
final_stack.append(section)
return np.vstack(final_stack).astype(np.uint8)
def _maybe_clear_deque(self):
'''Clears the deque if certain parts of the config have changed.'''
for config_item in ['values', 'mode', 'show_all']:
if self.config[config_item] != self.old_config[config_item]:
self.sections_over_time.clear()
break
self.old_config = self.config
window_size = self.config['window_size']
if window_size != self.sections_over_time.maxlen:
self.sections_over_time = deque(self.sections_over_time, window_size)
def _save_section_info(self, arrays, sections):
infos = []
if self.config['values'] == 'trainable_variables':
names = [x.name for x in tf.trainable_variables()]
else:
names = range(len(arrays))
for array, section, name in zip(arrays, sections, names):
info = {}
info['name'] = name
info['shape'] = str(array.shape)
info['min'] = '{:.3e}'.format(section.min())
info['mean'] = '{:.3e}'.format(section.mean())
info['max'] = '{:.3e}'.format(section.max())
info['range'] = '{:.3e}'.format(section.max() - section.min())
info['height'] = section.shape[0]
infos.append(info)
write_pickle(infos, '{}/{}'.format(self.logdir, SECTION_INFO_FILENAME))
def build_frame(self, arrays):
self._maybe_clear_deque()
arrays = arrays if isinstance(arrays, list) else [arrays]
sections = self._arrays_to_sections(arrays)
self._save_section_info(arrays, sections)
final_image = self._sections_to_image(sections)
final_image = im_util.apply_colormap(final_image, self.config['colormap'])
return final_image
def update(self, config):
self.config = config
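# Minimal usage sketch (editorial addition, not part of the original module).
# It only exercises the rank-4 reshape helper, so nothing is written to the
# logdir; the path below is just a placeholder.
if __name__ == '__main__':
  viz = Visualizer('/tmp/beholder-demo')
  conv = np.arange(3 * 3 * 2 * 4).reshape(3, 3, 2, 4)  # a small rank-4 "kernel"
  section = viz._reshape_conv_array(conv, SECTION_HEIGHT, IMAGE_WIDTH)
  # One row of blocks per input channel, one column of blocks per filter:
  # (block_height * in_channels, block_width * filters) == (6, 12).
  print(section.shape)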
|
|
from django.shortcuts import render, get_object_or_404
from django.http import Http404
from django.utils import timezone
from django.db.models import Avg, Max, Min
from myfavteam.models import Team, News, Stadium, Tournament, Schedule, Position
from myfavteam.models import Player, PlayerNews, TournamentTeam, TeamPicture, Website
from myfavteam.models import Standings, BasketballPlayerStats
import datetime
import copy
# Create your views here.
def index(request, team_id = 0):
resp_dict = dict()
cdata = CommonData(resp_dict, team_id)
#news
n = NewsList()
news = n.get_news(cdata.team_id, 5)
resp_dict['latest_news'] = news[0]
resp_dict['news'] = news
#next games
g = Games()
next_games = g.get_next_games(cdata.team_id, 2)
resp_dict['next_game'] = next_games[0]
resp_dict['next_games']= next_games
#last games
last_games = g.get_last_games(cdata.team_id, 2)
resp_dict['last_game'] = last_games[0]
resp_dict['last_games']= last_games
#Roster
r = PlayerList()
resp_dict['roster'] = r.get_roster(cdata.team_id, 5)
#Standings
s = StandingList()
resp_dict['standings'] = s.get_standings_conf(cdata.tournament_id, cdata.conference, 5)
#Stats
st = StatsList()
resp_dict['stats'] = st.get_stats(cdata.team_id, 5)
return render(request, 'myfavteam/index.html', resp_dict)
def news(request, team_id = 0):
resp_dict = dict()
cdata = CommonData(resp_dict, team_id)
n = NewsList()
resp_dict['news'] = n.get_news(cdata.team_id, 50)
return render(request, 'myfavteam/news.html', resp_dict)
def social(request, team_id = 0):
resp_dict = dict()
cdata = CommonData(resp_dict, team_id)
return render(request, 'myfavteam/social.html', resp_dict)
def schedule(request, team_id = 0):
resp_dict = dict()
cdata = CommonData(resp_dict, team_id)
g = Games()
resp_dict['games'] = g.get_all_tournament_games(cdata.team_id, cdata.tournament_id)
return render(request, 'myfavteam/schedule.html', resp_dict)
def standings(request, team_id = 0):
resp_dict = dict()
cdata = CommonData(resp_dict, team_id)
s = StandingList()
resp_dict['standings'] = s.get_standings_conf(cdata.tournament_id, cdata.conference, 32)
return render(request, 'myfavteam/standings.html', resp_dict)
def stats(request, team_id = 0):
resp_dict = dict()
cdata = CommonData(resp_dict, team_id)
st = StatsList()
resp_dict['stats'] = st.get_stats(cdata.team_id, 15)
return render(request, 'myfavteam/stats.html', resp_dict)
def roster(request, team_id = 0):
resp_dict = dict()
cdata = CommonData(resp_dict, team_id)
r = PlayerList()
resp_dict['roster'] = r.get_roster(cdata.team_id, 15)
return render(request, 'myfavteam/roster.html', resp_dict)
def player(request, player_id=1):
resp_dict = dict()
add_navbar_data(resp_dict)
player_id = int(player_id)
p = PlayerList()
resp_dict['player'] = player = p.get_player(player_id)
    if player is None:
raise Http404
try:
resp_dict['team_name'] = player.team.short_name
resp_dict['team'] = player.team
except:
resp_dict['team_name'] = player['team']['short_name']
st = StatsList()
resp_dict['stats'] = st.get_player_stats(player_id)
n = NewsList()
resp_dict['news'] = n.get_player_news(player_id, 5)
return render(request, 'myfavteam/player.html', resp_dict)
#--- Classes to collect data
class CommonData():
def __init__(self, resp_dict, team_id):
add_navbar_data(resp_dict)
self.team = resp_dict['team'] = get_myteam_or_default_or_404(int(team_id))
self.team_id = get_id_or_0(self.team)
        if self.team is None:
self.league = None
self.conference = None
else:
self.league = self.team.league
self.conference = self.team.conference
self.tournament = resp_dict['tournament'] = get_current_tournament(self.league)
self.tournament_id = get_id_or_0(self.tournament)
def add_navbar_data(resp_dict):
#get fav teams for top drop down menu
teams = Team.objects.filter(my_team=True).order_by('created')
resp_dict['teams'] = teams
# The following are "safe" query helpers.
# query_set: a single row of a queryset result; it can also be a dict of default values.
def get_id_or_0(query_set):
id = 0
try:
id = query_set.id
except:
try:
id = query_set['id']
except:
id = 0
return id
def get_current_tournament(league):
try:
tournament = Tournament.objects.filter(league=league).order_by('-start_date')[0]
except:
tname = "Regular Season {}".format(datetime.datetime.now().year)
tournament = {'id': 0, 'name': tname}
return tournament
def get_myteam_or_default_or_404(team_id):
if team_id > 0:
try:
team = Team.objects.get(id = team_id, my_team=True)
except:
raise Http404
elif team_id == 0:
try:
team = Team.objects.filter(my_team=True).order_by('created')[0]
except:
team = None
return team
def get_query_or_def_values(amount, query, ret_list):
    assert amount > 0
    length = len(ret_list)
    assert length > 0
try:
count = query.count()
except:
try:
count = len(list(query))
except:
raise Http404
    if count >= length:
return query[0:amount]
else:
ret_list[0:count] = query[0:count]
if amount > 0:
del ret_list[amount:]
return ret_list
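# Illustration of get_query_or_def_values (hypothetical data): with an empty
# database, get_query_or_def_values(2, News.objects.none(), list(NewsList().news))
# falls back to the two placeholder news dicts defined in NewsList.__init__;
# once the query has at least len(ret_list) rows it simply returns query[0:amount].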
def get_id_val_or_none(query, ret_list, val_id):
count = query.count()
try:
ret = query.filter(id=val_id)[0]
except:
        if ret_list is None:
return None
if count >= len(ret_list):
ret = None
else:
try:
ret = ret_list[val_id-1]
except:
ret = None
return ret
"""different in that the query is alreay formed """
def get_one_val_or_none(query, ret_list, val_id):
count = query.count()
try:
ret = query[0]
except:
        if ret_list is None:
return None
if count >= len(ret_list):
ret = None
else:
try:
ret = ret_list[val_id-1]
except:
ret = None
return ret
class Games:
    '''Builds next/last game data, with default placeholder games as fallback.'''
def __init__(self):
now = timezone.now()
self.next_games = [{'team': 'MyFavTeam', 'team_against': 'Opponent1',
'stadium' : {'name': 'Our Stadium', 'city': 'Our City'},
'tournament': 'Tournament A',
'home': True, 'date' : now.replace(year=now.year+1),
'team_score': 0, 'team_against_score': 0,
'recap_link': '#',
},
{'team': 'MyFavTeam', 'team_against': 'Opponent2',
'stadium' : {'name': 'Their Stadium', 'city': 'City'},
'tournament': 'Tournament A',
'home': False, 'date' : now.replace(year=now.year+2),
'team_score': 0, 'team_against_score': 0,
'recap_link': '#',
}
]
self.last_games = copy.deepcopy(self.next_games)
self.last_games.reverse()
self.last_games[0]['date'] = now.replace(year=now.year-1)
self.last_games[1]['date'] = now.replace(year=now.year-2)
self.last_games[0]['team_score'] = 50
self.last_games[0]['team_against_score'] = 49
self.last_games[1]['team_score'] = 99
self.last_games[1]['team_against_score'] = 100
def get_games(self, team_id, amount, next=True):
now = timezone.now()
        if next:
query = Schedule.objects.filter(team=team_id, date__gt=now).order_by('date')
ret_list = list(self.next_games)
else:
query = Schedule.objects.filter(team=team_id, date__lt=now)
ret_list = list(self.last_games)
return get_query_or_def_values(amount, query, ret_list)
def get_next_games(self, team_id, amount):
return self.get_games(team_id, amount, next=True)
def get_last_games(self, team_id, amount):
return self.get_games(team_id, amount, next=False)
def get_all_tournament_games(self, team_id, tournament_id):
        all_games = list()
        all_games.extend(self.last_games)
        all_games.extend(self.next_games)
        # Get the games of the current (or most recent) tournament first.
        query = Schedule.objects.filter(team=team_id, tournament=tournament_id).order_by('-date')
        return get_query_or_def_values(162, query, all_games)
class NewsList:
    '''Builds team news data, with default placeholder entries as fallback.'''
def __init__(self):
now = timezone.now()
if now.minute > 10:
now2 = now.replace(minute=now.minute-10)
else:
now2 = now.replace(minute=0)
self.news = [{'team': 'MyFavTeam',
                      'title': 'News1: MyFavTeam App collects news from different web sources',
'date' : now,
'link' : '#',
'author': 'myself',
'website': {'name': 'espn', 'link': "http://espn.go.com"},
'image': 'holder.js/90x62/auto/#666:#999/text: image',
# 'image': {'url': 'http://a.espncdn.com/media/motion/2015/0103/dm_150103_NBA_Top_Plays/dm_150103_NBA_Top_Plays.jpg'},
},
{'team': 'MyFavTeam',
'title': 'News2: MyFavTeam App also collects stats from a trusted web source',
'date' : now2,
'link' : '#',
'author': 'myself2',
'website': {'name': 'yahoo sports', 'link': "http://sports.yahoo.com"},
'image': 'holder.js/90x62/auto/#555:#888/text: image',
},
]
def get_news(self, team_id, amount):
query = News.objects.filter(team=team_id).order_by('-date')
ret_list = list(self.news)
return get_query_or_def_values(amount, query, ret_list)
def get_player_news(self, player_id, amount):
query = News.objects.filter(playernews__player=player_id).order_by('-date')
try:
Player.objects.get(id=player_id)
return query[0:amount]
except:
ret_list = list(self.news)
return get_query_or_def_values(amount, query, ret_list)
class PlayerList:
def __init__(self):
self.players = [{'team': {'short_name':'MyFavTeam'},
'first_name': 'John',
'last_name': 'Doe',
'position': {'name': 'Guard', 'acronym' : 'G'},
'birthdate' : datetime.date(1984, 11, 18),
'twitter' : 'johndoe',
'facebook' : "https://www.facebook.com/JhonDoe",
'height' : 6.1,
'weight' : 180.0,
'image': {'url': 'holder.js/290x300/auto/#5983ab:#fff/text: image'},
'salary' : 1200000,
'age': 30,
'jersey_number': 23,
'get_absolute_url': "/player/1/",
},
{'team': {'short_name':'MyFavTeam'},
'first_name': 'David',
'last_name': 'Smith',
'position': {'name': 'Forward', 'acronym' : 'F'},
'birthdate' : datetime.date(1986, 11, 18),
'twitter' : 'davidsmith',
'facebook' : "https://www.facebook.com/DavidSmith",
'height' : 6.7,
'weight' : 210.0,
'image': {'url': 'holder.js/290x300/auto/#5983ab:#fff/text: image'},
'salary' : 1100000,
'age': 28,
'jersey_number': 32,
'get_absolute_url': '/player/2/',
},
{'team': {'short_name':'MyFavTeam'},
'first_name': 'Tim',
'last_name': 'Brown',
'position': {'name': 'Center', 'acronym' : 'C'},
'birthdate' : datetime.date(1988, 11, 18),
'twitter' : 'timbrown',
'facebook' : "https://www.facebook.com/TimBrown",
'height' : 6.11,
'weight' : 230.0,
'image': {'url': 'holder.js/290x300/auto/#5983ab:#fff/text: image'},
'salary' : 600000,
'age': 26,
'jersey_number': 10,
'get_absolute_url': '/player/3/',
},
]
def get_roster(self, team_id, amount):
query = Player.objects.filter(team=team_id).order_by('-salary')
ret_list = list(self.players)
return get_query_or_def_values(amount, query, ret_list)
def get_player(self, player_id):
query = Player.objects
ret_list = list(self.players)
return get_id_val_or_none(query, ret_list, player_id)
class StandingList:
    '''Builds standings data, with default placeholder entries as fallback.'''
def __init__(self):
self.teams = [{'tournament': 'Regular Season 2014',
'team': 'MyFavTeam',
'wins' : 14,
'losses': 6,
'ties': 0,
'win_pct': 0.7,
'games_behind': 0,
},
{'tournament': 'Regular Season 2014',
'team': 'Rival',
'wins' : 12,
'losses': 8,
'ties': 0,
'win_pct': 0.6,
'games_behind': 2,
},
{'tournament': 'Regular Season 2014',
'team': 'DecentContender',
'wins' : 10,
'losses': 10,
'ties': 0,
'win_pct': 0.5,
'games_behind': 4,
},
{'tournament': 'Regular Season 2014',
'team': 'aBadTeam',
'wins' : 6,
'losses': 14,
'ties': 0,
'win_pct': 0.3,
'games_behind': 8,
},
]
def get_standings(self, tournament_id, amount):
        # Django's annotate doesn't support subtraction, hence the raw SQL.
diff = Standings.objects.raw(
'SELECT *, MAX(wins-losses) AS max FROM myfavteam_standings ' \
'WHERE tournament_id = %s ', [tournament_id])[0].max
        if diff is None:
diff = 0
query = Standings.objects.raw(
'SELECT *, 1.0*wins/(wins + losses + 0.5*ties) AS win_pct, '\
'(%s - (wins-losses)) / 2.0 AS games_behind ' \
'FROM myfavteam_standings WHERE tournament_id = %s ' \
'ORDER BY -win_pct', [diff, tournament_id])
ret_list = list(self.teams)
return get_query_or_def_values(amount, query, ret_list)
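    # In both standings queries, games_behind follows the usual definition:
    # with diff = leader's (wins - losses), a team is (diff - (wins - losses)) / 2
    # games behind the leader.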
def get_standings_conf(self, tournament_id, conference, amount):
try :
best_pct = Standings.objects.extra(
select = {'win_pct' : '1.0*wins/(wins + losses + 0.5*ties)'},
where = ['tournament_id = %s',
'myfavteam_team.conference=%s',
'team_id=myfavteam_team.id'],
params = [tournament_id, conference],
tables = ['myfavteam_team'],
order_by=['-win_pct'])[0]
diff = best_pct.wins - best_pct.losses
except:
diff = 0
query = Standings.objects.extra(
select = {'win_pct' : '1.0*wins/(wins + losses + 0.5*ties)',
'games_behind' : "(%s - (wins-losses))/2.0"},
select_params =[diff],
where = ['tournament_id = %s',
'myfavteam_team.conference=%s',
'team_id=myfavteam_team.id'],
params = [tournament_id, conference],
tables = ['myfavteam_team'],
order_by=['-win_pct'])
ret_list = list(self.teams)
return get_query_or_def_values(amount, query, ret_list)
class StatsList(PlayerList):
def __init__(self):
PlayerList.__init__(self)
ppg = 26.0
rpg = 6.0
apg = 7.0
mpg = 35.0
for i in range(len(self.players)):
self.players[i]['points_per_game'] = ppg + 2*i
self.players[i]['rebounds_per_game'] = rpg + 2*i
self.players[i]['assists_per_game'] = apg - 2*i
self.players[i]['steals_per_game'] = mpg - i
def get_stats(self, team_id, amount):
        # Change this query if the sport is not basketball.
        # After trying several ORM approaches, raw SQL turned out to be the fastest.
query = Player.objects.raw(
'SELECT myfavteam_player.*, ' \
'myfavteam_basketballplayerstats.points_per_game AS points_per_game,' \
'myfavteam_basketballplayerstats.rebounds_per_game AS rebounds_per_game,' \
'myfavteam_basketballplayerstats.assists_per_game AS assists_per_game, ' \
'myfavteam_basketballplayerstats.steals_per_game AS steals_per_game ' \
'FROM myfavteam_player LEFT JOIN myfavteam_basketballplayerstats ' \
'ON myfavteam_player.id = myfavteam_basketballplayerstats.player_id '\
'WHERE myfavteam_player.team_id = %s AND ' \
'myfavteam_player.id = myfavteam_basketballplayerstats.player_id '\
'ORDER BY points_per_game DESC',
[team_id])
ret_list = list(self.players)
return get_query_or_def_values(amount, query, ret_list)
def get_player_stats(self, player_id):
query = BasketballPlayerStats.objects.filter(player_id = player_id)
        # First verify the player exists in the db.
try:
Player.objects.get(id=player_id)
ret_list = None
except:
ret_list = list(self.players)
return get_one_val_or_none(query, ret_list, player_id)
class Carousel:
'''Object to build all data, task for the carousel on index.html'''
def __init__(self):
self.num_of_pics = 3
self.pic = ['holder.js/' + '1140x900' + '/auto/#666:#999/text:',
'holder.js/' + '720x400' + '/auto/#777:#999/text:',
'holder.js/' + '540x300' + '/auto/#888:#999/text:']
def get_pics(self, team_id):
for i in range(self.num_of_pics):
try:
pic = TeamPicture.objects.filter(team=team_id).order_by('-uploaded')[i].image.url
except:
pic = self.pic[i]
self.pic[i] = pic
def load_data(self, team_id, resp):
self.get_pics(team_id)
for i in range(self.num_of_pics):
str = "carousel_pic_{}".format(i)
resp[str] = self.pic[i]
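# Example of what Carousel.load_data leaves in the response dict (hypothetical):
#   c = Carousel(); resp = {}; c.load_data(team_id=1, resp=resp)
#   resp == {'carousel_pic_0': <TeamPicture image url or holder.js placeholder>,
#            'carousel_pic_1': ..., 'carousel_pic_2': ...}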
|
|
"""
A Weld wrapper for pandas.DataFrame.
"""
import copy
import numpy as np
import pandas as pd
import weld.encoders.numpy as wenp
import weld.grizzly.weld.ops as weldops
from weld.lazy import WeldLazy, identity
from weld.grizzly.core.error import GrizzlyError  # needed by weld_value(); import path assumed
from weld.grizzly.core.forwarding import Forwarding
from weld.grizzly.core.generic import GrizzlyBase
from weld.grizzly.core.indexes import ColumnIndex
from weld.grizzly.core.series import GrizzlySeries
from weld.types import *
class GrizzlyDataFrame(Forwarding, GrizzlyBase):
"""
An API-compatible DataFrame backed by Weld.
DataFrames are dictionary-like containers of Series of the same length.
Each Series can be a different data type. Operations on DataFrames align on
column name. Unlike pandas, DataFrame operations do not align on row
indexes.
Examples
--------
>>> df = GrizzlyDataFrame({'name': ['mike', 'sam', 'sally'], 'age': [20, 22, 56]})
>>> df
name age
0 mike 20
1 sam 22
2 sally 56
>>> df2 = GrizzlyDataFrame({'nom': ['jacques', 'kelly', 'marie'], 'age': [50, 60, 70]})
>>> df.add(df2).to_pandas()
age name nom
0 70 NaN NaN
1 82 NaN NaN
2 126 NaN NaN
"""
# Indicates that the length of a DataFrame is not known. Certain operations on DataFrames
# with this length are disallowed, since Grizzly (like Pandas) assumes each column in a DataFrame
# is of the same length.
UNKNOWN_LENGTH = -1
_encoder = wenp.NumPyWeldEncoder()
_decoder = wenp.NumPyWeldDecoder()
# ------------------- GrizzlyBase methods ----------------------
@property
def weld_value(self):
if hasattr(self, "weld_value_"):
return self.weld_value_
if len(self.data) == 0:
raise GrizzlyError("weld_value cannot be accessed on DataFrame with no data")
output_type = WeldStruct([col.weld_value.output_type for col in self.data])
self.weld_value_ = weldops.make_struct(*[col.weld_value for col in self.data])(output_type, GrizzlyDataFrame._decoder)
return self.weld_value_
@property
def is_value(self):
"""
Returns whether this DataFrame is a physical value.
If this is True, evaluate() is guaranteed to be a no-op.
Examples
--------
>>> df = GrizzlyDataFrame({'name': ['sam', 'sally'], 'age': [25, 50]})
>>> df.is_value
True
>>> df = df.eq(df)
>>> df.is_value
False
"""
return self.pandas_df is not None or\
all([child.is_identity for child in self.children])
def evaluate(self):
"""
Evaluates this `GrizzlyDataFrame` and returns itself.
Evaluation reduces the currently stored computation to a physical value
by compiling and running a Weld program. If this `GrizzlyDataFrame` refers
to a physical value and no computation, no program is compiled, and this
method returns `self` unmodified.
Returns
-------
GrizzlyDataFrame
"""
if not self.is_value:
if len(self.data) == 0:
# We're an empty DataFrame
self.pandas_df = pd.DataFrame()
                return self
# Collect each vector into a struct rather than evaluating each Series individually:
# this is more efficient so computations shared among vectors can be optimized.
result = self.weld_value.evaluate()
columns = result[0]
new_data = []
length = None
for column in columns:
data = column.copy2numpy()
column_length = len(data)
if length is not None:
assert column_length == length, "invalid DataFrame produced after evaluation"
else:
length = column_length
series = GrizzlySeries(data)
new_data.append(series)
# Columns is unchanged
self.pandas_df = None
self.data = new_data
self.length = length
# Reset the weld representation.
delattr(self, "weld_value_")
assert self.is_value
return self
def to_pandas(self, copy=False):
"""
Evaluate and convert this GrizzlyDataFrame to a pandas DataFrame.
Parameters
----------
copy : bool
whether to copy the data into the new DataFrame. This only guarantees
that a copy _will_ occur; it does not guarantee that a copy will not.
Returns
-------
pandas.DataFrame
"""
self.evaluate()
if self.pandas_df is not None:
return self.pandas_df
col_to_data = dict()
for col in self.columns:
col_to_data[col] = self._col(col).values
self.pandas_df = pd.DataFrame(data=col_to_data, copy=copy)
return self.pandas_df
# ------------------- Initialization ----------------------
def __init__(self, data, columns=None, _length=UNKNOWN_LENGTH, _fastpath=False):
if _fastpath:
assert all([isinstance(d, GrizzlySeries) for d in data])
assert isinstance(columns, ColumnIndex)
self.data = data
self.columns = columns
self.length = _length
self.pandas_df = None
return
self.data = []
column_index = []
# Keep a reference to the Pandas DataFrame. This should not consume any extra memory, since GrizzlySeries
        # just keep references to the same values. The only exception is for string data, where a copy will be created
# to force use of the 'S' dtype.
#
# TODO(shoumik): Convert string data to dtype 'S' here so it doesn't get copied when wrapping the Series as
# GrizzlySeries.
if isinstance(data, pd.DataFrame):
self.pandas_df = data
else:
self.pandas_df = pd.DataFrame(data, columns=columns)
self.length = len(self.pandas_df)
for (i, col) in enumerate(self.pandas_df):
grizzly_series = GrizzlySeries(self.pandas_df[col], name=self.pandas_df[col].name)
if not isinstance(grizzly_series, GrizzlySeries):
raise TypeError("Unsupported Series in DataFrame: {}".format(self.pandas_df[col]))
self.data.append(grizzly_series)
column_index.append(col)
self.columns = ColumnIndex(column_index)
def _col(self, col):
"""
Returns the column associated with the column name 'col'.
"""
return self.data[self.columns[col]]
# ------------------- Getting and Setting Items ----------------------
# TODO!
# ------------------- Ops ----------------------
def explain(self):
"""
Prints a string that describes the operations to compute each column.
If this DataFrame is a value, prints the data.
"""
if self.pandas_df is not None:
print(self.pandas_df)
else:
for col in self.columns:
code = self._col(col).code
code = code.replace("\n", "\n\t")
print("{}: {}".format(col, code))
def _arithmetic_binop_impl(self, other, series_op, fill=np.nan):
"""
Apply the binary operation between this DataFrame and other.
Parameters
----------
other : DataFrame, scalar, GrizzlySeries
If other is a DataFrame, aligns on column name. If the binary
operator is not supported on any column, raises a TypeError.
series_op : func
The binary operation to apply.
        fill : scalar
            The value used to fill columns that appear in only one of the two
            DataFrames (np.nan for arithmetic ops, a boolean for comparisons).
Returns
-------
GrizzlyDataFrame
"""
new_data = []
if not isinstance(other, GrizzlyDataFrame):
for data in self.data:
new_data.append(series_op(data, other))
return GrizzlyDataFrame(new_data,
columns=copy.deepcopy(self.columns),
_length=self.length,
_fastpath=True)
new_cols = []
for (col, left_slot, right_slot) in self.columns.zip(other.columns):
new_cols.append(col)
if left_slot is None or right_slot is None:
# TODO(shoumik): Handle this case by making a lazy computation.
assert self.length != GrizzlyDataFrame.UNKNOWN_LENGTH
dtype = np.array([fill]).dtype
vals = np.empty(self.length, dtype=dtype)
vals[:] = fill
new_data.append(GrizzlySeries(vals))
else:
new_data.append(series_op(self.data[left_slot], other.data[right_slot]))
return GrizzlyDataFrame(new_data,
columns=ColumnIndex(new_cols),
_length=self.length,
_fastpath=True)
def add(self, other):
return self._arithmetic_binop_impl(other, GrizzlySeries.add)
def sub(self, other):
return self._arithmetic_binop_impl(other, GrizzlySeries.sub)
def mod(self, other):
return self._arithmetic_binop_impl(other, GrizzlySeries.mod)
def mul(self, other):
return self._arithmetic_binop_impl(other, GrizzlySeries.mul)
def truediv(self, other):
return self._arithmetic_binop_impl(other, GrizzlySeries.truediv)
def divide(self, other):
return self.truediv(other)
def div(self, other):
return self.truediv(other)
def eq(self, other):
return self._arithmetic_binop_impl(other, GrizzlySeries.eq, fill=False)
def ne(self, other):
# Fill with True on this one.
return self._arithmetic_binop_impl(other, GrizzlySeries.ne, fill=True)
def ge(self, other):
return self._arithmetic_binop_impl(other, GrizzlySeries.ge, fill=False)
def gt(self, other):
return self._arithmetic_binop_impl(other, GrizzlySeries.gt, fill=False)
def le(self, other):
return self._arithmetic_binop_impl(other, GrizzlySeries.le, fill=False)
def lt(self, other):
return self._arithmetic_binop_impl(other, GrizzlySeries.lt, fill=False)
def __add__(self, other):
return self.add(other)
def __sub__(self, other):
return self.sub(other)
def __mul__(self, other):
return self.mul(other)
def __truediv__(self, other):
return self.truediv(other)
def __divmod__(self, other):
return self.divmod(other)
def __mod__(self, other):
return self.mod(other)
def __eq__(self, other):
return self.eq(other)
def __ne__(self, other):
return self.ne(other)
def __ge__(self, other):
return self.ge(other)
def __gt__(self, other):
return self.gt(other)
def __le__(self, other):
return self.le(other)
def __lt__(self, other):
return self.lt(other)
def __str__(self):
if self.pandas_df is not None:
return str(self.pandas_df)
else:
return repr(self)
def __repr__(self):
if self.pandas_df is not None:
return repr(self.pandas_df)
else:
return "GrizzlyDataFrame(lazy, {})".format([name for name in self.columns.columns])
|
|
import _pyio as _original_pyio
import errno
import os as _original_os
import socket as _original_socket
from io import (
BufferedRandom as _OriginalBufferedRandom,
BufferedReader as _OriginalBufferedReader,
BufferedWriter as _OriginalBufferedWriter,
DEFAULT_BUFFER_SIZE,
TextIOWrapper as _OriginalTextIOWrapper,
IOBase as _OriginalIOBase,
)
from types import FunctionType
from eventlet.greenio.base import (
_operation_on_closed_file,
greenpipe_doc,
set_nonblocking,
SOCKET_BLOCKING,
)
from eventlet.hubs import notify_close, notify_opened, IOClosed, trampoline
from eventlet.support import get_errno, six
__all__ = ['_fileobject', 'GreenPipe']
# TODO get rid of this, it only seems like the original _fileobject
_fileobject = _original_socket.SocketIO
# Large part of the following code is copied from the original
# eventlet.greenio module
class GreenFileIO(_OriginalIOBase):
def __init__(self, name, mode='r', closefd=True, opener=None):
if isinstance(name, int):
fileno = name
self._name = "<fd:%d>" % fileno
else:
assert isinstance(name, six.string_types)
with open(name, mode) as fd:
self._name = fd.name
fileno = _original_os.dup(fd.fileno())
notify_opened(fileno)
self._fileno = fileno
self._mode = mode
self._closed = False
set_nonblocking(self)
self._seekable = None
@property
def closed(self):
return self._closed
def seekable(self):
if self._seekable is None:
try:
_original_os.lseek(self._fileno, 0, _original_os.SEEK_CUR)
except IOError as e:
if get_errno(e) == errno.ESPIPE:
self._seekable = False
else:
raise
else:
self._seekable = True
return self._seekable
def readable(self):
return 'r' in self._mode or '+' in self._mode
def writable(self):
return 'w' in self._mode or '+' in self._mode
def fileno(self):
return self._fileno
def read(self, size=-1):
if size == -1:
return self.readall()
while True:
try:
return _original_os.read(self._fileno, size)
except OSError as e:
if get_errno(e) not in SOCKET_BLOCKING:
raise IOError(*e.args)
self._trampoline(self, read=True)
def readall(self):
buf = []
while True:
try:
chunk = _original_os.read(self._fileno, DEFAULT_BUFFER_SIZE)
if chunk == b'':
return b''.join(buf)
buf.append(chunk)
except OSError as e:
if get_errno(e) not in SOCKET_BLOCKING:
raise IOError(*e.args)
self._trampoline(self, read=True)
def readinto(self, b):
up_to = len(b)
data = self.read(up_to)
bytes_read = len(data)
b[:bytes_read] = data
return bytes_read
def isatty(self):
try:
return _original_os.isatty(self.fileno())
except OSError as e:
raise IOError(*e.args)
def _trampoline(self, fd, read=False, write=False, timeout=None, timeout_exc=None):
if self._closed:
# Don't trampoline if we're already closed.
raise IOClosed()
try:
return trampoline(fd, read=read, write=write, timeout=timeout,
timeout_exc=timeout_exc,
mark_as_closed=self._mark_as_closed)
except IOClosed:
# Our fileno has been obsoleted. Defang ourselves to
# prevent spurious closes.
self._mark_as_closed()
raise
def _mark_as_closed(self):
""" Mark this socket as being closed """
self._closed = True
def write(self, data):
view = memoryview(data)
datalen = len(data)
offset = 0
while offset < datalen:
try:
written = _original_os.write(self._fileno, view[offset:])
except OSError as e:
if get_errno(e) not in SOCKET_BLOCKING:
raise IOError(*e.args)
trampoline(self, write=True)
else:
offset += written
return offset
def close(self):
if not self._closed:
self._closed = True
_original_os.close(self._fileno)
notify_close(self._fileno)
for method in [
'fileno', 'flush', 'isatty', 'next', 'read', 'readinto',
'readline', 'readlines', 'seek', 'tell', 'truncate',
'write', 'xreadlines', '__iter__', '__next__', 'writelines']:
setattr(self, method, _operation_on_closed_file)
def truncate(self, size=-1):
if size == -1:
size = self.tell()
try:
rv = _original_os.ftruncate(self._fileno, size)
except OSError as e:
raise IOError(*e.args)
else:
self.seek(size) # move position&clear buffer
return rv
def seek(self, offset, whence=_original_os.SEEK_SET):
try:
return _original_os.lseek(self._fileno, offset, whence)
except OSError as e:
raise IOError(*e.args)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
_open_environment = dict(globals())
_open_environment.update(dict(
BufferedRandom=_OriginalBufferedRandom,
BufferedWriter=_OriginalBufferedWriter,
BufferedReader=_OriginalBufferedReader,
TextIOWrapper=_OriginalTextIOWrapper,
FileIO=GreenFileIO,
os=_original_os,
))
_open = FunctionType(
six.get_function_code(_original_pyio.open),
_open_environment,
)
def GreenPipe(name, mode="r", buffering=-1, encoding=None, errors=None,
newline=None, closefd=True, opener=None):
try:
fileno = name.fileno()
except AttributeError:
pass
else:
fileno = _original_os.dup(fileno)
name.close()
name = fileno
return _open(name, mode, buffering, encoding, errors, newline, closefd, opener)
GreenPipe.__doc__ = greenpipe_doc
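# Minimal usage sketch (editorial addition, not part of the original module).
# GreenPipe layers the usual buffered/text wrappers over GreenFileIO, so plain
# read/write calls cooperate with the eventlet hub instead of blocking it.
if __name__ == '__main__':
    import tempfile
    path = tempfile.NamedTemporaryFile(delete=False).name
    with GreenPipe(path, 'w') as fp:
        fp.write('hello from a green file\n')
    with GreenPipe(path, 'r') as fp:
        print(fp.read())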
|
|
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.db import connection
from django.db.models import Prefetch
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
from .models import (Author, Book, Reader, Qualification, Teacher, Department,
TaggedItem, Bookmark, AuthorAddress, FavoriteAuthors, AuthorWithAge,
BookWithYear, BookReview, Person, House, Room, Employee, Comment,
LessonEntry, WordEntry, Author2)
class PrefetchRelatedTests(TestCase):
def setUp(self):
self.book1 = Book.objects.create(title="Poems")
self.book2 = Book.objects.create(title="Jane Eyre")
self.book3 = Book.objects.create(title="Wuthering Heights")
self.book4 = Book.objects.create(title="Sense and Sensibility")
self.author1 = Author.objects.create(name="Charlotte",
first_book=self.book1)
self.author2 = Author.objects.create(name="Anne",
first_book=self.book1)
self.author3 = Author.objects.create(name="Emily",
first_book=self.book1)
self.author4 = Author.objects.create(name="Jane",
first_book=self.book4)
self.book1.authors.add(self.author1, self.author2, self.author3)
self.book2.authors.add(self.author1)
self.book3.authors.add(self.author3)
self.book4.authors.add(self.author4)
self.reader1 = Reader.objects.create(name="Amy")
self.reader2 = Reader.objects.create(name="Belinda")
self.reader1.books_read.add(self.book1, self.book4)
self.reader2.books_read.add(self.book2, self.book4)
def test_m2m_forward(self):
with self.assertNumQueries(2):
lists = [list(b.authors.all()) for b in Book.objects.prefetch_related('authors')]
normal_lists = [list(b.authors.all()) for b in Book.objects.all()]
self.assertEqual(lists, normal_lists)
def test_m2m_reverse(self):
with self.assertNumQueries(2):
lists = [list(a.books.all()) for a in Author.objects.prefetch_related('books')]
normal_lists = [list(a.books.all()) for a in Author.objects.all()]
self.assertEqual(lists, normal_lists)
def test_foreignkey_forward(self):
with self.assertNumQueries(2):
books = [a.first_book for a in Author.objects.prefetch_related('first_book')]
normal_books = [a.first_book for a in Author.objects.all()]
self.assertEqual(books, normal_books)
def test_foreignkey_reverse(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors')]
self.assertQuerysetEqual(self.book2.authors.all(), ["<Author: Charlotte>"])
def test_onetoone_reverse_no_match(self):
# Regression for #17439
with self.assertNumQueries(2):
book = Book.objects.prefetch_related('bookwithyear').all()[0]
with self.assertNumQueries(0):
with self.assertRaises(BookWithYear.DoesNotExist):
book.bookwithyear
def test_survives_clone(self):
with self.assertNumQueries(2):
[list(b.first_time_authors.all())
for b in Book.objects.prefetch_related('first_time_authors').exclude(id=1000)]
def test_len(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
len(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_bool(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
bool(qs)
[list(b.first_time_authors.all()) for b in qs]
def test_count(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.count() for b in qs]
def test_exists(self):
with self.assertNumQueries(2):
qs = Book.objects.prefetch_related('first_time_authors')
[b.first_time_authors.exists() for b in qs]
def test_in_and_prefetch_related(self):
"""
Regression test for #20242 - QuerySet "in" didn't work the first time
when using prefetch_related. This was fixed by the removal of chunked
reads from QuerySet iteration in
70679243d1786e03557c28929f9762a119e3ac14.
"""
qs = Book.objects.prefetch_related('first_time_authors')
self.assertTrue(qs[0] in qs)
def test_clear(self):
"""
Test that we can clear the behavior by calling prefetch_related()
"""
with self.assertNumQueries(5):
with_prefetch = Author.objects.prefetch_related('books')
without_prefetch = with_prefetch.prefetch_related(None)
[list(a.books.all()) for a in without_prefetch]
def test_m2m_then_m2m(self):
"""
Test we can follow a m2m and another m2m
"""
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by')
lists = [[[six.text_type(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_overriding_prefetch(self):
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books', 'books__read_by')
lists = [[[six.text_type(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
with self.assertNumQueries(3):
qs = Author.objects.prefetch_related('books__read_by', 'books')
lists = [[[six.text_type(r) for r in b.read_by.all()]
for b in a.books.all()]
for a in qs]
self.assertEqual(lists,
[
[["Amy"], ["Belinda"]], # Charlotte - Poems, Jane Eyre
[["Amy"]], # Anne - Poems
[["Amy"], []], # Emily - Poems, Wuthering Heights
[["Amy", "Belinda"]], # Jane - Sense and Sense
])
def test_get(self):
"""
Test that objects retrieved with .get() get the prefetch behavior.
"""
# Need a double
with self.assertNumQueries(3):
author = Author.objects.prefetch_related('books__read_by').get(name="Charlotte")
lists = [[six.text_type(r) for r in b.read_by.all()]
for b in author.books.all()]
self.assertEqual(lists, [["Amy"], ["Belinda"]]) # Poems, Jane Eyre
def test_foreign_key_then_m2m(self):
"""
Test we can follow an m2m relation after a relation like ForeignKey
that doesn't have many objects
"""
with self.assertNumQueries(2):
qs = Author.objects.select_related('first_book').prefetch_related('first_book__read_by')
lists = [[six.text_type(r) for r in a.first_book.read_by.all()]
for a in qs]
self.assertEqual(lists, [["Amy"],
["Amy"],
["Amy"],
["Amy", "Belinda"]])
def test_attribute_error(self):
qs = Reader.objects.all().prefetch_related('books_read__xyz')
with self.assertRaises(AttributeError) as cm:
list(qs)
self.assertTrue('prefetch_related' in str(cm.exception))
def test_invalid_final_lookup(self):
qs = Book.objects.prefetch_related('authors__name')
with self.assertRaises(ValueError) as cm:
list(qs)
self.assertTrue('prefetch_related' in str(cm.exception))
self.assertTrue("name" in str(cm.exception))
class CustomPrefetchTests(TestCase):
@classmethod
def traverse_qs(cls, obj_iter, path):
"""
Helper method that returns a list containing a list of the objects in the
obj_iter. Then for each object in the obj_iter, the path will be
recursively travelled and the found objects are added to the return value.
"""
ret_val = []
if hasattr(obj_iter, 'all'):
obj_iter = obj_iter.all()
try:
iter(obj_iter)
except TypeError:
obj_iter = [obj_iter]
for obj in obj_iter:
rel_objs = []
for part in path:
if not part:
continue
rel_objs.extend(cls.traverse_qs(getattr(obj, part[0]), [part[1:]]))
ret_val.append((obj, rel_objs))
return ret_val
def setUp(self):
self.person1 = Person.objects.create(name="Joe")
self.person2 = Person.objects.create(name="Mary")
self.house1 = House.objects.create(address="123 Main St", owner=self.person1)
self.house2 = House.objects.create(address="45 Side St", owner=self.person1)
self.house3 = House.objects.create(address="6 Downing St", owner=self.person2)
self.house4 = House.objects.create(address="7 Regents St", owner=self.person2)
self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
self.room2_3 = Room.objects.create(name="Kitchen", house=self.house2)
self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
self.room4_3 = Room.objects.create(name="Kitchen", house=self.house4)
self.person1.houses.add(self.house1, self.house2)
self.person2.houses.add(self.house3, self.house4)
def test_traverse_qs(self):
qs = Person.objects.prefetch_related('houses')
related_objs_normal = [list(p.houses.all()) for p in qs],
related_objs_from_traverse = [[inner[0] for inner in o[1]]
for o in self.traverse_qs(qs, [['houses']])]
self.assertEqual(related_objs_normal, (related_objs_from_traverse,))
def test_ambiguous(self):
# Ambiguous.
with self.assertRaises(ValueError):
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', Prefetch('houses', queryset=House.objects.all())),
[['houses', 'rooms']]
)
with self.assertRaises(AttributeError):
self.traverse_qs(
                Person.objects.prefetch_related('houses_lst__rooms', Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')),
[['houses', 'rooms']]
)
# Not ambiguous.
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', 'houses'),
[['houses', 'rooms']]
)
self.traverse_qs(
Person.objects.prefetch_related('houses__rooms', Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')),
[['houses', 'rooms']]
)
def test_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses'),
[['houses']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses')),
[['houses']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst')),
[['houses_lst']]
)
self.assertEqual(lst1, lst2)
def test_reverse_m2m(self):
# Control lookups.
with self.assertNumQueries(2):
lst1 = self.traverse_qs(
House.objects.prefetch_related('occupants'),
[['occupants']]
)
# Test lookups.
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants')),
[['occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(2):
lst2 = self.traverse_qs(
House.objects.prefetch_related(Prefetch('occupants', to_attr='occupants_lst')),
[['occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_fk(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Room.objects.prefetch_related('house__occupants'),
[['house', 'occupants']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants')),
[['house', 'occupants']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Room.objects.prefetch_related(Prefetch('house__occupants', to_attr='occupants_lst')),
[['house', 'occupants_lst']]
)
self.assertEqual(lst1, lst2)
def test_m2m_through_gfk(self):
TaggedItem.objects.create(tag="houses", content_object=self.house1)
TaggedItem.objects.create(tag="houses", content_object=self.house2)
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
TaggedItem.objects.filter(tag='houses').prefetch_related('content_object__rooms'),
[['content_object', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
TaggedItem.objects.prefetch_related(
Prefetch('content_object'),
Prefetch('content_object__rooms', to_attr='rooms_lst')
),
[['content_object', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_o2m_through_m2m(self):
# Control lookups.
with self.assertNumQueries(3):
lst1 = self.traverse_qs(
Person.objects.prefetch_related('houses', 'houses__rooms'),
[['houses', 'rooms']]
)
# Test lookups.
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), 'houses__rooms'),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses'), Prefetch('houses__rooms')),
[['houses', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(Prefetch('houses', to_attr='houses_lst'), 'houses_lst__rooms'),
[['houses_lst', 'rooms']]
)
self.assertEqual(lst1, lst2)
with self.assertNumQueries(3):
lst2 = self.traverse_qs(
Person.objects.prefetch_related(
Prefetch('houses', to_attr='houses_lst'),
Prefetch('houses_lst__rooms', to_attr='rooms_lst')
),
[['houses_lst', 'rooms_lst']]
)
self.assertEqual(lst1, lst2)
def test_generic_rel(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, favorite=bookmark, tag='python')
# Control lookups.
with self.assertNumQueries(4):
lst1 = self.traverse_qs(
Bookmark.objects.prefetch_related('tags', 'tags__content_object', 'favorite_tags'),
[['tags', 'content_object'], ['favorite_tags']]
)
# Test lookups.
with self.assertNumQueries(4):
lst2 = self.traverse_qs(
Bookmark.objects.prefetch_related(
Prefetch('tags', to_attr='tags_lst'),
Prefetch('tags_lst__content_object'),
Prefetch('favorite_tags'),
),
[['tags_lst', 'content_object'], ['favorite_tags']]
)
self.assertEqual(lst1, lst2)
def test_custom_qs(self):
# Test basic.
with self.assertNumQueries(2):
lst1 = list(Person.objects.prefetch_related('houses'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.all(), to_attr='houses_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses']]),
self.traverse_qs(lst2, [['houses_lst']])
)
# Test queryset filtering.
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.filter(pk__in=[self.house1.pk, self.house3.pk]), to_attr='houses_lst')))
self.assertEqual(len(lst2[0].houses_lst), 1)
self.assertEqual(lst2[0].houses_lst[0], self.house1)
self.assertEqual(len(lst2[1].houses_lst), 1)
self.assertEqual(lst2[1].houses_lst[0], self.house3)
# Test flattened.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__rooms'))
with self.assertNumQueries(3):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses__rooms', queryset=Room.objects.all(), to_attr='rooms_lst')))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'rooms']]),
self.traverse_qs(lst2, [['houses', 'rooms_lst']])
)
# Test inner select_related.
with self.assertNumQueries(3):
lst1 = list(Person.objects.prefetch_related('houses__owner'))
with self.assertNumQueries(2):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=House.objects.select_related('owner'))))
self.assertEqual(
self.traverse_qs(lst1, [['houses', 'owner']]),
self.traverse_qs(lst2, [['houses', 'owner']])
)
# Test inner prefetch.
inner_rooms_qs = Room.objects.filter(pk__in=[self.room1_1.pk, self.room1_2.pk])
houses_qs_prf = House.objects.prefetch_related(
Prefetch('rooms', queryset=inner_rooms_qs, to_attr='rooms_lst'))
with self.assertNumQueries(3):
lst2 = list(Person.objects.prefetch_related(
Prefetch('houses', queryset=houses_qs_prf.filter(pk=self.house1.pk), to_attr='houses_lst')))
self.assertEqual(len(lst2[0].houses_lst[0].rooms_lst), 2)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[0], self.room1_1)
self.assertEqual(lst2[0].houses_lst[0].rooms_lst[1], self.room1_2)
self.assertEqual(len(lst2[1].houses_lst), 0)
class DefaultManagerTests(TestCase):
def setUp(self):
self.qual1 = Qualification.objects.create(name="BA")
self.qual2 = Qualification.objects.create(name="BSci")
self.qual3 = Qualification.objects.create(name="MA")
self.qual4 = Qualification.objects.create(name="PhD")
self.teacher1 = Teacher.objects.create(name="Mr Cleese")
self.teacher2 = Teacher.objects.create(name="Mr Idle")
self.teacher3 = Teacher.objects.create(name="Mr Chapman")
self.teacher1.qualifications.add(self.qual1, self.qual2, self.qual3, self.qual4)
self.teacher2.qualifications.add(self.qual1)
self.teacher3.qualifications.add(self.qual2)
self.dept1 = Department.objects.create(name="English")
self.dept2 = Department.objects.create(name="Physics")
self.dept1.teachers.add(self.teacher1, self.teacher2)
self.dept2.teachers.add(self.teacher1, self.teacher3)
def test_m2m_then_m2m(self):
with self.assertNumQueries(3):
# When we prefetch the teachers, and force the query, we don't want
# the default manager on teachers to immediately get all the related
# qualifications, since this will do one query per teacher.
qs = Department.objects.prefetch_related('teachers')
depts = "".join(["%s department: %s\n" %
(dept.name, ", ".join(six.text_type(t) for t in dept.teachers.all()))
for dept in qs])
self.assertEqual(depts,
"English department: Mr Cleese (BA, BSci, MA, PhD), Mr Idle (BA)\n"
"Physics department: Mr Cleese (BA, BSci, MA, PhD), Mr Chapman (BSci)\n")
class GenericRelationTests(TestCase):
def setUp(self):
book1 = Book.objects.create(title="Winnie the Pooh")
book2 = Book.objects.create(title="Do you like green eggs and spam?")
book3 = Book.objects.create(title="Three Men In A Boat")
reader1 = Reader.objects.create(name="me")
reader2 = Reader.objects.create(name="you")
reader3 = Reader.objects.create(name="someone")
book1.read_by.add(reader1, reader2)
book2.read_by.add(reader2)
book3.read_by.add(reader3)
self.book1, self.book2, self.book3 = book1, book2, book3
self.reader1, self.reader2, self.reader3 = reader1, reader2, reader3
def test_prefetch_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="great", content_object=self.reader1)
TaggedItem.objects.create(tag="stupid", content_object=self.book2)
TaggedItem.objects.create(tag="amazing", content_object=self.reader3)
# 1 for TaggedItem table, 1 for Book table, 1 for Reader table
with self.assertNumQueries(3):
qs = TaggedItem.objects.prefetch_related('content_object')
list(qs)
def test_prefetch_GFK_nonint_pk(self):
Comment.objects.create(comment="awesome", content_object=self.book1)
# 1 for Comment table, 1 for Book table
with self.assertNumQueries(2):
qs = Comment.objects.prefetch_related('content_object')
[c.content_object for c in qs]
def test_traverse_GFK(self):
"""
Test that we can traverse a 'content_object' with prefetch_related() and
get to related objects on the other side (assuming it is suitably
filtered)
"""
TaggedItem.objects.create(tag="awesome", content_object=self.book1)
TaggedItem.objects.create(tag="awesome", content_object=self.book2)
TaggedItem.objects.create(tag="awesome", content_object=self.book3)
TaggedItem.objects.create(tag="awesome", content_object=self.reader1)
TaggedItem.objects.create(tag="awesome", content_object=self.reader2)
ct = ContentType.objects.get_for_model(Book)
# We get 3 queries - 1 for main query, 1 for content_objects since they
# all use the same table, and 1 for the 'read_by' relation.
with self.assertNumQueries(3):
# If we limit to books, we know that they will have 'read_by'
# attributes, so the following makes sense:
qs = TaggedItem.objects.filter(content_type=ct, tag='awesome').prefetch_related('content_object__read_by')
readers_of_awesome_books = set([r.name for tag in qs
for r in tag.content_object.read_by.all()])
self.assertEqual(readers_of_awesome_books, set(["me", "you", "someone"]))
def test_nullable_GFK(self):
TaggedItem.objects.create(tag="awesome", content_object=self.book1,
created_by=self.reader1)
TaggedItem.objects.create(tag="great", content_object=self.book2)
TaggedItem.objects.create(tag="rubbish", content_object=self.book3)
with self.assertNumQueries(2):
result = [t.created_by for t in TaggedItem.objects.prefetch_related('created_by')]
self.assertEqual(result,
[t.created_by for t in TaggedItem.objects.all()])
def test_generic_relation(self):
bookmark = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=bookmark, tag='django')
TaggedItem.objects.create(content_object=bookmark, tag='python')
with self.assertNumQueries(2):
tags = [t.tag for b in Bookmark.objects.prefetch_related('tags')
for t in b.tags.all()]
self.assertEqual(sorted(tags), ["django", "python"])
def test_charfield_GFK(self):
b = Bookmark.objects.create(url='http://www.djangoproject.com/')
TaggedItem.objects.create(content_object=b, tag='django')
TaggedItem.objects.create(content_object=b, favorite=b, tag='python')
with self.assertNumQueries(3):
bookmark = Bookmark.objects.filter(pk=b.pk).prefetch_related('tags', 'favorite_tags')[0]
self.assertEqual(sorted([i.tag for i in bookmark.tags.all()]), ["django", "python"])
self.assertEqual([i.tag for i in bookmark.favorite_tags.all()], ["python"])
class MultiTableInheritanceTest(TestCase):
def setUp(self):
self.book1 = BookWithYear.objects.create(
title="Poems", published_year=2010)
self.book2 = BookWithYear.objects.create(
title="More poems", published_year=2011)
self.author1 = AuthorWithAge.objects.create(
name='Jane', first_book=self.book1, age=50)
self.author2 = AuthorWithAge.objects.create(
name='Tom', first_book=self.book1, age=49)
self.author3 = AuthorWithAge.objects.create(
name='Robert', first_book=self.book2, age=48)
self.authorAddress = AuthorAddress.objects.create(
author=self.author1, address='SomeStreet 1')
self.book2.aged_authors.add(self.author2, self.author3)
self.br1 = BookReview.objects.create(
book=self.book1, notes="review book1")
self.br2 = BookReview.objects.create(
book=self.book2, notes="review book2")
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = AuthorWithAge.objects.prefetch_related('addresses')
addresses = [[six.text_type(address) for address in obj.addresses.all()]
for obj in qs]
self.assertEqual(addresses, [[six.text_type(self.authorAddress)], [], []])
def test_foreignkey_to_inherited(self):
with self.assertNumQueries(2):
qs = BookReview.objects.prefetch_related('book')
titles = [obj.book.title for obj in qs]
self.assertEqual(titles, ["Poems", "More poems"])
def test_m2m_to_inheriting_model(self):
qs = AuthorWithAge.objects.prefetch_related('books_with_year')
with self.assertNumQueries(2):
lst = [[six.text_type(book) for book in author.books_with_year.all()]
for author in qs]
qs = AuthorWithAge.objects.all()
lst2 = [[six.text_type(book) for book in author.books_with_year.all()]
for author in qs]
self.assertEqual(lst, lst2)
qs = BookWithYear.objects.prefetch_related('aged_authors')
with self.assertNumQueries(2):
lst = [[six.text_type(author) for author in book.aged_authors.all()]
for book in qs]
qs = BookWithYear.objects.all()
lst2 = [[six.text_type(author) for author in book.aged_authors.all()]
for book in qs]
self.assertEqual(lst, lst2)
def test_parent_link_prefetch(self):
with self.assertNumQueries(2):
[a.author for a in AuthorWithAge.objects.prefetch_related('author')]
@override_settings(DEBUG=True)
def test_child_link_prefetch(self):
with self.assertNumQueries(2):
l = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]
# Regression for #18090: the prefetching query must include an IN clause.
# Note that on Oracle the table name is upper case in the generated SQL,
# thus the .lower() call.
self.assertIn('authorwithage', connection.queries[-1]['sql'].lower())
self.assertIn(' IN ', connection.queries[-1]['sql'])
self.assertEqual(l, [a.authorwithage for a in Author.objects.all()])
class ForeignKeyToFieldTest(TestCase):
def setUp(self):
self.book = Book.objects.create(title="Poems")
self.author1 = Author.objects.create(name='Jane', first_book=self.book)
self.author2 = Author.objects.create(name='Tom', first_book=self.book)
self.author3 = Author.objects.create(name='Robert', first_book=self.book)
self.authorAddress = AuthorAddress.objects.create(
author=self.author1, address='SomeStreet 1'
)
FavoriteAuthors.objects.create(author=self.author1,
likes_author=self.author2)
FavoriteAuthors.objects.create(author=self.author2,
likes_author=self.author3)
FavoriteAuthors.objects.create(author=self.author3,
likes_author=self.author1)
def test_foreignkey(self):
with self.assertNumQueries(2):
qs = Author.objects.prefetch_related('addresses')
addresses = [[six.text_type(address) for address in obj.addresses.all()]
for obj in qs]
self.assertEqual(addresses, [[six.text_type(self.authorAddress)], [], []])
def test_m2m(self):
with self.assertNumQueries(3):
qs = Author.objects.all().prefetch_related('favorite_authors', 'favors_me')
favorites = [(
[six.text_type(i_like) for i_like in author.favorite_authors.all()],
[six.text_type(likes_me) for likes_me in author.favors_me.all()]
) for author in qs]
self.assertEqual(
favorites,
[
([six.text_type(self.author2)], [six.text_type(self.author3)]),
([six.text_type(self.author3)], [six.text_type(self.author1)]),
([six.text_type(self.author1)], [six.text_type(self.author2)])
]
)
class LookupOrderingTest(TestCase):
"""
Test cases that demonstrate that ordering of lookups is important, and
ensure it is preserved.
"""
def setUp(self):
self.person1 = Person.objects.create(name="Joe")
self.person2 = Person.objects.create(name="Mary")
self.house1 = House.objects.create(address="123 Main St")
self.house2 = House.objects.create(address="45 Side St")
self.house3 = House.objects.create(address="6 Downing St")
self.house4 = House.objects.create(address="7 Regents St")
self.room1_1 = Room.objects.create(name="Dining room", house=self.house1)
self.room1_2 = Room.objects.create(name="Lounge", house=self.house1)
self.room1_3 = Room.objects.create(name="Kitchen", house=self.house1)
self.room2_1 = Room.objects.create(name="Dining room", house=self.house2)
self.room2_2 = Room.objects.create(name="Lounge", house=self.house2)
self.room3_1 = Room.objects.create(name="Dining room", house=self.house3)
self.room3_2 = Room.objects.create(name="Lounge", house=self.house3)
self.room3_3 = Room.objects.create(name="Kitchen", house=self.house3)
self.room4_1 = Room.objects.create(name="Dining room", house=self.house4)
self.room4_2 = Room.objects.create(name="Lounge", house=self.house4)
self.person1.houses.add(self.house1, self.house2)
self.person2.houses.add(self.house3, self.house4)
def test_order(self):
with self.assertNumQueries(4):
# The following two queries must be done in the same order as written,
# otherwise 'primary_house' will cause non-prefetched lookups
qs = Person.objects.prefetch_related('houses__rooms',
'primary_house__occupants')
[list(p.primary_house.occupants.all()) for p in qs]
class NullableTest(TestCase):
def setUp(self):
boss = Employee.objects.create(name="Peter")
Employee.objects.create(name="Joe", boss=boss)
Employee.objects.create(name="Angela", boss=boss)
def test_traverse_nullable(self):
# Because we use select_related() for 'boss', it doesn't need to be
# prefetched, but we can still traverse it although it contains some nulls
with self.assertNumQueries(2):
qs = Employee.objects.select_related('boss').prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.select_related('boss')
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_prefetch_nullable(self):
# One for main employee, one for boss, one for serfs
with self.assertNumQueries(3):
qs = Employee.objects.prefetch_related('boss__serfs')
co_serfs = [list(e.boss.serfs.all()) if e.boss is not None else []
for e in qs]
qs2 = Employee.objects.all()
co_serfs2 = [list(e.boss.serfs.all()) if e.boss is not None else [] for e in qs2]
self.assertEqual(co_serfs, co_serfs2)
def test_in_bulk(self):
"""
In-bulk does correctly prefetch objects by not using .iterator()
directly.
"""
boss1 = Employee.objects.create(name="Peter")
boss2 = Employee.objects.create(name="Jack")
with self.assertNumQueries(2):
# Check that prefetch is done and it does not cause any errors.
bulk = Employee.objects.prefetch_related('serfs').in_bulk([boss1.pk, boss2.pk])
for b in bulk.values():
list(b.serfs.all())
class MultiDbTests(TestCase):
multi_db = True
def test_using_is_honored_m2m(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Jane Eyre")
book3 = B.create(title="Wuthering Heights")
book4 = B.create(title="Sense and Sensibility")
author1 = A.create(name="Charlotte", first_book=book1)
author2 = A.create(name="Anne", first_book=book1)
author3 = A.create(name="Emily", first_book=book1)
author4 = A.create(name="Jane", first_book=book4)
book1.authors.add(author1, author2, author3)
book2.authors.add(author1)
book3.authors.add(author3)
book4.authors.add(author4)
# Forward
qs1 = B.prefetch_related('authors')
with self.assertNumQueries(2, using='other'):
books = "".join(["%s (%s)\n" %
(book.title, ", ".join(a.name for a in book.authors.all()))
for book in qs1])
self.assertEqual(books,
"Poems (Charlotte, Anne, Emily)\n"
"Jane Eyre (Charlotte)\n"
"Wuthering Heights (Emily)\n"
"Sense and Sensibility (Jane)\n")
# Reverse
qs2 = A.prefetch_related('books')
with self.assertNumQueries(2, using='other'):
authors = "".join(["%s: %s\n" %
(author.name, ", ".join(b.title for b in author.books.all()))
for author in qs2])
self.assertEqual(authors,
"Charlotte: Poems, Jane Eyre\n"
"Anne: Poems\n"
"Emily: Poems, Wuthering Heights\n"
"Jane: Sense and Sensibility\n")
def test_using_is_honored_fkey(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Forward
with self.assertNumQueries(2, using='other'):
books = ", ".join(a.first_book.title for a in A.prefetch_related('first_book'))
self.assertEqual("Poems, Sense and Sensibility", books)
# Reverse
with self.assertNumQueries(2, using='other'):
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related('first_time_authors'))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
def test_using_is_honored_inheritance(self):
B = BookWithYear.objects.using('other')
A = AuthorWithAge.objects.using('other')
book1 = B.create(title="Poems", published_year=2010)
B.create(title="More poems", published_year=2011)
A.create(name='Jane', first_book=book1, age=50)
A.create(name='Tom', first_book=book1, age=49)
# parent link
with self.assertNumQueries(2, using='other'):
authors = ", ".join(a.author.name for a in A.prefetch_related('author'))
self.assertEqual(authors, "Jane, Tom")
# child link
with self.assertNumQueries(2, using='other'):
ages = ", ".join(str(a.authorwithage.age) for a in A.prefetch_related('authorwithage'))
self.assertEqual(ages, "50, 49")
def test_using_is_honored_custom_qs(self):
B = Book.objects.using('other')
A = Author.objects.using('other')
book1 = B.create(title="Poems")
book2 = B.create(title="Sense and Sensibility")
A.create(name="Charlotte Bronte", first_book=book1)
A.create(name="Jane Austen", first_book=book2)
# Implicit hinting
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.all())
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on the same db.
with self.assertNumQueries(2, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('other'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems (Charlotte Bronte)\n"
"Sense and Sensibility (Jane Austen)\n")
# Explicit using on a different db.
with self.assertNumQueries(1, using='default'), self.assertNumQueries(1, using='other'):
prefetch = Prefetch('first_time_authors', queryset=Author.objects.using('default'))
books = "".join("%s (%s)\n" %
(b.title, ", ".join(a.name for a in b.first_time_authors.all()))
for b in B.prefetch_related(prefetch))
self.assertEqual(books,
"Poems ()\n"
"Sense and Sensibility ()\n")
class Ticket19607Tests(TestCase):
def setUp(self):
for id, name1, name2 in [
(1, 'einfach', 'simple'),
(2, 'schwierig', 'difficult'),
]:
LessonEntry.objects.create(id=id, name1=name1, name2=name2)
for id, lesson_entry_id, name in [
(1, 1, 'einfach'),
(2, 1, 'simple'),
(3, 2, 'schwierig'),
(4, 2, 'difficult'),
]:
WordEntry.objects.create(id=id, lesson_entry_id=lesson_entry_id, name=name)
def test_bug(self):
list(WordEntry.objects.prefetch_related('lesson_entry', 'lesson_entry__wordentry_set'))
class Ticket21410Tests(TestCase):
def setUp(self):
self.book1 = Book.objects.create(title="Poems")
self.book2 = Book.objects.create(title="Jane Eyre")
self.book3 = Book.objects.create(title="Wuthering Heights")
self.book4 = Book.objects.create(title="Sense and Sensibility")
self.author1 = Author2.objects.create(name="Charlotte",
first_book=self.book1)
self.author2 = Author2.objects.create(name="Anne",
first_book=self.book1)
self.author3 = Author2.objects.create(name="Emily",
first_book=self.book1)
self.author4 = Author2.objects.create(name="Jane",
first_book=self.book4)
self.author1.favorite_books.add(self.book1, self.book2, self.book3)
self.author2.favorite_books.add(self.book1)
self.author3.favorite_books.add(self.book2)
self.author4.favorite_books.add(self.book3)
def test_bug(self):
list(Author2.objects.prefetch_related('first_book', 'favorite_books'))
|
|
#!/usr/bin/env python
"""
add_sponsors.py -- From a data file consisting of sponsors of
research, update sponsors and add sponsors
Version 0.1 MC 2014-03-10
-- Working as expected
Version 0.2 MC 2014-03-16
-- Added abbreviations, shortcuts
Version 0.3 MC 2014-03-27
-- Handle lang tags and datatypes in dictionary and RDF
Version 0.4 MC 2014-03-28
-- Handle unicode from VIVO
Version 0.5 MC 2014-04-04
    -- Fix Extension Units
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2014, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "0.4"
__harvest_text__ = "Python Sponsors " + __version__
from vivotools import vivo_sparql_query
from vivotools import get_vivo_uri
from vivotools import assert_resource_property
from vivotools import assert_data_property
from vivotools import update_data_property
from vivotools import read_csv
from vivotools import rdf_header
from vivotools import rdf_footer
from vivotools import get_value
from vivotools import get_triples
import os
import sys
from datetime import datetime
def make_sponsor_dict(debug=False):
"""
    Extract all the sponsors in VIVO and organize them into a dictionary
    keyed by sponsor number, with the sponsor URI as the value.
"""
query = """
SELECT ?uri ?number
WHERE {
?uri ufVivo:sponsorID ?number
}"""
result = vivo_sparql_query(query)
sponsor_dict = {}
if 'results' in result and 'bindings' in result['results']:
rows = result["results"]["bindings"]
else:
return sponsor_dict
if debug:
print query
if len(rows) >= 2:
print rows[0], rows[1]
elif len(rows) == 1:
print rows[0]
for row in rows:
number = row['number']['value']
uri = row['uri']['value']
sponsor_dict[number] = uri
if debug:
print sponsor_dict.items()[1:3]
return sponsor_dict
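# Example of the structure returned by make_sponsor_dict (hypothetical values):
#   {'000123': 'http://vivo.example.edu/individual/n1001',
#    '000456': 'http://vivo.example.edu/individual/n1002'}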
def improve_sponsor_name(s):
"""
    DSP uses a series of abbreviations to fit sponsor names into database fields.
Spell them out as best we can.
"""
if s == "":
return s
if s[len(s)-1] == ',':
s = s[0:len(s)-1]
if s[len(s)-1] == ',':
s = s[0:len(s)-1]
s = s.lower() # convert to lower
s = s.title() # uppercase each word
s = s + ' ' # add a trailing space so we can find these abbreviated
# words throughout the string
t = s.replace(", ,", ",")
t = t.replace(" ", " ")
t = t.replace(" & ", " and ")
t = t.replace(" &", " and ")
t = t.replace("&", " and ")
t = t.replace("/", " @")
t = t.replace("/", " @") # might be two slashes in the input
t = t.replace(",", " !")
t = t.replace(",", " !") # might be two commas in input
t = t.replace("-", " #")
t = t.replace("Acad ", "Academy ")
t = t.replace("Adm ", "Administration ")
t = t.replace("Admn ", "Administration ")
t = t.replace("Ag ", "Agriculture ")
t = t.replace("Agcy ", "Agency ")
t = t.replace("Agri ", "Agricultural ")
t = t.replace("Am ", "American ")
t = t.replace("Amer ", "American ")
t = t.replace("And ", "and ")
t = t.replace("Asso ", "Association ")
t = t.replace("Assoc ", "Association ")
t = t.replace("Bd ", "Board ")
t = t.replace("Brd ", "Board ")
t = t.replace("Bur ", "Bureau ")
t = t.replace("Bwxt ", "BWXT ")
t = t.replace("Char ", "Charitable ")
t = t.replace("Cncl ", "Council ")
t = t.replace("Cntr ", "Center ")
t = t.replace("Cnty ", "County ")
t = t.replace("Co ", "Company ")
t = t.replace("Coll ", "College ")
t = t.replace("Corp ", "Corporation ")
t = t.replace("Ctr ", "Center ")
t = t.replace("Dept ", "Department ")
t = t.replace("Dev ", "Development ")
t = t.replace("Edu ", "Education ")
t = t.replace("Fed ", "Federal ")
t = t.replace("Fl ", "Florida ")
t = t.replace("For ", "for ")
t = t.replace("Fdtn ", "Foundation ")
t = t.replace("Fou ", "Foundation ")
t = t.replace("Gmbh ", "GmbH ")
t = t.replace("Gov ", "Government ")
t = t.replace("Hlth ", "Health ")
t = t.replace("Hosp ", "Hospital ")
t = t.replace("Hsp ", "Hospital ")
t = t.replace("Ibm ", "IBM ")
t = t.replace("Inst ", "Institute ")
t = t.replace("Intl ", "International ")
t = t.replace("Llc ", "LLC ")
t = t.replace("Md ", "MD ")
t = t.replace("Med ", "Medical ")
t = t.replace("Mem ", "Memorial ")
t = t.replace("Mgmt ", "Management ")
t = t.replace("Nasa ", "NASA ")
t = t.replace("Nat ", "Natural ")
t = t.replace("Natl ", "National ")
t = t.replace("Nc ", "NC ")
t = t.replace("Ne ", "NE ")
t = t.replace("Nw ", "NW ")
t = t.replace("Ny ", "NY ")
t = t.replace("Of ", "of ")
t = t.replace("Ofc ", "Office ")
t = t.replace("On ", "on ")
t = t.replace("Plc ", "PLC ")
t = t.replace("Reg ", "Regional ")
t = t.replace("Res ", "Research ")
t = t.replace("Sa ", "SA ")
t = t.replace("Sci ", "Science ")
t = t.replace("Se ", "Southeast ")
t = t.replace("So ", "South ")
t = t.replace("Soc ", "Society ")
t = t.replace("Sw ", "SW ")
t = t.replace("Tech ", "Technology ")
t = t.replace("The ", "the ")
t = t.replace("To ", "to ")
t = t.replace("Uf ", "University of Florida ")
t = t.replace("Uk ", "UK ")
t = t.replace("Univ ", "University ")
t = t.replace("Us ", "US ")
t = t.replace("Usa ", "USA ")
t = t.replace("'S ", "'s ")
t = t.replace(" @", "/") # restore /
t = t.replace(" @", "/")
t = t.replace(" !", ",") # restore ,
t = t.replace(" !", ",") # restore ,
t = t.replace(" #", "-") # restore -
t = t[0].upper() + t[1:-1]
return t
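# A minimal sketch (never called) of how improve_sponsor_name behaves on an
# assumed raw DSP string; the exact output depends on the table above.
def _example_improve_sponsor_name():
    # "Natl Inst Of Hlth," should expand to roughly "National Institute of Health"
    return improve_sponsor_name("Natl Inst Of Hlth,")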
def add_sponsor(sponsor_id):
"""
Add a sponsor entity. All attributes come via update
"""
ardf = ""
sponsor_uri = get_vivo_uri()
add = assert_resource_property(sponsor_uri, "rdf:type",
"http://xmlns.com/foaf/0.1/Organization")
ardf = ardf + add
add = assert_resource_property(sponsor_uri, "rdf:type",
"http://vivoweb.org/ontology/core#FundingOrganization")
ardf = ardf + add
add = assert_data_property(sponsor_uri, "ufVivo:sponsorID",
sponsor_id)
ardf = ardf + add
return [ardf, sponsor_uri]
def get_types(uri):
"""
Given a VIVO URI, return its types
"""
types = []
triples = get_triples(uri)
if 'results' in triples and 'bindings' in triples['results']:
rows = triples['results']['bindings']
for row in rows:
p = row['p']['value']
o = row['o']['value']
if p == "http://www.w3.org/1999/02/22-rdf-syntax-ns#type":
types.append(o)
return types
def update_org_types(org_uri, org_string):
"""
Given a VIVO org URI and collection of sponsor types, create add and
sub RDF to update the VIVO org types
"""
org_okay = ['http://xmlns.com/foaf/0.1/Organization',
'http://www.w3.org/2002/07/owl#Thing',
'http://xmlns.com/foaf/0.1/Agent']
org_letters = {
'A': 'http://vivoweb.org/ontology/core#Association',
'C': 'http://vivoweb.org/ontology/core#Company',
'U': 'http://vivoweb.org/ontology/core#University',
'B': 'http://vivoweb.org/ontology/core#Laboratory',
'S': 'http://vivoweb.org/ontology/core#School',
'M': 'http://vivoweb.org/ontology/core#Museum',
'Y': 'http://vivoweb.org/ontology/core#Library',
'H': 'http://vivoweb.org/ontology/core#Publisher',
'T': 'http://vivoweb.org/ontology/core#Center',
'E': 'http://vivoweb.org/ontology/core#College',
'X': 'http://vivoweb.org/ontology/core#ExtensionUnit',
'V': 'http://vivoweb.org/ontology/core#Division',
'P': 'http://vivoweb.org/ontology/core#Program',
'D': 'http://vivoweb.org/ontology/core#Department',
'F': 'http://vivoweb.org/ontology/core#Foundation',
'N': 'http://vivoweb.org/ontology/core#FundingOrganization',
'R': 'http://vivoweb.org/ontology/core#ResearchOrganization',
'G': 'http://vivoweb.org/ontology/core#GovernmentAgency',
'I': 'http://vivoweb.org/ontology/core#Institute',
'L': 'http://vivoweb.org/ontology/core#ClinicalOrganization'
}
ardf = ""
srdf = ""
vivo_types = get_types(org_uri)
org_types = []
for org_char in org_string:
if org_char in org_letters:
org_types.append(org_letters[org_char])
for org_type in vivo_types:
if (len(org_types) == 0 or org_type not in org_types) and \
org_type not in org_okay:
sub = assert_resource_property(org_uri, "rdf:type", org_type)
srdf = srdf + sub
for org_type in org_types:
if len(vivo_types) == 0 or org_type not in vivo_types:
add = assert_resource_property(org_uri, "rdf:type", org_type)
ardf = ardf + add
return [ardf, srdf]
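# Example (hypothetical): for org_string 'GN', update_org_types produces add RDF
# asserting core#GovernmentAgency and core#FundingOrganization on the org URI
# (if missing) and subtraction RDF for any other non-whitelisted types
# currently asserted in VIVO.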
def update_sponsor(sponsor_uri, sponsor_data):
"""
Given the VIVO URI of a sponsor and a dictionary of sponsor data,
update the attrbutes in VIVO with the data from the dictionary
"""
ardf = ""
srdf = ""
vivo_sponsor_label = get_value(sponsor_uri, "rdfs:label")
[add, sub] = update_data_property(sponsor_uri, "rdfs:label",
vivo_sponsor_label,
sponsor_data['sponsor_label'])
ardf = ardf + add
srdf = srdf + sub
[add, sub] = update_org_types(sponsor_uri, \
sponsor_data['SponsorTypes']+'N')
ardf = ardf + add
srdf = srdf + sub
if ardf != "" or srdf != "":
vivo_date_harvested = get_value(sponsor_uri,
"ufVivo:dateHarvested")
[add, sub] = update_data_property(sponsor_uri, "ufVivo:dateHarvested",
vivo_date_harvested,
datetime.now().isoformat())
ardf = ardf + add
srdf = srdf + sub
vivo_harvested_by = get_value(sponsor_uri, "ufVivo:harvestedBy")
[add, sub] = update_data_property(sponsor_uri, "ufVivo:harvestedBy",
vivo_harvested_by,
__harvest_text__)
ardf = ardf + add
srdf = srdf + sub
return [ardf, srdf]
# Start here
if len(sys.argv) > 1:
dsp_file_name = str(sys.argv[1])
else:
dsp_file_name = "test_sponsor_data.txt"
file_name, file_extension = os.path.splitext(dsp_file_name)
add_file = open(file_name+"_add.rdf", "w")
sub_file = open(file_name+"_sub.rdf", "w")
log_file = open(file_name+"_log.txt", "w")
print >>log_file, datetime.now(), "Start"
print >>log_file, datetime.now(), "Make sponsor dictionary"
sponsor_dict = make_sponsor_dict(debug=True)
print >>log_file, datetime.now(), "sponsor dictionary has ", \
len(sponsor_dict), "entries"
print >>log_file, datetime.now(), "Read sponsor file"
uf_sponsors = read_csv(dsp_file_name)
sponsors = {}
for row in uf_sponsors.values():
row['sponsor_label'] = improve_sponsor_name(row['SponsorName'])
sponsors[row['SponsorID']] = row
print >>log_file, datetime.now(), "sponsor file has ", len(sponsors.items()),\
"entries"
print >>log_file, datetime.now(), "Begin processing"
ardf = rdf_header()
srdf = rdf_header()
sponsor_found = 0
sponsor_not_found = 0
sponsor_not_in_uf_data = 0
all_numbers = sponsors.keys()
for sponsor_number in sponsor_dict.keys():
    if sponsor_number not in all_numbers:
        all_numbers.append(sponsor_number)
for sponsor_number in all_numbers:
if sponsor_number in sponsor_dict and sponsor_number in sponsors:
# Case 1. Sponsor in Sponsor data and VIVO. Update VIVO
sponsor_found = sponsor_found + 1
sponsor_uri = sponsor_dict[sponsor_number]
[add, sub] = update_sponsor(sponsor_uri, sponsors[sponsor_number])
ardf = ardf + add
srdf = srdf + sub
elif sponsor_number not in sponsor_dict:
# Sponsor in Sponsor Data, but not in VIVO. Add to VIVO
sponsor_not_found = sponsor_not_found + 1
[add, sponsor_uri] = add_sponsor(sponsor_number)
ardf = ardf + add
[add, sub] = update_sponsor(sponsor_uri, sponsors[sponsor_number])
ardf = ardf + add
srdf = srdf + sub
else:
# Sponsor is only in VIVO, not in Sponsor data. Nothing to do
sponsor_not_in_uf_data = sponsor_not_in_uf_data + 1
print >>log_file, datetime.now(), "Found in VIVO =", sponsor_found, " will be updated"
print >>log_file, datetime.now(), "Not Found in VIVO =", \
sponsor_not_found, " will be added"
print >>log_file, datetime.now(), "Found only in VIVO =", \
sponsor_not_in_uf_data, "No action to be taken"
print >>log_file, datetime.now(), "Write files"
ardf = ardf + rdf_footer()
srdf = srdf + rdf_footer()
print >>add_file, ardf.encode('ascii','xmlcharrefreplace')
print >>sub_file, srdf.encode('ascii','xmlcharrefreplace')
add_file.close()
sub_file.close()
print >>log_file, datetime.now(), "Finished"
|
|
import logging
import bisect
import itertools as iter
import h5py
import numpy
import scipy
import csv
import pdb
import itertools
from operator import itemgetter
from abc import ABCMeta, abstractmethod, abstractproperty
log = logging.getLogger(__name__)
def _get_dtype_from_format(format):
dtype = 'int8'
if format == 'float':
dtype = 'float32'
elif format == 'nucleotides':
dtype ='S1'
return(dtype)
def _get_snps_from_row(format,snps):
dtype = _get_dtype_from_format(format)
    # Nucleotide calls are stored as-is as single-character strings ('S1');
    # the original decoding branch here was unreachable (format is never 'S1').
return(numpy.array(snps,dtype=dtype))
def parse_genotype_csv_file(csv_file,format='binary', missingVal='NA'):
log.info('Parsing Genotype file (CSV) %s in format %s' % (csv_file, format))
retval ={}
with open(csv_file) as f:
dialect = csv.Sniffer().sniff(f.read(40))
f.seek(0)
reader = csv.reader(f, dialect)
header = next(reader)
if header[0] != 'Chromosome' or (header[1] != 'Positions' and header[1] != 'Position'):
raise Exception('First two columns must be in form Chromosome, Positions')
dtype = _get_dtype_from_format(format)
# accessions = [ header[ef].strip() for ef in range(2, len(header))]
accessions = list(map(lambda x: x.strip(),header[2:]))
log.info('%s accessions found %s' % (len(accessions),accessions))
first_line = next(reader)
snps = []
positions = []
old_chr = first_line[0]
positions.append(int(first_line[1]))
snps.append(_get_snps_from_row(format,first_line[2:]))
data = []
retval = {'format':format,"accessions":accessions}
for row in reader:
chr = row[0]
if chr != old_chr: #Then save snps_data
log.info('Chromosome %s finished. %s SNPs found' % (old_chr,len(snps)))
data.append({'snps':snps,'positions':positions,'chr':old_chr})
positions = []
snps = []
old_chr = chr
positions.append(int(row[1]))
snps.append(_get_snps_from_row(format,row[2:]))
data.append({'snps':snps,'positions':positions,'chr':chr})
retval['data'] = data
log.info('Finished parsing Genotype file')
return retval
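# Example of the structure returned by parse_genotype_csv_file (toy values):
#   {'format': 'binary',
#    'accessions': ['6909', '6910'],
#    'data': [{'chr': '1', 'positions': [657, 3102],
#              'snps': [array([0, 1], dtype=int8), array([1, 1], dtype=int8)]}]}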
def load_hdf5_genotype_data(hdf5_file):
return HDF5Genotype(hdf5_file)
def load_csv_genotype_data(csv_files,format='binary'):
log.info("Loading Genotype file")
snps = []
positions = []
chr_regions = []
chrs = []
accessions = None
if type(csv_files) is list:
log.info("Genotype split in %s files" % len(csv_files))
for i,csv_file in enumerate(csv_files):
log.info("Loading %s " % csv_file)
data = parse_genotype_csv_file(csv_file,format)
if accessions is None:
accessions = data['accessions']
if data['accessions'] != accessions:
raise Exception('Accessions must match')
for chr_data in data['data']:
num_snps = len(chr_data['positions'])
chr_regions.append((len(positions),len(positions)+num_snps))
chrs.append(chr_data['chr'])
positions.extend(chr_data['positions'])
snps.extend(chr_data['snps'])
log.info("Finished loading %s SNPs for Chr %s" % (len(positions),chr_data['chr']))
else:
data = parse_genotype_csv_file(csv_files,format)
accessions = data['accessions']
for chr_data in data['data']:
num_snps = len(chr_data['positions'])
chr_regions.append((len(positions),len(positions)+num_snps))
chrs.append(chr_data['chr'])
positions.extend(chr_data['positions'])
snps.extend(chr_data['snps'])
log.info("Finished loading %s SNPs for Chr %s" % (len(positions),chr_data['chr']))
log.info("Finished loading Genotype file: %s SNPs %s accessions %s chromosomes" % (len(positions),len(accessions),len(chrs)))
return Genotype(snps,positions, accessions,chr_regions,chrs,format)
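def _example_csv_to_hdf5():
    """
    A minimal workflow sketch (the file names are assumptions): load a
    nucleotide CSV genotype, convert it to the binary format and store it
    as HDF5 for chunked access later on.
    """
    genotype = load_csv_genotype_data('genotype.csv', format='nucleotides')
    genotype.convert_data_format(target_format='binary')
    genotype.save_as_hdf5('genotype.hdf5')
    return genotype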
def calculate_ld(snps):
#filter non binary snps
snps_t = scipy.transpose(snps)
snps_stand = scipy.transpose((snps_t - scipy.mean(snps, 1)) / scipy.std(snps, 1))
r2_values =scipy.dot(snps_stand, scipy.transpose(snps_stand))
r2_values *= (1.0 / snps.shape[1])
r2_values **= 2
return r2_values
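def _example_calculate_ld():
    """
    A minimal sketch (not used by the module) of calculate_ld(): for a toy
    binary SNP matrix it returns a num_snps x num_snps matrix of squared
    Pearson correlations (r^2) between SNPs.
    """
    toy_snps = numpy.array([[0, 1, 0, 1],
                            [0, 1, 0, 1],
                            [1, 0, 1, 0]], dtype='int8')
    # SNPs 0 and 1 are identical and SNP 2 is their complement, so every
    # pairwise r^2 should come out as (close to) 1.0.
    return calculate_ld(toy_snps)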
class AbstractGenotype(object):
__metaclass__ = ABCMeta
@abstractproperty
def data_format(self):
pass
@abstractmethod
def get_snps_iterator(self,chr=None,is_chunked=False,chunk_size=1000):
pass
def get_snp_at(self,chr,position):
chr_ix = chr -1
chr_region = self.chr_regions[chr_ix]
i = bisect.bisect_left(self.positions[chr_region[0]:chr_region[1]], position)
if i != chr_region[1]:
snp_ix = i + chr_region[0]
if self.positions[snp_ix] == position:
return self.snps[snp_ix]
return None
def get_chr_region_ix(self,chr):
# some genotype datasets have chromosomes as integers instead of strings
        chrs = numpy.asarray(self.chrs)  # CSV-loaded genotypes keep chrs as a plain list
        if chrs.dtype.kind == 'i':
            return numpy.where(chrs == int(chr))[0][0]
        return numpy.where(chrs == str(chr))[0][0]
@abstractproperty
def positions(self):
pass
@abstractproperty
def accessions(self):
pass
@abstractproperty
def snps(self):
pass
@property
def chromosomes(self):
chromosomes = []
for i,chr_region in enumerate(self.chr_regions):
chromosomes.extend([self.chrs[i]] * (chr_region[1] - chr_region[0]))
return(chromosomes)
@abstractproperty
def chrs(self):
pass
@abstractproperty
def num_snps(self):
pass
@abstractproperty
def chr_regions(self):
pass
def get_snps_from_pos(self,chr_pos):
"""
        Returns a tuple (indices, snps) for a list of (chr, pos) tuples
"""
snps = []
indices = []
if chr_pos is None or len(chr_pos) == 0:
return (indices,snps)
chr_pos_ix = map(list,zip(*sorted(zip(chr_pos,range(len(chr_pos))))))
        # group by chr for efficient sequential iteration over the snps generator
it_ix = 0
filtered_chr_pos_ix = [[],[]]
for chr,positions in iter.groupby(chr_pos_ix[0],lambda x:x[0]):
pos_indices = []
it = self.get_snps_iterator(chr)
for position in positions:
pos_ix = self.get_pos_ix(chr,position[1])
if pos_ix[2] == False:
it_ix+=1
continue
filtered_chr_pos_ix[0].append(chr_pos_ix[0][it_ix])
filtered_chr_pos_ix[1].append(chr_pos_ix[1][it_ix])
pos_indices.append(pos_ix[1])
indices.append(pos_ix[0])
it_ix+=1
for i,ix in enumerate(pos_indices):
previous_ix = 0 if i == 0 else pos_indices[i-1] +1
snps.append(next(iter.islice(it,ix-previous_ix,None),None))
return(map(list,zip(*sorted(zip(filtered_chr_pos_ix[1],indices))))[1],map(list,zip(*sorted(zip(filtered_chr_pos_ix[1],snps))))[1])
def get_pos_ix(self,chr,position):
"""
Returns the index of chr,position using bisect
"""
chr_region = self.chr_regions[self.get_chr_region_ix(chr)]
positions = self.positions[chr_region[0]:chr_region[1]]
i = bisect.bisect_left(positions, position)
abs_ix = i + chr_region[0]
found = False
if abs_ix != len(self.positions) and self.positions[abs_ix] == position:
found = True
return(abs_ix,i,found)
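    # Worked example (assuming chromosome 1 is the first region and holds
    # positions [657, 3102, 4980]): get_pos_ix(1, 3102) -> (1, 1, True) and
    # get_pos_ix(1, 3000) -> (1, 1, False), i.e. (absolute index, index within
    # the chromosome, whether the exact position was found).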
def get_chr_region_from_index(self,ix):
for i,chr_region in enumerate(self.chr_regions):
if chr_region[0] <= ix and chr_region[1] >= ix:
return i, chr_region
return(None)
def get_chr_pos_from_index(self,ix):
return(self.get_chr_from_index(ix),self.positions[ix])
def get_chr_from_index(self,ix):
for i,chr_region in enumerate(self.chr_regions):
if chr_region[0] <= ix and chr_region[1] >= ix:
return(int(self.chrs[i]))
raise(Exception('Index %s outside of chr_regions' %ix))
def get_mafs(self):
macs = []
mafs = []
num_nts = len(self.accessions)
if self.data_format in ['binary', 'int']:
for snp in self.get_snps_iterator():
l = scipy.bincount(snp)
mac = min(l)
macs.append(mac)
mafs.append(mac / float(num_nts))
elif self.data_format == 'diploid_int':
for snp in self.get_snps_iterator():
bin_counts = scipy.bincount(snp, minlength=3)
l = scipy.array([bin_counts[0], bin_counts[2]]) + bin_counts[1] / 2.0
mac = l.min()
macs.append(mac)
mafs.append(mac / float(num_nts))
else:
raise(NotImplementedError)
return {"macs":macs, "mafs":mafs}
@abstractproperty
def genome_length(self):
pass
def filter_accessions(self,accession_ids):
sd_indices_to_keep = set()
pd_indices_to_keep = []
for i, acc in enumerate(self.accessions):
for j, et in enumerate(accession_ids):
if str(et) == str(acc):
sd_indices_to_keep.add(i)
pd_indices_to_keep.append(j)
sd_indices_to_keep = list(sd_indices_to_keep)
sd_indices_to_keep.sort()
self.filter_accessions_ix(sd_indices_to_keep)
return(sd_indices_to_keep,pd_indices_to_keep)
@abstractmethod
def filter_accessions_ix(self,accessions_ix):
pass
@abstractmethod
def filter_snps_ix(self,accessions_ix):
pass
@abstractmethod
def convert_data_format(self,target_format='binary'):
pass
def save_as_csv(self,csv_file,chunk_size=1000):
log.info('Writing genotype to CSV file %s' % csv_file)
with open(csv_file,'w') as csvfile:
csv_writer = csv.writer(csvfile,delimiter=',')
header = ['Chromosome','Positions']
header.extend(self.accessions)
csv_writer.writerow(header)
snp_iterator = self.get_snps_iterator(is_chunked=True,chunk_size=chunk_size)
chromosomes = self.chromosomes
positions = self.positions
num = len(positions)
            log_iteration_count = max(1, num / (chunk_size * 10))  # avoid modulo by zero for small files
for i,snps in enumerate(snp_iterator):
current_ix = i* chunk_size
if i % log_iteration_count == 0:
log.info('Line %s/%s of Genotype written' % (current_ix,num))
rows = numpy.hstack((numpy.asarray(zip(chromosomes[current_ix:current_ix+chunk_size],positions[current_ix:current_ix+chunk_size])),snps))
csv_writer.writerows(rows.tolist())
log.info('Finished writing genotype ')
def save_as_hdf5(self, hdf5_file):
log.info('Writing genotype to HDF5 file %s' %hdf5_file)
if self.data_format in ['binary', 'diploid_int']:
h5file = h5py.File(hdf5_file, 'w')
num_snps = self.num_snps
num_accessions = len(self.accessions)
h5file.create_dataset('accessions', data=self.accessions,shape=(num_accessions,))
h5file.create_dataset('positions', data=self.positions,shape=(num_snps,),dtype='i4')
h5file['positions'].attrs['chrs'] = self.chrs
h5file['positions'].attrs['chr_regions'] = self.chr_regions
h5file.create_dataset('snps', shape=(num_snps, len(self.accessions)),
dtype='int8', compression='lzf', chunks=((1000, num_accessions)),data=list(self.get_snps_iterator()))
h5file['snps'].attrs['data_format'] = self.data_format
h5file['snps'].attrs['num_snps'] = num_snps
h5file['snps'].attrs['num_accessions'] = num_accessions
h5file.close()
log.info("Finished writing genotype to HDF5 file")
else:
raise NotImplementedError
def filter_monomorphic_snps(self):
"""
Removes SNPs from the data which are monomorphic.
"""
snps_ix = []
num_snps = self.num_snps
for i,snps in enumerate(self.get_snps_iterator()):
bc = numpy.unique(snps)
if len(numpy.unique(snps)) <= 1:
snps_ix.append(i)
numRemoved = len(snps_ix)
self.filter_snps_ix(snps_ix)
log.info("Removed %d monomoprhic SNPs, leaving %d SNPs in total." % (numRemoved, num_snps))
return(num_snps,numRemoved)
def filter_non_binary(self):
"""
Removes all but binary SNPs. (I.e. monomorphic, tertiary and quaternary alleles SNPs are removed.)
"""
num_snps = self.num_snps
snps_ix = []
num_accessions = len(self.accessions)
# Faster (2.2 ms) than doing numpy.bincount (91.1ms per loop)
# TODO chunk size is hardcoded
for i,snps in enumerate(self.get_snps_iterator(is_chunked=True)):
sm = numpy.sum(snps,axis=1)
snps_ix.extend(numpy.where( (sm == 0) | (sm == num_accessions))[0]+i*1000)
numRemoved = len(snps_ix)
self.filter_snps_ix(snps_ix)
log.info("Removed %d non-binary SNPs, leaving %d SNPs in total." % (numRemoved, self.num_snps))
return((num_snps,numRemoved))
def calculate_ld(self,chr_pos):
# sort chr_pos first
chr_pos = sorted(chr_pos,key=itemgetter(0,1))
indices,snps = self.get_snps_from_pos(chr_pos)
return(calculate_ld(numpy.vstack(snps)))
class Genotype(AbstractGenotype):
"""
    A class that encompasses multiple _SnpsData_ chromosome objects and can deal with them as a whole.
    This object should eventually replace the snpsdata lists.
"""
def __init__(self, snps,positions, accessions,chr_regions, chrs,data_format=None):
self._snps = snps
self._positions = positions
self._chr_regions = chr_regions
self._chrs = chrs
self._accessions = accessions
self._data_format = data_format # binary, diploid_ints, floats, int
@property
def data_format(self):
return(self._data_format)
@property
def accessions(self):
return(self._accessions)
@property
def snps(self):
return(self._snps)
@property
def positions(self):
return(self._positions)
@property
def chrs(self):
return(self._chrs)
@property
def num_snps(self):
return(len(self._snps))
@property
def chr_regions(self):
return(self._chr_regions)
@property
def genome_length(self):
return(self.chr_regions[-1][0])
def filter_snps_ix(self,snps_ix):
        # snps_ix lists the indices of SNPs to remove (see filter_monomorphic_snps)
        snps_to_remove = set(snps_ix)
        self._snps = [snp for i, snp in enumerate(self._snps) if i not in snps_to_remove]
        self._positions = [pos for i, pos in enumerate(self._positions) if i not in snps_to_remove]
def get_snps_iterator(self,chr=None,is_chunked=False,chunk_size=1000):
start = 0
end = None
if chr is not None:
chr_region = self.chr_regions[self.get_chr_region_ix(chr)]
start = chr_region[0]
end = chr_region[1]
        if is_chunked:
            if end is None:
                end = self.num_snps
            for i in xrange(start, end, chunk_size):
                stop_i = min(i + chunk_size, end)
                yield(self._snps[i:stop_i])
else:
for snp in self._snps[start:end]:
yield(snp)
def convert_data_format(self,target_format='binary',reference_ecotype='6909'):
"""
Converts the underlying raw data format to a binary one, i.e. A,C,G,T,NA,etc. are converted to 0,1,-1
"""
log.info('Converting from %s to %s' % (self.data_format,target_format))
if self.data_format == target_format:
log.warning("Data appears to be already in %s format!" % target_format)
else:
if self.data_format == 'nucleotides' and target_format == 'binary':
self._convert_snps_to_binary(reference_ecotype = reference_ecotype)
else:
raise NotImplementedError
def _convert_snps_to_binary(self,reference_ecotype='6909'):
missingVal = 'NA'
decoder = {missingVal:-1} #Might cause errors somewhere???!!!
coding_fun = scipy.vectorize(lambda x: decoder[x], otypes=['int8'])
if reference_ecotype in self.accessions:
ref_i = self.accessions.index(reference_ecotype)
else:
ref_i = 0
log.warning("Given reference ecotype %s wasn't found, using %s as 0-reference." % \
(reference_ecotype, self.accessions[ref_i]))
snps = []
num = len(self.positions)
positions = []
chr_region_removed_snps = [0] * len(self.chr_regions)
for snp_i, (snp, pos) in enumerate(itertools.izip(self._snps, self.positions)):
chr_region_ix,chr_region = self.get_chr_region_from_index(snp_i)
if snp_i % (num / 10) == 0:
log.info('Converted %s/%s of SNPs.' % (snp_i,num))
unique_nts = scipy.unique(snp).tolist()
if missingVal in unique_nts:
if len(unique_nts) != 3:
chr_region_removed_snps[chr_region_ix] +=1
continue #Skipping non-binary SNP
else:
                    unique_nts.remove(missingVal)
else:
if len(unique_nts) != 2:
chr_region_removed_snps[chr_region_ix] +=1
continue #Skipping non-binary SNP
if snp[ref_i] != missingVal:
ref_nt = snp[ref_i]
decoder[ref_nt] = 0
unique_nts.remove(ref_nt)
decoder[unique_nts[0]] = 1
else:
decoder[unique_nts[0]] = 0
decoder[unique_nts[1]] = 1
snps.append(coding_fun(snp))
positions.append(pos)
log.info('Removed %d non-binary SNPs out of %d, when converting to binary SNPs.'\
% (len(self.positions) - len(positions), len(self.positions)))
        assert len(snps) == len(positions), 'Something odd with the lengths.'
chr_regions = self.chr_regions[:]
sum_removed_snps = 0
for i,num_snps in enumerate(chr_region_removed_snps):
sum_removed_snps +=num_snps
if i == 0:
chr_regions[0] = (0,chr_regions[0][1] - num_snps)
else:
chr_regions[i] = (chr_regions[i-1][1],chr_regions[i][1] - sum_removed_snps)
self._chr_regions = chr_regions
self._snps = snps
self._positions = positions
self._data_format = 'binary'
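    # Example of the encoding produced above (assumed calls): with the reference
    # ecotype carrying 'A' at a SNP, the nucleotide row ['A', 'T', 'A', 'NA']
    # is recoded to [0, 1, 0, -1] ('NA' always maps to -1).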
def filter_accessions_ix(self,indicesToKeep):
"""
Removes accessions from the data.
"""
num_accessions = len(self.accessions)
newAccessions = []
newArrayIds = []
for i in indicesToKeep:
newAccessions.append(self.accessions[i])
for i in range(len(self.snps)):
snp = self.snps[i]
newSnp = []
for j in indicesToKeep:
newSnp.append(snp[j])
self.snps[i] = newSnp
        self._accessions = newAccessions
# TODO update chr_regions
log.debug("Removed %d accessions, leaving %d in total." % (num_accessions - len(indicesToKeep), len(indicesToKeep)))
class HDF5Genotype(AbstractGenotype):
def __init__(self,hdf5_file):
self.h5file = h5py.File(hdf5_file, 'r')
self.filter_snps = None
self.accession_filter = None
def __del__(self):
if self.h5file is not None:
self.h5file.close()
def get_snps(self):
return self.h5file['snps']
@property
def snps(self):
return self.h5file['snps']
@property
def data_format(self):
return self.h5file['snps'].attrs['data_format']
@property
def accessions(self):
if self.accession_filter is None or len(self.accession_filter) == 0:
return self.h5file['accessions'][:]
else:
return self.h5file['accessions'][self.accession_filter]
def convert_data_format(self,target_format='binary'):
raise NotImplementedError
def _get_snps_(self, start=0,end=None,chunk_size=1000):
"""
        A generator/iterator for SNP chunks.
"""
if end is None:
end = self.original_num_snps
for i in xrange(start,end,chunk_size):
stop_i = min(i + chunk_size, end)
if self.accession_filter is None or len(self.accession_filter) == 0:
if self.filter_snps is None:
yield self.h5file['snps'][i:stop_i]
else:
yield self.h5file['snps'][self.filter_snps[i:stop_i],:]
else:
if self.filter_snps is None:
# first read the entire row and then filter columns (7.91ms vs 94.3ms)
yield self.h5file['snps'][i:stop_i][:,self.accession_filter]
else:
filter_chunk = self.filter_snps[i:stop_i]
# first read the entire row and then filter columns (7.91ms vs 94.3ms)
snps_chunk = self.h5file['snps'][i:stop_i][:,self.accession_filter]
yield snps_chunk[filter_chunk]
def get_snps_iterator(self,chr=None,is_chunked=False,chunk_size=1000):
"""
        Returns a generator over single SNPs, or over chunks of SNPs if is_chunked is True.
        If chr is passed the generator will only iterate over the specified chromosome.
"""
start = 0
end = None
if chr is not None:
# use unfiltered chr_regions because filtering happens in _get_snps_
chr_region = self.h5file['positions'].attrs['chr_regions'][self.get_chr_region_ix(chr)]
start = chr_region[0]
end = chr_region[1]
for snp_chunk in self._get_snps_(start=start,end=end,chunk_size=chunk_size):
if is_chunked:
yield snp_chunk
else:
for snp in snp_chunk:
yield snp
@property
def original_num_snps(self):
return self.h5file['snps'].attrs['num_snps']
@property
def positions(self):
if self.filter_snps is not None:
return self.h5file['positions'][self.filter_snps]
return self.h5file['positions'][:]
@property
def chrs(self):
return self.h5file['positions'].attrs['chrs']
@property
def num_snps(self):
if self.filter_snps is not None:
return self.filter_snps.sum()
return self.original_num_snps
@property
def chr_regions(self):
if self.filter_snps is not None:
return self.filtered_chr_regions
return self.h5file['positions'].attrs['chr_regions']
@property
def genome_length(self):
return self.chr_regions[-1][0]
def filter_accessions_ix(self,indicesToKeep):
"""
Removes accessions from the data.
"""
num_accessions = len(self.accessions)
self.accession_filter = indicesToKeep
log.debug("Removed %d accessions, leaving %d in total." % (num_accessions - len(indicesToKeep), len(indicesToKeep)))
def filter_snps_ix(self,snps_ix):
if snps_ix is None or len(snps_ix) == 0:
self.filter_snps = None
self.filtered_chr_regions = None
else:
self.filter_snps = numpy.ones((self.num_snps,),dtype=numpy.bool)
self.filter_snps[snps_ix] = 0
            self.filtered_chr_regions = self._get_filtered_regions()
    def _get_filtered_regions(self):
filtered_chr_regions = []
if self.filter_snps is None:
return None
start_ix = 0
end_ix = 0
for chr_region in self.h5file['positions'].attrs['chr_regions']:
end_ix = start_ix + self.filter_snps[chr_region[0]:chr_region[1]].sum()
filtered_chr_regions.append((start_ix,end_ix))
start_ix = end_ix
return filtered_chr_regions
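def _example_hdf5_usage():
    """
    A minimal usage sketch (the file name is an assumption, not part of the
    module): iterate over the SNPs of chromosome 1 in chunks of 1000 rows
    without loading the whole HDF5 genotype into memory.
    """
    genotype = load_hdf5_genotype_data('genotype.hdf5')
    num_snps = 0
    for snp_chunk in genotype.get_snps_iterator(chr=1, is_chunked=True, chunk_size=1000):
        num_snps += len(snp_chunk)
    return num_snps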
|
|
"""
Generic command module. Pretty much every command should go here for
now.
"""
import time
from django.conf import settings
from src.server.sessionhandler import SESSIONS
from src.utils import utils, search
from src.objects.models import ObjectNick as Nick
from src.commands.default.muxcommand import MuxCommand, MuxCommandOOC
# limit symbol import for API
__all__ = ("CmdHome", "CmdLook", "CmdPassword", "CmdNick",
"CmdInventory", "CmdGet", "CmdDrop", "CmdGive", "CmdQuit", "CmdWho",
"CmdSay", "CmdPose", "CmdEncoding", "CmdAccess",
"CmdOOCLook", "CmdIC", "CmdOOC", "CmdColorTest")
AT_SEARCH_RESULT = utils.variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1))
BASE_PLAYER_TYPECLASS = settings.BASE_PLAYER_TYPECLASS
class CmdHome(MuxCommand):
"""
home
Usage:
home
Teleports you to your home location.
"""
key = "home"
locks = "cmd:perm(home) or perm(Builders)"
def func(self):
"Implement the command"
caller = self.caller
home = caller.home
if not home:
caller.msg("You have no home!")
elif home == caller.location:
caller.msg("You are already home!")
else:
caller.move_to(home)
caller.msg("There's no place like home ...")
class CmdLook(MuxCommand):
"""
look
Usage:
look
look <obj>
look *<player>
Observes your location or objects in your vicinity.
"""
key = "look"
aliases = ["l", "ls"]
locks = "cmd:all()"
arg_regex = r"\s.*?|$"
def func(self):
"""
Handle the looking.
"""
caller = self.caller
args = self.args
if args:
# Use search to handle duplicate/nonexistent results.
looking_at_obj = caller.search(args, use_nicks=True)
if not looking_at_obj:
return
else:
looking_at_obj = caller.location
if not looking_at_obj:
caller.msg("You have no location to look at!")
return
if not hasattr(looking_at_obj, 'return_appearance'):
# this is likely due to us having a player instead
looking_at_obj = looking_at_obj.character
if not looking_at_obj.access(caller, "view"):
caller.msg("Could not find '%s'." % args)
return
# get object's appearance
caller.msg(looking_at_obj.return_appearance(caller))
# the object's at_desc() method.
looking_at_obj.at_desc(looker=caller)
class CmdPassword(MuxCommand):
"""
@password - set your password
Usage:
@password <old password> = <new password>
Changes your password. Make sure to pick a safe one.
"""
key = "@password"
locks = "cmd:all()"
def func(self):
"hook function."
caller = self.caller
if hasattr(caller, "player"):
caller = caller.player
if not self.rhs:
caller.msg("Usage: @password <oldpass> = <newpass>")
return
oldpass = self.lhslist[0] # this is already stripped by parse()
newpass = self.rhslist[0] # ''
try:
uaccount = caller.user
except AttributeError:
caller.msg("This is only applicable for players.")
return
if not uaccount.check_password(oldpass):
caller.msg("The specified old password isn't correct.")
elif len(newpass) < 3:
caller.msg("Passwords must be at least three characters long.")
else:
uaccount.set_password(newpass)
uaccount.save()
caller.msg("Password changed.")
class CmdNick(MuxCommand):
"""
Define a personal alias/nick
Usage:
nick[/switches] <nickname> = [<string>]
alias ''
Switches:
object - alias an object
player - alias a player
clearall - clear all your aliases
list - show all defined aliases (also "nicks" works)
Examples:
nick hi = say Hello, I'm Sarah!
nick/object tom = the tall man
A 'nick' is a personal shortcut you create for your own use. When
you enter the nick, the alternative string will be sent instead.
The switches control in which situations the substitution will
happen. The default is that it will happen when you enter a
command. The 'object' and 'player' nick-types kick in only when
you use commands that require an object or player as a target -
you can then use the nick to refer to them.
Note that no objects are actually renamed or changed by this
command - the nick is only available to you. If you want to
permanently add keywords to an object for everyone to use, you
need build privileges and to use the @alias command.
"""
key = "nick"
aliases = ["nickname", "nicks", "@nick", "alias"]
locks = "cmd:all()"
def func(self):
"Create the nickname"
caller = self.caller
switches = self.switches
nicks = Nick.objects.filter(db_obj=caller.dbobj).exclude(db_type="channel")
if 'list' in switches:
string = "{wDefined Nicks:{n"
cols = [["Type"],["Nickname"],["Translates-to"] ]
for nick in nicks:
cols[0].append(nick.db_type)
cols[1].append(nick.db_nick)
cols[2].append(nick.db_real)
for ir, row in enumerate(utils.format_table(cols)):
if ir == 0:
string += "\n{w" + "".join(row) + "{n"
else:
string += "\n" + "".join(row)
caller.msg(string)
return
if 'clearall' in switches:
nicks.delete()
caller.msg("Cleared all aliases.")
return
if not self.args or not self.lhs:
caller.msg("Usage: nick[/switches] nickname = [realname]")
return
nick = self.lhs
real = self.rhs
if real == nick:
caller.msg("No point in setting nick same as the string to replace...")
return
# check so we have a suitable nick type
if not any(True for switch in switches if switch in ("object", "player", "inputline")):
switches = ["inputline"]
string = ""
for switch in switches:
oldnick = Nick.objects.filter(db_obj=caller.dbobj, db_nick__iexact=nick, db_type__iexact=switch)
if not real:
# removal of nick
if oldnick:
# clear the alias
string += "\nNick '%s' (= '%s') was cleared." % (nick, oldnick[0].db_real)
caller.nicks.delete(nick, nick_type=switch)
else:
string += "\nNo nick '%s' found, so it could not be removed." % nick
else:
# creating new nick
if oldnick:
string += "\nNick %s changed from '%s' to '%s'." % (nick, oldnick[0].db_real, real)
else:
string += "\nNick set: '%s' = '%s'." % (nick, real)
caller.nicks.add(nick, real, nick_type=switch)
caller.msg(string)
class CmdInventory(MuxCommand):
"""
inventory
Usage:
inventory
inv
Shows your inventory.
"""
key = "inventory"
aliases = ["inv", "i"]
locks = "cmd:all()"
def func(self):
"check inventory"
items = self.caller.contents
if not items:
string = "You are not carrying anything."
else:
# format item list into nice columns
cols = [[],[]]
for item in items:
cols[0].append(item.name)
desc = item.db.desc
if not desc:
desc = ""
cols[1].append(utils.crop(str(desc)))
# auto-format the columns to make them evenly wide
ftable = utils.format_table(cols)
string = "You are carrying:"
for row in ftable:
string += "\n " + "{C%s{n - %s" % (row[0], row[1])
self.caller.msg(string)
class CmdGet(MuxCommand):
"""
get
Usage:
get <obj>
Picks up an object from your location and puts it in
your inventory.
"""
key = "get"
aliases = "grab"
locks = "cmd:all()"
def func(self):
"implements the command."
caller = self.caller
if not self.args:
caller.msg("Get what?")
return
obj = caller.search(self.args, location=caller.location)
if not obj:
return
if caller == obj:
caller.msg("You can't get yourself.")
return
#print obj, obj.location, caller, caller==obj.location
if caller == obj.location:
caller.msg("You already hold that.")
return
if not obj.access(caller, 'get'):
if obj.db.get_err_msg:
caller.msg(obj.db.get_err_msg)
else:
caller.msg("You can't get that.")
return
obj.move_to(caller, quiet=True)
caller.msg("You pick up %s." % obj.name)
caller.location.msg_contents("%s picks up %s." %
(caller.name,
obj.name),
exclude=caller)
# calling hook method
obj.at_get(caller)
class CmdDrop(MuxCommand):
"""
drop
Usage:
drop <obj>
Lets you drop an object from your inventory into the
location you are currently in.
"""
key = "drop"
locks = "cmd:all()"
def func(self):
"Implement command"
caller = self.caller
if not self.args:
caller.msg("Drop what?")
return
# Because the DROP command by definition looks for items
# in inventory, call the search function using location = caller
results = caller.search(self.args, location=caller, ignore_errors=True)
# now we send it into the error handler (this will output consistent
# error messages if there are problems).
obj = AT_SEARCH_RESULT(caller, self.args, results, False,
nofound_string="You don't carry %s." % self.args,
multimatch_string="You carry more than one %s:" % self.args)
if not obj:
return
obj.move_to(caller.location, quiet=True)
caller.msg("You drop %s." % (obj.name,))
caller.location.msg_contents("%s drops %s." %
(caller.name, obj.name),
exclude=caller)
# Call the object script's at_drop() method.
obj.at_drop(caller)
class CmdGive(MuxCommand):
"""
give away things
Usage:
give <inventory obj> = <target>
Gives an item from your inventory to another character,
placing it in their inventory.
"""
key = "give"
locks = "cmd:all()"
def func(self):
"Implement give"
caller = self.caller
if not self.args or not self.rhs:
caller.msg("Usage: give <inventory object> = <target>")
return
to_give = caller.search(self.lhs)
target = caller.search(self.rhs)
if not (to_give and target):
return
if target == caller:
caller.msg("You keep %s to yourself." % to_give.key)
return
if not to_give.location == caller:
caller.msg("You are not holding %s." % to_give.key)
return
# give object
to_give.location = target
caller.msg("You give %s to %s." % (to_give.key, target.key))
target.msg("%s gives you %s." % (caller.key, to_give.key))
class CmdQuit(MuxCommand):
"""
quit
Usage:
@quit
Gracefully disconnect from the game.
"""
key = "@quit"
locks = "cmd:all()"
def func(self):
"hook function"
for session in self.caller.sessions:
session.msg("{RQuitting{n. Hope to see you soon again.")
session.session_disconnect()
class CmdWho(MuxCommand):
"""
who
Usage:
who
doing
Shows who is currently online. 'doing' is an alias that limits the
information shown, even for users with all permissions.
"""
key = "who"
aliases = "doing"
locks = "cmd:all()"
def func(self):
"""
Get all connected players by polling session.
"""
caller = self.caller
session_list = SESSIONS.get_sessions()
if self.cmdstring == "doing":
show_session_data = False
else:
show_session_data = caller.check_permstring("Immortals") or caller.check_permstring("Wizards")
if show_session_data:
table = [["Player Name"], ["On for"], ["Idle"], ["Room"], ["Cmds"], ["Host"]]
else:
table = [["Player Name"], ["On for"], ["Idle"]]
for session in session_list:
if not session.logged_in:
continue
delta_cmd = time.time() - session.cmd_last_visible
delta_conn = time.time() - session.conn_time
plr_pobject = session.get_character()
if not plr_pobject:
plr_pobject = session.get_player()
show_session_data = False
table = [["Player Name"], ["On for"], ["Idle"]]
if show_session_data:
table[0].append(plr_pobject.name[:25])
table[1].append(utils.time_format(delta_conn, 0))
table[2].append(utils.time_format(delta_cmd, 1))
table[3].append(plr_pobject.location and plr_pobject.location.id or "None")
table[4].append(session.cmd_total)
table[5].append(session.address[0])
else:
table[0].append(plr_pobject.name[:25])
table[1].append(utils.time_format(delta_conn,0))
table[2].append(utils.time_format(delta_cmd,1))
stable = []
for row in table: # prettify values
stable.append([str(val).strip() for val in row])
ftable = utils.format_table(stable, 5)
string = ""
for ir, row in enumerate(ftable):
if ir == 0:
string += "\n" + "{w%s{n" % ("".join(row))
else:
string += "\n" + "".join(row)
nplayers = (SESSIONS.player_count())
if nplayers == 1:
string += '\nOne player logged in.'
else:
string += '\n%d players logged in.' % nplayers
caller.msg(string)
class CmdSay(MuxCommand):
"""
say
Usage:
say <message>
Talk to those in your current location.
"""
key = "say"
aliases = ['"', "'"]
locks = "cmd:all()"
def func(self):
"Run the say command"
caller = self.caller
if not self.args:
caller.msg("Say what?")
return
speech = self.args
# calling the speech hook on the location
speech = caller.location.at_say(caller, speech)
# Feedback for the object doing the talking.
caller.msg('You say, "%s{n"' % speech)
# Build the string to emit to neighbors.
emit_string = '{c%s{n says, "%s{n"' % (caller.name,
speech)
caller.location.msg_contents(emit_string,
exclude=caller)
class CmdPose(MuxCommand):
"""
pose - strike a pose
Usage:
pose <pose text>
pose's <pose text>
Example:
pose is standing by the wall, smiling.
-> others will see:
Tom is standing by the wall, smiling.
Describe an action being taken. The pose text will
automatically begin with your name.
"""
key = "pose"
aliases = [":", "emote"]
locks = "cmd:all()"
def parse(self):
"""
Custom parse the cases where the emote
starts with some special letter, such
as 's, at which we don't want to separate
the caller's name and the emote with a
space.
"""
args = self.args
if args and not args[0] in ["'", ",", ":"]:
args = " %s" % args.strip()
self.args = args
def func(self):
"Hook function"
if not self.args:
msg = "What do you want to do?"
self.caller.msg(msg)
else:
msg = "%s%s" % (self.caller.name, self.args)
self.caller.location.msg_contents(msg)
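# The parse() above implements a small formatting rule: an emote that starts
# with an apostrophe, comma or colon is glued directly to the poser's name,
# anything else gets a separating space. A standalone sketch of the same rule
# (the helper name is illustrative and not used by the command):
def _format_pose(name, args):
    if args and args[0] not in ("'", ",", ":"):
        args = " %s" % args.strip()
    return "%s%s" % (name, args)
# _format_pose("Tom", "is standing by the wall.")  -> "Tom is standing by the wall."
# _format_pose("Tom", "'s standing by the wall.")  -> "Tom's standing by the wall."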
class CmdEncoding(MuxCommand):
"""
encoding - set a custom text encoding
Usage:
@encoding/switches [<encoding>]
Switches:
clear - clear your custom encoding
This sets the text encoding for communicating with Evennia. This is mostly an issue only if
you want to use non-ASCII characters (i.e. letters/symbols not found in English). If you see
that your characters look strange (or you get encoding errors), you should use this command
to set the server encoding to be the same used in your client program.
Common encodings are utf-8 (default), latin-1, ISO-8859-1 etc.
If you don't submit an encoding, the current encoding will be displayed instead.
"""
key = "@encoding"
aliases = "@encode"
locks = "cmd:all()"
def func(self):
"""
Sets the encoding.
"""
caller = self.caller
if hasattr(caller, 'player'):
caller = caller.player
if 'clear' in self.switches:
# remove customization
old_encoding = caller.db.encoding
if old_encoding:
string = "Your custom text encoding ('%s') was cleared." % old_encoding
else:
string = "No custom encoding was set."
del caller.db.encoding
elif not self.args:
# just list the encodings supported
pencoding = caller.db.encoding
string = ""
if pencoding:
string += "Default encoding: {g%s{n (change with {w@encoding <encoding>{n)" % pencoding
encodings = settings.ENCODINGS
if encodings:
string += "\nServer's alternative encodings (tested in this order):\n {g%s{n" % ", ".join(encodings)
if not string:
string = "No encodings found."
else:
# change encoding
old_encoding = caller.db.encoding
encoding = self.args
caller.db.encoding = encoding
string = "Your custom text encoding was changed from '%s' to '%s'." % (old_encoding, encoding)
caller.msg(string.strip())
class CmdAccess(MuxCommand):
"""
access - show access groups
Usage:
access
This command shows you the permission hierarchy and
which permission groups you are a member of.
"""
key = "access"
aliases = ["groups", "hierarchy"]
locks = "cmd:all()"
def func(self):
"Load the permission groups"
caller = self.caller
hierarchy_full = settings.PERMISSION_HIERARCHY
string = "\n{wPermission Hierarchy{n (climbing):\n %s" % ", ".join(hierarchy_full)
#hierarchy = [p.lower() for p in hierarchy_full]
if self.caller.player.is_superuser:
cperms = "<Superuser>"
pperms = "<Superuser>"
else:
cperms = ", ".join(caller.permissions)
pperms = ", ".join(caller.player.permissions)
string += "\n{wYour access{n:"
string += "\nCharacter {c%s{n: %s" % (caller.key, cperms)
if hasattr(caller, 'player'):
string += "\nPlayer {c%s{n: %s" % (caller.player.key, pperms)
caller.msg(string)
# OOC commands
class CmdOOCLook(MuxCommandOOC, CmdLook):
"""
ooc look
Usage:
look
This is an OOC version of the look command. Since a
Player doesn't have an in-game existence, there is no
concept of location or "self". If we are controlling
a character, pass control over to normal look.
"""
key = "look"
aliases = ["l", "ls"]
locks = "cmd:all()"
help_category = "General"
def func(self):
"implement the ooc look command"
if not self.character:
string = "You are out-of-character (OOC). "
string += "Use {w@ic{n to get back to the game, {whelp{n for more info."
self.caller.msg(string)
else:
self.caller = self.character # we have to put this back for normal look to work.
super(CmdOOCLook, self).func()
class CmdIC(MuxCommandOOC):
"""
Switch control to an object
Usage:
@ic <character>
Go in-character (IC) as a given Character.
This will attempt to "become" a different object assuming you have
the right to do so. Note that it is the PLAYER that puppets
characters/objects, and it is the player that needs the correct permission!
You cannot become an object that is already controlled by another
player. In principle <character> can be any in-game object, as long
as you, the player, have the right to puppet it.
"""
key = "@ic"
locks = "cmd:all()" # must be all() or different puppeted objects won't be able to access it.
aliases = "@puppet"
help_category = "General"
def func(self):
"""
Simple puppet method
"""
caller = self.caller
old_character = self.character
new_character = None
if not self.args:
new_character = caller.db.last_puppet
if not new_character:
caller.msg("Usage: @ic <character>")
return
if not new_character:
# search for a matching character
new_character = search.objects(self.args, caller)
if new_character:
new_character = new_character[0]
else:
# the search method handles error messages etc.
return
if new_character.player:
if new_character.player == caller:
caller.msg("{RYou already are {c%s{n." % new_character.name)
else:
caller.msg("{c%s{r is already acted by another player.{n" % new_character.name)
return
if not new_character.access(caller, "puppet"):
caller.msg("{rYou may not become %s.{n" % new_character.name)
return
if caller.swap_character(new_character):
new_character.msg("\n{gYou become {c%s{n.\n" % new_character.name)
caller.db.last_puppet = old_character
if not new_character.location:
# this might be due to being hidden away at logout; check
loc = new_character.db.prelogout_location
if not loc: # still no location; use home
loc = new_character.home
new_character.location = loc
if new_character.location:
new_character.location.msg_contents("%s has entered the game." % new_character.key, exclude=[new_character])
new_character.location.at_object_receive(new_character, new_character.location)
new_character.execute_cmd("look")
else:
caller.msg("{rYou cannot become {C%s{n." % new_character.name)
class CmdOOC(MuxCommandOOC):
"""
@ooc - go ooc
Usage:
@ooc
Go out-of-character (OOC).
This will leave your current character and put you in an incorporeal OOC state.
"""
key = "@ooc"
locks = "cmd:all()" # this must be all(), or different puppeted objects won't be able to access it.
aliases = "@unpuppet"
help_category = "General"
def func(self):
"Implement function"
caller = self.caller
if utils.inherits_from(caller, "src.objects.objects.Object"):
caller = self.caller.player
if not caller.character:
string = "You are already OOC."
caller.msg(string)
return
caller.db.last_puppet = caller.character
# save location as if we were disconnecting from the game entirely.
if caller.character.location:
caller.character.location.msg_contents("%s has left the game." % caller.character.key, exclude=[caller.character])
caller.character.db.prelogout_location = caller.character.location
caller.character.location = None
# disconnect
caller.character.player = None
caller.character = None
caller.msg("\n{GYou go OOC.{n\n")
caller.execute_cmd("look")
class CmdColorTest(MuxCommand):
"""
testing colors
Usage:
@color ansi|xterm256
Print a color map along with in-mud color codes, while testing what is supported in your client.
Choices are 16-color ansi (supported in most muds) or the 256-color xterm256 standard.
No checking is done to determine whether your client supports color; if it
does not, you will see rubbish appear.
"""
key = "@color"
locks = "cmd:all()"
help_category = "General"
def func(self):
"Show color tables"
if not self.args or not self.args in ("ansi", "xterm256"):
self.caller.msg("Usage: @color ansi|xterm256")
return
if self.args == "ansi":
from src.utils import ansi
ap = ansi.ANSI_PARSER
# ansi colors
# show all ansi color-related codes
col1 = ["%s%s{n" % (code, code.replace("{","{{")) for code, _ in ap.ext_ansi_map[:-1]]
hi = "%ch"
col2 = ["%s%s{n" % (code, code.replace("%", "%%")) for code, _ in ap.mux_ansi_map[:-2]]
col3 = ["%s%s{n" % (hi+code, (hi+code).replace("%", "%%")) for code, _ in ap.mux_ansi_map[:-2]]
table = utils.format_table([col1, col2, col3], extra_space=1)
string = "ANSI colors:"
for row in table:
string += "\n" + "".join(row)
self.caller.msg(string)
self.caller.msg("{{X and %%cx are black-on-black)")
elif self.args == "xterm256":
table = [[],[],[],[],[],[],[],[],[],[],[],[]]
for ir in range(6):
for ig in range(6):
for ib in range(6):
# foreground table
table[ir].append("%%c%i%i%i%s{n" % (ir,ig,ib, "{{%i%i%i" % (ir,ig,ib)))
# background table
table[6+ir].append("%%cb%i%i%i%%c%i%i%i%s{n" % (ir,ig,ib,
5-ir,5-ig,5-ib,
"{{b%i%i%i" % (ir,ig,ib)))
table = utils.format_table(table)
string = "Xterm256 colors:"
for row in table:
string += "\n" + "".join(row)
self.caller.msg(string)
self.caller.msg("(e.g. %%c123 and %%cb123 also work)")
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Tract.B08301_001E'
db.add_column('census_tract', 'B08301_001E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_002E'
db.add_column('census_tract', 'B08301_002E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_003E'
db.add_column('census_tract', 'B08301_003E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_004E'
db.add_column('census_tract', 'B08301_004E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_005E'
db.add_column('census_tract', 'B08301_005E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_006E'
db.add_column('census_tract', 'B08301_006E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_007E'
db.add_column('census_tract', 'B08301_007E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_008E'
db.add_column('census_tract', 'B08301_008E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_009E'
db.add_column('census_tract', 'B08301_009E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_010E'
db.add_column('census_tract', 'B08301_010E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_011E'
db.add_column('census_tract', 'B08301_011E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_012E'
db.add_column('census_tract', 'B08301_012E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_013E'
db.add_column('census_tract', 'B08301_013E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_014E'
db.add_column('census_tract', 'B08301_014E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_015E'
db.add_column('census_tract', 'B08301_015E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_016E'
db.add_column('census_tract', 'B08301_016E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_017E'
db.add_column('census_tract', 'B08301_017E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_018E'
db.add_column('census_tract', 'B08301_018E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_019E'
db.add_column('census_tract', 'B08301_019E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_020E'
db.add_column('census_tract', 'B08301_020E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
# Adding field 'Tract.B08301_021E'
db.add_column('census_tract', 'B08301_021E',
self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Tract.B08301_001E'
db.delete_column('census_tract', 'B08301_001E')
# Deleting field 'Tract.B08301_002E'
db.delete_column('census_tract', 'B08301_002E')
# Deleting field 'Tract.B08301_003E'
db.delete_column('census_tract', 'B08301_003E')
# Deleting field 'Tract.B08301_004E'
db.delete_column('census_tract', 'B08301_004E')
# Deleting field 'Tract.B08301_005E'
db.delete_column('census_tract', 'B08301_005E')
# Deleting field 'Tract.B08301_006E'
db.delete_column('census_tract', 'B08301_006E')
# Deleting field 'Tract.B08301_007E'
db.delete_column('census_tract', 'B08301_007E')
# Deleting field 'Tract.B08301_008E'
db.delete_column('census_tract', 'B08301_008E')
# Deleting field 'Tract.B08301_009E'
db.delete_column('census_tract', 'B08301_009E')
# Deleting field 'Tract.B08301_010E'
db.delete_column('census_tract', 'B08301_010E')
# Deleting field 'Tract.B08301_011E'
db.delete_column('census_tract', 'B08301_011E')
# Deleting field 'Tract.B08301_012E'
db.delete_column('census_tract', 'B08301_012E')
# Deleting field 'Tract.B08301_013E'
db.delete_column('census_tract', 'B08301_013E')
# Deleting field 'Tract.B08301_014E'
db.delete_column('census_tract', 'B08301_014E')
# Deleting field 'Tract.B08301_015E'
db.delete_column('census_tract', 'B08301_015E')
# Deleting field 'Tract.B08301_016E'
db.delete_column('census_tract', 'B08301_016E')
# Deleting field 'Tract.B08301_017E'
db.delete_column('census_tract', 'B08301_017E')
# Deleting field 'Tract.B08301_018E'
db.delete_column('census_tract', 'B08301_018E')
# Deleting field 'Tract.B08301_019E'
db.delete_column('census_tract', 'B08301_019E')
# Deleting field 'Tract.B08301_020E'
db.delete_column('census_tract', 'B08301_020E')
# Deleting field 'Tract.B08301_021E'
db.delete_column('census_tract', 'B08301_021E')
models = {
'census.tract': {
'B01001_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_002E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_003E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_004E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_005E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_006E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_007E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_008E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_009E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_010E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_011E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_012E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_013E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_014E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_015E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_016E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_017E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_018E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_019E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_020E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_021E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_022E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_023E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_024E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_025E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_026E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_027E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_028E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_029E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_030E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_031E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_032E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_033E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_034E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_035E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_036E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_037E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_038E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_039E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_040E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_041E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_042E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_043E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_044E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_045E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_046E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_047E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_048E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01001_049E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01002_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B01003_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_002E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_003E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_004E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_005E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_006E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_007E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_008E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_009E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_010E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_011E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_012E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_013E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_014E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_015E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_016E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_017E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_018E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_019E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_020E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_021E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_022E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_023E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_024E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_025E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_026E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_027E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_028E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_029E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_030E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_031E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_032E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_033E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_034E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_035E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_036E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_037E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_038E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_039E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_040E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_041E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_042E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_043E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_044E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_045E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_046E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_047E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_048E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_049E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_050E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_051E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_052E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_053E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_054E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_055E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_056E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_057E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_058E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_059E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_060E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_061E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_062E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_063E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_064E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_065E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_066E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_067E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_068E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_069E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_070E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_071E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_072E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_073E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_074E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_075E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_076E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_077E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_078E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_079E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_080E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_081E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_082E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_083E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_084E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_085E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_086E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_087E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_088E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_089E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_090E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_091E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_092E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_093E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_094E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_095E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_096E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_097E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_098E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_099E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_100E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_101E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_102E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_103E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_104E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_105E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_106E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_107E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B04003_108E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_002E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_003E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_004E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_005E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_006E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_007E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_008E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_009E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_010E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_011E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_012E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_013E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_014E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_015E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_016E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_017E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_018E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_019E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_020E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B08301_021E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B11005_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B11005_002E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_002E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_003E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_004E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_005E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_006E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_007E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_008E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_009E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_010E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_011E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_012E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_013E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_014E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_015E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_016E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19001_017E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B19013_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B25003_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B25003_002E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B25003_003E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B25058_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B25064_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'B25077_001E': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'Meta': {'ordering': "('state', 'county', 'tract')", 'object_name': 'Tract'},
'county': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'tract': ('django.db.models.fields.CharField', [], {'max_length': '12'})
}
}
complete_apps = ['census']
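# The forwards()/backwards() bodies above repeat one pattern for the
# twenty-one B08301_*E estimate columns. The helpers below sketch a compact,
# equivalent formulation for illustration only; they are not part of the
# South-generated migration and are never called.
B08301_FIELDS = ['B08301_%03dE' % i for i in range(1, 22)]
def _add_b08301_columns(gf):
    # gf is the migration's self.gf field resolver
    for name in B08301_FIELDS:
        db.add_column('census_tract', name,
                      gf('django.db.models.fields.DecimalField')(null=True, max_digits=12, decimal_places=2, blank=True),
                      keep_default=False)
def _delete_b08301_columns():
    for name in B08301_FIELDS:
        db.delete_column('census_tract', name)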
|
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from astropy.cosmology import w0waCDM
def _hacccosmologydict(haccinputdict ) :
cosmologyvariables = ['HUBBLE', 'OMEGA_CDM','DEUT','OMEGA_NU' ,'W_DE','WA_DE', 'SS8', 'NS']
hacccosmologydict = {}
for key in haccinputdict :
if key in cosmologyvariables :
hacccosmologydict[key] = float(haccinputdict[key])
return hacccosmologydict
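# Quick illustration of the helper above: given a parsed HACC input dict of
# string values, only the recognized cosmology keys are kept and cast to
# float. The literal values are placeholders, not a real HACC input deck.
def _example_hacccosmologydict():
    haccinput = {'HUBBLE': '0.71', 'OMEGA_CDM': '0.22', 'NS': '0.963', 'Z_IN': '200.0'}
    # -> {'HUBBLE': 0.71, 'OMEGA_CDM': 0.22, 'NS': 0.963}  ('Z_IN' is dropped)
    return _hacccosmologydict(haccinput)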
class FCPL (w0waCDM) :
def __init__ ( self,
H0 = 70. ,
Om0 = 0.3,
Ob0 = 0.02256,
Ok0 = 0.0 ,
ns = 0.96 ,
As = None ,
sigma8 = None ,
w0 = -1.,
wa = 0.,
sigmamnu = 0.0 ,
Tcmb0=2.725,
Neff=3.04,
TFfilelist = [] ,
TFredshiftlist = [],
setvaluesfrom = None ,
name='FCPL'):
from astropy.cosmology import FlatwCDM
tmpmodel = FlatwCDM(
H0 = H0,
Om0 = Om0,
#Ok0 = 0.,
w0 = w0 ,
Tcmb0 = Tcmb0,
Neff =Neff,
name = 'tmp')
Ode0= 1.0 - Om0 - tmpmodel.Ogamma0 - tmpmodel.Onu0 - Ok0
w0waCDM.__init__(self,
H0 = H0,
Om0 = Om0,
Ode0 = Ode0 ,
#Ok0 = 0.,
w0 = w0 ,
wa = wa ,
Tcmb0 = Tcmb0,
Neff =Neff,
name = name)
self._H0 = float(H0)
self._Ob0 = float (Ob0)
self._sigmamnu = float(sigmamnu)
self._As = As
self._sigma8 = sigma8
self._ns = float(ns)
self._providedtransferfiles = {}
self._transferredshifts = None
if len(TFfilelist) != len(TFredshiftlist):
raise ValueError("number of files in list must equal number of redshifts in list")
if len(TFredshiftlist) > 0:
for i, z in enumerate(TFredshiftlist) :
self._providedtransferfiles[z] = TFfilelist[i]
self._transferredshifts = sorted (self._providedtransferfiles)
@property
def transferredshifts( self ):
return self._transferredshifts
@property
def transferfilesforredshift(self ) :
return self._providedtransferfiles
def transferfile(self, z):
"""Return the transfer function file provided for redshift z, or None."""
if self._transferredshifts and z in self._transferredshifts:
return self.transferfilesforredshift[z]
return None
@property
def Ob0(self):
return self._Ob0
@property
def sigmamnu(self) :
return self._sigmamnu
@property
def On0 (self) :
#For now
On0 = self._sigmamnu / 94.0 /self.h /self.h
return On0
@property
def Oc0 (self) :
#print type(self.Om0), self._Ob0 , self._sigmamnu /94.0
Oc0 = self.Om0 - self._Ob0 - self.On0
return Oc0
@property
def As(self):
return self._As
@property
def sigma8 (self) :
return self._sigma8
@property
def ns(self):
return self._ns
def setamplitude(self, sigma8=None, As=None):
print sigma8
print As
self._sigma8 = sigma8
self._As = As
#new methods:
def summary(self ) :
h = self.h
s = "******************************************************\n"
s += "======================Cosmology=======================\n"
s += " h = " + str(self.h) +"\n"
s += "================Relativistic Components===============\n"
s += "\Omega_\gamma = " + str(self.Ogamma0) + "\n"
s += "\Omega_nu_0 = " + str(self.Onu0) + "\n"
s += "------------------------------------------------------\n"
relenergydensity = self.Ogamma0 + self.Onu0
s += "sum = " + str(relenergydensity) + "\n"
s += "\omega_{nu} (massive) = "+ str(self.sigmamnu/94.0) + "\n"
s += "sum of masses (eV ) = "+ str(self.sigmamnu)
s += "sum of masses of nu /94.0/h^2 = "+ str(self.sigmamnu/94.0/h/h) + "\n"
s += "\Omega_{nu} (massive) = "+ str(self.On0) + "\n"
s += "\omega_{cdm} = "+ str(h*h*self.Oc0) + "\n"
s += "\omega_{b} = "+ str(h*h*self.Ob0) + "\n"
s += "\Omega_{cdm} = "+ str(self.Oc0) + "\n"
s += "\Omega_{b} = "+ str(self.Ob0) + "\n"
s += "------------------------------------------------------\n"
#s += "--------------------------------\n"
s += "sum = " + str(self.Oc0 + self.Ob0 + self.On0) +"\n"
s += "should be the same as \Omega_m = " + str(self.Om0) + "\n"
s += "\Omega_{de} = " + str(self.Ode0) + "\n"
s += "\Omega_k = " + str(self.Ok0) + "\n"
s += "------------------------------------------------------\n"
s += "sum of components -1.0 = " + str(self.Oc0 + self.Ob0 + self.On0 +self.Ode0 + relenergydensity -1.) +"\n"
#s += "=======================\n"
s += "=========Primordial Fluctuations =====================\n"
if self.ns is not None:
s += "\\n_s = " + str(self.ns) + "\n"
else:
s += "\\n_s = Not provided \n"
if self.sigma8 is not None:
s += "\sigma_8 = " + str(self.sigma8) + "\n"
else:
s += "\sigma_8 = Not provided \n"
if self.As is not None:
s += "A_s = " + str(self.As) + "\n"
else:
s += "A_s = Not provided \n"
return s
def growth(self, z ):
"""
returns the linear growth function D0, and the derivative D1
of the linear growth function with respect to the scale factor
(CHECK) for the cosmology at redshift z
args:
z: array-like, make sure they are floats not ints
returns:
tuple of D0, and D1 values at the requested redshifts
"""
import growthfunction as gf
h = self.h
omegacb = self.Ob0 + self.Oc0
#print "line 93", type(self.On0), type(h)
omeganuh2 = self.On0 * h * h
avals, D, info = gf.growth(Omegam = omegacb ,
w0 = self.w0 , wa = self.wa , Tcmb = self.Tcmb0 ,
h = self.h , Neff = self.Neff ,
omeganuh2val = omeganuh2 )
D , logD = gf.interp_D( z, avals, D[:,0], D[:,1])
return D, logD
def growthsourced(self, z ):
"""
returns the linear growth function D0, and the derivative D1
of the linear growth function with respect to the scale factor
(CHECK) for the cosmology at redshift z
args:
z: array-like, make sure they are floats not ints
returns:
tuple of D0, and D1 values at the requested redshifts
"""
import growthfunction as gf
h = self.h
omegacb = self.Ob0 + self.Oc0
#print "line 93", type(self.On0), type(h)
omeganuh2 = self.On0 * h * h
avals, D, info = gf.growth(Omegam = omegacb + self.On0,
w0 = self.w0 , wa = self.wa , Tcmb = self.Tcmb0 ,
h = self.h , Neff = self.Neff ,
omeganuh2val = omeganuh2 )
D , logD = gf.interp_D( z, avals, D[:,0], D[:,1])
return D, logD
if __name__=="__main__":
f = FCPL ( H0 = 65, Om0 = 0.99 , w0 = -1.0, wa =0.)
g = FCPL ( H0 = 65, Om0 = 0.8 , w0 = -1.0, wa =0.)
r = FCPL ( H0 = 65, Om0 = 0.3 , w0 = -1.0, wa =0.)
f = FCPL ( H0 = 65, Om0 = 0.99 , w0 = -1.0, wa =0., TFfilelist = ["example_data/cmbM000.tf"], TFredshiftlist = [0.0])
#f.setfromHACCinput("example_data/indat.params")
print "results \n"
print f.sigmamnu
print f.As
print f.sigma8
print f.h
print "H0 ",f.H0
print f.Om0
print "get baryon ", f.Ob0
print "get w0", f.w0
print "get TCMB ", f.Tcmb0
print "transfer function file name ", f.transferfilesforredshift[0.0]
#print "transfer function file name ", f.transferfilesforredshift[0.4]
#import inspect
#print inspect.getmembers(f)
z = np.arange(0.,5., 0.1)
plt.plot ( 1.0/(1.0 + z) , f.growth(z)[0] , 'r', label = "Om0 = 0.99")
plt.plot ( 1.0/(1.0 + z) , g.growth(z)[0] , 'k', label = "Om0 = 0.8")
plt.plot ( 1.0/(1.0 + z) , r.growth(z)[0] , 'b', label = "Om0 = 0.3")
plt.plot ( 1.0/(1.0 + z) ,1.0/(1.0 + z) , ls = 'dashed')
#plt.xlim(0.5,1.0)
plt.legend(loc ="best")
plt.show()
|
|
# @MUNTJAC_COPYRIGHT@
# @MUNTJAC_LICENSE@
"""Defines a text editor component."""
from warnings import warn
from muntjac.ui.abstract_text_field import AbstractTextField
from muntjac.data.property import IProperty
class TextField(AbstractTextField):
"""A text editor component that can be bound to any bindable IProperty.
The text editor supports both multiline and single line modes, default
is one-line mode.
Since C{TextField} extends C{AbstractField} it implements the
L{IBuffered} interface. A C{TextField} is in write-through mode by default,
so L{AbstractField.setWriteThrough} must be called to enable buffering.
@author: Vaadin Ltd.
@author: Richard Lincoln
@version: @VERSION@
"""
CLIENT_WIDGET = None #ClientWidget(VTextField, LoadStyle.EAGER)
def __init__(self, *args):
"""Constructs a C{TextField} with optional caption, dataSource and/or
value.
@param args: tuple of the form
- ()
- (caption)
1. the caption string for the editor
- (dataSource)
1. the IProperty to be edited with this editor
- (caption, dataSource)
1. the caption string for the editor
2. the IProperty to be edited with this editor
- (caption, text)
1. the caption string for the editor
2. the initial text content of the editor
"""
#: Tells if input is used to enter sensitive information that is not
# echoed to display. Typically passwords.
self._secret = False
#: Number of visible rows in a multiline TextField. Value 0 implies a
# single-line text-editor.
self._rows = 0
#: Tells if word-wrapping should be used in multiline mode.
self._wordwrap = True
nargs = len(args)
if nargs == 0:
super(TextField, self).__init__()
self.setValue('')
elif nargs == 1:
if isinstance(args[0], IProperty):
super(TextField, self).__init__()
dataSource, = args
self.setPropertyDataSource(dataSource)
else:
caption, = args
TextField.__init__(self)
self.setCaption(caption)
elif nargs == 2:
if isinstance(args[1], IProperty):
caption, dataSource = args
TextField.__init__(self, dataSource)
self.setCaption(caption)
else:
super(TextField, self).__init__()
caption, value = args
self.setValue(value)
self.setCaption(caption)
else:
raise ValueError('too many arguments')
def isSecret(self):
"""Gets the secret property. If a field is used to enter secret
information the information is not echoed to display.
@return: C{True} if the field is used to enter secret
information, C{False} otherwise.
@deprecated: Use L{PasswordField} instead for secret text input.
"""
warn('use PasswordField instead', DeprecationWarning)
return self._secret
def setSecret(self, secret):
"""Sets the secret property on and off. If a field is used to enter
secret information the information is not echoed to display.
@param secret:
the value specifying if the field is used to enter secret
information.
@deprecated: Use L{PasswordField} instead for secret text input.
"""
warn('use PasswordField instead', DeprecationWarning)
if self._secret != secret:
self._secret = secret
self.requestRepaint()
def paintContent(self, target):
if self.isSecret():
target.addAttribute('secret', True)
rows = self.getRows()
if rows != 0:
target.addAttribute('rows', rows)
target.addAttribute('multiline', True)
if not self.isWordwrap():
# Wordwrap is only painted if turned off to minimize
# communications
target.addAttribute('wordwrap', False)
super(TextField, self).paintContent(target)
def getRows(self):
"""Gets the number of rows in the editor. If the number of rows is set
to 0, the actual number of displayed rows is determined implicitly by
the adapter.
@return: number of explicitly set rows.
@deprecated: Use L{TextArea} for a multi-line text input.
"""
warn('use TextArea for a multi-line text input', DeprecationWarning)
return self._rows
def setRows(self, rows):
"""Sets the number of rows in the editor.
@param rows:
the number of rows for this editor.
@deprecated: Use L{TextArea} for a multi-line text input.
"""
warn('use TextArea for a multi-line text input', DeprecationWarning)
if rows < 0:
rows = 0
if self._rows != rows:
self._rows = rows
self.requestRepaint()
def isWordwrap(self):
"""Tests if the editor is in word-wrap mode.
@return: C{True} if the component is in the word-wrap mode,
C{False} if not.
@deprecated: Use L{TextArea} for a multi-line text input.
"""
warn('use TextArea for a multi-line text input', DeprecationWarning)
return self._wordwrap
def setWordwrap(self, wordwrap):
"""Sets the editor's word-wrap mode on or off.
@param wordwrap:
the boolean value specifying if the editor should be in
word-wrap mode after the call or not.
@deprecated: Use L{TextArea} for a multi-line text input.
"""
warn('use TextArea for a multi-line text input', DeprecationWarning)
if self._wordwrap != wordwrap:
self._wordwrap = wordwrap
self.requestRepaint()
def setHeight(self, height, unit=None):
"""Sets the height of the L{TextField} instance.
Setting height for L{TextField} also has a side-effect that puts
L{TextField} into multiline mode (aka "textarea"). Multiline mode
can also be achieved by calling L{setRows}. The height
value overrides the number of rows set by L{setRows}.
If you want to set height of single line L{TextField}, call
L{setRows} with value 0 after setting the height. Setting
rows to 0 resets the side-effect.
You should use L{TextArea} instead of L{TextField} for multiline
text input.
"""
if unit is None:
# will call setHeight(float, int) that actually does the magic.
# Method is overridden just to document side-effects.
super(TextField, self).setHeight(height)
else:
super(TextField, self).setHeight(height, unit)
if height > 1 and self.__class__ == TextField:
# In html based terminals we most commonly want to make
# component to be textarea if height is defined. Setting row
# field above 0 will render component as textarea.
self.setRows(2)
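# Illustrative sketch only (not part of the original Muntjac source): the
# constructor forms documented in TextField.__init__, using plain strings.
# Binding to an IProperty data source is omitted because the concrete property
# class to use depends on the application.
def _example_textfield_constructors():
    tf_plain = TextField()                       # ()
    tf_caption = TextField('Name')               # (caption)
    tf_text = TextField('Name', 'initial text')  # (caption, text)
    # Switch to multiline mode (deprecated; prefer TextArea for multiline input).
    tf_text.setRows(3)
    return tf_plain, tf_caption, tf_text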
|
|
import nrrd
import json
import numpy as np
import pickle
import os
import scipy.stats as stats
from scipy.optimize import fsolve
from scipy.special import iv
from scipy.special import factorial2, factorial
from scipy.special import hyp1f1
import matplotlib.pyplot as plt
def rician_eqn(p, measured_mean, measured_variance):
A, sigma = p
nu = A**2/ (4.0 * sigma**2.0)
b = (1+2.0*nu)*iv(0, nu) + 2.0*nu*iv(1,nu)
mean = sigma *np.sqrt(np.pi/2.0)*np.exp(-nu)*(b) - measured_mean
var = A**2.0 + 2.0*sigma**2.0 - np.pi*sigma**2.0/2.0*np.exp(-2.0*nu)*b**2.0 - measured_variance
return (mean, var)
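# Minimal sketch of how rician_eqn is meant to be used: solve for the Rician
# amplitude A and noise sigma that reproduce a measured mean and variance via
# scipy.optimize.fsolve. The measured numbers and the initial guess below are
# made-up values for illustration only.
def _example_fit_rician(measured_mean=100.0, measured_variance=400.0):
    initial_guess = (measured_mean, np.sqrt(measured_variance))
    A_fit, sigma_fit = fsolve(rician_eqn, initial_guess,
                              args=(measured_mean, measured_variance))
    return A_fit, sigma_fit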
def beta_N(N):
""" return the Beta_N function
@param: N the number of MRA channels
"""
return np.sqrt(0.5*np.pi)*factorial2(2*N-1) / ( (2**(N-1)) * factorial(N-1) )
def xi(theta, N):
""" returns the correction factor of the multi-channel MRI signal
@param: theta the SNR of the guassian
@param: N the number of MRA channels
"""
return 2.0*N + theta**2 - (beta_N(N)**2.0*(hyp1f1(-0.5, N, -0.5*theta**2))**2)
def g_theta(theta, N, r):
""" returns the guassian SNR value as a function of itself
@param: theta the guassian SNR
@param: N the number of MRA channels
@param: r the measure signal to noise ratio
"""
return np.sqrt(xi(theta, N)*(1.0 + r**2.0) - 2.0*N)
def koay_next(t_n, N, r):
""" returns the n+1 guassian SNR value given an estimate
@param: t_n estimate of the guassian SNR
@param: N the number of MRA channels
@param: r the measure signal to noise ratio
"""
g_n = g_theta(t_n, N, r)
b_n = beta_N(N)
f1_a = hyp1f1(-0.5, N, -0.5*t_n**2.0)
f1_b = hyp1f1(0.5, N+1, -0.5*t_n**2.0)
return t_n - (g_n*(g_n - t_n) ) / (t_n*(1.0+r**2.0)*(1.0 - (0.5*b_n**2.0 / N) * f1_a * f1_b) - g_n)
def koay_test(M, s_r, theta):
# doesn't work
l_term = (1.0+0.5*theta**2.0)*iv(0, 0.25*theta**2.0) + 0.5*theta**2.0*iv(1, 0.25*theta**2.0)
psi = 0.5*(np.sqrt(0.5*np.pi)*np.exp(-0.25*theta**2.0)*(l_term))
s_g_m = M / psi
xi_root = np.sqrt( theta**2.0 + 2.0 - 0.125*np.pi*np.exp(-0.5*theta**2.0)*(l_term**2.0))
s_g_s = s_r / xi_root
return s_g_m, s_g_s
def koay_test2(M, s_r, theta, N):
l_term = hyp1f1(-0.5, N, -0.5*theta**2)
beta_l_term = (beta_N(N)*l_term)
s_g_m = M / (beta_l_term)
xi_root = np.sqrt( 2.0*N + theta**2.0 - beta_N(N)**2.0*l_term**2.0 )
s_g_s = s_r / xi_root
#s_g_new = max(s_g_s, s_g_m) # get the maximum deviation
#M_g_new = s_g_new * beta_l_term
return s_g_m, s_g_s
def koay_test3(M, s_r, theta, N):
l_term = hyp1f1(-0.5, N, -0.5*theta**2)
xi = 2.0*N + theta**2.0 - beta_N(N)**2.0*l_term**2.0
M_n_new = np.sqrt( M**2.0 + (1.0 - 2.0*N/xi)*s_r**2.0)
s_g_new = M_n_new / theta
#s_g_new = max(s_g_s, s_g_m) # get the maximum deviation
#M_g_new = s_g_new * beta_l_term
return M_n_new, s_g_new
def lower_bound(N):
""" return the lower bound of the estimation
@param: N the number of MRA channels
"""
return np.sqrt(2.0*N / xi(0.0, N) - 1.0)
def newton_koay(r, N, iterations=500, tolerance = 1.0E-9):
""" returns newton iteration solve to the Koay derived noise estimator
@param: r the measured signal to noise ratio
@param: N the number of MRA channels
@param: iterations the maximum iterations for the newton solve
@param tolerance the numerical tolerance of the newton iterations
"""
#
#https://www.sciencedirect.com/science/article/pii/S109078070600019X/
it_default = np.copy(iterations)
lb = lower_bound(N)
if (np.isscalar(r)):
if (r <= lb):
t_1 = 0.0
err = 0.0
else:
t_0 = r - lb # initial guess of the guassian SNR theta
t_1 = koay_next(t_0, N, r)
err = np.absolute( t_1 - t_0)
while (err > tolerance and iterations >= 0):
t_0 = np.copy(t_1)
t_1 = koay_next(t_0, N, r)
err = np.absolute( t_1 - t_0)
iterations -= 1
if (iterations < 0):
print("{0} iterations before reaching error tolerance, error: {1} tolerance:{2}".format(it_default, err, tolerance ))
#else:
#t_1 = np.empty(r.shape)
#err = np.ones(r.shape)
#indx = np.where(r <= lb)
#t_1[indx] = 0.0
#t_0 = r - lb
#t_1[indx] = koay_next(t_0[indx], N, r[indx])
#while (err.any() > tolerance and iterations >= 0):
#t_0 = np.copy(t_1)
#t_1[indx] = koay_next(t_0[indx], N, r[indx])
#err = np.absolute( t_1 - t_0)
#iterations -= 1
#if (iterations < 0):
#print("{0} iterations before reaching error tolerance, error: {1} tolerance:{2}".format(it_default, err, tolerance ))
return t_1, err
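# Minimal usage sketch for the Koay noise estimator: given a measured
# signal-to-noise ratio r = M / s_r from a magnitude image and the number of
# receive channels N, newton_koay returns the underlying Gaussian SNR theta.
# The numbers below are made-up values for illustration only.
def _example_koay_correction(measured_mean=50.0, measured_std=10.0, N=1):
    r = measured_mean / measured_std
    theta, err = newton_koay(r, N)
    # Corrected (Gaussian) noise estimate via the xi correction factor.
    sigma_g = measured_std / np.sqrt(xi(theta, N))
    return theta, sigma_g, err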
def bootstrap_resample(X, n=None, percent=0.01):
""" Bootstrap resample an array_like
Parameters
----------
X : numpy array_like
data to resample
n : int, optional
length of resampled array, equal to len(X) if n==None
Results
percent: float, optional
use a percentage of the data for the resample
-------
returns X_resamples based on percentage or number of samples
defaults to the percentage
p_n the number of sample used
"""
if n is None and percent is None:
p_n = int(np.floor(0.01 * X.shape[0]))
elif n is None:
p_n = int(np.floor(percent * X.shape[0]))
else:
p_n = n
#print(n, X.shape)
X_resample = np.random.choice(X,p_n) # sampling with replacement
return X_resample, p_n
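# Minimal usage sketch for bootstrap_resample: repeatedly resample a fraction
# of the voxel intensities and collect the statistic of interest. The synthetic
# array and the number of iterations below are made-up values for illustration.
def _example_bootstrap_means(values=None, iterations=100, percent=0.01):
    if values is None:
        values = np.random.normal(loc=100.0, scale=10.0, size=10000)
    means = np.zeros(iterations)
    for i in range(iterations):
        sample, n_used = bootstrap_resample(values, n=None, percent=percent)
        means[i] = sample.mean()
    return means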
def VWI_Enhancement(post, pre, mean_post_vent, mean_pre_vent, kind = "E1",
std_post_vent = None, std_pre_vent = None, return_parts = False):
""" calculate enhancement
Parameters
----------
post : numpy array_like post contrast VWI
pre : numpy array_like pre contrast VWI
mean_post_vent : mean of post contrast ventricle
mean_pre_vent : mean of pre contrast ventricle
kind : which enhancement to calculate
std_post_vent : mean of post contrast ventricle
std_pre_vent : mean of pre contrast ventricle
-------
returns the enhancement calculation, numpy array_like
"""
if kind == "E1":
#"E = xi_vent / eta_vent * eta - xi"
post_ = (mean_pre_vent / mean_post_vent * post)
pre_ = pre
elif kind == "E2":
#"E = eta / eta_vent - xi / xi_vent"
post_ = (post / mean_post_vent)
pre_ = (pre / mean_pre_vent)
elif kind == "E3":
#"E = ( eta - mean_eta_vent) / stddev(eta_vent) - (xi - mean_xi_vent) / stddev(xi_vent)"
post_ = (post - mean_post_vent) / std_post_vent
pre_ = (pre - mean_pre_vent) / std_pre_vent
elif kind == "E5":
# ratio of normalized things, similar to E3
E = ( std_pre_vent / std_post_vent ) * (post - mean_post_vent) / (pre - mean_pre_vent)
return E
elif kind == "E4":
# ratio of normalized things, similar to E3
num = np.sqrt(std_post_vent**2 + std_pre_vent**2)
post_ = (post - mean_post_vent) / num
pre_ = (pre - mean_pre_vent) / num
else:
print("undefined enhancement kind")
return 0.0
E = post_ - pre_
if return_parts:
return E, post_, pre_
else:
return E
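# Minimal usage sketch for VWI_Enhancement: compute the "E1" and "E3"
# enhancement maps from post- and pre-contrast arrays normalised by ventricle
# statistics. All arrays and statistics below are synthetic values used only
# to illustrate the call signature.
def _example_enhancement_maps():
    post = np.random.normal(loc=120.0, scale=15.0, size=(8, 8))
    pre = np.random.normal(loc=100.0, scale=15.0, size=(8, 8))
    mean_post_vent, mean_pre_vent = 110.0, 95.0
    std_post_vent, std_pre_vent = 12.0, 11.0
    E1 = VWI_Enhancement(post, pre, mean_post_vent, mean_pre_vent, kind="E1")
    E3, post_norm, pre_norm = VWI_Enhancement(
        post, pre, mean_post_vent, mean_pre_vent, kind="E3",
        std_post_vent=std_post_vent, std_pre_vent=std_pre_vent,
        return_parts=True)
    return E1, E3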
#def VWI_Uncertainty(kind = "E1", **kwargs):
#""" calculate enhancement
#Parameters
#----------
#post : numpy array_like post contrast VWI
#pre : numpy array_like pre contrast VWI
#mean_post_vent : mean of post contrast ventricle
#mean_pre_vent : mean of pre contrast ventricle
#kind : which enhancement to calculate
#std_post_vent : mean of post contrast ventricle
#std_pre_vent : mean of pre contrast ventricle
#-------
#returns the enhancement calculation, numpy array_like
#"""
#if kind == "E1":
##"E = xi_vent / eta_vent * eta - xi"
#E = (mean_pre_vent / mean_post_vent * post) - pre
#elif kind == "E2":
##"E = eta / eta_vent - xi / xi_vent"
#E = (post / mean_post_vent) - (pre / mean_pre_vent)
#elif kind == "E3":
##"E = ( eta - mean_eta_vent) / stddev(eta_vent) - (xi - mean_xi_vent) / stddev(xi_vent)"
#E = ( (post - mean_post_vent) / std_post_vent) - ( (pre - mean_pre_vent) / std_pre_vent )
#elif kind == "E4":
## ratio of normalized things, similar to E3
#E = ( std_pre_vent / std_post_vent ) * (post - mean_post_vent) / (pre - mean_pre_vent)
#else:
#print("undefined enhancement kind")
#return 0.0
#return E
def gumbel_r_fit(input_data, n_pts=1000, tail=0.05):
# takes row or columnar format
param = stats.gumbel_r.fit(input_data)
arg = param[:-2]
loc = param[-2]
scale = param[-1]
# Get sane start and end points of distribution
start = stats.gumbel_r.ppf(tail, *arg, loc=loc, scale=scale) if arg else stats.gumbel_r.ppf(tail, loc=loc, scale=scale)
end = stats.gumbel_r.ppf(1.0 - tail, *arg, loc=loc, scale=scale) if arg else stats.gumbel_r.ppf(1.0 - tail, loc=loc, scale=scale)
x_test = np.linspace(start, end, n_pts)
pdf_fitted = stats.gumbel_r.pdf(x_test, *arg, loc=loc, scale=scale)
return pdf_fitted, x_test
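# Minimal usage sketch for gumbel_r_fit: fit a right-skewed Gumbel distribution
# to a 1-D sample and overlay the fitted PDF on a histogram. The synthetic
# sample below is for illustration only.
def _example_gumbel_fit_plot():
    sample = stats.gumbel_r.rvs(loc=0.0, scale=1.0, size=5000)
    pdf_fitted, x_test = gumbel_r_fit(sample, n_pts=500, tail=0.05)
    fig, ax = plt.subplots()
    ax.hist(sample, bins=50, density=True)
    ax.plot(x_test, pdf_fitted, 'r')
    return fig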
#vars
conf = 2.0 #95% confidence interval
percent_boot = 0.01
font_size = 10
figure_1 = True
figure_2 = True
json_file = "/home/sansomk/caseFiles/mri/VWI_proj/step3_PI.json"
pickle_file = "/home/sansomk/caseFiles/mri/VWI_proj/step3_pickle_PI.pkl"
write_file_dir = "/home/sansomk/caseFiles/mri/VWI_proj"
write_dir = "VWI_analysis"
plots_dir = "plots"
overwrite = 0
overwrite_out = False
skip_bootstrap = True
skip_write = False
with open(json_file, 'r') as f:
data = json.load(f)
labels = ["post_float","pre_float", "VWI_post_masked_vent", "VWI_post_vent",
"pre2post", "level_set", "VWI_pre2post_masked_vent",
"VWI_background_post_masked", "VWI_background_post",
"VWI_background_pre_masked", "VWI_background_pre",
"model-label_cropped", "model_post_masked",
"model_pre2post_masked", "VWI_post_float_cropped", "VWI_background_intersection",
"VWI_background_post_intersection", "VWI_background_pre_intersection"]
subset_labels = ["VWI_post_masked_vent", "VWI_pre2post_masked_vent",
"VWI_background_post_masked", "VWI_background_pre_masked",
"model_post_masked", "model_pre2post_masked"]
groups = [("post_float", "pre2post"),
("VWI_post_masked_vent", "VWI_pre2post_masked_vent"),
("model_post_masked", "model_pre2post_masked"),
("VWI_background_post_masked", "VWI_background_pre_masked"),
("VWI_background_post_intersection", "VWI_background_pre_intersection"),
("VWI_post_PI_masked", "VWI_pre2post_PI_masked")
]
image_dict = {}
if ((not os.path.exists(pickle_file)) or overwrite == 1):
for case_id, images in data.items():
image_dict[case_id] = {}
print(case_id)
for post_label, pre_label in groups:
#print(pre_label, post_label)
pre_path = images[pre_label]
post_path = images[post_label]
#vwi_mask, vwi_mask_header = nrrd.read(vwi_mask_path)
image_tuple_pre = nrrd.read(pre_path)
image_tuple_post = nrrd.read(post_path)
image_dict[case_id][pre_label] = image_tuple_pre
image_dict[case_id][post_label] = image_tuple_post
pickle.dump(image_dict, open(pickle_file, "wb"))
else:
with open(pickle_file, "rb") as pkl_f:
image_dict = pickle.load(pkl_f)
pickle_time = os.path.getmtime(pickle_file)
dump = False
for case_id, images in data.items():
print(case_id)
for post_label, pre_label in groups:
#print(pre_label, post_label)
pre_path = images[pre_label]
pre_time = os.path.getmtime(pre_path)
post_path = images[post_label]
post_time = os.path.getmtime(post_path)
#vwi_mask, vwi_mask_header = nrrd.read(vwi_mask_path)
if ( pre_label not in image_dict[case_id].keys() or pre_time > pickle_time):
image_tuple_pre = nrrd.read(pre_path)
image_dict[case_id][pre_label] = image_tuple_pre
dump = True
if (post_label not in image_dict[case_id].keys() or post_time > pickle_time):
image_tuple_post = nrrd.read(post_path)
image_dict[case_id][post_label] = image_tuple_post
dump = True
if (dump ):
pickle.dump(image_dict, open(pickle_file, "wb"))
#test = np.linspace(1,64,64, dtype=np.int)
#lb_test = lower_bound(test)
#plt.plot(test, lb_test)
#plt.show()
channels = int(1)
image_path_list = []
bootstrap_fig_list = []
for case_id, case_imgs in image_dict.items():
img_post_vent = case_imgs["VWI_post_masked_vent"][0][case_imgs["VWI_post_masked_vent"][0] >= 0.0]
img_post_back = case_imgs["VWI_background_post_masked"][0][case_imgs["VWI_background_post_masked"][0] >= 0.0]
img_post_model = case_imgs["model_post_masked"][0][case_imgs["model_post_masked"][0] >= 0.0]
img_post_back_inter = case_imgs["VWI_background_post_intersection"][0][case_imgs["VWI_background_post_intersection"][0] >= 0.0]
img_pre_vent = case_imgs["VWI_pre2post_masked_vent"][0][case_imgs["VWI_pre2post_masked_vent"][0] >= 0.0]
img_pre_back = case_imgs["VWI_background_pre_masked"][0][case_imgs["VWI_background_pre_masked"][0] >= 0.0]
img_pre_model = case_imgs["model_pre2post_masked"][0][case_imgs["model_pre2post_masked"][0] >= 0.0]
img_pre_back_inter = case_imgs["VWI_background_pre_intersection"][0][case_imgs["VWI_background_pre_intersection"][0] >= 0.0]
eta = case_imgs["post_float"][0][case_imgs["post_float"][0] >= 0.0]
xi = case_imgs["pre2post"][0][case_imgs["pre2post"][0] >= 0.0]
non_zero_indx = np.where(case_imgs["post_float"][0] >= 0.0)
#zero_indx = np.where(case_imgs["post_float"][0] < 0.0)
eta_model = case_imgs["model_post_masked"][0][case_imgs["model_post_masked"][0] >= 0.0]
xi_model = case_imgs["model_pre2post_masked"][0][case_imgs["model_pre2post_masked"][0] >= 0.0]
eta_PI = case_imgs["VWI_post_PI_masked"][0][case_imgs["VWI_post_PI_masked"][0] >= 0.0]
xi_PI = case_imgs["VWI_pre2post_PI_masked"][0][case_imgs["VWI_pre2post_PI_masked"][0] >= 0.0]
#scale_factor = np.sqrt(2.0-np.pi/2.0)
#post_back_noise = np.mean(img_post_back) / np.sqrt(np.pi/2.0) # rayleigh distribution
#pre_back_noise = np.mean(img_pre_back) / np.sqrt(np.pi/2.0) # rayleigh distribution
back_std_pre = np.std(img_pre_back)
back_std_post = np.std(img_post_back)
print(case_id, "post vent MEAN: {0:.4f} pre vent MEAN {1:.4f}".format(np.mean(img_post_vent), np.mean(img_pre_vent)))
print(case_id, "post vent STD: {0:.4f} pre vent STD {1:.4f}".format(np.std(img_post_vent) , np.std(img_pre_vent) ))
print(case_id, "post back MEAN: {0:.4f} pre back MEAN {1:.4f}".format(np.mean(img_post_back) , np.mean(img_pre_back) ))
print(case_id, "post inter MEAN: {0:.4f} pre inter MEAN {1:.4f}".format(np.mean(img_post_back_inter) , np.mean(img_pre_back_inter) ))
#print(case_id, "post vent inter shape: {0} pre vent inter shape {1}".format(img_post_back_inter.shape ,img_pre_back_inter.shape ))
print(case_id, "post back STD: {0:.4f} pre back STD {1:.4f}".format(back_std_post, back_std_pre))
print(case_id, "post inter STD: {0:.4f} pre inter STD {1:.4f}".format(np.std(img_post_back_inter) , np.std(img_pre_back_inter) ))
print(case_id, "post PI Mean: {0:.4f} pre PI mean {1:.4f}".format(np.mean(eta_PI), np.mean(xi_PI)))
print(case_id, "post PI STD: {0:.4f} pre PI STD {1:.4f}".format(np.std(eta_PI), np.std(xi_PI)))
#koay_result_post, err = newton_koay(SNR_post_vent, channels)
## ventricle portion
cov_vent = np.cov(np.vstack((img_post_vent, img_pre_vent)))
N_vent = float(img_post_vent.shape[0])
cov_back = np.cov(np.vstack((img_post_back_inter, img_pre_back_inter)))
#print(np.sqrt(2.0*np.trace(cov_vent) - np.sum(cov_vent)))
eta_vent = np.mean(img_post_vent) # mean ventricle post
xi_vent = np.mean(img_pre_vent) # mean ventricle pre
u_eta_vent_2 = cov_vent[0,0] #/ N_vent # uncertainty vent post square
u_xi_vent_2 = cov_vent[1,1] #/ N_vent # uncertainty vent pre square
u_eta_xi_vent = cov_vent[0,1] #/ N_vent # covariance between pre and post
#print(np.sqrt(2.0*np.trace(cov_vent) - np.sum(cov_vent)))
u_eta_back_2 = cov_back[0,0] # uncertainty in post measures square
u_xi_back_2 = cov_back[1,1] # uncertainty in pre measures square
u_eta_xi_back = cov_back[0,1] # covariance estimate
eta_vent_term = np.square((xi_vent / (eta_vent**2)) * eta) * u_eta_vent_2
xi_vent_term = np.square( eta / eta_vent) * u_xi_vent_2
eta_term = np.square(xi_vent / eta_vent) * u_eta_back_2
xi_term = u_xi_back_2
eta_xi_term = -2.0 * xi_vent / eta_vent * u_eta_xi_back
eta_xi_vent_term = -2.0 * xi_vent / (eta_vent ** 3.0) * np.square(eta) * u_eta_xi_vent
E = VWI_Enhancement(eta, xi, 1.0, 1.0, kind = "E1")
E1 = VWI_Enhancement(eta, xi, eta_vent, xi_vent, kind = "E1")
E2 = VWI_Enhancement(eta, xi, eta_vent, xi_vent, kind = "E2")
E3 = VWI_Enhancement(eta, xi, eta_vent, xi_vent, kind = "E3",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2))
E4 = VWI_Enhancement(eta, xi, eta_vent, xi_vent, kind = "E4",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2))
#E4 = VWI_Enhancement(eta, xi, eta_vent, xi_vent, kind = "E4",
#std_post_vent = np.sqrt(u_eta_vent_2),
#std_pre_vent = np.sqrt(u_xi_vent_2))
#E4_param = stats.cauchy.fit(E4)
#print("cauchy model params", E4_param)
## Separate parts of parameters
#arg = E4_param[:-2]
#loc = E4_param[-2]
#scale = E4_param[-1]
## Get sane start and end points of distribution
#start = stats.cauchy.ppf(0.01, *arg, loc=loc, scale=scale) if arg else stats.cauchy.ppf(0.05, loc=loc, scale=scale)
#end = stats.cauchy.ppf(0.99, *arg, loc=loc, scale=scale) if arg else stats.cauchy.ppf(0.95, loc=loc, scale=scale)
#E4_x_test = np.linspace(start, end, 30000)
#E4_pdf_fitted = stats.cauchy.pdf(E4_x_test, *arg, loc=loc, scale=scale)
E_model, E_post_model, E_pre_model = VWI_Enhancement(eta_model, xi_model,
1.0, 1.0, kind = "E1", return_parts=True)
E1_model, E1_post_model, E1_pre_model = VWI_Enhancement(eta_model, xi_model,
eta_vent, xi_vent, kind = "E1", return_parts=True)
E2_model, E2_post_model, E2_pre_model = VWI_Enhancement(eta_model, xi_model,
eta_vent, xi_vent, kind = "E2", return_parts=True)
E3_model, E3_post_model, E3_pre_model = VWI_Enhancement(eta_model, xi_model,
eta_vent, xi_vent, kind = "E3",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2),
return_parts=True)
E4_model, E4_post_model, E4_pre_model = VWI_Enhancement(eta_model, xi_model,
eta_vent, xi_vent, kind = "E4",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2),
return_parts=True)
E_PI, E_post_PI, E_pre_PI = VWI_Enhancement(eta_PI, xi_PI,
1.0, 1.0, kind = "E1", return_parts=True)
try:
# Create target Directory
test = os.path.join(write_file_dir, case_id, plots_dir)
os.mkdir(test)
print("Directory " , test , " Created ")
except FileExistsError:
print("Directory " , test , " already exists")
gs4 = plt.GridSpec(3,2, wspace=0.2, hspace=0.8)
fig4 = plt.figure(figsize=(17, 13))
ax1_4 = fig4.add_subplot(gs4[0, :])
ax1_4.set_title("Compare E pre vs post", fontsize = font_size)
ax1_4.scatter(E_pre_model.ravel(), E_post_model.ravel())
ax1_4.set_xlabel(r'pre $\xi$', fontsize = font_size)
ax1_4.set_ylabel(r'post $\eta$', fontsize = font_size)
ax2_4 = fig4.add_subplot(gs4[1, 0])
ax2_4.set_title("Compare E1 pre vs post", fontsize = font_size)
ax2_4.scatter(E1_pre_model.ravel(), E1_post_model.ravel())
ax2_4.set_xlabel(r'pre $\xi$', fontsize = font_size)
ax2_4.set_ylabel(r'post $\frac{ \bar{\xi}_{vent}}{ \bar{\eta}_{vent}} \eta$', fontsize = font_size)
ax3_4 = fig4.add_subplot(gs4[1, 1])
ax3_4.set_title("Compare E2 pre vs post", fontsize = font_size)
ax3_4.scatter(E2_pre_model.ravel(), E2_post_model.ravel())
ax3_4.set_xlabel(r'pre $\frac{\xi}{\bar{\xi}_{vent}}$', fontsize = font_size)
ax3_4.set_ylabel(r'post $\frac{\eta}{\bar{\eta}_{vent}}$', fontsize = font_size)
ax4_4 = fig4.add_subplot(gs4[2, 0])
ax4_4.set_title("Compare E3 pre vs post", fontsize = font_size)
ax4_4.scatter(E3_pre_model.ravel(), E3_post_model.ravel())
ax4_4.set_xlabel(r'pre $ \frac{\xi - \bar{\xi}_{vent}}{ \sigma_{ \xi_{vent} } }$', fontsize = font_size)
ax4_4.set_ylabel(r'post $\frac{\eta - \bar{\eta}_{vent}}{\sigma_{ \eta_{vent} } }$', fontsize = font_size)
ax5_4 = fig4.add_subplot(gs4[2, 1])
ax5_4.set_title("Compare E4 pre vs post", fontsize = font_size)
ax5_4.scatter(E4_pre_model.ravel(), E4_post_model.ravel())
ax5_4.set_xlabel(r'pre $ \frac{\xi - \bar{\xi}_{vent}}{ \sqrt{\sigma^2_{ \eta_{vent}} + \sigma^2_{ \xi_{vent} } } }$', fontsize = font_size)
ax5_4.set_ylabel(r'post $\frac{\eta - \bar{\eta}_{vent}}{\sqrt{\sigma^2_{ \eta_{vent}} + \sigma^2_{ \xi_{vent} } } }$', fontsize = font_size)
#ax5_4 = fig4.add_subplot(gs4[1, 1])
#ax5_4.set_title("Compare PI pre vs post", fontsize = font_size)
#ax5_4.scatter(E3_pre_model.ravel(), E3_post_model.ravel())
#ax5_4.set_xlabel(r'pre $\xi$', fontsize = font_size)
#ax5_4.set_ylabel(r'pre $\eta$', fontsize = font_size)
path_E3_model = os.path.join(write_file_dir, case_id, plots_dir, "Compare_Enhancement_model.png")
fig4.savefig(path_E3_model)
del fig4
gs5 = plt.GridSpec(2,2, wspace=0.2, hspace=0.8)
fig5 = plt.figure(figsize=(13, 13))
ax1_5 = fig5.add_subplot(gs5[:, 0])
ax1_5.set_title("Compare ventricle pre vs post", fontsize = font_size)
ax1_5.scatter(img_pre_vent, img_post_vent)
ax1_5.axis('equal')
ax1_5.set_xlabel(r'pre $\xi_{ventricle}$', fontsize = font_size)
ax1_5.set_ylabel(r'post $\eta_{ventricle}$', fontsize = font_size)
ax2_5 = fig5.add_subplot(gs5[:, 1])
ax2_5.set_title("Compare Pituitary Infindibulum pre vs post", fontsize = font_size)
ax2_5.scatter(xi_PI, eta_PI)
ax2_5.axis('equal')
ax2_5.set_xlabel(r'pre $\xi_{ventricle}$', fontsize = font_size)
ax2_5.set_ylabel(r'post $\eta_{ventricle}$', fontsize = font_size)
path_ventricle_model = os.path.join(write_file_dir, case_id, plots_dir, "Compare_PI_Ventricle_data.png")
fig5.savefig(path_ventricle_model)
del fig5
# create histogram plots of ventricle and model
n_bins2 = 4000
n_bins3 = 100
gs2 = plt.GridSpec(5,2, wspace=0.2, hspace=0.8)
#gs2 = plt.GridSpec(4,4, wspace=0.2, hspace=0.8)
# Create a figure
fig2 = plt.figure(figsize=(17, 13))
# SUBFIGURE 1
# Create subfigure 1 (takes over two rows (0 to 1) and column 0 of the grid)
ax1a_2 = fig2.add_subplot(gs2[0, 0])
ax1a_2.set_title("{0}: $E$ Volume".format(case_id), fontsize = font_size)
ax1a_2.set_ylabel("count", fontsize = font_size)
#ax1a_2.set_xlabel("Enhancement", fontsize = font_size)
ax1b_2 = fig2.add_subplot(gs2[0,1])
ax1b_2.set_title("{0}: $E$ Volume".format(case_id), fontsize = font_size)
#ax1b_2.set_ylabel("count", fontsize = font_size)
ax2a_2 = fig2.add_subplot(gs2[1, 0])
ax2a_2.set_title("{0}: $E_1$ Volume".format(case_id), fontsize = font_size)
ax2a_2.set_ylabel("count", fontsize = font_size)
ax2b_2 = fig2.add_subplot(gs2[1, 1])
ax2b_2.set_title("{0}: $E_1$ Volume".format(case_id), fontsize = font_size)
ax3a_2 = fig2.add_subplot(gs2[2, 0])
ax3a_2.set_title("{0}: $E_2$ Volume".format(case_id), fontsize = font_size)
ax3a_2.set_ylabel("count", fontsize = font_size)
ax3b_2 = fig2.add_subplot(gs2[2, 1])
ax3b_2.set_title("{0}: $E_2$ Volume".format(case_id), fontsize = font_size)
ax4a_2 = fig2.add_subplot(gs2[3, 0])
ax4a_2.set_title("{0}: $E_3$ Volume".format(case_id), fontsize = font_size)
ax4a_2.set_ylabel("count", fontsize = font_size)
ax4b_2 = fig2.add_subplot(gs2[3, 1])
ax4b_2.set_title("{0}: $E_3$ Volume".format(case_id), fontsize = font_size)
ax5a_2 = fig2.add_subplot(gs2[4, 0])
ax5a_2.set_title("{0}: $E_4$ Volume".format(case_id), fontsize = font_size)
ax5a_2.set_ylabel("count", fontsize = font_size)
ax5a_2.set_xlabel("Enhancement", fontsize = font_size)
ax5b_2 = fig2.add_subplot(gs2[4, 1])
ax5b_2.set_title("{0}: $E_4$ Volume".format(case_id), fontsize = font_size)
ax5b_2.set_xlabel("Enhancement", fontsize = font_size)
ax1a_2.hist(E.ravel(), bins='auto', label="$E$")
ax1a_2.axvline(x=np.mean(E), color='r')
ax1b_2.hist(E_model.ravel(), bins='auto', label="$E$ model")
ax1b_2.axvline(x=np.mean(E), color='r')
ax2a_2.hist(E1.ravel(), bins='auto', label="$E_1$")
ax2a_2.axvline(x=np.mean(E1), color='r')
ax2b_2.hist(E1_model.ravel(), bins='auto', label="$E_1$ model")
ax2b_2.axvline(x=np.mean(E1), color='r')
ax3a_2.hist(E2.ravel(), bins='auto', label="$E_2$")
ax3a_2.axvline(x=np.mean(E2), color='r')
ax3b_2.hist(E2_model.ravel(), bins='auto', label="$E_2$ model")
ax3b_2.axvline(x=np.mean(E2), color='r')
ax4a_2.hist(E3.ravel(), bins='auto', label="$E_3$")
ax4a_2.axvline(x=np.mean(E3), color='r')
ax4b_2.hist(E3_model.ravel(), bins='auto', label="$E_3$ model")
ax4b_2.axvline(x=np.mean(E3), color='r')
ax5a_2.hist(E4.ravel(), bins='auto', label="$E_4$")
ax5a_2.axvline(x=np.mean(E4), color='r')
ax5b_2.hist(E4_model.ravel(), bins='auto', label="$E_4$ model")
ax5b_2.axvline(x=np.mean(E4), color='r')
#ax8_2.hist(u_E_confidence.ravel(), bins=n_bins3)
#ax8_2.axvline(x=np.mean(u_E_confidence), color='r')
#ax8_2.set_ylabel("count", fontsize = font_size)
path_images = os.path.join(write_file_dir, case_id, plots_dir, "Compare_Enhancement_Distribution.png")
image_path_list.append(path_images)
fig2.savefig(path_images)
del fig2
# determine which term is the driver for uncertainty
u_E_2 = (u_eta_back_2 + u_xi_back_2 - 2.0 * u_eta_xi_back )
u_E = np.sqrt(u_E_2)
u_E_confidence = 2.0 * u_E # gaussian 95% confidence interval
u_E1_2 = eta_vent_term + xi_vent_term + eta_term + xi_term + eta_xi_term + eta_xi_vent_term
u_E1 = np.sqrt(u_E1_2)
u_E1_confidence = 2.0 * u_E1 # gaussian 95% confidence interval
u_E2_2 = (1.0 / (eta_vent**2.0) * u_eta_back_2 +
1.0 / (xi_vent**2.0) * u_xi_back_2 +
np.square( eta / (eta_vent**2.0)) * u_eta_vent_2 +
np.square( xi / (xi_vent**2.0)) * u_xi_vent_2 -
2.0 / (eta_vent * xi_vent) * u_eta_xi_back -
2.0 * (eta * xi) / ( (eta_vent * xi_vent)**2.0 ) * u_eta_xi_vent
)
u_E2 = np.sqrt(u_E2_2)
u_E2_confidence = 2.0 * u_E2 # gaussian 95% confidence interval
u_E3_2 = (1.0 / (u_eta_vent_2) * u_eta_back_2 +
1.0 / (u_xi_vent_2) * u_xi_back_2 -
2.0 / (np.sqrt(u_eta_vent_2 *u_xi_vent_2)) * u_eta_xi_back +
2.0 -
2.0 / (np.sqrt(u_eta_vent_2 *u_xi_vent_2)) * u_eta_xi_vent
)
u_E3 = np.sqrt(u_E3_2)
u_E3_confidence = 2.0 * u_E3 # gaussian 95% confidence interval
u_E4_2 = ( 1.0 / (u_eta_vent_2 + u_xi_vent_2) * (
u_eta_back_2 + u_xi_back_2 -
2.0 * u_eta_xi_back +
u_eta_vent_2 + u_xi_vent_2 -
2.0 * u_eta_xi_vent
)
)
u_E4 = np.sqrt(u_E4_2)
u_E4_confidence = 2.0 * u_E4 # gaussian 95% confidence interval
print(np.mean(u_E_2), np.mean(u_E), np.mean(u_E_confidence))
print(np.mean(u_E1_2), np.mean(u_E1), np.mean(u_E1_confidence))
print(np.mean(u_E2_2), np.mean(u_E2), np.mean(u_E2_confidence))
print(np.mean(u_E3_2), np.mean(u_E3), np.mean(u_E3_confidence))
print(np.mean(u_E4_2), np.mean(u_E4), np.mean(u_E4_confidence))
print("pink")
write_list = {}
E_full = VWI_Enhancement(case_imgs["post_float"][0], case_imgs["pre2post"][0], 1.0, 1.0, kind = "E1")
write_list["E.nrrd"] = E_full
E1_full = VWI_Enhancement(case_imgs["post_float"][0], case_imgs["pre2post"][0], eta_vent, xi_vent, kind = "E1")
write_list["E1.nrrd"] = E1_full
E2_full = VWI_Enhancement(case_imgs["post_float"][0], case_imgs["pre2post"][0], eta_vent, xi_vent, kind = "E2")
write_list["E2.nrrd"] = E2_full
E3_full= VWI_Enhancement(case_imgs["post_float"][0], case_imgs["pre2post"][0], eta_vent, xi_vent, kind = "E3",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2),
return_parts=False)
write_list["E3.nrrd"] = E3_full
E4_full, E4_post, E4_pre = VWI_Enhancement(case_imgs["post_float"][0], case_imgs["pre2post"][0], eta_vent, xi_vent, kind = "E4",
std_post_vent = np.sqrt(u_eta_vent_2),
std_pre_vent = np.sqrt(u_xi_vent_2),
return_parts=True)
write_list["E4.nrrd"] = E4_full
#gs3 = plt.GridSpec(1,1, wspace=0.2, hspace=0.8)
#fig3 = plt.figure(figsize=(13, 13))
#ax1_3 = fig3.add_subplot(gs3[0, 0])
#ax1_3.set_title("Compare E3 pre vs post", fontsize = font_size)
#ax1_3.scatter(E3_pre.ravel(), E3_post.ravel())
#ax1_3.set_xlabel(r'pre $ \frac{\xi - \bar{\xi}_{vent}}{std(\xi_{vent})}$', fontsize = font_size)
#ax1_3.set_ylabel(r'post $\frac{\eta - \bar{\eta}_{vent}}{std(\eta_{vent})}$', fontsize = font_size)
#path_E3 = os.path.join(write_file_dir, case_id, plots_dir, "Compare_E3.png")
#fig3.savefig(path_E3)
#del fig3
# create confidence arrays
uE = -1.0*np.ones_like(E_full)
uE[non_zero_indx] = u_E_confidence
write_list["UE.nrrd"] = uE
uE1 = -1.0*np.ones_like(E1_full)
uE1[non_zero_indx] = u_E1_confidence
write_list["UE1.nrrd"] = uE1
uE2 = -1.0*np.ones_like(E2_full)
uE2[non_zero_indx] = u_E2_confidence
write_list["UE2.nrrd"] = uE2
uE3 = -1.0*np.ones_like(E3_full)
uE3[non_zero_indx] = u_E3_confidence
write_list["UE3.nrrd"] = uE3
uE4 = -1.0*np.ones_like(E4_full)
uE4[non_zero_indx] = u_E4_confidence
write_list["UE4.nrrd"] = uE4
# threshold
indx_E = np.where(E_full > 0.0)
E_thresh = np.zeros_like(E_full)
E_thresh[indx_E] = E_full[indx_E]
write_list["E_thresh.nrrd"] = E_thresh
indx_E1 = np.where(E1_full > 0.0)
E1_thresh = np.zeros_like(E1_full)
E1_thresh[indx_E1] = E1_full[indx_E1]
write_list["E1_thresh.nrrd"] = E1_thresh
indx_E2 = np.where(E2_full > 0.0)
E2_thresh = np.zeros_like(E2_full)
E2_thresh[indx_E2] = E2_full[indx_E2]
write_list["E2_thresh.nrrd"] = E2_thresh
indx_E3 = np.where(E3_full > 0.0)
E3_thresh = np.zeros_like(E3_full)
E3_thresh[indx_E3] = E3_full[indx_E3]
write_list["E3_thresh.nrrd"] = E3_thresh
indx_E4 = np.where(E4_full > 0.0)
E4_thresh = np.zeros_like(E4_full)
E4_thresh[indx_E4] = E4_full[indx_E4]
write_list["E4_thresh.nrrd"] = E4_thresh
if (skip_write) :
pass
else:
for file_name, np_image in write_list.items():
path_test = os.path.join(write_file_dir, case_id, write_dir, file_name)
if ( not os.path.exists(path_test) or overwrite_out == True):
nrrd.write(path_test, np_image, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E.nrrd"), E_full, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E1.nrrd"), E1_full, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E2.nrrd"), E2_full, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E3.nrrd"), E3_full, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E4.nrrd"), E4_full, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "UE.nrrd"), uE, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "UE1.nrrd"), uE1, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "UE2.nrrd"), uE2, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "UE3.nrrd"), uE3, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "UE4.nrrd"), uE4, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E_thresh.nrrd"), E_thresh, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E1_thresh.nrrd"), E1_thresh, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E2_thresh.nrrd"), E2_thresh, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E3_thresh.nrrd"), E3_thresh, case_imgs["VWI_post_masked_vent"][1])
#nrrd.write(os.path.join(write_file_dir, case_id, write_dir, "E4_thresh.nrrd"), E4_thresh, case_imgs["VWI_post_masked_vent"][1])
if (skip_bootstrap):
pass
else:
boot_size = 10000
n_bins = 30
pre_dist_std = np.zeros(boot_size)
post_dist_std = np.zeros(boot_size)
pre_dist_mean = np.zeros(boot_size)
post_dist_mean = np.zeros(boot_size)
for i in range(boot_size):
X_resample_pre, ns = bootstrap_resample(img_pre_vent, n=None, percent=percent_boot)
X_resample_post, ns = bootstrap_resample(img_post_vent, n=None, percent=percent_boot)
pre_dist_std[i] = X_resample_pre.std()
post_dist_std[i] = X_resample_post.std()
pre_dist_mean[i] = X_resample_pre.mean()
post_dist_mean[i] = X_resample_post.mean()
#print( 'original mean:', X.mean()
#print(case_id, "post vent resample MEAN: {0:.4f} pre vent resample MEAN {1:.4f}".format(
# X_resample_pre.mean() , X_resample_post.mean() ))
print ("ha")
gs = plt.GridSpec(3,2, wspace=0.2, hspace=0.8)
# Create a figure
fig = plt.figure(figsize=(13, 13))
# SUBFIGURE 1
# Create subfigure 1 (takes over two rows (0 to 1) and column 0 of the grid)
ax1 = fig.add_subplot(gs[0, 0])
ax1.set_title("{0}: VWI_pre_masked_vent bootstrap".format(case_id), fontsize = font_size)
ax2 = fig.add_subplot(gs[0, 1])
ax2.set_title("VWI_post_masked_vent bootstrap", fontsize = font_size)
ax3 = fig.add_subplot(gs[1, 0])
ax3.set_title(r'$E = post-pre$ bootstrap', fontsize = font_size)
ax4 = fig.add_subplot(gs[1, 1])
ax4.set_title(r'$E_1 = \frac{\overline{pre}_{vent}}{\overline{post}_{vent}} post - pre$', fontsize = font_size)
ax5 = fig.add_subplot(gs[2, 0])
ax5.set_title(r'$E_2 = \frac{post}{\overline{post}_{vent}} - \frac{pre}{\overline{pre}_{vent}}$', fontsize = font_size)
ax6 = fig.add_subplot(gs[2, 1])
ax6.set_title(r'$E_3 = \frac{post - \overline{post}_{vent}}{s_{post_{vent}}} - \frac{pre - \overline{pre}_{vent}}{s_{pre_{vent}}}$', fontsize = font_size)
ax1.hist(pre_dist_mean, bins=n_bins)
ax1.axvline(x=xi_vent, color='r')
ax1.set_xlabel("mean", fontsize = font_size)
ax1.set_ylabel("count", fontsize = font_size)
ax2.hist(post_dist_mean, bins=n_bins)
ax2.axvline(x=eta_vent, color='r')
ax2.set_xlabel("mean ", fontsize = font_size)
ax2.set_ylabel("count", fontsize = font_size)
test_E = VWI_Enhancement(post_dist_mean, pre_dist_mean, 1.0, 1.0, kind = "E1")
ax3.hist(test_E, bins=n_bins)
ax3.axvline(x=(eta_vent - xi_vent), color='r')
#ax3.axvline(x=test_E.mean(), color='b')
ax3.set_xlabel("mean E", fontsize = font_size)
ax3.set_ylabel("count", fontsize = font_size)
test_E1 = VWI_Enhancement(post_dist_mean, pre_dist_mean, eta_vent, xi_vent, kind = "E1")
ax4.hist(test_E1, bins=n_bins)
ax4.axvline(x=0.0, color='r')
#ax4.axvline(x=xi_vent/eta_vent*post_dist_mean.mean() - pre_dist_mean.mean(), color='r')
#ax4.axvline(x=test_E1.mean(), color='b')
ax4.set_xlabel("mean E1", fontsize = font_size)
ax4.set_ylabel("count", fontsize = font_size)
test_E2 = VWI_Enhancement(post_dist_mean, pre_dist_mean, eta_vent, xi_vent, kind = "E2")
ax5.hist(test_E2, bins=n_bins)
ax5.axvline(x=0.0, color='r')
#ax5.axvline(x=(post_dist_mean.mean() / eta_vent) - (pre_dist_mean.mean() / xi_vent), color='r')
#ax5.axvline(x=test_E2.mean(), color='b')
ax5.set_xlabel("mean E2", fontsize = font_size)
ax5.set_ylabel("count", fontsize = font_size)
test_E3 = VWI_Enhancement(post_dist_mean, pre_dist_mean, eta_vent, xi_vent, kind = "E3",
std_post_vent = post_dist_std, std_pre_vent = pre_dist_std)
ax6.hist(test_E3, bins=n_bins)
ax6.axvline(x=0.0 , color='r')
#ax6.axvline(x=(post_dist_mean.mean() - eta_vent) / post_dist_std.mean() - (pre_dist_mean.mean() - xi_vent) / pre_dist_std.mean() , color='b')
ax6.set_xlabel("mean E3", fontsize = font_size)
ax6.set_ylabel("count", fontsize = font_size)
path_bootstrap = os.path.join(write_file_dir, case_id, plots_dir, "Compare_bootstrap.png")
bootstrap_fig_list.append(path_bootstrap)
fig.savefig(path_bootstrap)
#plt.show()
del fig
print("hehe")
print("hooray")
print(image_path_list)
print(bootstrap_fig_list)
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the curses-based CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import curses
import tempfile
import numpy as np
from tensorflow.python.debug.cli import curses_ui
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
def string_to_codes(cmd):
return [ord(c) for c in cmd]
def codes_to_string(cmd_code):
# Omit non-ASCII key codes.
return "".join([chr(code) for code in cmd_code if code < 256])
class MockCursesUI(curses_ui.CursesUI):
"""Mock subclass of CursesUI that bypasses actual terminal manipulations."""
def __init__(self,
height,
width,
command_sequence=None):
self._height = height
self._width = width
self._command_sequence = command_sequence
self._command_counter = 0
# The mock class has no actual textbox. So use this variable to keep
# track of what's entered in the textbox on creation.
self._curr_existing_command = ""
# Observers for test.
# Observers of screen output.
self.unwrapped_outputs = []
self.wrapped_outputs = []
self.scroll_messages = []
self.output_array_pointer_indices = []
self.output_pad_rows = []
# Observers of command textbox.
self.existing_commands = []
# Observer for tab-completion candidates.
self.candidates_lists = []
# Observer for the main menu.
self.main_menu_list = []
# Observer for toast messages.
self.toasts = []
curses_ui.CursesUI.__init__(self)
# Below, override the _screen_ prefixed member methods that interact with the
# actual terminal, so that the mock can run in a terminal-less environment.
# TODO(cais): Search for a way to have a mock terminal object that behaves
# like the actual terminal, so that we can test the terminal interaction
# parts of the CursesUI class.
def _screen_init(self):
pass
def _screen_refresh_size(self):
self._max_y = self._height
self._max_x = self._width
def _screen_launch(self, enable_mouse_on_start):
self._mouse_enabled = enable_mouse_on_start
def _screen_terminate(self):
pass
def _screen_refresh(self):
pass
def _screen_create_command_window(self):
pass
def _screen_create_command_textbox(self, existing_command):
"""Override to insert observer of existing commands.
Used in testing of history navigation and tab completion.
Args:
existing_command: Command string entered to the textbox at textbox
creation time. Note that the textbox does not actually exist in this
mock subclass. This method only keeps track of and records the state.
"""
self.existing_commands.append(existing_command)
self._curr_existing_command = existing_command
def _screen_new_output_pad(self, rows, cols):
return "mock_pad"
def _screen_add_line_to_output_pad(self, pad, row, txt, color_segments=None):
pass
def _screen_draw_text_line(self, row, line, attr=curses.A_NORMAL, color=None):
pass
def _screen_scroll_output_pad(self, pad, viewport_top, viewport_left,
screen_location_top, screen_location_left,
screen_location_bottom, screen_location_right):
pass
def _screen_get_user_command(self):
command = self._command_sequence[self._command_counter]
self._command_key_counter = 0
for c in command:
if c == curses.KEY_RESIZE:
# Special case for simulating a terminal resize event in curses.
self._height = command[1]
self._width = command[2]
self._on_textbox_keypress(c)
self._command_counter += 1
return ""
elif c == curses.KEY_MOUSE:
mouse_x = command[1]
mouse_y = command[2]
self._command_counter += 1
self._textbox_curr_terminator = c
return self._fetch_hyperlink_command(mouse_x, mouse_y)
else:
y = self._on_textbox_keypress(c)
self._command_key_counter += 1
if y == curses_ui.CursesUI.CLI_TERMINATOR_KEY:
break
self._command_counter += 1
# Take into account pre-existing string automatically entered on textbox
# creation.
return self._curr_existing_command + codes_to_string(command)
def _screen_getmouse(self):
output = (0, self._mouse_xy_sequence[self._mouse_counter][0],
self._mouse_xy_sequence[self._mouse_counter][1], 0,
curses.BUTTON1_CLICKED)
self._mouse_counter += 1
return output
def _screen_gather_textbox_str(self):
return codes_to_string(self._command_sequence[self._command_counter]
[:self._command_key_counter])
def _scroll_output(self, direction, line_index=None):
"""Override to observe screen output.
This method is invoked after every command that generates a new screen
output and after every keyboard triggered screen scrolling. Therefore
it is a good place to insert the observer.
Args:
direction: which direction to scroll.
line_index: (int or None) Optional line index to scroll to. See doc string
of the overridden method for more information.
"""
curses_ui.CursesUI._scroll_output(self, direction, line_index=line_index)
self.unwrapped_outputs.append(self._curr_unwrapped_output)
self.wrapped_outputs.append(self._curr_wrapped_output)
self.scroll_messages.append(self._scroll_info)
self.output_array_pointer_indices.append(self._output_array_pointer_indices)
self.output_pad_rows.append(self._output_pad_row)
def _display_main_menu(self, output):
curses_ui.CursesUI._display_main_menu(self, output)
self.main_menu_list.append(self._main_menu)
def _screen_render_menu_pad(self):
pass
def _display_candidates(self, candidates):
curses_ui.CursesUI._display_candidates(self, candidates)
self.candidates_lists.append(candidates)
def _toast(self, message, color=None, line_index=None):
curses_ui.CursesUI._toast(self, message, color=color, line_index=line_index)
self.toasts.append(message)
class CursesTest(test_util.TensorFlowTestCase):
_EXIT = string_to_codes("exit\n")
def _babble(self, args, screen_info=None):
ap = argparse.ArgumentParser(
description="Do babble.", usage=argparse.SUPPRESS)
ap.add_argument(
"-n",
"--num_times",
dest="num_times",
type=int,
default=60,
help="How many times to babble")
ap.add_argument(
"-l",
"--line",
dest="line",
type=str,
default="bar",
help="The content of each line")
ap.add_argument(
"-k",
"--link",
dest="link",
action="store_true",
help="Create a command link on each line")
ap.add_argument(
"-m",
"--menu",
dest="menu",
action="store_true",
help="Create a menu for testing")
parsed = ap.parse_args(args)
lines = [parsed.line] * parsed.num_times
font_attr_segs = {}
if parsed.link:
for i in range(len(lines)):
font_attr_segs[i] = [(
0,
len(lines[i]),
debugger_cli_common.MenuItem("", "babble"),)]
annotations = {}
if parsed.menu:
menu = debugger_cli_common.Menu()
menu.append(
debugger_cli_common.MenuItem("babble again", "babble"))
menu.append(
debugger_cli_common.MenuItem("ahoy", "ahoy", enabled=False))
annotations[debugger_cli_common.MAIN_MENU_KEY] = menu
output = debugger_cli_common.RichTextLines(
lines, font_attr_segs=font_attr_segs, annotations=annotations)
return output
def _print_ones(self, args, screen_info=None):
ap = argparse.ArgumentParser(
description="Print all-one matrix.", usage=argparse.SUPPRESS)
ap.add_argument(
"-s",
"--size",
dest="size",
type=int,
default=3,
help="Size of the matrix. For example, of the value is 3, "
"the matrix will have shape (3, 3)")
parsed = ap.parse_args(args)
m = np.ones([parsed.size, parsed.size])
return tensor_format.format_tensor(m, "m")
def testInitialization(self):
ui = MockCursesUI(40, 80)
self.assertEqual(0, ui._command_pointer)
self.assertEqual([], ui._active_command_history)
self.assertEqual("", ui._pending_command)
def testRunUIExitImmediately(self):
"""Make sure that the UI can exit properly after launch."""
ui = MockCursesUI(40, 80, command_sequence=[self._EXIT])
ui.run_ui()
# No screen output should have happened.
self.assertEqual(0, len(ui.unwrapped_outputs))
def testRunUIEmptyCommand(self):
"""Issue an empty command then exit."""
ui = MockCursesUI(40, 80, command_sequence=[[], self._EXIT])
ui.run_ui()
# Empty command should not lead to any screen output.
self.assertEqual(0, len(ui.unwrapped_outputs))
def testRunUIInvalidCommandPrefix(self):
"""Handle an unregistered command prefix."""
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("foo\n"), self._EXIT])
ui.run_ui()
# Screen output/scrolling should have happened exactly once.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["ERROR: Invalid command prefix \"foo\""],
ui.unwrapped_outputs[0].lines)
# TODO(cais): Add explanation for the 35 extra lines.
self.assertEqual(["ERROR: Invalid command prefix \"foo\""],
ui.wrapped_outputs[0].lines[:1])
# A single line of output should not have caused scrolling.
self.assertNotIn("Scroll", ui.scroll_messages[0])
self.assertIn("Mouse:", ui.scroll_messages[0])
def testRunUIInvalidCommandSyntax(self):
"""Handle a command with invalid syntax."""
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble -z\n"), self._EXIT])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
# Screen output/scrolling should have happened exactly once.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertIn("Mouse:", ui.scroll_messages[0])
self.assertEqual(
["Syntax error for command: babble", "For help, do \"help babble\""],
ui.unwrapped_outputs[0].lines)
def testRunUIScrollTallOutputPageDownUp(self):
"""Scroll tall output with PageDown and PageUp."""
# Use PageDown and PageUp to scroll back and forth a little before exiting.
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble\n"), [curses.KEY_NPAGE] * 2 +
[curses.KEY_PPAGE] + self._EXIT])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
# Screen output/scrolling should have happened exactly once.
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(4, len(ui.wrapped_outputs))
self.assertEqual(4, len(ui.scroll_messages))
# Before scrolling.
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
# Initial scroll: At the top.
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
self.assertIn("Mouse:", ui.scroll_messages[0])
# After 1st scrolling (PageDown).
# The screen output shouldn't have changed. Only the viewport should.
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
self.assertIn("Scroll (PgDn/PgUp): 1.69%", ui.scroll_messages[1])
self.assertIn("Mouse:", ui.scroll_messages[1])
# After 2nd scrolling (PageDown).
self.assertIn("Scroll (PgDn/PgUp): 3.39%", ui.scroll_messages[2])
self.assertIn("Mouse:", ui.scroll_messages[2])
# After 3rd scrolling (PageUp).
self.assertIn("Scroll (PgDn/PgUp): 1.69%", ui.scroll_messages[3])
self.assertIn("Mouse:", ui.scroll_messages[3])
def testCutOffTooManyOutputLines(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble -n 20\n"), self._EXIT])
# Modify max_output_lines so that this test doesn't use too much time or
# memory.
ui.max_output_lines = 10
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(["bar"] * 10 + ["Output cut off at 10 lines!"],
ui.wrapped_outputs[0].lines[:11])
def testRunUIScrollTallOutputEndHome(self):
"""Scroll tall output with PageDown and PageUp."""
# Use End and Home to scroll a little before exiting to test scrolling.
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble\n"),
[curses.KEY_END] * 2 + [curses.KEY_HOME] + self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
# The initial command output plus the three scroll actions should have
# produced four screen output events.
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(4, len(ui.wrapped_outputs))
self.assertEqual(4, len(ui.scroll_messages))
# Before scrolling.
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
# Initial scroll: At the top.
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
# After 1st scrolling (End).
self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[1])
# After 2nd scrolling (End).
self.assertIn("Scroll (PgUp): 100.00%", ui.scroll_messages[2])
# After 3rd scrolling (Home).
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[3])
def testRunUIWithInitCmd(self):
"""Run UI with an initial command specified."""
ui = MockCursesUI(40, 80, command_sequence=[self._EXIT])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui(init_command="babble")
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
def testCompileHelpWithoutHelpIntro(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"), self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[0].lines[:4])
def testCompileHelpWithHelpIntro(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"), self._EXIT])
help_intro = debugger_cli_common.RichTextLines(
["This is a curses UI.", "All it can do is 'babble'.", ""])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.set_help_intro(help_intro)
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(
help_intro.lines + ["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[0].lines[:7])
def testCommandHistoryNavBackwardOnce(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
[curses.KEY_UP], # Hit Up and Enter.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(2, len(ui.unwrapped_outputs))
for i in [0, 1]:
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[i].lines[:4])
def testCommandHistoryNavBackwardTwice(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
string_to_codes("babble\n"),
[curses.KEY_UP],
[curses.KEY_UP], # Hit Up twice and Enter.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(3, len(ui.unwrapped_outputs))
# The 1st and 3rd outputs are for command "help".
for i in [0, 2]:
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[i].lines[:4])
# The 2nd output is for command "babble".
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
def testCommandHistoryNavBackwardOverLimit(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
string_to_codes("babble\n"),
[curses.KEY_UP],
[curses.KEY_UP],
[curses.KEY_UP], # Hit Up three times and Enter.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(3, len(ui.unwrapped_outputs))
# The 1st and 3rd outputs are for command "help".
for i in [0, 2]:
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[i].lines[:4])
# The 2nd output is for command "babble".
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
def testCommandHistoryNavBackwardThenForward(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("help\n"),
string_to_codes("babble\n"),
[curses.KEY_UP],
[curses.KEY_UP],
[curses.KEY_DOWN], # Hit Up twice and Down once.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(3, len(ui.unwrapped_outputs))
# The 1st output is for command "help".
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[0].lines[:4])
# The 2nd and 3rd outputs are for command "babble".
for i in [1, 2]:
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[i].lines)
def testCommandHistoryPrefixNavBackwardOnce(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 1\n"),
string_to_codes("babble -n 10\n"),
string_to_codes("help\n"),
string_to_codes("b") + [curses.KEY_UP], # Navigate with prefix.
string_to_codes("\n"),
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(["bar"], ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[1].lines)
self.assertEqual(["babble", " Aliases: b", "", " babble some"],
ui.unwrapped_outputs[2].lines[:4])
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[3].lines)
def testTerminalResize(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("babble\n"),
[curses.KEY_RESIZE, 100, 85], # Resize to [100, 85]
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The resize event should have caused a second screen output event.
self.assertEqual(2, len(ui.unwrapped_outputs))
self.assertEqual(2, len(ui.wrapped_outputs))
self.assertEqual(2, len(ui.scroll_messages))
# The 1st and 2nd screen outputs should be identical (unwrapped).
self.assertEqual(ui.unwrapped_outputs[0], ui.unwrapped_outputs[1])
# The 1st scroll info should contain scrolling, because the screen size
# is less than the number of lines in the output.
self.assertIn("Scroll (PgDn): 0.00%", ui.scroll_messages[0])
def testTabCompletionWithCommonPrefix(self):
# Type "b" and trigger tab completion.
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("b\t"), string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["ba"])
ui.run_ui()
# The automatically registered exit commands "exit" and "quit" should not
# appear in the tab completion candidates because they don't start with
# "b".
self.assertEqual([["ba", "babble"]], ui.candidates_lists)
# "ba" is a common prefix of the two candidates. So the "ba" command should
# have been issued after the Enter.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
def testTabCompletionEmptyTriggerWithoutCommonPrefix(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["a"])
# Use a different alias "a" instead.
ui.run_ui()
# The manually registered command, along with the automatically registered
# exit commands should appear in the candidates.
self.assertEqual(
[["a", "babble", "exit", "h", "help", "m", "mouse", "quit"]],
ui.candidates_lists)
# The two candidates have no common prefix. So no command should have been
# issued.
self.assertEqual(0, len(ui.unwrapped_outputs))
self.assertEqual(0, len(ui.wrapped_outputs))
self.assertEqual(0, len(ui.scroll_messages))
def testTabCompletionNonemptyTriggerSingleCandidate(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("b\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["a"])
ui.run_ui()
# There is only one candidate, so no candidates should have been displayed.
# Instead, the completion should have been automatically keyed in, leading
# to the "babble" command being issued.
self.assertEqual([[]], ui.candidates_lists)
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.wrapped_outputs[0].lines[:60])
def testTabCompletionNoMatch(self):
ui = MockCursesUI(
40,
80,
command_sequence=[string_to_codes("c\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["a"])
ui.run_ui()
# Only the invalid command "c" should have been issued.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["ERROR: Invalid command prefix \"c\""],
ui.unwrapped_outputs[0].lines)
self.assertEqual(["ERROR: Invalid command prefix \"c\""],
ui.wrapped_outputs[0].lines[:1])
def testTabCompletionOneWordContext(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\t"), # Trigger tab completion.
string_to_codes("\n"),
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.register_tab_comp_context(["babble", "b"], ["10", "20", "30", "300"])
ui.run_ui()
self.assertEqual([["30", "300"]], ui.candidates_lists)
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 30, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 30, ui.wrapped_outputs[0].lines[:30])
def testTabCompletionTwice(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 1\t"), # Trigger tab completion.
string_to_codes("2\t"), # With more prefix, tab again.
string_to_codes("3\n"),
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.register_tab_comp_context(["babble", "b"], ["10", "120", "123"])
ui.run_ui()
# There should have been two different lists of candidates.
self.assertEqual([["10", "120", "123"], ["120", "123"]],
ui.candidates_lists)
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.wrapped_outputs))
self.assertEqual(1, len(ui.scroll_messages))
self.assertEqual(["bar"] * 123, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 123, ui.wrapped_outputs[0].lines[:123])
def testRegexSearch(self):
"""Test regex search."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/(b|r)\n"), # Regex search and highlight.
string_to_codes("/a\n"), # Regex search and highlight.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The unwrapped (original) output should never have any highlighting.
self.assertEqual(3, len(ui.unwrapped_outputs))
for i in range(3):
self.assertEqual(["bar"] * 3, ui.unwrapped_outputs[i].lines)
self.assertEqual({}, ui.unwrapped_outputs[i].font_attr_segs)
# The wrapped outputs should show highlighting depending on the regex.
self.assertEqual(3, len(ui.wrapped_outputs))
# The first output should have no highlighting.
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)
# The second output should have highlighting for "b" and "r".
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
for i in range(3):
self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
ui.wrapped_outputs[1].font_attr_segs[i])
# The third output should have highlighting for "a" only.
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
for i in range(3):
self.assertEqual([(1, 2, "black_on_white")],
ui.wrapped_outputs[2].font_attr_segs[i])
def testRegexSearchContinuation(self):
"""Test continuing scrolling down to next regex match."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/(b|r)\n"), # Regex search and highlight.
string_to_codes("/\n"), # Continue scrolling down: 1st time.
string_to_codes("/\n"), # Continue scrolling down: 2nd time.
string_to_codes("/\n"), # Continue scrolling down: 3rd time.
string_to_codes("/\n"), # Continue scrolling down: 4th time.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The 1st output is for the non-searched output. The other three are for
# the searched output. Even though continuation search "/" is performed
# four times, there should be only three searched outputs, because the
# last one has exceeded the end.
self.assertEqual(4, len(ui.unwrapped_outputs))
for i in range(4):
self.assertEqual(["bar"] * 3, ui.unwrapped_outputs[i].lines)
self.assertEqual({}, ui.unwrapped_outputs[i].font_attr_segs)
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)
for j in range(1, 4):
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[j].lines[:3])
self.assertEqual({
0: [(0, 1, "black_on_white"), (2, 3, "black_on_white")],
1: [(0, 1, "black_on_white"), (2, 3, "black_on_white")],
2: [(0, 1, "black_on_white"), (2, 3, "black_on_white")]
}, ui.wrapped_outputs[j].font_attr_segs)
self.assertEqual([0, 0, 1, 2], ui.output_pad_rows)
def testRegexSearchUnderLineWrapping(self):
ui = MockCursesUI(
40,
5, # Use a narrow window to trigger line wrapping
command_sequence=[
string_to_codes("babble -n 3 -l foo-bar-baz-qux\n"),
string_to_codes("/foo\n"), # Regex search and highlight.
string_to_codes("/\n"), # Continue scrolling down: 1st time.
string_to_codes("/\n"), # Continue scrolling down: 2nd time.
string_to_codes("/\n"), # Continue scrolling down: 3rd time.
string_to_codes("/\n"), # Continue scrolling down: 4th time.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some")
ui.run_ui()
self.assertEqual(4, len(ui.wrapped_outputs))
for wrapped_output in ui.wrapped_outputs:
self.assertEqual(["foo-", "bar-", "baz-", "qux"] * 3,
wrapped_output.lines[0 : 12])
# The scroll location should reflect the line wrapping.
self.assertEqual([0, 0, 4, 8], ui.output_pad_rows)
def testRegexSearchNoMatchContinuation(self):
"""Test continuing scrolling when there is no regex match."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/foo\n"), # Regex search and highlight.
string_to_codes("/\n"), # Continue scrolling down.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# The regex search and continuation search in the 3rd command should not
# have produced any output.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual([0], ui.output_pad_rows)
def testRegexSearchContinuationWithoutSearch(self):
"""Test continuation scrolling when no regex search has been performed."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/\n"), # Continue scrolling without search first.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual([0], ui.output_pad_rows)
def testRegexSearchWithInvalidRegex(self):
"""Test using invalid regex to search."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/[\n"), # Continue scrolling without search first.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
# Invalid regex should not have led to a new screen of output.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual([0], ui.output_pad_rows)
# Invalid regex should have led to a toast error message.
self.assertEqual(["ERROR: Invalid regular expression: \"[\""], ui.toasts)
def testRegexSearchFromCommandHistory(self):
"""Test regex search commands are recorded in command history."""
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 3\n"),
string_to_codes("/(b|r)\n"), # Regex search and highlight.
string_to_codes("babble -n 4\n"),
[curses.KEY_UP],
[curses.KEY_UP],
string_to_codes("\n"), # Hit Up twice and Enter.
self._EXIT
])
ui.register_command_handler(
"babble", self._babble, "babble some", prefix_aliases=["b"])
ui.run_ui()
self.assertEqual(4, len(ui.wrapped_outputs))
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[0].lines[:3])
self.assertEqual({}, ui.wrapped_outputs[0].font_attr_segs)
self.assertEqual(["bar"] * 3, ui.wrapped_outputs[1].lines[:3])
for i in range(3):
self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
ui.wrapped_outputs[1].font_attr_segs[i])
self.assertEqual(["bar"] * 4, ui.wrapped_outputs[2].lines[:4])
self.assertEqual({}, ui.wrapped_outputs[2].font_attr_segs)
# The regex search command loaded from history should have worked on the
# new screen output.
self.assertEqual(["bar"] * 4, ui.wrapped_outputs[3].lines[:4])
for i in range(4):
self.assertEqual([(0, 1, "black_on_white"), (2, 3, "black_on_white")],
ui.wrapped_outputs[3].font_attr_segs[i])
def testDisplayTensorWithIndices(self):
"""Test displaying tensor with indices."""
ui = MockCursesUI(
8, # Use a small screen height to cause scrolling.
80,
command_sequence=[
string_to_codes("print_ones --size 5\n"),
[curses.KEY_NPAGE],
[curses.KEY_NPAGE],
[curses.KEY_NPAGE],
[curses.KEY_END],
[curses.KEY_NPAGE], # This PageDown goes over the bottom limit.
[curses.KEY_PPAGE],
[curses.KEY_PPAGE],
[curses.KEY_PPAGE],
[curses.KEY_HOME],
[curses.KEY_PPAGE], # This PageUp goes over the top limit.
self._EXIT
])
ui.register_command_handler("print_ones", self._print_ones,
"print an all-one matrix of specified size")
ui.run_ui()
self.assertEqual(11, len(ui.unwrapped_outputs))
self.assertEqual(11, len(ui.output_array_pointer_indices))
self.assertEqual(11, len(ui.scroll_messages))
for i in range(11):
self.assertEqual([
"Tensor \"m\":", "", "array([[ 1., 1., 1., 1., 1.],",
" [ 1., 1., 1., 1., 1.],",
" [ 1., 1., 1., 1., 1.],",
" [ 1., 1., 1., 1., 1.],",
" [ 1., 1., 1., 1., 1.]])"
], ui.unwrapped_outputs[i].lines)
self.assertEqual({
0: None,
-1: [1, 0]
}, ui.output_array_pointer_indices[0])
self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[0])
# Scrolled down one line.
self.assertEqual({
0: None,
-1: [2, 0]
}, ui.output_array_pointer_indices[1])
self.assertIn(" Scroll (PgDn/PgUp): 16.67% -[2,0] ", ui.scroll_messages[1])
# Scrolled down one line.
self.assertEqual({
0: [0, 0],
-1: [3, 0]
}, ui.output_array_pointer_indices[2])
self.assertIn(" Scroll (PgDn/PgUp): 33.33% [0,0]-[3,0] ",
ui.scroll_messages[2])
# Scrolled down one line.
self.assertEqual({
0: [1, 0],
-1: [4, 0]
}, ui.output_array_pointer_indices[3])
self.assertIn(" Scroll (PgDn/PgUp): 50.00% [1,0]-[4,0] ",
ui.scroll_messages[3])
# Scroll to the bottom.
self.assertEqual({
0: [4, 0],
-1: None
}, ui.output_array_pointer_indices[4])
self.assertIn(" Scroll (PgUp): 100.00% [4,0]- ", ui.scroll_messages[4])
# Attempt to scroll beyond the bottom should lead to no change.
self.assertEqual({
0: [4, 0],
-1: None
}, ui.output_array_pointer_indices[5])
self.assertIn(" Scroll (PgUp): 100.00% [4,0]- ", ui.scroll_messages[5])
# Scrolled up one line.
self.assertEqual({
0: [3, 0],
-1: None
}, ui.output_array_pointer_indices[6])
self.assertIn(" Scroll (PgDn/PgUp): 83.33% [3,0]- ", ui.scroll_messages[6])
# Scrolled up one line.
self.assertEqual({
0: [2, 0],
-1: None
}, ui.output_array_pointer_indices[7])
self.assertIn(" Scroll (PgDn/PgUp): 66.67% [2,0]- ", ui.scroll_messages[7])
# Scrolled up one line.
self.assertEqual({
0: [1, 0],
-1: [4, 0]
}, ui.output_array_pointer_indices[8])
self.assertIn(" Scroll (PgDn/PgUp): 50.00% [1,0]-[4,0] ",
ui.scroll_messages[8])
# Scroll to the top.
self.assertEqual({
0: None,
-1: [1, 0]
}, ui.output_array_pointer_indices[9])
self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[9])
# Attempt to scroll past the top limit should lead to no change.
self.assertEqual({
0: None,
-1: [1, 0]
}, ui.output_array_pointer_indices[10])
self.assertIn(" Scroll (PgDn): 0.00% -[1,0] ", ui.scroll_messages[10])
def testScrollTensorByValidIndices(self):
"""Test scrolling to specified (valid) indices in a tensor."""
ui = MockCursesUI(
8, # Use a small screen height to cause scrolling.
80,
command_sequence=[
string_to_codes("print_ones --size 5\n"),
string_to_codes("@[0, 0]\n"), # Scroll to element [0, 0].
string_to_codes("@1,0\n"), # Scroll to element [3, 0].
string_to_codes("@[0,2]\n"), # Scroll back to line 0.
self._EXIT
])
ui.register_command_handler("print_ones", self._print_ones,
"print an all-one matrix of specified size")
ui.run_ui()
self.assertEqual(4, len(ui.unwrapped_outputs))
self.assertEqual(4, len(ui.output_array_pointer_indices))
for i in range(4):
self.assertEqual([
"Tensor \"m\":", "", "array([[ 1., 1., 1., 1., 1.],",
" [ 1., 1., 1., 1., 1.],",
" [ 1., 1., 1., 1., 1.],",
" [ 1., 1., 1., 1., 1.],",
" [ 1., 1., 1., 1., 1.]])"
], ui.unwrapped_outputs[i].lines)
self.assertEqual({
0: None,
-1: [1, 0]
}, ui.output_array_pointer_indices[0])
self.assertEqual({
0: [0, 0],
-1: [3, 0]
}, ui.output_array_pointer_indices[1])
self.assertEqual({
0: [1, 0],
-1: [4, 0]
}, ui.output_array_pointer_indices[2])
self.assertEqual({
0: [0, 0],
-1: [3, 0]
}, ui.output_array_pointer_indices[3])
def testScrollTensorByInvalidIndices(self):
"""Test scrolling to specified invalid indices in a tensor."""
ui = MockCursesUI(
8, # Use a small screen height to cause scrolling.
80,
command_sequence=[
string_to_codes("print_ones --size 5\n"),
string_to_codes("@[10, 0]\n"), # Scroll to invalid indices.
string_to_codes("@[]\n"), # Scroll to invalid indices.
string_to_codes("@\n"), # Scroll to invalid indices.
self._EXIT
])
ui.register_command_handler("print_ones", self._print_ones,
"print an all-one matrix of specified size")
ui.run_ui()
# Because all scroll-by-indices commands are invalid, there should be only
# one output event.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(1, len(ui.output_array_pointer_indices))
# Check error messages.
self.assertEqual("ERROR: Indices exceed tensor dimensions.", ui.toasts[1])
self.assertEqual("ERROR: invalid literal for int() with base 10: ''",
ui.toasts[2])
self.assertEqual("ERROR: Empty indices.", ui.toasts[3])
def testWriteScreenOutputToFileWorks(self):
output_path = tempfile.mktemp()
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2>%s\n" % output_path),
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
with gfile.Open(output_path, "r") as f:
self.assertEqual("bar\nbar\n", f.read())
# Clean up output file.
gfile.Remove(output_path)
def testIncompleteRedirectErrors(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2 >\n"),
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(["ERROR: Redirect file path is empty"], ui.toasts)
self.assertEqual(0, len(ui.unwrapped_outputs))
def testAppendingRedirectErrors(self):
output_path = tempfile.mktemp()
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 2 >> %s\n" % output_path),
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(
["Syntax error for command: babble", "For help, do \"help babble\""],
ui.unwrapped_outputs[0].lines)
# Clean up output file.
gfile.Remove(output_path)
def testMouseOffTakesEffect(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("mouse off\n"), string_to_codes("babble\n"),
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertFalse(ui._mouse_enabled)
self.assertIn("Mouse: OFF", ui.scroll_messages[-1])
def testMouseOffAndOnTakeEffect(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("mouse off\n"), string_to_codes("mouse on\n"),
string_to_codes("babble\n"), self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertTrue(ui._mouse_enabled)
self.assertIn("Mouse: ON", ui.scroll_messages[-1])
def testMouseClickOnLinkTriggersCommand(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 10 -k\n"),
[curses.KEY_MOUSE, 1, 4], # A click on a hyperlink.
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(2, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
def testMouseClickOffLinkDoesNotTriggersCommand(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 10 -k\n"),
# A click off a hyperlink (too much to the right).
[curses.KEY_MOUSE, 8, 4],
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
# The mouse click event should not have triggered any command.
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
# This command should have generated no main menus.
self.assertEqual([None], ui.main_menu_list)
def testMouseClickOnEnabledMenuItemWorks(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 10 -m\n"),
# A click on the enabled menu item.
[curses.KEY_MOUSE, 3, 1],
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(2, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
self.assertEqual(["bar"] * 60, ui.unwrapped_outputs[1].lines)
# Check the content of the menu.
self.assertEqual(["| babble again | ahoy | "], ui.main_menu_list[0].lines)
self.assertEqual(1, len(ui.main_menu_list[0].font_attr_segs))
self.assertEqual(1, len(ui.main_menu_list[0].font_attr_segs[0]))
item_annot = ui.main_menu_list[0].font_attr_segs[0][0]
self.assertEqual(2, item_annot[0])
self.assertEqual(14, item_annot[1])
self.assertEqual("babble", item_annot[2][0].content)
self.assertEqual("underline", item_annot[2][1])
# The output from the menu-triggered command does not have a menu.
self.assertIsNone(ui.main_menu_list[1])
def testMouseClickOnDisabledMenuItemTriggersNoCommand(self):
ui = MockCursesUI(
40,
80,
command_sequence=[
string_to_codes("babble -n 10 -m\n"),
# A click on the disabled menu item.
[curses.KEY_MOUSE, 18, 1],
self._EXIT
])
ui.register_command_handler("babble", self._babble, "")
ui.run_ui()
self.assertEqual(1, len(ui.unwrapped_outputs))
self.assertEqual(["bar"] * 10, ui.unwrapped_outputs[0].lines)
if __name__ == "__main__":
googletest.main()
|
|
#!/usr/bin/env python
"""This module has unit tests for the pvl __init__ functions."""
# Copyright 2019, Ross A. Beyer ([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.mock import call, create_autospec, mock_open, patch
from pathlib import Path
import pvl
import pvl.new as pvln
from pvl.collections import PVLModuleNew as PVLModule
from pvl.collections import PVLGroupNew as PVLGroup
from pvl.collections import PVLObjectNew as PVLObject
data_dir = Path("tests/data")
class TestLoadS(unittest.TestCase):
def test_loads(self):
some_pvl = """
a = b
GROUP = c
c = d
END_GROUP
e =false
END"""
decoded = PVLModule(a="b", c=PVLGroup(c="d"), e=False)
self.assertEqual(decoded, pvln.loads(some_pvl))
self.assertEqual(PVLModule(a="b"), pvln.loads("a=b"))
class TestLoad(unittest.TestCase):
def setUp(self):
self.simple = data_dir / "pds3" / "simple_image_1.lbl"
rawurl = "https://raw.githubusercontent.com/planetarypy/pvl/main/"
self.url = rawurl + str(self.simple)
self.simplePVL = PVLModule(
{
"PDS_VERSION_ID": "PDS3",
"RECORD_TYPE": "FIXED_LENGTH",
"RECORD_BYTES": 824,
"LABEL_RECORDS": 1,
"FILE_RECORDS": 601,
"^IMAGE": 2,
"IMAGE": PVLObject(
{
"LINES": 600,
"LINE_SAMPLES": 824,
"SAMPLE_TYPE": "MSB_INTEGER",
"SAMPLE_BITS": 8,
"MEAN": 51.67785396440129,
"MEDIAN": 50.0,
"MINIMUM": 0,
"MAXIMUM": 255,
"STANDARD_DEVIATION": 16.97019,
"CHECKSUM": 25549531,
}
),
}
)
def test_load_w_open(self):
with open(self.simple) as f:
self.assertEqual(self.simplePVL, pvln.load(f))
def test_load_w_Path(self):
self.assertEqual(self.simplePVL, pvln.load(self.simple))
def test_load_w_string_path(self):
string_path = str(self.simple)
self.assertEqual(self.simplePVL, pvln.load(string_path))
def test_loadu(self):
self.assertEqual(self.simplePVL, pvln.loadu(self.url))
self.assertEqual(
self.simplePVL, pvln.loadu(self.simple.resolve().as_uri())
)
@patch("pvl.new.loads")
@patch("pvl.new.decode_by_char")
def test_loadu_args(self, m_decode, m_loads):
pvln.loadu(self.url, data=None)
pvln.loadu(self.url, noturlopen="should be passed to loads")
m_decode.assert_called()
self.assertNotIn("data", m_loads.call_args_list[0][1])
self.assertIn("noturlopen", m_loads.call_args_list[1][1])
def test_load_w_quantity(self):
try:
from astropy import units as u
from pvl.decoder import OmniDecoder
pvl_file = "tests/data/pds3/units1.lbl"
km_upper = u.def_unit("KM", u.km)
m_upper = u.def_unit("M", u.m)
u.add_enabled_units([km_upper, m_upper])
label = pvln.load(
pvl_file, decoder=OmniDecoder(quantity_cls=u.Quantity)
)
self.assertEqual(label["FLOAT_UNIT"], u.Quantity(0.414, "KM"))
except ImportError:
pass
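# Note: astropy is treated as an optional dependency here; if it is not
# installed, the ImportError is swallowed and this test silently becomes a
# no-op instead of failing.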
class TestISIScub(unittest.TestCase):
def setUp(self):
self.cub = data_dir / "pattern.cub"
self.cubpvl = PVLModule(
IsisCube=PVLObject(
Core=PVLObject(
StartByte=65537,
Format="Tile",
TileSamples=128,
TileLines=128,
Dimensions=PVLGroup(Samples=90, Lines=90, Bands=1),
Pixels=PVLGroup(
Type="Real", ByteOrder="Lsb", Base=0.0, Multiplier=1.0
),
)
),
Label=PVLObject(Bytes=65536),
)
def test_load_cub(self):
self.assertEqual(self.cubpvl, pvln.load(self.cub))
def test_load_cub_opened(self):
with open(self.cub, "rb") as f:
self.assertEqual(self.cubpvl, pvln.load(f))
class TestDumpS(unittest.TestCase):
def setUp(self):
self.module = PVLModule(
a="b",
staygroup=PVLGroup(c="d"),
obj=PVLGroup(d="e", f=PVLGroup(g="h")),
)
def test_dumps_PDS(self):
s = """A = b\r
GROUP = staygroup\r
C = d\r
END_GROUP = staygroup\r
OBJECT = obj\r
D = e\r
GROUP = f\r
G = h\r
END_GROUP = f\r
END_OBJECT = obj\r
END\r\n"""
self.assertEqual(s, pvln.dumps(self.module))
def test_dumps_PVL(self):
s = """a = b;
BEGIN_GROUP = staygroup;
c = d;
END_GROUP = staygroup;
BEGIN_GROUP = obj;
d = e;
BEGIN_GROUP = f;
g = h;
END_GROUP = f;
END_GROUP = obj;
END;"""
self.assertEqual(
s, pvln.dumps(
self.module,
encoder=pvl.encoder.PVLEncoder(
group_class=PVLGroup, object_class=PVLObject
)
)
)
def test_dumps_ODL(self):
s = """A = b\r
GROUP = staygroup\r
C = d\r
END_GROUP = staygroup\r
GROUP = obj\r
D = e\r
GROUP = f\r
G = h\r
END_GROUP = f\r
END_GROUP = obj\r
END\r\n"""
self.assertEqual(
s, pvln.dumps(self.module, encoder=pvl.encoder.ODLEncoder(
group_class=PVLGroup, object_class=PVLObject
))
)
class TestDump(unittest.TestCase):
def setUp(self):
self.module = PVLModule(
a="b",
staygroup=PVLGroup(c="d"),
obj=PVLGroup(d="e", f=PVLGroup(g="h")),
)
self.string = """A = b\r
GROUP = staygroup\r
C = d\r
END_GROUP = staygroup\r
OBJECT = obj\r
D = e\r
GROUP = f\r
G = h\r
END_GROUP = f\r
END_OBJECT = obj\r
END\r\n"""
def test_dump_Path(self):
mock_path = create_autospec(Path)
with patch("pvl.new.Path", autospec=True, return_value=mock_path):
pvln.dump(self.module, Path("dummy"))
self.assertEqual(
[call.write_text(self.string)], mock_path.method_calls
)
@patch("builtins.open", mock_open())
def test_dump_file_object(self):
with open("dummy", "w") as f:
pvln.dump(self.module, f)
self.assertEqual(
[call.write(self.string.encode())], f.method_calls
)
def test_not_dumpable(self):
f = 5
self.assertRaises(TypeError, pvln.dump, self.module, f)
|
|
# -*- coding: utf-8 -*-
## Author : [email protected]
## This code for T110, ELT CO2 sensor
## Please see details for the CO2 sensor data sheet : http://eltsensor.co.kr/2012/eng/pdf/T-110/DS_T-110-3V_ver1.210.pdf
import serial,os,time
import sys
import RPi.GPIO as GPIO
import logging
import logging.handlers
import json
import requests
import fcntl, socket, struct
DEBUG_PRINT = 1
SERIAL_READ_BYTE = 12
FILEMAXBYTE = 1024 * 1024 * 100 #100MB
LOG_PATH = '/home/pi/log_tos.log'
# important: sensorname should be pre-defined and unique
sensorname = "co2.ws"
url = "http://xxx.xxx.xxx.xxx/api/put"
def getHwAddr(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
return ':'.join(['%02x' %ord(char) for char in info[18:24]])
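# Note: 0x8927 is the SIOCGIFHWADDR ioctl, so getHwAddr() returns the MAC
# address of the interface as a colon-separated string, e.g. (illustrative
# value only) getHwAddr('eth0') -> 'b8:27:eb:12:34:56'. The colons are
# replaced with dots below so the address can be used as a tag value in the
# JSON payload sent to the server.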
macAddr = getHwAddr('eth0')
macAddr = macAddr.replace(':','.')
level = 0
ppm = 0
def led0On():
GPIO.output(18, True)
def led1On():
GPIO.output(23, True)
def led2On():
GPIO.output(24, True)
def led3On():
GPIO.output(25, True)
def led0Off():
GPIO.output(18, False)
def led1Off():
GPIO.output(23, False)
def led2Off():
GPIO.output(24, False)
def led3Off():
GPIO.output(25, False)
def ledAllOff():
led0Off()
led1Off()
led2Off()
led3Off()
def ledAllOn():
led0On()
led1On()
led2On()
led3On()
def rled0On():
led0Off()
def rled1On():
led1Off()
def rled2On():
led2Off()
def rled3On():
led3Off()
def rled0Off():
led0On()
def rled1Off():
led1On()
def rled2Off():
led2On()
def rled3Off():
led3On()
def rledAllOff():
ledAllOn()
def rledAllOn():
ledAllOff()
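# Note: the rled* helpers deliberately invert the plain led* helpers
# (rled0On() drives the pin low via led0Off(), and vice versa), which suggests
# the LEDs on this board are wired active-low.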
def rled0Blink():
led0On()
time.sleep(0.5)
led0Off()
time.sleep(0.3)
led0On()
time.sleep(0.5)
led0Off()
time.sleep(0.3)
led0On()
def rled1Blink():
led1On()
time.sleep(0.5)
led1Off()
time.sleep(0.3)
led1On()
time.sleep(0.5)
led1Off()
time.sleep(0.3)
led1On()
def rled2Blink():
led2On()
time.sleep(0.5)
led2Off()
time.sleep(0.3)
led2On()
time.sleep(0.5)
led2Off()
time.sleep(0.3)
led2On()
def rled3Blink():
led3On()
time.sleep(0.5)
led3Off()
time.sleep(0.3)
led3On()
time.sleep(0.5)
led3Off()
time.sleep(0.3)
led3On()
# check length, alignment of incoming packet string
def syncfind():
index = 0
alignment = 0
while 1:
in_byte = serial_in_device.read(1)
# packet[8] should be 'm'
# end of packet is packet[10]
if in_byte == 'm':
#print 'idx =', index, in_byte
alignment = 8
if alignment == 10:
alignment = 1
index = 0
break
elif alignment > 0 :
alignment += 1
index += 1
def checkAlignment(incoming):
idxNum = incoming.find('m')
# when idxNum is 9 the packet is correctly aligned
offset = idxNum - 9
if offset > 0 :
new_str = incoming[offset:]
new_str = new_str + incoming[:offset]
if offset < 0 :
offset = 12 + offset
new_str = incoming[offset:]
new_str = new_str + incoming[:offset]
return new_str
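# Worked example (illustrative): in a well-aligned 12-byte packet the 'm' of
# "ppm" sits at index 9. If two stray bytes are prepended by the UART, 'm' is
# found at index 11, so offset = 11 - 9 = 2 and the packet is rotated left by
# two bytes (incoming[2:] + incoming[:2]), restoring the expected layout.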
def init_process():
print " "
print "MSG - [S100, T110 CO2 Sensor Driver on RASPI2, Please check log file : ", LOG_PATH
print "MSG - now starting to read SERIAL PORT"
print " "
# HW setup, GPIO
GPIO.setwarnings(False)
GPIO.cleanup()
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)
GPIO.setup(23, GPIO.OUT)
GPIO.setup(24, GPIO.OUT)
GPIO.setup(25, GPIO.OUT)
logger.info(' *start* GPIO all set, trying to open serial port, SW starting ')
rledAllOn()
######################################################################
# START Here. Main
######################################################################
# set logger file
logger = logging.getLogger(sensorname)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fileHandler = logging.handlers.RotatingFileHandler(LOG_PATH, maxBytes=FILEMAXBYTE,backupCount=10)
fileHandler.setLevel(logging.DEBUG)
fileHandler.setFormatter(formatter)
logger.addHandler(fileHandler)
#consoleHandler = logging.StreamHandler()
#consoleHandler.setLevel(logging.DEBUG)
#consoleHandler.setFormatter(formatter)
#logger.addHandler(consoleHandler)
# call raspi init...
init_process()
# open RASPI serial device, 38400
try:
serial_in_device = serial.Serial('/dev/ttyAMA0',38400)
except serial.SerialException, e:
logger.error("Serial port open error")
rled0Off()
rled1Off()
rled2Off()
rled3Off()
while True:
ppm = 0
try:
in_byte = serial_in_device.read(SERIAL_READ_BYTE)
pos = 0
except serial.SerialException, e:
rled0Off()
rled1Off()
rled2Off()
rled3Off()
if len(in_byte) != SERIAL_READ_BYTE:
logger.error("Serial packet size is strange, %d, expected size is %d" % (len(in_byte),SERIAL_READ_BYTE))
print 'serial byte read count error'
continue
# sometimes the 12-byte alignment is incorrect,
# especially when this script is run from /etc/rc.local
if in_byte[9] != 'm':
shift_byte = checkAlignment(in_byte)
in_byte = shift_byte
if ('ppm' in in_byte):
if DEBUG_PRINT :
print '-----\/---------\/------ DEBUG_PRINT set -----\/---------\/------ '
for byte in in_byte :
print "serial_in_byte[%d]: " %pos,
pos += 1
if ord(byte) == 0x0d:
print "escape:", '0x0d'," Hex: ", byte.encode('hex')
continue
elif ord(byte) == 0x0a:
print "escape:", '0x0a'," Hex: ", byte.encode('hex')
continue
print " String:", byte, " Hex: ", byte.encode('hex')
if in_byte[2] != ' ':
ppm += (int(in_byte[2])) * 1000
if in_byte[3] != ' ':
ppm += (int(in_byte[3])) * 100
if in_byte[4] != ' ':
ppm += (int(in_byte[4])) * 10
if in_byte[5] != ' ':
ppm += (int(in_byte[5]))
logline = sensorname + ' CO2 Level is '+ str(ppm) + ' ppm'
#now = time.localtime()
#now_str = "%04d-%02d-%02d %02d:%02d:%02d" % (now.tm_year, now.tm_mon, now.tm_mday, now.tm_hour, now.tm_min, now.tm_sec)
#logline += now_str
if DEBUG_PRINT :
print logline
if ppm > 2500 :
logger.error("%s", logline)
continue
else :
logger.info("%s", logline)
data = {
"metric": "rc1.co2.ppm",
"timestamp": time.time(),
"value": ppm,
"tags": {
"eth0": macAddr,
"stalk": "VOLOSSH" ,
"sensor" : "co2.t110",
"name" : sensorname,
"floor_room": "10fl_min_room",
"building": "woosung",
"owner": "kang",
"country": "kor"
}
# the number of tags should be less than 9; 8 works, 9 returns an HTTP error
}
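# The payload above looks like an OpenTSDB-style /api/put request (an
# assumption based on the metric/timestamp/value/tags fields and the URL).
# Serialized, it is roughly (illustrative values):
#   {"metric": "rc1.co2.ppm", "timestamp": 1500000000.0, "value": 750,
#    "tags": {"eth0": "b8.27.eb.12.34.56", "sensor": "co2.t110", ...}}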
try :
ret = requests.post(url, data=json.dumps(data))
logger.info("http ret %s", ret)
if DEBUG_PRINT :
print "http return : %s" %ret
except requests.exceptions.Timeout :
logger.error("http connection error, Timeout %s", ret)
continue
except requests.exceptions.ConnectionError :
logger.error("http connection error, Too many requests %s")
continue
#except requests.exceptions :
# print " --------------"
# continue
# LED indication by CO2 level (thresholds as implemented below):
#   ppm < 800          : LED0 blinks
#   800 <= ppm < 1000  : LED0 on
#   1000 <= ppm < 1300 : LED1 on
#   1300 <= ppm < 1600 : LED2 on
#   1600 <= ppm < 1900 : LED2 blinks
#   ppm >= 1900        : all LEDs blink
if ppm < 800 :
rled0Blink()
rled0Blink()
rled0Blink()
led1Off()
led2Off()
led3Off()
elif ppm < 1000 :
led0On()
led1Off()
led2Off()
led3Off()
elif ppm < 1300 :
led0Off()
led1On()
led2Off()
led3Off()
elif ppm < 1600:
led0Off()
led1Off()
led2On()
led3Off()
elif ppm < 1900:
led0Off()
led1Off()
rled2Blink()
rled2Blink()
rled2Blink()
led3Off()
elif ppm >= 1900 :
rled0Blink()
rled0Blink()
rled0Blink()
rled1Blink()
rled1Blink()
rled1Blink()
rled2Blink()
rled2Blink()
rled2Blink()
rled3Blink()
rled3Blink()
rled3Blink()
GPIO.cleanup()
|
|
from flask import render_template, redirect, url_for, abort, flash, request,\
current_app, make_response
from flask_login import login_required, current_user
from flask_sqlalchemy import get_debug_queries
from . import main
from .forms import EditProfileForm, EditProfileAdminForm, PostForm,\
CommentForm
from .. import db
from ..models import Permission, Role, User, Post, Comment
from ..decorators import admin_required, permission_required
@main.after_app_request
def after_request(response):
for query in get_debug_queries():
if query.duration >= current_app.config['FLASKY_SLOW_DB_QUERY_TIME']:
current_app.logger.warning(
'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
% (query.statement, query.parameters, query.duration,
query.context))
return response
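# FLASKY_SLOW_DB_QUERY_TIME is the slow-query threshold in seconds (commonly
# set to a value such as 0.5 in the Flasky configuration); any query whose
# duration meets or exceeds it is logged as a warning above.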
@main.route('/shutdown')
def server_shutdown():
if not current_app.testing:
abort(404)
shutdown = request.environ.get('werkzeug.server.shutdown')
if not shutdown:
abort(500)
shutdown()
return 'Shutting down...'
@main.route('/', methods=['GET', 'POST'])
def index():
form = PostForm()
if current_user.can(Permission.WRITE_ARTICLES) and \
form.validate_on_submit():
post = Post(body=form.body.data,
author=current_user._get_current_object())
db.session.add(post)
return redirect(url_for('.index'))
page = request.args.get('page', 1, type=int)
show_followed = False
if current_user.is_authenticated:
show_followed = bool(request.cookies.get('show_followed', ''))
if show_followed:
query = current_user.followed_posts
else:
query = Post.query
pagination = query.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
return render_template('index.html', form=form, posts=posts,
show_followed=show_followed, pagination=pagination)
@main.route('/about', methods=['GET', 'POST'])
def about():
return render_template('about.html')
@main.route('/case', methods=['GET', 'POST'])
def case():
return render_template('case.html')
@main.route('/user/<username>')
def user(username):
user = User.query.filter_by(username=username).first_or_404()
page = request.args.get('page', 1, type=int)
pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
error_out=False)
posts = pagination.items
return render_template('user.html', user=user, posts=posts,
pagination=pagination)
@main.route('/edit-profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
form = EditProfileForm()
if form.validate_on_submit():
current_user.name = form.name.data
current_user.location = form.location.data
current_user.about_me = form.about_me.data
db.session.add(current_user)
flash('Your profile has been updated.')
return redirect(url_for('.user', username=current_user.username))
form.name.data = current_user.name
form.location.data = current_user.location
form.about_me.data = current_user.about_me
return render_template('edit_profile.html', form=form)
@main.route('/edit-profile/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_profile_admin(id):
user = User.query.get_or_404(id)
form = EditProfileAdminForm(user=user)
if form.validate_on_submit():
user.email = form.email.data
user.username = form.username.data
user.confirmed = form.confirmed.data
user.role = Role.query.get(form.role.data)
user.name = form.name.data
user.location = form.location.data
user.about_me = form.about_me.data
db.session.add(user)
flash('The profile has been updated.')
return redirect(url_for('.user', username=user.username))
form.email.data = user.email
form.username.data = user.username
form.confirmed.data = user.confirmed
form.role.data = user.role_id
form.name.data = user.name
form.location.data = user.location
form.about_me.data = user.about_me
return render_template('edit_profile.html', form=form, user=user)
@main.route('/post/<int:id>', methods=['GET', 'POST'])
def post(id):
post = Post.query.get_or_404(id)
form = CommentForm()
if form.validate_on_submit():
comment = Comment(body=form.body.data,
post=post,
author=current_user._get_current_object())
db.session.add(comment)
flash('Your comment has been published.')
return redirect(url_for('.post', id=post.id, page=-1))
page = request.args.get('page', 1, type=int)
if page == -1:
page = (post.comments.count() - 1) // \
current_app.config['FLASKY_COMMENTS_PER_PAGE'] + 1
pagination = post.comments.order_by(Comment.timestamp.asc()).paginate(
page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
return render_template('post.html', posts=[post], form=form,
comments=comments, pagination=pagination)
@main.route('/edit/<int:id>', methods=['GET', 'POST'])
@login_required
def edit(id):
post = Post.query.get_or_404(id)
if current_user != post.author and \
not current_user.can(Permission.ADMINISTER):
abort(403)
form = PostForm()
if form.validate_on_submit():
post.body = form.body.data
db.session.add(post)
flash('The post has been updated.')
return redirect(url_for('.post', id=post.id))
form.body.data = post.body
return render_template('edit_post.html', form=form)
@main.route('/delete/<int:id>', methods=['GET', 'POST'])
@login_required
def delete(id):
post = Post.query.get_or_404(id)
if current_user != post.author and \
not current_user.can(Permission.ADMINISTER):
abort(404)
db.session.delete(post)
flash('The post has been deleted.')
return redirect(url_for('.index'))
@main.route('/follow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def follow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
if current_user.is_following(user):
flash('You are already following this user.')
return redirect(url_for('.user', username=username))
current_user.follow(user)
flash('You are now following %s.' % username)
return redirect(url_for('.user', username=username))
@main.route('/unfollow/<username>')
@login_required
@permission_required(Permission.FOLLOW)
def unfollow(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
if not current_user.is_following(user):
flash('You are not following this user.')
return redirect(url_for('.user', username=username))
current_user.unfollow(user)
flash('You are not following %s anymore.' % username)
return redirect(url_for('.user', username=username))
@main.route('/followers/<username>')
def followers(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
page = request.args.get('page', 1, type=int)
pagination = user.followers.paginate(
page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
error_out=False)
follows = [{'user': item.follower, 'timestamp': item.timestamp}
for item in pagination.items]
return render_template('followers.html', user=user, title="Followers of",
endpoint='.followers', pagination=pagination,
follows=follows)
@main.route('/followed-by/<username>')
def followed_by(username):
user = User.query.filter_by(username=username).first()
if user is None:
flash('Invalid user.')
return redirect(url_for('.index'))
page = request.args.get('page', 1, type=int)
pagination = user.followed.paginate(
page, per_page=current_app.config['FLASKY_FOLLOWERS_PER_PAGE'],
error_out=False)
follows = [{'user': item.followed, 'timestamp': item.timestamp}
for item in pagination.items]
return render_template('followers.html', user=user, title="Followed by",
endpoint='.followed_by', pagination=pagination,
follows=follows)
@main.route('/all')
@login_required
def show_all():
resp = make_response(redirect(url_for('.index')))
resp.set_cookie('show_followed', '', max_age=30*24*60*60)
return resp
@main.route('/followed')
@login_required
def show_followed():
resp = make_response(redirect(url_for('.index')))
resp.set_cookie('show_followed', '1', max_age=30*24*60*60)
return resp
@main.route('/moderate')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate():
page = request.args.get('page', 1, type=int)
pagination = Comment.query.order_by(Comment.timestamp.desc()).paginate(
page, per_page=current_app.config['FLASKY_COMMENTS_PER_PAGE'],
error_out=False)
comments = pagination.items
return render_template('moderate.html', comments=comments,
pagination=pagination, page=page)
@main.route('/moderate/enable/<int:id>')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate_enable(id):
comment = Comment.query.get_or_404(id)
comment.disabled = False
db.session.add(comment)
return redirect(url_for('.moderate',
page=request.args.get('page', 1, type=int)))
@main.route('/moderate/disable/<int:id>')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def moderate_disable(id):
comment = Comment.query.get_or_404(id)
comment.disabled = True
db.session.add(comment)
return redirect(url_for('.moderate',
page=request.args.get('page', 1, type=int)))
|
|
# /test/test_acceptance.py
#
# Test case for psqtraviscontainer/create.py, creating proot containers
#
# See /LICENCE.md for Copyright information
"""Test case for psqtraviscontainer/create.py, creating proot containers."""
import os
import platform
import stat
import sys
import tempfile
from collections import namedtuple
from contextlib import contextmanager
from test.testutil import (download_file_cached,
temporary_environment)
from nose_parameterized import parameterized
from psqtraviscontainer import architecture
from psqtraviscontainer import create
from psqtraviscontainer import use
from psqtraviscontainer.architecture import Alias
from psqtraviscontainer.constants import have_proot_distribution
from psqtraviscontainer.constants import proot_distribution_dir
from psqtraviscontainer.distro import available_distributions
from psqtraviscontainer.linux_container import get_dir_for_distro
import tempdir
from testtools import ExpectedException
from testtools import TestCase
from testtools.matchers import DirExists
from testtools.matchers import FileExists
def _convert_to_switch_args(kwargs):
"""Convert keyword arguments to command line switches."""
arguments = []
def _get_representation(value):
"""Get representation of value as a list."""
if isinstance(value, list):
return " ".join(value)
else:
return str(value)
for key, value in kwargs.items():
if not isinstance(value, bool) or value:
arguments.append("--{0}".format(key))
if not isinstance(value, bool):
arguments.append(_get_representation(value))
return arguments
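# For example (illustrative; ordering follows dict iteration order):
#   _convert_to_switch_args({"release": "precise", "local": True, "repos": ["a", "b"]})
#   -> ["--release", "precise", "--local", "--repos", "a b"]
# A True boolean becomes a bare switch, False is dropped entirely, and lists
# are space-joined into a single argument.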
class SafeTempDir(object): # pylint:disable=R0903
"""A TempDir that dissolves on __exit__, ignoring PermissionError."""
def __init__(self):
"""Forward initialization."""
super(SafeTempDir, self).__init__()
self._temp_dir = tempdir.TempDir()
def __enter__(self):
"""Return internal tempdir."""
return self._temp_dir.__enter__()
def __exit__(self, exc_type, value, traceback):
"""Call dissolve."""
del exc_type
del value
del traceback
self.dissolve()
def dissolve(self):
"""Forward to TempDir dissolve function, ignore PermissionError."""
try:
self._temp_dir.dissolve()
except (IOError, OSError): # suppress(pointless-except)
# IOError and OSError are fine. The directory will be deleted by
# the user's operating system a little later, there's not much we
# can do about this.
pass
@property
def name(self):
"""Getter for 'name'."""
return self._temp_dir.name
def run_create_container_on_dir(directory, *args, **kwargs):
"""Run main setting the container to be at directory."""
del args
arguments = [directory] + _convert_to_switch_args(kwargs)
with cached_downloads():
create.main(arguments=arguments)
def run_create_container(**kwargs):
"""Run main() and returns the container in a TempDir.
This houses the container created. Keyword args are converted
into switch arguments as appropriate.
"""
temp_dir = SafeTempDir()
run_create_container_on_dir(temp_dir.name, **kwargs)
return temp_dir
def default_create_container_arguments(local=True):
"""Get set of arguments which would create first known distribution."""
distro_config = list(available_distributions())[0]
arguments = ("distro", "release")
config = {k: v for k, v in distro_config.items() if k in arguments}
# We must force local to true here so that it gets passed to
# the underlying container
config["local"] = local
return config
def run_create_default_container(local=True):
"""Run main() and return container for first known distribution."""
return run_create_container(**(default_create_container_arguments(local)))
def run_use_container_on_dir(directory, **kwargs):
"""Run main() from psqtraviscontainer/use.py and return status code."""
cmd = kwargs["cmd"]
del kwargs["cmd"]
arguments = [directory] + _convert_to_switch_args(kwargs) + ["--"] + cmd
return use.main(arguments=arguments)
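# For example (illustrative): run_use_container_on_dir("/tmp/c", distro="Ubuntu", cmd=["true"])
# builds the argument list ["/tmp/c", "--distro", "Ubuntu", "--", "true"]
# before handing it to use.main().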
def test_case_requiring_platform(system):
"""Get a TestCase base class which can only be run on platform."""
class TestCaseRequiring(TestCase):
"""A wrapper around TestCase which only runs tests on platform."""
def setUp(self): # suppress(N802)
"""Automatically skips tests if not run on platform."""
super(TestCaseRequiring, self).setUp()
if platform.system() != system:
self.skipTest("""not running on system - {0}""".format(system))
return TestCaseRequiring
class TestCreateProot(test_case_requiring_platform("Linux")):
"""A test case for proot creation basics."""
def setUp(self):
"""Set up the test case and check that we can run it."""
if os.environ.get("TRAVIS", False):
self.skipTest("""Cannot run proot on travis-ci""")
super(TestCreateProot, self).setUp()
def test_create_proot_distro(self):
"""Check that we create a proot distro."""
with run_create_default_container(local=False) as container:
self.assertThat(have_proot_distribution(container),
FileExists())
def test_use_existing_proot_distro(self):
"""Check that we re-use an existing proot distro.
Compare the timestamp of /.have-proot-distribution across two runs;
if it is unchanged, then no re-downloading took place.
"""
with run_create_default_container(local=False) as container:
path_to_proot_stamp = have_proot_distribution(container)
first_timestamp = os.stat(path_to_proot_stamp).st_mtime
config = default_create_container_arguments(local=False)
run_create_container_on_dir(container, **config)
second_timestamp = os.stat(path_to_proot_stamp).st_mtime
self.assertEqual(first_timestamp, second_timestamp)
@contextmanager
def cached_downloads():
"""Context manager to ensure that download_file is patched to use cache."""
import six
import psqtraviscontainer.download # suppress(PYC50)
original_download_file = psqtraviscontainer.download.download_file
psqtraviscontainer.download.download_file = download_file_cached
original_stdout = sys.stdout
original_stderr = sys.stderr
if not os.environ.get("_POLYSQUARE_TRAVIS_CONTAINER_TEST_SHOW_OUTPUT",
None):
sys.stdout = six.StringIO()
sys.stderr = six.StringIO()
try:
yield
finally:
psqtraviscontainer.download.download_file = original_download_file
sys.stdout = original_stdout
sys.stderr = original_stderr
def make_container_inspection_test_case(**create_container_kwargs):
"""Make a TestCase which persists a container until test are complete.
create_container_kwargs is stored and applied to creating the container -
this allows us to switch between proot-based and non-proot containers.
"""
class ContainerInspectionTestCase(TestCase):
"""TestCase where container persists until all tests have completed.
No modifications should be made to the container during any
individual test. The order of tests should not be relied upon.
"""
container_temp_dir = None
def __init__(self, *args, **kwargs):
"""Initialize class."""
cls = ContainerInspectionTestCase
super(cls, self).__init__(*args, **kwargs)
self.container_dir = None
def setUp(self): # suppress(N802)
"""Set up container dir."""
super(ContainerInspectionTestCase, self).setUp()
self.container_dir = self.__class__.container_temp_dir.name
@classmethod
def create_container(cls, **kwargs):
"""Overridable method to create a container for this test case."""
cls.container_temp_dir = run_create_container(**kwargs)
# Suppress flake8 complaints about uppercase characters in function
# names, these functions are overloaded
@classmethod
def setUpClass(cls): # suppress(N802)
"""Set up container for all tests in this test case."""
# Detect if we're about to create a non-local container on an
# environment that doesn't support it
if (create_container_kwargs.get("local", None) is False and
os.environ.get("TRAVIS", None)):
return
with temporary_environment(_FORCE_DOWNLOAD_QEMU="True"):
apply_kwargs = create_container_kwargs
config = default_create_container_arguments(**apply_kwargs)
cls.create_container(**config)
@classmethod
def tearDownClass(cls): # suppress(N802)
"""Dissolve container for all tests in this test case."""
if cls.container_temp_dir:
cls.container_temp_dir.dissolve()
cls.container_temp_dir = None
return ContainerInspectionTestCase
QEMU_ARCHITECTURES = [
"arm",
"i386",
"ppc",
"x86_64"
]
def _format_arch(func, num, params):
"""Format docstring for TestProotDistribution parameterized tests."""
del num
return func.__doc__.format(arch=params[0][0])
class TestProotDistribution(make_container_inspection_test_case(local=False)):
"""Tests to inspect a proot distribution itself."""
def setUp(self): # suppress(N802)
"""Set up TestProotDistribution."""
if platform.system() != "Linux":
self.skipTest("""proot is only available on linux""")
if os.environ.get("TRAVIS", False):
self.skipTest("""Cannot run proot on travis-ci""")
super(TestProotDistribution, self).setUp()
def test_has_proot_dir(self):
"""Check that we have a proot directory in our distribution."""
self.assertThat(proot_distribution_dir(self.container_dir),
DirExists())
def test_has_proot_executable(self):
"""Check that we have a proot executable in our distribution."""
cont = proot_distribution_dir(self.container_dir)
self.assertThat(os.path.join(cont, "bin/proot"),
FileExists())
def test_proot_binary_is_executable(self):
"""Check that that the proot binary is executable."""
cont = proot_distribution_dir(self.container_dir)
proot_binary = os.path.join(cont, "bin/proot")
stat_result = os.stat(proot_binary)
executable_mask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
self.assertTrue(stat_result.st_mode & executable_mask != 0)
@parameterized.expand(QEMU_ARCHITECTURES, testcase_func_doc=_format_arch)
def test_has_qemu_executables(self, arch):
"""Check that we have a qemu executable qemu-{arch}."""
cont = proot_distribution_dir(self.container_dir)
self.assertThat(os.path.join(cont, "bin/qemu-{}".format(arch)),
FileExists())
@parameterized.expand(QEMU_ARCHITECTURES, testcase_func_doc=_format_arch)
def test_qemu_binary_is_executable(self, arch):
"""Check that qemu binary qemu-{arch} is executable."""
cont = proot_distribution_dir(self.container_dir)
proot_binary = os.path.join(cont, "bin/qemu-{}".format(arch))
stat_result = os.stat(proot_binary)
executable_mask = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
self.assertTrue(stat_result.st_mode & executable_mask != 0)
def exec_for_returncode(*argv):
"""Execute command for its return code.
Check that use.main() returns exit code of subprocess.
"""
config = default_create_container_arguments()
with run_create_container(**config) as cont:
use_config = config.copy()
use_config["cmd"] = list(argv)
return run_use_container_on_dir(cont,
**use_config)
PLATFORM_PROGRAM_MAPPINGS = {
"Linux": {
"0": ["true"],
"1": ["false"]
},
"Darwin": {
"0": ["true"],
"1": ["false"]
},
"Windows": {
"0": ["python", "-c", "import sys;sys.exit(0);"],
"1": ["python", "-c", "import sys;sys.exit(1);"]
}
}
class TestExecInContainer(TestCase):
"""A test case for executing things inside a container."""
def test_exec_fail_no_distro(self): # suppress(no-self-use)
"""Check that use.main() fails where there is no distro."""
with SafeTempDir() as container_dir:
with ExpectedException(RuntimeError):
cmd = PLATFORM_PROGRAM_MAPPINGS[platform.system()]["0"]
run_use_container_on_dir(container_dir, cmd=cmd)
def test_exec_success_where_distro(self): # suppress(no-self-use)
"""Check that use.main() succeeds where there is a distro."""
# This will test that we can use "use.main()" without needing
# to specify a distro configuration
with run_create_default_container() as container_dir:
cmd = PLATFORM_PROGRAM_MAPPINGS[platform.system()]["0"]
self.assertEqual(run_use_container_on_dir(container_dir,
cmd=cmd), 0)
def test_exec_return_zero(self):
"""Check that use.main() returns true exit code of subprocess."""
cmd = PLATFORM_PROGRAM_MAPPINGS[platform.system()]["0"]
self.assertEqual(exec_for_returncode(*cmd), 0)
def test_exec_return_one(self):
"""Check that use.main() returns false exit code of subprocess."""
cmd = PLATFORM_PROGRAM_MAPPINGS[platform.system()]["1"]
self.assertEqual(exec_for_returncode(*cmd), 1)
ARCHITECTURE_LIBDIR_MAPPINGS = {
"armhf": "arm-linux-gnueabihf",
"i386": "i386-linux-gnu",
"amd64": "x86_64-linux-gnu",
"arm64": "arm64-linux-gnu",
"powerpc": "powerpc-linux-gnu",
"ppc64el": "ppc64el-linux-gnu"
}
class InstallationConfig(object): # pylint:disable=R0903
"""Manages configuration files."""
def __init__(self, packages, repos):
"""Create temporary files for packages and repos."""
packages_fd, self.packages_path = tempfile.mkstemp()
repos_fd, self.repos_path = tempfile.mkstemp()
packages_file = os.fdopen(packages_fd, "a")
repos_file = os.fdopen(repos_fd, "a")
for package in packages:
packages_file.write("{0}\n".format(package))
for repository in repos:
repos_file.write("{0}\n".format(repository))
packages_file.close()
repos_file.close()
def __enter__(self):
"""Use as context manager."""
return self
def __exit__(self, exc_type, value, traceback):
"""Destroy temporary files."""
del exc_type
del value
del traceback
os.remove(self.packages_path)
os.remove(self.repos_path)
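# A short usage sketch of InstallationConfig (the helper name and values are
# illustrative only): the context manager writes the package and repository
# lists to temporary files and removes those files again on exit.
def _installation_config_example():
    with InstallationConfig(["curl"], ["main"]) as config:
        with open(config.packages_path) as packages_file:
            assert packages_file.read() == "curl\n"
        with open(config.repos_path) as repos_file:
            assert repos_file.read() == "main\n"
    assert not os.path.exists(config.packages_path)
    return True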
def _create_distro_test(test_name, # pylint:disable=R0913
config,
repos,
packages,
test_files,
**kwargs):
"""Create a TemplateDistroTest class."""
class TemplateDistroTest(make_container_inspection_test_case()):
"""Template for checking a distro proot."""
def __init__(self, *args, **kwargs):
"""Initialize members used by this class."""
cls = TemplateDistroTest
super(cls, self).__init__(*args, **kwargs)
self.path_to_distro_root = None
def setUp(self): # suppress(N802)
"""Set up path to distro root."""
super(TemplateDistroTest, self).setUp()
@classmethod
def setUpClass(cls): # suppress(N802)
"""Create a container for all uses of this TemplateDistroTest."""
with InstallationConfig(packages, repos) as command_config:
keys = ("distro", "release")
kwargs.update({k: v for k, v in config.items() if k in keys})
cls.create_container(repos=command_config.repos_path,
packages=command_config.packages_path,
**kwargs)
def test_distro_folder_exists(self):
"""Check that distro folder exists for ."""
if platform.system() == "Linux":
root = get_dir_for_distro(self.container_dir,
config)
self.assertThat(os.path.join(self.container_dir, root),
DirExists())
elif platform.system() == "Darwin":
self.assertThat(os.path.join(self.container_dir, "bin"),
DirExists())
def test_has_package_installed(self):
"""Check that our testing package got installed.
If it did get installed, then it means that the repository
was successfully added and the package was successfully installed
using the native tool. That means that the proot "works".
"""
format_kwargs = dict()
if kwargs.get("release", None) == "trusty":
self.skipTest("""Trusty images are currently unavailable""")
return
if platform.system() == "Linux":
root = get_dir_for_distro(self.container_dir,
config)
distro_arch = architecture.Alias.debian(kwargs["arch"])
archlib = ARCHITECTURE_LIBDIR_MAPPINGS[distro_arch]
format_kwargs["archlib"] = archlib
else:
root = self.container_dir
# Match each file against FileExists. If every match fails (i.e. no
# result is None), raise with the list of mismatches.
match_results = []
for filename in test_files:
path_to_file = os.path.join(root,
filename.format(**format_kwargs))
result = FileExists().match(path_to_file)
if result:
match_results.append(result)
if len(match_results) == len(test_files):
raise Exception(repr(match_results))
TemplateDistroTest.__name__ = test_name
return TemplateDistroTest
_DistroPackage = namedtuple("_DistroPackage", "package files repo")
_DISTRO_INFO = {
"Ubuntu": _DistroPackage(package="libaacs0",
repo=["{ubuntu} {release} universe"],
files=["usr/lib/{archlib}/libaacs.so.0"]),
"Debian": _DistroPackage(package="libaio1",
repo=[],
files=["lib/libaio.so.1.0.1",
"lib/{archlib}/libaio.so.1.0.1"]),
"Fedora": _DistroPackage(package="libaio",
repo=[],
files=["lib/libaio.so.1.0.1",
"lib64/libaio.so.1.0.1"]),
"OSX": _DistroPackage(package="xz",
repo=[],
files=["bin/xz"]),
"Windows": _DistroPackage(package="cmake.portable",
repo=[],
files=["lib/cmake.portable.3.7.0/"
"tools/cmake-3.7.0-win32-x86/bin/"
"cmake.exe"])
}
def get_distribution_tests():
"""Fetch distribution tests as dictionary."""
tests = {}
for config in available_distributions():
config = config.copy()
name_array = bytearray()
for key in sorted(list(config.keys())):
if key in ("info", "pkgsys", "url"):
continue
name_array += bytes(key[0].upper().encode() +
key[1:].encode() +
config[key][0].upper().encode() +
config[key][1:].encode())
name = "Test{0}".format(name_array.decode("ascii"))
distro = config["distro"]
repositories_to_add = _DISTRO_INFO[distro].repo
packages_to_install = [_DISTRO_INFO[distro].package]
files_to_test_for = _DISTRO_INFO[distro].files
kwargs = dict()
try:
kwargs["arch"] = Alias.universal(config["arch"])
except KeyError: # suppress(pointless-except)
pass
# Set the --local switch if the installation type is local. This is
# because we pass the keyword arguments to the main function of
# psq-travis-container-create
kwargs["local"] = (config.get("installation", None) == "local")
tests[name] = _create_distro_test(name,
config,
repositories_to_add,
packages_to_install,
files_to_test_for,
**kwargs)
return tests
for _name, _test in get_distribution_tests().items():
exec("{0} = _test".format(_name)) # pylint:disable=W0122
del _test
|
|
#
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley with assistance from asn1ate v.0.6.0.
# Modified by Russ Housley to add a map for use with opentypes.
#
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
# Enhanced Security Services for S/MIME
#
# ASN.1 source from:
# https://www.rfc-editor.org/rfc/rfc2634.txt
#
from pyasn1.type import char
from pyasn1.type import constraint
from pyasn1.type import namedval
from pyasn1.type import namedtype
from pyasn1.type import tag
from pyasn1.type import univ
from pyasn1.type import useful
from pyasn1_modules import rfc5652
from pyasn1_modules import rfc5280
MAX = float('inf')
ContentType = rfc5652.ContentType
IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber
SubjectKeyIdentifier = rfc5652.SubjectKeyIdentifier
PolicyInformation = rfc5280.PolicyInformation
GeneralNames = rfc5280.GeneralNames
CertificateSerialNumber = rfc5280.CertificateSerialNumber
# Signing Certificate Attribute
# Warning: It is better to use SigningCertificateV2 from RFC 5035
id_aa_signingCertificate = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.12')
class Hash(univ.OctetString):
pass # SHA-1 hash of entire certificate; RFC 5035 supports other hash algorithms
class IssuerSerial(univ.Sequence):
pass
IssuerSerial.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuer', GeneralNames()),
namedtype.NamedType('serialNumber', CertificateSerialNumber())
)
class ESSCertID(univ.Sequence):
pass
ESSCertID.componentType = namedtype.NamedTypes(
namedtype.NamedType('certHash', Hash()),
namedtype.OptionalNamedType('issuerSerial', IssuerSerial())
)
class SigningCertificate(univ.Sequence):
pass
SigningCertificate.componentType = namedtype.NamedTypes(
namedtype.NamedType('certs', univ.SequenceOf(
componentType=ESSCertID())),
namedtype.OptionalNamedType('policies', univ.SequenceOf(
componentType=PolicyInformation()))
)
# Mail List Expansion History Attribute
id_aa_mlExpandHistory = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.3')
ub_ml_expansion_history = univ.Integer(64)
class EntityIdentifier(univ.Choice):
pass
EntityIdentifier.componentType = namedtype.NamedTypes(
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier())
)
class MLReceiptPolicy(univ.Choice):
pass
MLReceiptPolicy.componentType = namedtype.NamedTypes(
namedtype.NamedType('none', univ.Null().subtype(implicitTag=tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('insteadOf', univ.SequenceOf(
componentType=GeneralNames()).subtype(
sizeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
namedtype.NamedType('inAdditionTo', univ.SequenceOf(
componentType=GeneralNames()).subtype(
sizeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
)
class MLData(univ.Sequence):
pass
MLData.componentType = namedtype.NamedTypes(
namedtype.NamedType('mailListIdentifier', EntityIdentifier()),
namedtype.NamedType('expansionTime', useful.GeneralizedTime()),
namedtype.OptionalNamedType('mlReceiptPolicy', MLReceiptPolicy())
)
class MLExpansionHistory(univ.SequenceOf):
pass
MLExpansionHistory.componentType = MLData()
MLExpansionHistory.sizeSpec = constraint.ValueSizeConstraint(1, ub_ml_expansion_history)
# ESS Security Label Attribute
id_aa_securityLabel = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.2')
ub_privacy_mark_length = univ.Integer(128)
ub_security_categories = univ.Integer(64)
ub_integer_options = univ.Integer(256)
class ESSPrivacyMark(univ.Choice):
pass
ESSPrivacyMark.componentType = namedtype.NamedTypes(
namedtype.NamedType('pString', char.PrintableString().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, ub_privacy_mark_length))),
namedtype.NamedType('utf8String', char.UTF8String().subtype(
subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
)
class SecurityClassification(univ.Integer):
pass
SecurityClassification.subtypeSpec = constraint.ValueRangeConstraint(0, ub_integer_options)
SecurityClassification.namedValues = namedval.NamedValues(
('unmarked', 0),
('unclassified', 1),
('restricted', 2),
('confidential', 3),
('secret', 4),
('top-secret', 5)
)
class SecurityPolicyIdentifier(univ.ObjectIdentifier):
pass
class SecurityCategory(univ.Sequence):
pass
SecurityCategory.componentType = namedtype.NamedTypes(
namedtype.NamedType('type', univ.ObjectIdentifier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('value', univ.Any().subtype(implicitTag=tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 1)))
)
class SecurityCategories(univ.SetOf):
pass
SecurityCategories.componentType = SecurityCategory()
SecurityCategories.sizeSpec = constraint.ValueSizeConstraint(1, ub_security_categories)
class ESSSecurityLabel(univ.Set):
pass
ESSSecurityLabel.componentType = namedtype.NamedTypes(
namedtype.NamedType('security-policy-identifier', SecurityPolicyIdentifier()),
namedtype.OptionalNamedType('security-classification', SecurityClassification()),
namedtype.OptionalNamedType('privacy-mark', ESSPrivacyMark()),
namedtype.OptionalNamedType('security-categories', SecurityCategories())
)
# Equivalent Labels Attribute
id_aa_equivalentLabels = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.9')
class EquivalentLabels(univ.SequenceOf):
pass
EquivalentLabels.componentType = ESSSecurityLabel()
# Content Identifier Attribute
id_aa_contentIdentifier = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.7')
class ContentIdentifier(univ.OctetString):
pass
# Content Reference Attribute
id_aa_contentReference = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.10')
class ContentReference(univ.Sequence):
pass
ContentReference.componentType = namedtype.NamedTypes(
namedtype.NamedType('contentType', ContentType()),
namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
namedtype.NamedType('originatorSignatureValue', univ.OctetString())
)
# Message Signature Digest Attribute
id_aa_msgSigDigest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.5')
class MsgSigDigest(univ.OctetString):
pass
# Content Hints Attribute
id_aa_contentHint = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.4')
class ContentHints(univ.Sequence):
pass
ContentHints.componentType = namedtype.NamedTypes(
namedtype.OptionalNamedType('contentDescription', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
namedtype.NamedType('contentType', ContentType())
)
# Receipt Request Attribute
class AllOrFirstTier(univ.Integer):
pass
AllOrFirstTier.namedValues = namedval.NamedValues(
('allReceipts', 0),
('firstTierRecipients', 1)
)
class ReceiptsFrom(univ.Choice):
pass
ReceiptsFrom.componentType = namedtype.NamedTypes(
namedtype.NamedType('allOrFirstTier', AllOrFirstTier().subtype(
implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
namedtype.NamedType('receiptList', univ.SequenceOf(
componentType=GeneralNames()).subtype(implicitTag=tag.Tag(
tag.tagClassContext, tag.tagFormatSimple, 1)))
)
id_aa_receiptRequest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.1')
ub_receiptsTo = univ.Integer(16)
class ReceiptRequest(univ.Sequence):
pass
ReceiptRequest.componentType = namedtype.NamedTypes(
namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
namedtype.NamedType('receiptsFrom', ReceiptsFrom()),
namedtype.NamedType('receiptsTo', univ.SequenceOf(componentType=GeneralNames()).subtype(sizeSpec=constraint.ValueSizeConstraint(1, ub_receiptsTo)))
)
# Receipt Content Type
class ESSVersion(univ.Integer):
pass
ESSVersion.namedValues = namedval.NamedValues(
('v1', 1)
)
id_ct_receipt = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.1')
class Receipt(univ.Sequence):
pass
Receipt.componentType = namedtype.NamedTypes(
namedtype.NamedType('version', ESSVersion()),
namedtype.NamedType('contentType', ContentType()),
namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
namedtype.NamedType('originatorSignatureValue', univ.OctetString())
)
# Map of Attribute Type to the Attribute structure is added to the
# ones that are in rfc5652.py
_cmsAttributesMapUpdate = {
id_aa_signingCertificate: SigningCertificate(),
id_aa_mlExpandHistory: MLExpansionHistory(),
id_aa_securityLabel: ESSSecurityLabel(),
id_aa_equivalentLabels: EquivalentLabels(),
id_aa_contentIdentifier: ContentIdentifier(),
id_aa_contentReference: ContentReference(),
id_aa_msgSigDigest: MsgSigDigest(),
id_aa_contentHint: ContentHints(),
id_aa_receiptRequest: ReceiptRequest(),
}
rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
# Map of Content Type OIDs to Content Types is added to the
# ones that are in rfc5652.py
_cmsContentTypesMapUpdate = {
id_ct_receipt: Receipt(),
}
rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
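# A minimal usage sketch: populate the SigningCertificate structure defined
# above and DER-encode it with pyasn1. The 20-byte zero hash and the helper
# name below are illustrative placeholders, not real certificate data.
def _signing_certificate_example():
    from pyasn1.codec.der.encoder import encode as der_encode
    cert_id = ESSCertID()
    cert_id['certHash'] = Hash(b'\x00' * 20)
    signing_cert = SigningCertificate()
    # 'certs' is a SequenceOf ESSCertID; 'policies' stays absent (optional)
    signing_cert['certs'].append(cert_id)
    return der_encode(signing_cert)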
|
|
# -*- coding: utf-8 -*-
"""helper functions that are used both in rtfits and rtplots"""
from itertools import tee, islice
from collections import OrderedDict
try:
import numpy as np
except ModuleNotFoundError:
pass
def rectangularize(array, return_mask=False, dim=None, return_masked=False, dtype=None):
"""
return a rectangularized version of the input-array by repeating the
last value to obtain the smallest possible rectangular shape.
input:
- array = [[1,2,3], [1], [1,2]]
output:
- return_masked=False: [[1,2,3], [1,1,1], [1,2,2]]
- return_masked=True: [[1,2,3], [1,--,--], [1,2,--]]
Parameters
----------
array: list of lists
the input-data that is intended to be rectangularized
return_mask: bool (default = False)
indicator if the mask of the added (padded) values should be returned as well
dim: int (default = None)
the dimension of the rectangularized array
if None, the longest length of all sub-lists will be used
return_masked: bool (default=False)
indicator if a masked-array should be returned
dtype: type (default = None)
the dtype of the returned array. If None, the dtype of the first
element will be used
Returns
-------
new_array: array-like
a rectangularized version of the input-array
mask: array-like (only if 'return_mask' is True)
a mask indicating the added values
"""
# use this method to get the dtype of the first element since it works with
# pandas-Series, lists, arrays, dict-value views, etc.
if dtype is None:
dtype = np.array(next(islice(array, 1))).dtype
if dim is None:
# get longest dimension of sub-arrays
dim = len(max(array, key=len))
if return_mask is True or return_masked is True:
newarray = np.empty((len(array), dim), dtype=dtype)
mask = np.full((len(array), dim), False, dtype=bool)
for i, s in enumerate(array):
le = len(s)
newarray[i, :le] = s
newarray[i, le:] = s[-1]
mask[i, le:] = True
if return_masked is True:
return np.ma.masked_array(newarray, mask)
else:
return [newarray, mask]
else:
newarray = np.empty((len(array), dim), dtype=dtype)
for i, s in enumerate(array):
le = len(s)
newarray[i, :le] = s
newarray[i, le:] = s[-1]
return newarray
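# A minimal usage sketch of rectangularize() mirroring the docstring example
# above (the helper name is illustrative and assumes numpy is importable).
def _rectangularize_example():
    ragged = [[1, 2, 3], [1], [1, 2]]
    # each row is padded with its last value up to the longest row length
    padded = rectangularize(ragged)
    assert padded.tolist() == [[1, 2, 3], [1, 1, 1], [1, 2, 2]]
    # with return_masked=True the padded positions are masked instead
    masked = rectangularize(ragged, return_masked=True)
    assert masked.mask.tolist() == [[False, False, False],
                                    [False, True, True],
                                    [False, False, True]]
    return padded, masked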
def meandatetime(datetimes):
"""
calculate the average date from a given list of datetime-objects
(can be applied to a pandas-Series via Series.apply(meandatetime))
Parameters
----------
datetimes: list
a list of datetime-objects
Returns
-------
meandate: Timestamp
"""
if len(datetimes) == 1:
return datetimes[0]
x = datetimes
deltas = (x[0] - x[1:]) / len(x)
meandelta = sum(deltas)
meandate = x[0] - meandelta
return meandate
def dBsig0convert(val, inc, dB, sig0, fitdB, fitsig0):
"""
A convenience-function to convert an array of measurements (and its
associated incidence-angles).
- between linear- and dB units `( val_dB = 10 * log10(val_linear) )`
- between sigma0 and intensity `( sig0 = 4 * pi * cos(inc) * I )`
Parameters
----------
val: array-like
the backscatter-values that should be converted
inc: array-like
the associated incidence-angle values (in radians)
dB: bool
indicator if the output-dataset should be in dB or not
sig0: bool
indicator if the output-values should be intensity or sigma_0
fitdB: bool
indicator if the input-values have been provided in linear-units
or in dB
fitsig0: bool
indicator if the input-values are given as sigma0 or intensity
Returns
-------
val : array-like
the converted values
"""
if sig0 is not fitsig0:
# if results are provided in dB convert them to linear units before
# applying the sig0-intensity conversion
if fitdB is True:
val = 10 ** (val / 10.0)
# convert sig0 to intensity
if sig0 is False and fitsig0 is True:
val = val / (4.0 * np.pi * np.cos(inc))
# convert intensity to sig0
if sig0 is True and fitsig0 is False:
val = 4.0 * np.pi * np.cos(inc) * val
# convert back to dB if required
if dB is True:
val = 10.0 * np.log10(val)
elif dB is not fitdB:
# if dB output is required, convert to dB
if dB is True and fitdB is False:
val = 10.0 * np.log10(val)
# if linear output is required, convert to linear units
if dB is False and fitdB is True:
val = 10 ** (val / 10.0)
return val
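# A small numeric sketch of dBsig0convert(): converting a linear intensity
# of 1.0 at normal incidence (inc = 0) into sigma0 in dB should yield
# 10 * log10(4 * pi). The values and helper name are purely illustrative.
def _dBsig0convert_example():
    out = dBsig0convert(val=np.array([1.0]), inc=np.array([0.0]),
                        dB=True, sig0=True, fitdB=False, fitsig0=False)
    assert np.isclose(out[0], 10.0 * np.log10(4.0 * np.pi))
    return out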
def pairwise(iterable, pairs=2):
"""
return an iterator of n consecutive values from an iterable, e.g.:
pairs = 2
s -> (s0,s1), (s1,s2), (s2, s3), ...
pairs = 3
s -> (s0, s1, s2), (s1, s2, s3), (s2, s3, s4), ...
adapted from https://docs.python.org/3.7/library/itertools.html
"""
x = tee(iterable, pairs)
for n, n_iter in enumerate(x[1:]):
[next(n_iter, None) for i in range(n + 1)]
return zip(*x)
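# A quick sketch of pairwise(): overlapping tuples of consecutive values
# (helper name illustrative only).
def _pairwise_example():
    assert list(pairwise([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]
    assert list(pairwise([1, 2, 3, 4], pairs=3)) == [(1, 2, 3), (2, 3, 4)]
    return True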
def split_into(iterable, sizes):
"""
a generator that splits the iterable into iterables with the given sizes
see more_itertools split_into for details:
https://more-itertools.readthedocs.io/en/stable/api.html#more_itertools.split_into
"""
it = iter(iterable)
for size in sizes:
if size is None:
yield list(it)
return
else:
yield list(islice(it, size))
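# A quick sketch of split_into(): sizes are consumed in order and a trailing
# None collects whatever remains of the iterable (helper name illustrative).
def _split_into_example():
    assert list(split_into(range(6), [2, 3])) == [[0, 1], [2, 3, 4]]
    assert list(split_into(range(6), [2, None])) == [[0, 1], [2, 3, 4, 5]]
    return True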
def scale(x, out_range=(0, 1), domainfuncs=None):
"""
scale an array between out_range = (min, max) where the range of the
array is evaluated via the domainfuncs (min-function, max-function)
the default domainfuncs are:
>>> np.nanmin()
>>> np.nanmax()
>>> from functools import partial
>>> partial(np.percentile, q=95)
Notice: using functions like np.percentile might result in values that
exceed the specified `out_range`! (e.g. if the out-range is (0,1),
a min-function of np.percentile(q=5) might result in negative values!)
"""
if domainfuncs is None:
domain = np.nanmin(x), np.nanmax(x)
else:
domain = domainfuncs[0](x), domainfuncs[1](x)
y = (x - (domain[1] + domain[0]) / 2) / (domain[1] - domain[0])
return y * (out_range[1] - out_range[0]) + (out_range[1] + out_range[0]) / 2
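# A minimal sketch of scale(): a linear array is mapped onto the requested
# out_range; fixed-percentile domain functions (via functools.partial) give
# the same result for this particular input. Names are illustrative only.
def _scale_example():
    from functools import partial
    x = np.array([0.0, 5.0, 10.0])
    assert np.allclose(scale(x, out_range=(0, 1)), [0.0, 0.5, 1.0])
    y = scale(x, out_range=(0, 1),
              domainfuncs=(partial(np.percentile, q=0),
                           partial(np.percentile, q=100)))
    assert np.allclose(y, [0.0, 0.5, 1.0])
    return y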
def update_progress(
progress, max_prog=100, title="", finalmsg=" DONE\r\n", progress2=None
):
"""
build a progress-bar message string (printing is left to the caller)
adapted from: https://blender.stackexchange.com/a/30739
"""
length = 25 # the length of the progress bar
block = int(round(length * progress / max_prog))
if progress2 is not None:
msg = (
f'\r{title} {"#"*block + "-"*(length-block)}'
+ f" {progress} [{progress2}] / {max_prog}"
)
else:
msg = (
f'\r{title} {"#"*block + "-"*(length-block)}' + f" {progress} / {max_prog}"
)
if progress >= max_prog:
msg = f"\r{finalmsg:<79}\n"
return msg
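# A short sketch of update_progress(): it only builds and returns the
# message string; the caller decides how to print it (illustrative values).
def _update_progress_example():
    msg = update_progress(10, max_prog=100, title="processing")
    assert msg.startswith("\rprocessing ")
    assert "10 / 100" in msg
    return msg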
def dt_to_hms(td):
"""
convert a datetime.timedelta object into days, hours,
minutes and seconds
"""
days, hours, minutes = td.days, td.seconds // 3600, td.seconds % 3600 // 60
seconds = td.seconds - hours * 3600 - minutes * 60
return days, hours, minutes, seconds
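# A quick sketch of dt_to_hms() with a standard-library timedelta
# (helper name illustrative only).
def _dt_to_hms_example():
    from datetime import timedelta
    td = timedelta(days=1, hours=2, minutes=3, seconds=4)
    assert dt_to_hms(td) == (1, 2, 3, 4)
    return dt_to_hms(td)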
def groupby_unsorted(a, key=lambda x: x, sort=False, get=lambda x: x):
"""
group the elements of the input-array and return it as a dict with a list
of the found values. optionally use a key- and a get- function.
if sort is True, an OrderedDict with sorted keys will be returned
roughly equivalent to:
>>> # if only the input-array a is provided
... {unique value of a: [found copies of the unique value]}
... # if a and a key-function is provided
... {key(a) : [...values with the same key(a)...]}
... # if both a key- and a get-function is provided
... {key(a) : [get(x) for x in ...values with the same key(a)...]}
"""
# always use an OrderedDict to ensure sort-order for python < 3.6
d = OrderedDict()
for item in a:
d.setdefault(key(item), []).append(get(item))
if sort is True:
return OrderedDict(sorted(d.items()))
else:
return d
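# A small sketch of groupby_unsorted(): group words by their first letter
# and keep only their lengths (data and helper name are illustrative).
def _groupby_unsorted_example():
    words = ["apple", "avocado", "banana"]
    grouped = groupby_unsorted(words, key=lambda w: w[0], get=len, sort=True)
    assert dict(grouped) == {"a": [5, 7], "b": [6]}
    return grouped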
def interpolate_to_index(data, index, data_index=None, **interp1d_kwargs):
"""
A wrapper around scipy.interp1d to interpolate a dataset to a given index
Parameters
----------
data : list, array-like, pandas.Series or pandas.DataFrame
The input-data as list, array, pandas.Series or pandas.DataFrame
If the data is provided as pandas Series or DataFrame, the index
must support a method .to_julian_date() to convert the timestamps
into numerical values.
index : array-like
the index to which the dataset should be interpolated.
It must support a method .to_julian_date()
data_index : array-like, optional
    the index associated with ``data``; required if the data is provided
    as a list or array. It must support a method .to_julian_date().
    The default is None.
**interp1d_kwargs :
additional keyword-arguments passed to scipy.interpolate.interp1d
the default is (fill_value=None, bounds_error=False)
Returns
-------
pandas.Series or pandas.DataFrame
    the input-data interpolated to the given index
"""
from pandas import Series, DataFrame
from scipy.interpolate import interp1d
kwargs = dict(fill_value=None, bounds_error=False)
kwargs.update(interp1d_kwargs)
if isinstance(data, Series):
# perform a linear interpolation to the auxiliary data timestamps
f = interp1d(data.index.to_julian_date(), data.values, **kwargs)
x = f(index.to_julian_date())
return Series(x, index)
elif isinstance(data, DataFrame):
f = interp1d(data.index.to_julian_date(), data.values, axis=0, **kwargs)
x = f(index.to_julian_date())
return DataFrame(x, index, columns=data.columns)
elif isinstance(data, (list, np.ndarray)):
assert data_index is not None, (
    'you must provide "data_index" if data is provided as a list or array'
)
# plain lists and numpy arrays have no ".values" attribute, so convert
# explicitly before interpolating
f = interp1d(data_index.to_julian_date(), np.asarray(data), **kwargs)
x = f(index.to_julian_date())
return Series(x, index)
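# A minimal sketch of interpolate_to_index() with a pandas Series on a
# DatetimeIndex (assumes pandas and scipy are installed); fill_value is
# overridden here to sidestep out-of-bounds handling. Names are illustrative.
def _interpolate_to_index_example():
    import pandas as pd
    data = pd.Series([0.0, 10.0],
                     index=pd.to_datetime(["2020-01-01", "2020-01-03"]))
    target = pd.to_datetime(["2020-01-02"])
    out = interpolate_to_index(data, target, fill_value="extrapolate")
    assert np.isclose(out.iloc[0], 5.0)
    return out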
|
|
# encoding: utf-8
import os
import re
import urllib
import urllib2
import uuid
import logging
import hashlib
import tempfile
import locale
import ConfigParser
import mimetypes
import contextlib
from datetime import datetime
from urlparse import urlparse, urljoin
import json
import ccnet
from constance import config
import seaserv
from seaserv import seafile_api
from django.core.urlresolvers import reverse
from django.core.mail import EmailMessage
from django.shortcuts import render_to_response
from django.template import RequestContext, Context, loader
from django.utils.translation import ugettext as _
from django.http import HttpResponseRedirect, HttpResponse, HttpResponseNotModified
from django.utils.http import urlquote
from django.utils.html import escape
from django.views.static import serve as django_static_serve
from seahub.api2.models import Token, TokenV2
import seahub.settings
from seahub.settings import SITE_NAME, MEDIA_URL, LOGO_PATH
try:
from seahub.settings import EVENTS_CONFIG_FILE
except ImportError:
EVENTS_CONFIG_FILE = None
try:
from seahub.settings import EMAIL_HOST
IS_EMAIL_CONFIGURED = True
except ImportError:
IS_EMAIL_CONFIGURED = False
try:
from seahub.settings import CLOUD_MODE
except ImportError:
CLOUD_MODE = False
try:
from seahub.settings import ENABLE_INNER_FILESERVER
except ImportError:
ENABLE_INNER_FILESERVER = True
try:
from seahub.settings import CHECK_SHARE_LINK_TRAFFIC
except ImportError:
CHECK_SHARE_LINK_TRAFFIC = False
def is_cluster_mode():
cfg = ConfigParser.ConfigParser()
if 'SEAFILE_CENTRAL_CONF_DIR' in os.environ:
confdir = os.environ['SEAFILE_CENTRAL_CONF_DIR']
else:
confdir = os.environ['SEAFILE_CONF_DIR']
conf = os.path.join(confdir, 'seafile.conf')
cfg.read(conf)
if cfg.has_option('cluster', 'enabled'):
enabled = cfg.getboolean('cluster', 'enabled')
else:
enabled = False
if enabled:
logging.debug('cluster mode is enabled')
else:
logging.debug('cluster mode is disabled')
return enabled
CLUSTER_MODE = is_cluster_mode()
try:
from seahub.settings import OFFICE_CONVERTOR_ROOT
except ImportError:
OFFICE_CONVERTOR_ROOT = ''
try:
from seahub.settings import OFFICE_CONVERTOR_NODE
except ImportError:
OFFICE_CONVERTOR_NODE = False
from seahub.utils.file_types import *
from seahub.utils.htmldiff import HtmlDiff # used in views/files.py
EMPTY_SHA1 = '0000000000000000000000000000000000000000'
MAX_INT = 2147483647
PREVIEW_FILEEXT = {
TEXT: ('ac', 'am', 'bat', 'c', 'cc', 'cmake', 'cpp', 'cs', 'css', 'diff', 'el', 'h', 'html', 'htm', 'java', 'js', 'json', 'less', 'make', 'org', 'php', 'pl', 'properties', 'py', 'rb', 'scala', 'script', 'sh', 'sql', 'txt', 'text', 'tex', 'vi', 'vim', 'xhtml', 'xml', 'log', 'csv', 'groovy', 'rst', 'patch', 'go'),
IMAGE: ('gif', 'jpeg', 'jpg', 'png', 'ico', 'bmp'),
DOCUMENT: ('doc', 'docx', 'ppt', 'pptx'),
SPREADSHEET: ('xls', 'xlsx', 'ods', 'fods'),
# SVG: ('svg',),
PDF: ('pdf',),
OPENDOCUMENT: ('odt', 'fodt', 'odp', 'fodp'),
MARKDOWN: ('markdown', 'md'),
VIDEO: ('mp4', 'ogv', 'webm', 'flv', 'wmv'),
AUDIO: ('mp3', 'oga', 'ogg'),
'3D': ('stl', 'obj'),
}
def gen_fileext_type_map():
"""
Generate previewed file extension and file type relation map.
"""
d = {}
for filetype in PREVIEW_FILEEXT.keys():
for fileext in PREVIEW_FILEEXT.get(filetype):
d[fileext] = filetype
return d
FILEEXT_TYPE_MAP = gen_fileext_type_map()
def render_permission_error(request, msg=None, extra_ctx=None):
"""
Return permission error page.
"""
ctx = {}
ctx['error_msg'] = msg or _('permission error')
if extra_ctx:
for k in extra_ctx:
ctx[k] = extra_ctx[k]
return render_to_response('permission_error.html', ctx,
context_instance=RequestContext(request))
def render_error(request, msg=None, extra_ctx=None):
"""
Return normal error page.
"""
ctx = {}
ctx['error_msg'] = msg or _('Internal error')
if extra_ctx:
for k in extra_ctx:
ctx[k] = extra_ctx[k]
return render_to_response('error.html', ctx,
context_instance=RequestContext(request))
def list_to_string(l):
"""
Return string of a list.
"""
return ','.join(l)
def get_fileserver_root():
""" Construct seafile fileserver address and port.
Returns:
Constructed fileserver root.
"""
return config.FILE_SERVER_ROOT
def get_inner_fileserver_root():
"""Construct inner seafile fileserver address and port.
The inner fileserver root allows Seahub to access the fileserver through a
local address, thus avoiding the overhead of DNS queries as well as other
related issues (for example, when the server cannot ping itself).
Returns:
http://127.0.0.1:<port>
"""
return seahub.settings.INNER_FILE_SERVER_ROOT
def gen_token(max_length=5):
"""
Generate a random token.
"""
return uuid.uuid4().hex[:max_length]
def normalize_cache_key(value, prefix=None, token=None, max_length=200):
"""Returns a cache key consisten of ``value`` and ``prefix`` and ``token``. Cache key
must not include control characters or whitespace.
"""
key = value if prefix is None else prefix + value
key = key if token is None else key + '_' + token
return urlquote(key)[:max_length]
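# A small sketch of normalize_cache_key(): whitespace is percent-encoded so
# the resulting key is safe for cache backends (values and helper name are
# illustrative only).
def _normalize_cache_key_example():
    key = normalize_cache_key('foo bar', prefix='quota_')
    assert key == 'quota_foo%20bar'
    return key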
def get_repo_last_modify(repo):
""" Get last modification time for a repo.
If the head commit id of a repo is provided, we use that commit as the last
commit; otherwise we fall back to fetching the last commit of the repo,
which is time consuming.
"""
if repo.head_cmmt_id is not None:
last_cmmt = seaserv.get_commit(repo.id, repo.version, repo.head_cmmt_id)
else:
logger = logging.getLogger(__name__)
logger.info('[repo %s] head_cmmt_id is missing.' % repo.id)
last_cmmt = seafile_api.get_commit_list(repo.id, 0, 1)[0]
return last_cmmt.ctime if last_cmmt else 0
def calculate_repos_last_modify(repo_list):
""" Get last modification time for repos.
"""
for repo in repo_list:
repo.latest_modify = get_repo_last_modify(repo)
def normalize_dir_path(path):
"""Add '/' at the end of directory path if necessary.
"""
if path[-1] != '/':
path = path + '/'
return path
def normalize_file_path(path):
"""Remove '/' at the end of file path if necessary.
"""
return path.rstrip('/')
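# A quick sketch of the two path normalizers above (paths are illustrative).
def _normalize_path_example():
    assert normalize_dir_path('/foo/bar') == '/foo/bar/'
    assert normalize_dir_path('/foo/bar/') == '/foo/bar/'
    assert normalize_file_path('/foo/bar/') == '/foo/bar'
    return True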
# modified from django1.5:/core/validators, and remove the support for single
# quote in email address
email_re = re.compile(
r"(^[-!#$%&*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
# quoted-string, see also http://tools.ietf.org/html/rfc2822#section-3.2.5
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"'
r')@((?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)$)' # domain
r'|\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$', re.IGNORECASE) # literal form, ipv4 address (SMTP 4.1.3)
def is_valid_email(email):
"""A heavy email format validation.
"""
return True if email_re.match(email) is not None else False
def is_valid_username(username):
"""Check whether username is valid, currently only email can be a username.
"""
return is_valid_email(username)
def is_ldap_user(user):
"""Check whether user is a LDAP user.
"""
return user.source == 'LDAP' or user.source == 'LDAPImport'
def check_filename_with_rename(repo_id, parent_dir, filename):
cmmts = seafile_api.get_commit_list(repo_id, 0, 1)
latest_commit = cmmts[0] if cmmts else None
if not latest_commit:
return ''
# TODO: what if parent_dir does not exist?
dirents = seafile_api.list_dir_by_commit_and_path(repo_id, latest_commit.id,
parent_dir.encode('utf-8'))
def no_duplicate(name):
for dirent in dirents:
if dirent.obj_name == name:
return False
return True
def make_new_name(filename, i):
base, ext = os.path.splitext(filename)
if ext:
new_base = "%s (%d)" % (base, i)
return new_base + ext
else:
return "%s (%d)" % (filename, i)
if no_duplicate(filename):
return filename
else:
i = 1
while True:
new_name = make_new_name (filename, i)
if no_duplicate(new_name):
return new_name
else:
i += 1
def get_user_repos(username, org_id=None):
"""
Get all repos that user can access, including owns, shared, public, and
repo in groups.
If ``org_id`` is not None, get org repos that user can access.
"""
if org_id is None:
owned_repos = seaserv.list_personal_repos_by_owner(username)
shared_repos = seafile_api.get_share_in_repo_list(username, -1, -1)
groups_repos = []
for group in seaserv.get_personal_groups_by_user(username):
# TODO: use seafile_api.get_group_repos
groups_repos += seaserv.get_group_repos(group.id, username)
if CLOUD_MODE:
public_repos = []
else:
public_repos = seaserv.list_inner_pub_repos(username)
for r in shared_repos + public_repos:
# column names in the shared_repo struct are not the same as owned or
# group repos.
r.id = r.repo_id
r.name = r.repo_name
r.desc = r.repo_desc
r.last_modify = r.last_modified
else:
owned_repos = seafile_api.get_org_owned_repo_list(org_id, username)
shared_repos = seafile_api.get_org_share_in_repo_list(org_id, username,
-1, -1)
groups_repos = []
for group in seaserv.get_org_groups_by_user(org_id, username):
groups_repos += seafile_api.get_org_group_repos(org_id, group.id)
public_repos = seaserv.seafserv_threaded_rpc.list_org_inner_pub_repos(org_id)
for r in shared_repos + groups_repos + public_repos:
# column names in the shared_repo struct are not the same as owned
# repos.
r.id = r.repo_id
r.name = r.repo_name
r.desc = r.repo_desc
r.last_modify = r.last_modified
return (owned_repos, shared_repos, groups_repos, public_repos)
def get_file_type_and_ext(filename):
"""
Return file type and extension if the file can be previewed online,
otherwise, return unknown type.
"""
fileExt = os.path.splitext(filename)[1][1:].lower()
filetype = FILEEXT_TYPE_MAP.get(fileExt)
if filetype:
return (filetype, fileExt)
else:
return ('Unknown', fileExt)
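# A short sketch of get_file_type_and_ext(): known extensions are looked up
# in FILEEXT_TYPE_MAP, unknown ones fall back to 'Unknown' (filenames and
# helper name are illustrative only).
def _get_file_type_and_ext_example():
    assert get_file_type_and_ext('notes.md') == (MARKDOWN, 'md')
    assert get_file_type_and_ext('archive.xyz') == ('Unknown', 'xyz')
    return True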
def get_file_revision_id_size(repo_id, commit_id, path):
"""Given a commit and a file path in that commit, return the seafile id
and size of the file blob
"""
repo = seafile_api.get_repo(repo_id)
dirname = os.path.dirname(path)
filename = os.path.basename(path)
seafdir = seafile_api.list_dir_by_commit_and_path(repo_id, commit_id, dirname)
for dirent in seafdir:
if dirent.obj_name == filename:
file_size = seafile_api.get_file_size(repo.store_id, repo.version,
dirent.obj_id)
return dirent.obj_id, file_size
return None, None
def new_merge_with_no_conflict(commit):
"""Check whether a commit is a new merge, and no conflict.
Arguments:
- `commit`:
"""
if commit.second_parent_id is not None and commit.new_merge is True and \
commit.conflict is False:
return True
else:
return False
def get_commit_before_new_merge(commit):
"""Traverse parents of ``commit``, and get a commit which is not a new merge.
Pre-condition: ``commit`` must be a new merge and not conflict.
Arguments:
- `commit`:
"""
assert new_merge_with_no_conflict(commit) is True
while(new_merge_with_no_conflict(commit)):
p1 = seaserv.get_commit(commit.repo_id, commit.version, commit.parent_id)
p2 = seaserv.get_commit(commit.repo_id, commit.version, commit.second_parent_id)
commit = p1 if p1.ctime > p2.ctime else p2
assert new_merge_with_no_conflict(commit) is False
return commit
def gen_inner_file_get_url(token, filename):
"""Generate inner fileserver file url.
If ``ENABLE_INNER_FILESERVER`` is set to False (it defaults to True), the
outer fileserver file url is returned instead.
Arguments:
- `token`:
- `filename`:
Returns:
e.g., http://127.0.0.1:<port>/files/<token>/<filename>
"""
if ENABLE_INNER_FILESERVER:
return '%s/files/%s/%s' % (get_inner_fileserver_root(), token,
urlquote(filename))
else:
return gen_file_get_url(token, filename)
def get_max_upload_file_size():
"""Get max upload file size from config file, defaults to no limit.
Returns ``None`` if this value is not set.
"""
return seaserv.MAX_UPLOAD_FILE_SIZE
def gen_block_get_url(token, blkid):
"""
Generate fileserver block url.
Format: http://<domain:port>/blks/<token>/<blkid>
"""
if blkid:
return '%s/blks/%s/%s' % (get_fileserver_root(), token, blkid)
else:
return '%s/blks/%s/' % (get_fileserver_root(), token)
def gen_file_get_url(token, filename):
"""
Generate fileserver file url.
Format: http://<domain:port>/files/<token>/<filename>
"""
return '%s/files/%s/%s' % (get_fileserver_root(), token, urlquote(filename))
def gen_file_upload_url(token, op):
return '%s/%s/%s' % (get_fileserver_root(), op, token)
def get_ccnet_server_addr_port():
"""get ccnet server host and port"""
return seaserv.CCNET_SERVER_ADDR, seaserv.CCNET_SERVER_PORT
def string2list(string):
"""
Split a string concatenated with different separators into a list, and
remove duplicated strings.
"""
tmp_str = string.replace(';', ',').replace('\n', ',').replace('\r', ',')
# Remove empty and duplicate strings
s = set()
for e in tmp_str.split(','):
e = e.strip(' ')
if not e:
continue
s.add(e)
return [ x for x in s ]
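# A quick sketch of string2list(): mixed separators are normalized and
# duplicates removed; the result order is unspecified, hence sorted() here.
def _string2list_example():
    assert sorted(string2list("a;b,b\nc")) == ['a', 'b', 'c']
    return True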
# def get_cur_ctx(request):
# ctx_dict = request.session.get('current_context', {
# 'base_template': 'myhome_base.html',
# 'org_dict': None})
# return ctx_dict
# def set_cur_ctx(request, ctx_dict):
# request.session['current_context'] = ctx_dict
# request.user.org = ctx_dict.get('org_dict', None)
def is_org_context(request):
"""An organization context is a virtual private Seafile instance on cloud
service.
Arguments:
- `request`:
"""
return request.cloud_mode and request.user.org is not None
# def check_and_get_org_by_repo(repo_id, user):
# """
# Check whether repo is org repo, get org info if it is, and set
# base template.
# """
# org_id = get_org_id_by_repo_id(repo_id)
# if org_id > 0:
# # this repo is org repo, get org info
# org = get_org_by_id(org_id)
# org._dict['is_staff'] = is_org_staff(org_id, user)
# org._dict['email'] = user
# base_template = 'org_base.html'
# else:
# org = None
# base_template = 'myhome_base.html'
# return org, base_template
def check_and_get_org_by_group(group_id, user):
"""
Check whether the group is an org group, get org info if it is, and set
the base template.
"""
org_id = seaserv.get_org_id_by_group(group_id)
if org_id > 0:
# this repo is org repo, get org info
org = seaserv.get_org_by_id(org_id)
org._dict['is_staff'] = seaserv.is_org_staff(org_id, user)
org._dict['email'] = user
base_template = 'org_base.html'
else:
org = None
base_template = 'myhome_base.html'
return org, base_template
# events related
if EVENTS_CONFIG_FILE:
parsed_events_conf = ConfigParser.ConfigParser()
parsed_events_conf.read(EVENTS_CONFIG_FILE)
import seafevents
EVENTS_ENABLED = True
SeafEventsSession = seafevents.init_db_session_class(EVENTS_CONFIG_FILE)
@contextlib.contextmanager
def _get_seafevents_session():
try:
session = SeafEventsSession()
yield session
finally:
session.close()
def _same_events(e1, e2):
"""Two events are equal should follow two rules:
1. event1.repo_id = event2.repo_id
2. event1.commit.creator = event2.commit.creator
3. event1.commit.desc = event2.commit.desc
"""
if hasattr(e1, 'commit') and hasattr(e2, 'commit'):
if e1.repo_id == e2.repo_id and \
e1.commit.desc == e2.commit.desc and \
e1.commit.creator_name == e2.commit.creator_name:
return True
return False
def _get_events(username, start, count, org_id=None):
ev_session = SeafEventsSession()
valid_events = []
total_used = 0
try:
next_start = start
while True:
events = _get_events_inner(ev_session, username, next_start,
count, org_id)
if not events:
break
for e1 in events:
duplicate = False
for e2 in valid_events:
if _same_events(e1, e2): duplicate = True; break
new_merge = False
if hasattr(e1, 'commit') and e1.commit and \
new_merge_with_no_conflict(e1.commit):
new_merge = True
if not duplicate and not new_merge:
valid_events.append(e1)
total_used = total_used + 1
if len(valid_events) == count:
break
if len(valid_events) == count:
break
next_start = next_start + len(events)
finally:
ev_session.close()
for e in valid_events: # parse commit description
if hasattr(e, 'commit'):
e.commit.converted_cmmt_desc = convert_cmmt_desc_link(e.commit)
e.commit.more_files = more_files_in_commit(e.commit)
return valid_events, start + total_used
def _get_events_inner(ev_session, username, start, limit, org_id=None):
'''Read events from seafevents database, and remove events that are
no longer valid
Return 'limit' events or less than 'limit' events if no more events remain
'''
valid_events = []
next_start = start
while True:
if org_id > 0:
events = seafevents.get_org_user_events(ev_session, org_id,
username, next_start,
limit)
else:
events = seafevents.get_user_events(ev_session, username,
next_start, limit)
if not events:
break
for ev in events:
if ev.etype == 'repo-update':
repo = seafile_api.get_repo(ev.repo_id)
if not repo:
# delete the update event for repo which has been deleted
seafevents.delete_event(ev_session, ev.uuid)
continue
if repo.encrypted:
repo.password_set = seafile_api.is_password_set(
repo.id, username)
ev.repo = repo
ev.commit = seaserv.get_commit(repo.id, repo.version, ev.commit_id)
valid_events.append(ev)
if len(valid_events) == limit:
break
if len(valid_events) == limit:
break
next_start = next_start + len(valid_events)
return valid_events
def get_user_events(username, start, count):
"""Return user events list and a new start.
For example:
``get_user_events('[email protected]', 0, 10)`` returns the first 10
events.
``get_user_events('[email protected]', 5, 10)`` returns the 6th through
15th events.
"""
return _get_events(username, start, count)
def get_org_user_events(org_id, username, start, count):
return _get_events(username, start, count, org_id=org_id)
def get_log_events_by_time(log_type, tstart, tend):
"""Return log events list by start/end timestamp. (If no logs, return 'None')
"""
with _get_seafevents_session() as session:
events = seafevents.get_event_log_by_time(session, log_type, tstart, tend)
return events if events else None
def generate_file_audit_event_type(e):
return {
'file-download-web': ('web', ''),
'file-download-share-link': ('share-link',''),
'file-download-api': ('API', e.device),
'repo-download-sync': ('download-sync', e.device),
'repo-upload-sync': ('upload-sync', e.device),
}[e.etype]
def get_file_audit_events_by_path(email, org_id, repo_id, file_path, start, limit):
"""Return file audit events list by file path. (If no file audit, return 'None')
For example:
``get_file_audit_events_by_path(email, org_id, repo_id, file_path, 0, 10)`` returns the first 10
events.
``get_file_audit_events_by_path(email, org_id, repo_id, file_path, 5, 10)`` returns the 6th through
15th events.
"""
with _get_seafevents_session() as session:
events = seafevents.get_file_audit_events_by_path(session,
email, org_id, repo_id, file_path, start, limit)
return events if events else None
def get_file_audit_events(email, org_id, repo_id, start, limit):
"""Return file audit events list. (If no file audit, return 'None')
For example:
``get_file_audit_events(email, org_id, repo_id, 0, 10)`` returns the first 10
events.
``get_file_audit_events(email, org_id, repo_id, 5, 10)`` returns the 6th through
15th events.
"""
with _get_seafevents_session() as session:
events = seafevents.get_file_audit_events(session, email, org_id, repo_id, start, limit)
return events if events else None
def get_file_update_events(email, org_id, repo_id, start, limit):
"""Return file update events list. (If no file update, return 'None')
For example:
``get_file_update_events(email, org_id, repo_id, 0, 10)`` returns the first 10
events.
``get_file_update_events(email, org_id, repo_id, 5, 10)`` returns the 6th through
15th events.
"""
with _get_seafevents_session() as session:
events = seafevents.get_file_update_events(session, email, org_id, repo_id, start, limit)
return events if events else None
def get_perm_audit_events(email, org_id, repo_id, start, limit):
"""Return repo perm events list. (If no repo perm, return 'None')
For example:
``get_repo_perm_events(email, org_id, repo_id, 0, 10)`` returns the first 10
events.
``get_repo_perm_events(email, org_id, repo_id, 5, 10)`` returns the 6th through
15th events.
"""
with _get_seafevents_session() as session:
events = seafevents.get_perm_audit_events(session, email, org_id, repo_id, start, limit)
return events if events else None
def get_virus_record(repo_id=None, start=-1, limit=-1):
with _get_seafevents_session() as session:
r = seafevents.get_virus_record(session, repo_id, start, limit)
return r if r else []
def handle_virus_record(vid):
with _get_seafevents_session() as session:
return True if seafevents.handle_virus_record(session, vid) == 0 else False
def get_virus_record_by_id(vid):
with _get_seafevents_session() as session:
return seafevents.get_virus_record_by_id(session, vid)
else:
EVENTS_ENABLED = False
def get_user_events():
pass
def get_log_events_by_time():
pass
def get_org_user_events():
pass
def generate_file_audit_event_type():
pass
def get_file_audit_events_by_path():
pass
def get_file_audit_events():
pass
def get_file_update_events():
pass
def get_perm_audit_events():
pass
def get_virus_record():
pass
def handle_virus_record():
pass
def get_virus_record_by_id(vid):
pass
def calc_file_path_hash(path, bits=12):
if isinstance(path, unicode):
path = path.encode('UTF-8')
path_hash = hashlib.md5(urllib2.quote(path)).hexdigest()[:bits]
return path_hash
def get_service_url():
"""Get service url from seaserv.
"""
return config.SERVICE_URL
def get_server_id():
"""Get server id from seaserv.
"""
return getattr(seaserv, 'SERVER_ID', '-')
def get_site_scheme_and_netloc():
"""Return a string contains site scheme and network location part from
service url.
For example:
>>> get_site_scheme_and_netloc("https://example.com:8000/seafile/")
https://example.com:8000
"""
parse_result = urlparse(get_service_url())
return "%s://%s" % (parse_result.scheme, parse_result.netloc)
def send_html_email(subject, con_template, con_context, from_email, to_email,
reply_to=None):
"""Send HTML email
"""
base_context = {
'url_base': get_site_scheme_and_netloc(),
'site_name': SITE_NAME,
'media_url': MEDIA_URL,
'logo_path': LOGO_PATH,
}
t = loader.get_template(con_template)
con_context.update(base_context)
headers = {}
if IS_EMAIL_CONFIGURED:
if reply_to is not None:
headers['Reply-to'] = reply_to
msg = EmailMessage(subject, t.render(Context(con_context)), from_email,
to_email, headers=headers)
msg.content_subtype = "html"
msg.send()
def gen_dir_share_link(token):
"""Generate directory share link.
"""
return gen_shared_link(token, 'd')
def gen_file_share_link(token):
"""Generate file share link.
"""
return gen_shared_link(token, 'f')
def gen_shared_link(token, s_type):
service_url = get_service_url()
assert service_url is not None
service_url = service_url.rstrip('/')
if s_type == 'f':
return '%s/f/%s/' % (service_url, token)
else:
return '%s/d/%s/' % (service_url, token)
def gen_shared_upload_link(token):
service_url = get_service_url()
assert service_url is not None
service_url = service_url.rstrip('/')
return '%s/u/d/%s/' % (service_url, token)
def show_delete_days(request):
if request.method == 'GET':
days_str = request.GET.get('days', '')
elif request.method == 'POST':
days_str = request.POST.get('days', '')
else:
days_str = ''
try:
days = int(days_str)
except ValueError:
days = 7
return days
def is_textual_file(file_type):
"""
Check whether a file type is a textual file.
"""
if file_type == TEXT or file_type == MARKDOWN:
return True
else:
return False
def redirect_to_login(request):
from django.conf import settings
login_url = settings.LOGIN_URL
path = urlquote(request.get_full_path())
tup = login_url, redirect_field_name, path
return HttpResponseRedirect('%s?%s=%s' % tup)
def mkstemp():
'''Returns (fd, filepath), the same as tempfile.mkstemp, except the
filepath is encoded in UTF-8
'''
fd, path = tempfile.mkstemp()
system_encoding = locale.getdefaultlocale()[1]
if system_encoding is not None:
path_utf8 = path.decode(system_encoding).encode('UTF-8')
return fd, path_utf8
else:
return fd, path
# File or directory operations
FILE_OP = ('Added or modified', 'Added', 'Modified', 'Renamed', 'Moved',
'Added directory', 'Renamed directory', 'Moved directory')
OPS = '|'.join(FILE_OP)
CMMT_DESC_PATT = re.compile(r'(%s) "(.*)"\s?(and \d+ more (?:files|directories))?' % OPS)
API_OPS = '|'.join((OPS, 'Deleted', 'Removed'))
API_CMMT_DESC_PATT = r'(%s) "(.*)"\s?(and \d+ more (?:files|directories))?' % API_OPS
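# A small sketch of how CMMT_DESC_PATT decomposes a commit description
# (the description text and helper name are illustrative only).
def _cmmt_desc_patt_example():
    match = CMMT_DESC_PATT.search('Added "readme.txt" and 2 more files')
    assert match.group(1) == 'Added'
    assert match.group(2) == 'readme.txt'
    assert match.group(3) == 'and 2 more files'
    return match.groups()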
def convert_cmmt_desc_link(commit):
"""Wrap file/folder with ``<a></a>`` in commit description.
"""
repo_id = commit.repo_id
cmmt_id = commit.id
conv_link_url = reverse('convert_cmmt_desc_link')
def link_repl(matchobj):
op = matchobj.group(1)
file_or_dir = matchobj.group(2)
remaining = matchobj.group(3)
tmp_str = '%s "<a href="%s?repo_id=%s&cmmt_id=%s&nm=%s" class="normal">%s</a>"'
if remaining:
return (tmp_str + ' %s') % (op, conv_link_url, repo_id, cmmt_id, urlquote(file_or_dir),
escape(file_or_dir), remaining)
else:
return tmp_str % (op, conv_link_url, repo_id, cmmt_id, urlquote(file_or_dir), escape(file_or_dir))
return re.sub(CMMT_DESC_PATT, link_repl, commit.desc)
def api_tsstr_sec(value):
"""Turn a timestamp to string"""
try:
return datetime.fromtimestamp(value).strftime("%Y-%m-%d %H:%M:%S")
except:
return datetime.fromtimestamp(value/1000000).strftime("%Y-%m-%d %H:%M:%S")
def api_convert_desc_link(e):
"""Wrap file/folder with ``<a></a>`` in commit description.
"""
commit = e.commit
repo_id = commit.repo_id
cmmt_id = commit.id
def link_repl(matchobj):
op = matchobj.group(1)
file_or_dir = matchobj.group(2)
remaining = matchobj.group(3)
tmp_str = '%s "<span class="file-name">%s</span>"'
if remaining:
url = reverse('api_repo_history_changes', args=[repo_id])
e.link = "%s?commit_id=%s" % (url, cmmt_id)
e.dtime = api_tsstr_sec(commit.props.ctime)
return (tmp_str + ' %s') % (op, file_or_dir, remaining)
else:
diff_result = seafile_api.diff_commits(repo_id, '', cmmt_id)
if diff_result:
for d in diff_result:
if file_or_dir not in d.name:
# skip to next diff_result if file/folder user clicked does not
# match the diff_result
continue
if d.status == 'add' or d.status == 'mod':
e.link = "api://repo/%s/files/?p=/%s" % (repo_id, d.name)
elif d.status == 'mov':
e.link = "api://repo/%s/files/?p=/%s" % (repo_id, d.new_name)
elif d.status == 'newdir':
e.link = "api://repo/%s/dir/?p=/%s" % (repo_id, d.name)
else:
continue
return tmp_str % (op, file_or_dir)
e.desc = re.sub(API_CMMT_DESC_PATT, link_repl, commit.desc)
MORE_PATT = r'and \d+ more (?:files|directories)'
def more_files_in_commit(commit):
"""Check whether added/deleted/modified more files in commit description.
"""
return True if re.search(MORE_PATT, commit.desc) else False
# file audit related
FILE_AUDIT_ENABLED = False
if EVENTS_CONFIG_FILE:
def check_file_audit_enabled():
enabled = seafevents.is_audit_enabled(parsed_events_conf)
if enabled:
logging.debug('file audit: enabled')
else:
logging.debug('file audit: not enabled')
return enabled
FILE_AUDIT_ENABLED = check_file_audit_enabled()
# office convert related
HAS_OFFICE_CONVERTER = False
if EVENTS_CONFIG_FILE:
def check_office_converter_enabled():
enabled = seafevents.is_office_converter_enabled(parsed_events_conf)
if enabled:
logging.debug('office converter: enabled')
else:
logging.debug('office converter: not enabled')
return enabled
def get_office_converter_html_dir():
return seafevents.get_office_converter_html_dir(parsed_events_conf)
def get_office_converter_limit():
return seafevents.get_office_converter_limit(parsed_events_conf)
HAS_OFFICE_CONVERTER = check_office_converter_enabled()
if HAS_OFFICE_CONVERTER:
OFFICE_HTML_DIR = get_office_converter_html_dir()
OFFICE_PREVIEW_MAX_SIZE, OFFICE_PREVIEW_MAX_PAGES = get_office_converter_limit()
all_doc_types = PREVIEW_FILEEXT[DOCUMENT] + PREVIEW_FILEEXT[OPENDOCUMENT]
PREVIEW_FILEEXT[DOCUMENT] = all_doc_types
PREVIEW_FILEEXT.pop(OPENDOCUMENT)
FILEEXT_TYPE_MAP = gen_fileext_type_map()
from seafevents.office_converter import OfficeConverterRpcClient
office_converter_rpc = None
def _get_office_converter_rpc():
global office_converter_rpc
if office_converter_rpc is None:
pool = ccnet.ClientPool(
seaserv.CCNET_CONF_PATH,
central_config_dir=seaserv.SEAFILE_CENTRAL_CONF_DIR
)
office_converter_rpc = OfficeConverterRpcClient(pool)
return office_converter_rpc
def office_convert_cluster_token(file_id):
from django.core import signing
s = '-'.join([file_id, datetime.now().strftime('%Y%m%d')])
return signing.Signer().sign(s)
def _office_convert_token_header(file_id):
return {
'X-Seafile-Office-Preview-Token': office_convert_cluster_token(file_id),
}
def cluster_delegate(delegate_func):
'''usage:
@cluster_delegate(funcA)
def func(*args):
...non-cluster logic goes here...
- In non-cluster mode, this decorator effectively does nothing.
- In cluster mode, if this node is not the office convert node,
funcA is called instead of the decorated function itself
'''
def decorated(func):
def real_func(*args, **kwargs):
cluster_internal = kwargs.pop('cluster_internal', False)
if CLUSTER_MODE and not OFFICE_CONVERTOR_NODE and not cluster_internal:
return delegate_func(*args)
else:
return func(*args)
return real_func
return decorated
def delegate_add_office_convert_task(file_id, doctype, raw_path):
url = urljoin(OFFICE_CONVERTOR_ROOT, '/office-convert/internal/add-task/')
data = urllib.urlencode({
'file_id': file_id,
'doctype': doctype,
'raw_path': raw_path,
})
headers = _office_convert_token_header(file_id)
ret = do_urlopen(url, data=data, headers=headers).read()
return json.loads(ret)
def delegate_query_office_convert_status(file_id, page):
url = urljoin(OFFICE_CONVERTOR_ROOT, '/office-convert/internal/status/')
url += '?file_id=%s&page=%s' % (file_id, page)
headers = _office_convert_token_header(file_id)
ret = do_urlopen(url, headers=headers).read()
return json.loads(ret)
def delegate_get_office_converted_page(request, repo_id, commit_id, path, static_filename, file_id):
url = urljoin(OFFICE_CONVERTOR_ROOT,
'/office-convert/internal/static/%s/%s%s/%s' % (
repo_id, commit_id, urlquote(path), urlquote(static_filename)))
url += '?file_id=' + file_id
headers = _office_convert_token_header(file_id)
timestamp = request.META.get('HTTP_IF_MODIFIED_SINCE')
if timestamp:
headers['If-Modified-Since'] = timestamp
try:
ret = do_urlopen(url, headers=headers)
data = ret.read()
except urllib2.HTTPError, e:
if timestamp and e.code == 304:
return HttpResponseNotModified()
else:
raise
content_type = ret.headers.get('content-type', None)
if content_type is None:
dummy, ext = os.path.splitext(os.path.basename(path))
content_type = mimetypes.types_map.get(ext, 'application/octet-stream')
resp = HttpResponse(data, content_type=content_type)
if 'last-modified' in ret.headers:
resp['Last-Modified'] = ret.headers.get('last-modified')
return resp
@cluster_delegate(delegate_add_office_convert_task)
def add_office_convert_task(file_id, doctype, raw_path):
rpc = _get_office_converter_rpc()
d = rpc.add_task(file_id, doctype, raw_path)
return {
'exists': False,
}
@cluster_delegate(delegate_query_office_convert_status)
def query_office_convert_status(file_id, page):
rpc = _get_office_converter_rpc()
d = rpc.query_convert_status(file_id, page)
ret = {}
if d.error:
ret['error'] = d.error
ret['status'] = 'ERROR'
else:
ret['success'] = True
ret['status'] = d.status
ret['info'] = d.info
return ret
@cluster_delegate(delegate_get_office_converted_page)
def get_office_converted_page(request, repo_id, commit_id, path, static_filename, file_id):
return django_static_serve(request,
os.path.join(file_id, static_filename),
document_root=OFFICE_HTML_DIR)
def prepare_converted_html(raw_path, obj_id, doctype, ret_dict):
try:
add_office_convert_task(obj_id, doctype, raw_path)
except:
logging.exception('failed to add_office_convert_task:')
return _(u'Internal error')
return None
# search related
HAS_FILE_SEARCH = False
if EVENTS_CONFIG_FILE:
def check_search_enabled():
enabled = False
if hasattr(seafevents, 'is_search_enabled'):
enabled = seafevents.is_search_enabled(parsed_events_conf)
if enabled:
logging.debug('search: enabled')
else:
logging.debug('search: not enabled')
return enabled
HAS_FILE_SEARCH = check_search_enabled()
TRAFFIC_STATS_ENABLED = False
if EVENTS_CONFIG_FILE and hasattr(seafevents, 'get_user_traffic_stat'):
TRAFFIC_STATS_ENABLED = True
def get_user_traffic_stat(username):
session = SeafEventsSession()
try:
stat = seafevents.get_user_traffic_stat(session, username)
finally:
session.close()
return stat
def get_user_traffic_list(month, start=0, limit=25):
session = SeafEventsSession()
try:
stat = seafevents.get_user_traffic_list(session, month, start, limit)
finally:
session.close()
return stat
else:
def get_user_traffic_stat(username):
pass
def get_user_traffic_list(month, start=0, limit=25):
pass
def user_traffic_over_limit(username):
"""Return ``True`` if user traffic over the limit, otherwise ``False``.
"""
if not CHECK_SHARE_LINK_TRAFFIC:
return False
from seahub_extra.plan.models import UserPlan
from seahub_extra.plan.settings import PLAN
up = UserPlan.objects.get_valid_plan_by_user(username)
plan = 'Free' if up is None else up.plan_type
traffic_limit = int(PLAN[plan]['share_link_traffic']) * 1024 * 1024 * 1024
try:
stat = get_user_traffic_stat(username)
except Exception as e:
logger = logging.getLogger(__name__)
logger.error('Failed to get user traffic stat: %s' % username,
exc_info=True)
return True
if stat is None: # No traffic record yet
return False
month_traffic = stat['file_view'] + stat['file_download'] + stat['dir_download']
return month_traffic >= traffic_limit
def is_user_password_strong(password):
"""Return ``True`` if user's password is STRONG, otherwise ``False``.
STRONG means the password contains at least USER_PASSWORD_STRENGTH_LEVEL (3) of the
following character types: digits, uppercase letters, lowercase letters, other symbols.
"""
if len(password) < config.USER_PASSWORD_MIN_LENGTH:
return False
else:
num = 0
for letter in password:
# get ascii dec
# bitwise OR
num |= get_char_mode(ord(letter))
if calculate_bitwise(num) < config.USER_PASSWORD_STRENGTH_LEVEL:
return False
else:
return True
def get_char_mode(n):
"""Return different num according to the type of given letter:
'1': num,
'2': upper_letter,
'4': lower_letter,
'8': other symbols
"""
if (n >= 48 and n <= 57): #nums
return 1
if (n >= 65 and n <= 90): #uppers
return 2
if (n >= 97 and n <= 122): #lowers
return 4
else:
return 8
def calculate_bitwise(num):
"""Return different level according to the given num:
"""
level = 0
for i in range(4):
# bitwise AND
if (num&1):
level += 1
# Right logical shift
num = num >> 1
return level
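# Illustrative example of how the helpers above combine (a hedged sketch; the
# thresholds assume USER_PASSWORD_MIN_LENGTH = 6 and USER_PASSWORD_STRENGTH_LEVEL = 3):
#   'Passw0rd' has digits (1), uppercase (2) and lowercase (4), so
#   num == 1 | 2 | 4 == 7 and calculate_bitwise(7) == 3 -> is_user_password_strong() is True
#   'password' only has lowercase letters, so calculate_bitwise(4) == 1 -> False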
def do_md5(s):
if isinstance(s, unicode):
s = s.encode('UTF-8')
return hashlib.md5(s).hexdigest()
def do_urlopen(url, data=None, headers=None):
headers = headers or {}
req = urllib2.Request(url, data=data, headers=headers)
ret = urllib2.urlopen(req)
return ret
def is_pro_version():
if seahub.settings.DEBUG:
if hasattr(seahub.settings, 'IS_PRO_VERSION') \
and seahub.settings.IS_PRO_VERSION:
return True
if EVENTS_CONFIG_FILE:
return True
else:
return False
def clear_token(username):
'''
clear web api and repo sync token
when delete/inactive an user
'''
Token.objects.filter(user = username).delete()
TokenV2.objects.filter(user = username).delete()
seafile_api.delete_repo_tokens_by_email(username)
def send_perm_audit_msg(etype, from_user, to, repo_id, path, perm):
"""Send repo permission audit msg.
Arguments:
- `etype`: add/modify/delete-repo-perm
- `from_user`: email
- `to`: email or group_id or all(public)
- `repo_id`: origin repo id
- `path`: dir path
- `perm`: r or rw
"""
msg = 'perm-update\t%s\t%s\t%s\t%s\t%s\t%s' % \
(etype, from_user, to, repo_id, path, perm)
msg_utf8 = msg.encode('utf-8')
try:
seaserv.send_message('seahub.stats', msg_utf8)
except Exception as e:
logger.error("Error when sending perm-audit-%s message: %s" %
(etype, str(e)))
def get_origin_repo_info(repo_id):
repo = seafile_api.get_repo(repo_id)
if repo.origin_repo_id is not None:
origin_repo_id = repo.origin_repo_id
origin_path = repo.origin_path
return (origin_repo_id, origin_path)
return (None, None)
def within_time_range(d1, d2, maxdiff_seconds):
'''Return true if two datetime.datetime object differs less than the given seconds'''
delta = d2 - d1 if d2 > d1 else d1 - d2
# delta.total_seconds() is only available in python 2.7+
diff = (delta.microseconds + (delta.seconds + delta.days*24*3600) * 1e6) / 1e6
return diff < maxdiff_seconds
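# Example (illustrative values):
#   from datetime import datetime
#   d1 = datetime(2018, 1, 1, 12, 0, 0)
#   d2 = datetime(2018, 1, 1, 12, 0, 45)
#   within_time_range(d1, d2, 60)   # True  (45 seconds apart)
#   within_time_range(d1, d2, 30)   # False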
def is_org_repo_creation_allowed(request):
"""Whether or not allow a user create organization library.
"""
if request.user.is_staff:
return True
else:
return config.ENABLE_USER_CREATE_ORG_REPO
def get_system_admins():
db_users = seaserv.get_emailusers('DB', -1, -1)
ldap_imported_users = seaserv.get_emailusers('LDAPImport', -1, -1)
admins = []
for user in db_users + ldap_imported_users:
if user.is_staff:
admins.append(user)
return admins
|
|
from collections import deque, OrderedDict
from django.conf import settings
from elasticsearch_dsl import Document, Date, Integer, Keyword, Text, Search, Index, Boolean, Completion, \
SearchAsYouType, normalizer, analyzer
from elasticsearch_dsl.connections import connections
from tqdm import tqdm
from elasticsearch.helpers import bulk, parallel_bulk, BulkIndexError
from .utils import normalise_lemma, normalise_form, get_ascii_from_unicode
'''
http://localhost:9200/_cat/indices
# TODO: print -> log or sys.out
'''
# Define a default Elasticsearch client
from . import utils
c = connections.configure(**settings.ELASTICSEARCH_DSL)
# https://github.com/elastic/elasticsearch-dsl-py/issues/669
# https://sunscrapers.com/blog/elasticsearch-with-python-7-tips-and-best-practices/
normalizer_insensitive = normalizer(
'insensitive_normalizer',
filter=['lowercase', 'asciifolding']
)
analyzer_insensitive = analyzer(
'insensitive_analyzer',
tokenizer='standard',
filter=['lowercase', 'asciifolding']
)
class KeywordInsensitive(Keyword):
'''An ES Keyword field with a .insensitive subfield
which is case-insensitive'''
def __init__(self, *args, **kwargs):
kwargs['fields'] = {
'insensitive': Keyword(normalizer=normalizer_insensitive)
}
super().__init__(*args, **kwargs)
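# Hedged usage sketch (field/index names as defined below, query value illustrative):
# the .insensitive subfield supports exact but case/accent-insensitive matching, e.g.
#   Search(index='tokens').filter('term', **{'form.insensitive': 'cesar'})
# matches 'Cesar' and 'césar', while a plain Keyword field would not.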
class AnnotatedToken(Document):
'''An ElasticSearch document for an annotated token in the text.
The tokens and annotations come from the kwic file.
Constraints:
Text():
searchable (any token), case-insensitive
but can't be sorted
(unless we use fielddata=True which is not recommended)
accent-sensitive by default
Keyword():
exact search only ('julius cesar' won't match 'cesar')
accent-sensitive & case-sensitive search
'''
# string
token = Keyword()
form = KeywordInsensitive()
lemma = Keyword()
# searchable
searchable = Text(analyzer=analyzer_insensitive)
pos = Keyword()
lemmapos = Keyword()
speech_cat = Keyword()
verse_cat = Keyword()
manuscript_number = Integer()
section_number = Keyword()
is_rubric = Boolean()
preceding = Text()
following = Text()
para_number = Integer()
seg_number = Integer()
# n
token_number = Integer()
previous_word = KeywordInsensitive()
next_word = KeywordInsensitive()
# the seq order of appearance in the text
# for efficient sorting.
seq_order = Integer()
class Index:
name = 'tokens'
def set_derived_fields(self):
self.form = self.token
if self.pos != 'nom propre':
# capital in first letter may be due to:
# . proper name (form should preserve it)
# . capital at beginning of sentence (we lowercase it)
self.form = self.form.lower()
self.searchable = '{} {}'.format(
self.form,
self.lemma
)
@classmethod
def new_from_token_element(cls, token_element, parsing_context):
attrib = utils.get_data_from_kwik_item(None, token_element)
ret = cls()
# print(attrib)
for k, v in attrib.items():
field = None
if k == 'string':
field = 'token'
v = (v or '').strip()
elif k == 'n':
field = 'token_number'
elif k == 'type':
field = 'is_rubric'
v = v == 'rubric_item'
elif k == 'location':
parts = v.split('_')
ret.manuscript_number = int('edRoyal20D1' in v)
ret.seg_number = parts[2] if len(parts) > 2 else 0
field = 'para_number'
v = int(parts[1])
elif k == 'following':
field = k
ret.next_word = v.split(' ')[0]
elif k == 'preceding':
field = k
ret.previous_word = v.split(' ')[-1]
if field is None and hasattr(ret, k):
field = k
if field:
setattr(ret, field, v)
else:
# print('WARNING: no field mapped to kwic attribute: {}'.format(k))
# TODO: sp
pass
ret.meta.id = '{}.{:03d}'.format(attrib['location'], int(attrib['n']))
ret.set_derived_fields()
if not str(getattr(ret, 'lemma', '') or '').strip():
# we don't index unlemmatised tokens (asked by partners, 01/2021)
return []
return [ret]
class LemmaDocument(Document):
'''Indexing model for a lemma'''
# for as-is
lemma = Keyword(
fields={
# for search
'searchable': Text(analyzer=analyzer_insensitive),
# for sorting
'insensitive': Keyword(normalizer=normalizer_insensitive)
}
)
# TODO?
forms = Keyword()
pos = Keyword()
name_type = Keyword()
class Index:
name = 'lemmata'
@classmethod
def new_from_token_element(cls, token_element, parsing_context):
tokenised_names = parsing_context.get('tokenised_names', None)
if tokenised_names is None:
tokenised_names = utils.read_tokenised_name_types()
parsing_context['tokenised_names'] = tokenised_names
ret = []
lemma = normalise_lemma(token_element.attrib.get('lemma', ''))
if lemma:
location_full = '__'.join([
token_element.attrib.get('location', ''),
token_element.attrib.get('n', '')
])
doc = cls(
lemma=lemma,
# lemma_sort=lemma.split(',')[0].strip().lower(),
pos=token_element.attrib.get('pos', 'Other').strip(),
name_type=tokenised_names.get(
location_full,
'Other'
)
)
if 0 and lemma == 'Ester':
print(location_full, doc.name_type)
# ES won't produce duplicates thanks to that id
doc.meta.id = lemma
ret.append(doc)
# doc.set_derived_fields()
return ret
class AutocompleteDocument(Document):
'''Indexing model for a form or lemma
TODO: check if there's a more standard or efficient way
of doing such suggestions.
'''
# autocomplete = Completion()
# For searching only, not displayed
# Basically we want a field with multiple tokens that can be searched
# by prefix (e.g. par*) and we can sort by the first token.
autocomplete = SearchAsYouType()
# I don't think SearchAsYouType can be sorted or accepts sub-fields
# so we define a sortable version separately just for sorting purpose
autocomplete_sortable = Keyword()
# for display
form = Keyword()
lemma = Keyword()
class Index:
name = 'autocomplete'
@classmethod
def new_from_token_element(cls, token_element, parsing_context):
ret = []
lemma = normalise_lemma(token_element.attrib.get('lemma', ''))
if lemma:
form = normalise_form(token_element)
for i in [0, 2]:
autocomplete = '{} {}'.format(form or lemma, lemma)
doc = cls(
lemma=lemma,
form=form,
autocomplete=get_ascii_from_unicode(autocomplete).lower(),
autocomplete_sortable=get_ascii_from_unicode(autocomplete).lower(),
)
doc.meta.id = autocomplete + ' ' + str(i)
ret.append(doc)
form = ''
return ret
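# Explanatory note (not in the original): the loop above emits up to two suggestion
# documents per token so a prefix query can hit either the form or the lemma, e.g.
# for an illustrative form 'chevalier' with lemma 'chevaler' it creates
#   meta.id 'chevalier chevaler 0'  (form-first autocomplete text)
#   meta.id 'chevaler chevaler 2'   (lemma-only, since `form` is blanked on the 2nd pass)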
class Indexer:
'''
Manages the search indexes.
The index_names argument used in the methods
is a list of index names the action should work on.
If empty or None the action will apply to all available indexes.
'''
# available indexes
indexes = OrderedDict([
['tokens', {
'document_class': AnnotatedToken,
}],
['lemmata', {
'document_class': LemmaDocument,
}],
['autocomplete', {
'document_class': AutocompleteDocument,
}]
])
def list(self, index_names=None):
'''Retrieves stats about indexes'''
# todo: return list and move actual display to textsearch
import time
titles = {
'name': 'Name',
'size': 'Docs',
'created': 'Created',
'disk': 'MBytes',
}
print('{name:15} | {size:7} | {disk:8} | {created:19}'.format(**titles))
print('-' * 62)
for index_name, index_data in self._get_selected_indexes(index_names):
info = {
'name': index_name,
'size': 0,
'disk': 0,
'created': 'absent',
}
index = Index(name=index_name)
if index.exists():
info['size'] = 'exists'
get_res = index.get()
info['created'] = int(get_res[index_name]['settings']['index']['creation_date'])/1000
info['created'] = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(info['created']))
stats = index.stats()
info['size'] = stats['_all']['total']['docs']['count']
info['disk'] = stats['_all']['total']['store']['size_in_bytes'] / 1024 / 1024
print('{name:15} | {size:>7} | {disk:>8.2f} | {created:>19}'.format(**info))
def clear(self, index_names=None):
'''Recreate the tokens index'''
for index_name, index_data in self._get_selected_indexes(index_names):
index = Index(name=index_name)
if index.exists():
index.delete()
# recreate index with imposed schema.
# without this the index schema would have text instead of keywords, etc.
index_data['document_class'].init()
print('cleared {}'.format(index_name))
def rebuild(self, index_names=None, cap=-1):
'''index all the tokens from the kwic'''
self.clear(index_names)
# https://github.com/elastic/elasticsearch-py/blob/master/examples/bulk-ingest/bulk-ingest.py
stats = {}
# set this to 1 to debug the indexing.
# it seems the parallel version will just silence errors!
debug_bulk = 1
for index_name, index_data in self._get_selected_indexes(index_names):
options = {
'client': connections.get_connection(),
'actions': self._bulk_actions(index_name, index_data, cap),
'chunk_size': settings.SEARCH_INDEX_CHUNK_SIZE
}
try:
if debug_bulk:
bulk(**options)
else:
deque(parallel_bulk(**options), maxlen=0)
except BulkIndexError as e:
print('Errors while indexing: {}'.format(e))
def _bulk_actions(self, index_name, index_data, cap=-1):
'''elasticsearch-dsl bulk_actions callback'''
# https://elasticsearch-py.readthedocs.io/en/master/helpers.html
# #bulk-helpers
count = 0
if cap > -1:
total = cap
else:
print('Read token counts')
total = utils.KwicParser.read_token_count()
print('Indexing {} {}...'.format(total, index_name))
# A working area for the callback that persists across multiple calls.
# Used for caching, etc.
parsing_context = {}
def parsing_callback(token_element):
return index_data['document_class'].new_from_token_element(token_element, parsing_context)
with tqdm(total=total) as t:
for document in utils.KwicParser(parsing_callback):
if -1 < cap <= count:
break
yield document.to_dict(True)
t.update()
count += 1
if total != count:
print('WARNING: {} indexed != {} expected'.format(count, total))
def _get_selected_indexes(self, index_names=None):
'''Returns a list of (name, data) pairs of index information,
one item per name in index_names.
Raises an Exception if a name is not found.'''
ret = [
(name, data)
for name, data
in self.indexes.items()
if (not index_names) or (name in index_names)
]
if index_names and len(index_names) != len(ret):
raise Exception(
'Index name not found ({}). Possible names are: {}.'.format(
', '.join(index_names),
', '.join(self.indexes.keys())
)
)
return ret
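# Hedged usage sketch for the Indexer (typically called from a management command;
# index names must match the keys of Indexer.indexes):
#   indexer = Indexer()
#   indexer.list()                          # print stats for all indexes
#   indexer.clear(['autocomplete'])         # drop & recreate a single index
#   indexer.rebuild(['tokens'], cap=1000)   # reindex at most 1000 documents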
|
|
# -*- coding: utf-8 -*-
'''
Parser for multipart/form-data
==============================
This module provides a parser for the multipart/form-data format. It can read
from a file, a socket or a WSGI environment. The parser can be used to replace
cgi.FieldStorage (without the bugs) and works with Python 2.5+ and 3.x (2to3).
Licence (MIT)
-------------
Copyright (c) 2010, Marcel Hellkamp.
Inspired by the Werkzeug library: http://werkzeug.pocoo.org/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
'''
__author__ = 'Marcel Hellkamp'
__version__ = '0.1'
__license__ = 'MIT'
from tempfile import TemporaryFile
from wsgiref.headers import Headers
import re, sys
try:
from urllib.parse import parse_qs
except ImportError: # pragma: no cover (fallback for Python 2.5)
from cgi import parse_qs
try:
from io import BytesIO
except ImportError: # pragma: no cover (fallback for Python 2.5)
from io import StringIO as BytesIO
##############################################################################
################################ Helper & Misc ################################
##############################################################################
# Some of these were copied from bottle: http://bottle.paws.de/
try:
from collections import MutableMapping as DictMixin
except ImportError: # pragma: no cover (fallback for Python 2.5)
from UserDict import DictMixin
class MultiDict(DictMixin):
""" A dict that remembers old values for each key """
def __init__(self, *a, **k):
self.dict = dict()
for k, v in dict(*a, **k).items():
self[k] = v
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def keys(self): return list(self.dict.keys())
def __getitem__(self, key): return self.get(key, KeyError, -1)
def __setitem__(self, key, value): self.append(key, value)
def append(self, key, value): self.dict.setdefault(key, []).append(value)
def replace(self, key, value): self.dict[key] = [value]
def getall(self, key): return self.dict.get(key) or []
def get(self, key, default=None, index=-1):
if key not in self.dict and default != KeyError:
return [default][index]
return self.dict[key][index]
def iterallitems(self):
for key, values in self.dict.items():
for value in values:
yield key, value
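# Example: MultiDict keeps every value assigned to a key; item access returns
# the most recent one.
#   d = MultiDict()
#   d['tag'] = 'a'
#   d['tag'] = 'b'
#   d['tag']          # 'b'
#   d.getall('tag')   # ['a', 'b']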
def tob(data, enc='utf8'): # Convert strings to bytes (py2 and py3)
return data.encode(enc) if isinstance(data, str) else data
def copy_file(stream, target, maxread=-1, buffer_size=2**16):
''' Read from :stream and write to :target until :maxread or EOF. '''
size, read = 0, stream.read
while 1:
to_read = buffer_size if maxread < 0 else min(buffer_size, maxread-size)
part = read(to_read)
if not part: return size
target.write(part)
size += len(part)
##############################################################################
################################ Header Parser ################################
##############################################################################
_special = re.escape('()<>@,;:\\"/[]?={} \t')
_re_special = re.compile('[%s]' % _special)
_qstr = '"(?:\\\\.|[^"])*"' # Quoted string
_value = '(?:[^%s]+|%s)' % (_special, _qstr) # Safe (unquoted) or quoted string
_option = '(?:;|^)\s*([^%s]+)\s*=\s*(%s)' % (_special, _value)
_re_option = re.compile(_option) # key=value part of a Content-Type-like header
def header_quote(val):
if not _re_special.search(val):
return val
return '"' + val.replace('\\','\\\\').replace('"','\\"') + '"'
def header_unquote(val, filename=False):
if val[0] == val[-1] == '"':
val = val[1:-1]
if val[1:3] == ':\\' or val[:2] == '\\\\':
val = val.split('\\')[-1] # fix ie6 bug: full path --> filename
return val.replace('\\\\','\\').replace('\\"','"')
return val
def parse_options_header(header, options=None):
if ';' not in header:
return header.lower().strip(), {}
ctype, tail = header.split(';', 1)
options = options or {}
for match in _re_option.finditer(tail):
key = match.group(1).lower()
value = header_unquote(match.group(2), key=='filename')
options[key] = value
return ctype, options
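# Example:
#   parse_options_header('form-data; name="upload"; filename="report.pdf"')
#   # -> ('form-data', {'name': 'upload', 'filename': 'report.pdf'})
#   parse_options_header('text/plain')
#   # -> ('text/plain', {})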
##############################################################################
################################## Multipart ##################################
##############################################################################
class MultipartError(ValueError): pass
class MultipartParser(object):
def __init__(self, stream, boundary, content_length=-1,
disk_limit=2**30, mem_limit=2**20, memfile_limit=2**18,
buffer_size=2**16, charset='latin1'):
''' Parse a multipart/form-data byte stream. This object is an iterator
over the parts of the message.
:param stream: A file-like stream. Must implement ``.read(size)``.
:param boundary: The multipart boundary as a byte string.
:param content_length: The maximum number of bytes to read.
'''
self.stream, self.boundary = stream, boundary
self.content_length = content_length
self.disk_limit = disk_limit
self.memfile_limit = memfile_limit
self.mem_limit = min(mem_limit, self.disk_limit)
self.buffer_size = min(buffer_size, self.mem_limit)
self.charset = charset
if self.buffer_size - 6 < len(boundary): # "--boundary--\r\n"
raise MultipartError('Boundary does not fit into buffer_size.')
self._done = []
self._part_iter = None
def __iter__(self):
''' Iterate over the parts of the multipart message. '''
if not self._part_iter:
self._part_iter = self._iterparse()
for part in self._done:
yield part
for part in self._part_iter:
self._done.append(part)
yield part
def parts(self):
''' Returns a list with all parts of the multipart message. '''
return list(iter(self))
def get(self, name, default=None):
''' Return the first part with that name or a default value (None). '''
for part in self:
if name == part.name:
return part
return default
def get_all(self, name):
''' Return a list of parts with that name. '''
return [p for p in self if p.name == name]
def _lineiter(self):
''' Iterate over a binary file-like object line by line. Each line is
returned as a (line, line_ending) tuple. If the line does not fit
into self.buffer_size, line_ending is empty and the rest of the line
is returned with the next iteration.
'''
read = self.stream.read
maxread, maxbuf = self.content_length, self.buffer_size
_bcrnl = tob('\r\n')
_bcr = _bcrnl[:1]
_bnl = _bcrnl[1:]
_bempty = _bcrnl[:0] # b'\r\n'[:0] -> b''
buffer = _bempty # buffer for the last (partial) line
while 1:
data = read(maxbuf if maxread < 0 else min(maxbuf, maxread))
maxread -= len(data)
lines = (buffer+data).splitlines(True)
len_first_line = len(lines[0])
# be sure that the first line does not become too big
if len_first_line > self.buffer_size:
# at the same time don't split a '\r\n' accidentally
if (len_first_line == self.buffer_size+1 and
lines[0].endswith(_bcrnl)):
splitpos = self.buffer_size - 1
else:
splitpos = self.buffer_size
lines[:1] = [lines[0][:splitpos],
lines[0][splitpos:]]
if data:
buffer = lines[-1]
lines = lines[:-1]
for line in lines:
if line.endswith(_bcrnl): yield line[:-2], _bcrnl
elif line.endswith(_bnl): yield line[:-1], _bnl
elif line.endswith(_bcr): yield line[:-1], _bcr
else: yield line, _bempty
if not data:
break
def _iterparse(self):
lines, line = self._lineiter(), ''
separator = tob('--') + tob(self.boundary)
terminator = tob('--') + tob(self.boundary) + tob('--')
# Consume first boundary. Ignore leading blank lines
for line, nl in lines:
if line: break
if line != separator:
raise MultipartError("Stream does not start with boundary")
# For each part in stream...
mem_used, disk_used = 0, 0 # Track used resources to prevent DoS
is_tail = False # True if the last line was incomplete (cut short)
opts = {'buffer_size': self.buffer_size,
'memfile_limit': self.memfile_limit,
'charset': self.charset}
part = MultipartPart(**opts)
for line, nl in lines:
if line == terminator and not is_tail:
part.file.seek(0)
yield part
break
elif line == separator and not is_tail:
if part.is_buffered(): mem_used += part.size
else: disk_used += part.size
part.file.seek(0)
yield part
part = MultipartPart(**opts)
else:
is_tail = not nl # The next line continues this one
part.feed(line, nl)
if part.is_buffered():
if part.size + mem_used > self.mem_limit:
raise MultipartError("Memory limit reached.")
elif part.size + disk_used > self.disk_limit:
raise MultipartError("Disk limit reached.")
if line != terminator:
raise MultipartError("Unexpected end of multipart stream.")
class MultipartPart(object):
def __init__(self, buffer_size=2**16, memfile_limit=2**18, charset='latin1'):
self.headerlist = []
self.headers = None
self.file = False
self.size = 0
self._buf = tob('')
self.disposition, self.name, self.filename = None, None, None
self.content_type, self.charset = None, charset
self.memfile_limit = memfile_limit
self.buffer_size = buffer_size
def feed(self, line, nl=''):
if self.file:
return self.write_body(line, nl)
return self.write_header(line, nl)
def write_header(self, line, nl):
line = line.decode(self.charset or 'latin1')
if not nl: raise MultipartError('Unexpected end of line in header.')
if not line.strip(): # blank line -> end of header segment
self.finish_header()
elif line[0] in ' \t' and self.headerlist:
name, value = self.headerlist.pop()
self.headerlist.append((name, value+line.strip()))
else:
if ':' not in line:
raise MultipartError("Syntax error in header: No colon.")
name, value = line.split(':', 1)
self.headerlist.append((name.strip(), value.strip()))
def write_body(self, line, nl):
if not line and not nl: return # This does not even flush the buffer
self.size += len(line) + len(self._buf)
self.file.write(self._buf + line)
self._buf = nl
if self.content_length > 0 and self.size > self.content_length:
raise MultipartError('Size of body exceeds Content-Length header.')
if self.size > self.memfile_limit and isinstance(self.file, BytesIO):
# TODO: What about non-file uploads that exceed the memfile_limit?
self.file, old = TemporaryFile(mode='w+b'), self.file
old.seek(0)
copy_file(old, self.file, self.size, self.buffer_size)
def finish_header(self):
self.file = BytesIO()
self.headers = Headers(self.headerlist)
cdis = self.headers.get('Content-Disposition','')
ctype = self.headers.get('Content-Type','')
clen = self.headers.get('Content-Length','-1')
if not cdis:
raise MultipartError('Content-Disposition header is missing.')
self.disposition, self.options = parse_options_header(cdis)
self.name = self.options.get('name')
self.filename = self.options.get('filename')
self.content_type, options = parse_options_header(ctype)
self.charset = options.get('charset') or self.charset
self.content_length = int(self.headers.get('Content-Length','-1'))
def is_buffered(self):
''' Return true if the data is fully buffered in memory.'''
return isinstance(self.file, BytesIO)
@property
def value(self):
''' Data decoded with the specified charset '''
pos = self.file.tell()
self.file.seek(0)
val = self.file.read()
self.file.seek(pos)
return val.decode(self.charset)
def save_as(self, path):
fp = open(path, 'wb')
pos = self.file.tell()
try:
self.file.seek(0)
size = copy_file(self.file, fp)
finally:
self.file.seek(pos)
return size
##############################################################################
#################################### WSGI ####################################
##############################################################################
def parse_form_data(environ, charset='utf8', strict=False, **kw):
''' Parse form data from an environ dict and return a (forms, files) tuple.
Both tuple values are dictionaries with the form-field name as a key
(unicode) and lists as values (multiple values per key are possible).
The forms-dictionary contains form-field values as unicode strings.
The files-dictionary contains :class:`MultipartPart` instances, either
because the form-field was a file-upload or the value is too big to fit
into memory limits.
:param environ: A WSGI environment dict.
:param charset: The charset to use if unsure. (default: utf8)
:param strict: If True, raise :exc:`MultipartError` on any parsing
errors. These are silently ignored by default.
'''
forms, files = MultiDict(), MultiDict()
try:
if environ.get('REQUEST_METHOD','GET').upper() not in ('POST', 'PUT'):
raise MultipartError("Request method other than POST or PUT.")
content_length = int(environ.get('CONTENT_LENGTH', '-1'))
content_type = environ.get('CONTENT_TYPE', '')
if not content_type:
raise MultipartError("Missing Content-Type header.")
content_type, options = parse_options_header(content_type)
stream = environ.get('wsgi.input') or BytesIO()
kw['charset'] = charset = options.get('charset', charset)
if content_type == 'multipart/form-data':
boundary = options.get('boundary','')
if not boundary:
raise MultipartError("No boundary for multipart/form-data.")
for part in MultipartParser(stream, boundary, content_length, **kw):
if part.filename or not part.is_buffered():
files[part.name] = part
else: # TODO: Big form-fields are in the files dict. really?
forms[part.name] = part.value
elif content_type in ('application/x-www-form-urlencoded',
'application/x-url-encoded'):
mem_limit = kw.get('mem_limit', 2**20)
if content_length > mem_limit:
raise MultipartError("Request to big. Increase MAXMEM.")
data = stream.read(mem_limit).decode(charset)
if stream.read(1): # These is more that does not fit mem_limit
raise MultipartError("Request to big. Increase MAXMEM.")
data = parse_qs(data, keep_blank_values=True)
for key, values in data.items():
for value in values:
forms[key] = value
else:
raise MultipartError("Unsupported content type.")
except MultipartError:
if strict: raise
return forms, files
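# Hedged usage sketch inside a WSGI application (field names are illustrative;
# validate filenames before writing them to disk in real code):
#   def app(environ, start_response):
#       forms, files = parse_form_data(environ)
#       name = forms.get('name', '')        # unicode form-field value
#       upload = files.get('attachment')    # MultipartPart or None
#       if upload:
#           upload.save_as('/tmp/' + upload.filename)
#       ...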
|
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'EnginePatch'
db.create_table(u'physical_enginepatch', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('engine', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'patchs', to=orm['physical.Engine'])),
('patch_version', self.gf('django.db.models.fields.CharField')(max_length=100)),
('is_initial_patch', self.gf('django.db.models.fields.BooleanField')(default=False)),
('patch_path', self.gf('django.db.models.fields.CharField')(default=u'', max_length=200, null=True, blank=True)),
))
db.send_create_signal(u'physical', ['EnginePatch'])
# Adding field 'DatabaseInfra.engine_patch'
db.add_column(u'physical_databaseinfra', 'engine_patch',
self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'databaseinfras', null=True, on_delete=models.PROTECT, to=orm['physical.EnginePatch']),
keep_default=False)
# Adding field 'Engine.major_version'
db.add_column(u'physical_engine', 'major_version',
self.gf('django.db.models.fields.CharField')(default=u'', max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'Engine.minor_version'
db.add_column(u'physical_engine', 'minor_version',
self.gf('django.db.models.fields.CharField')(default=u'', max_length=100, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting model 'EnginePatch'
db.delete_table(u'physical_enginepatch')
# Deleting field 'DatabaseInfra.engine_patch'
db.delete_column(u'physical_databaseinfra', 'engine_patch_id')
# Deleting field 'Engine.major_version'
db.delete_column(u'physical_engine', 'major_version')
# Deleting field 'Engine.minor_version'
db.delete_column(u'physical_engine', 'minor_version')
models = {
u'physical.cloud': {
'Meta': {'object_name': 'Cloud'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'backup_hour': ('django.db.models.fields.IntegerField', [], {}),
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'engine_patch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EnginePatch']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_vm_created': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'name_prefix': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'name_stamp': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'ssl_configured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.databaseinfraparameter': {
'Meta': {'unique_together': "((u'databaseinfra', u'parameter'),)", 'object_name': 'DatabaseInfraParameter'},
'applied_on_database': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_value': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.DatabaseInfra']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Parameter']"}),
'reset_default_value': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'ordering': "(u'engine_type__name', u'version')", 'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
'has_users': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'major_version': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'minor_version': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'read_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'write_node_description': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'physical.enginepatch': {
'Meta': {'object_name': 'EnginePatch'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'patchs'", 'to': u"orm['physical.Engine']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_initial_patch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'patch_path': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'patch_version': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.enginetype': {
'Meta': {'ordering': "(u'name',)", 'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_in_memory': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'cloud': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'environment_cloud'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Cloud']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environmentgroup': {
'Meta': {'object_name': 'EnvironmentGroup'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'groups'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'offering': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Offering']", 'null': 'True'}),
'os_description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shard': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'total_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_in_bytes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
u'physical.offering': {
'Meta': {'object_name': 'Offering'},
'cpus': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'offerings'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory_size_mb': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.parameter': {
'Meta': {'ordering': "(u'engine_type__name', u'name')", 'unique_together': "((u'name', u'engine_type'),)", 'object_name': 'Parameter'},
'allowed_values': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'custom_method': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'dynamic': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'enginetype'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'plans'", 'symmetrical': 'False', 'to': u"orm['physical.Environment']"}),
'has_persistence': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'migrate_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Plan']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'stronger_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'main_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'weaker_offering': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'weaker_offerings'", 'null': 'True', 'to': u"orm['physical.Offering']"})
},
u'physical.planattribute': {
'Meta': {'object_name': 'PlanAttribute'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'can_change_parameters': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_clone_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_reinstall_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_resize_vm': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_setup_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_switch_master': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_upgrade_db': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'details': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
'has_horizontal_scalability': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'replication_topologies'", 'blank': 'True', 'to': u"orm['physical.Parameter']"}),
'script': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'replication_topologies'", 'null': 'True', 'to': u"orm['physical.Script']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.script': {
'Meta': {'object_name': 'Script'},
'configuration': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'initialization': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'metric_collector': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'start_database': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'start_replication': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.topologyparametercustomvalue': {
'Meta': {'unique_together': "((u'topology', u'parameter'),)", 'object_name': 'TopologyParameterCustomValue'},
'attr_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameter': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'topology_custom_values'", 'to': u"orm['physical.Parameter']"}),
'topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'param_custom_values'", 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.vip': {
'Meta': {'object_name': 'Vip'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'infra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'vips'", 'to': u"orm['physical.DatabaseInfra']"}),
'original_vip': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Vip']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.volume': {
'Meta': {'object_name': 'Volume'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'volumes'", 'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'total_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'used_size_kb': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['physical']
|
|
import os
import sys
import time
import yaml
import copy
import random
import string
import logging
import subprocess
from importlib import import_module
from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler
POSIX = os.name != 'nt'
TMP_SHELL_FILE_PREFIX = '/tmp/__rdb_backup_'
log = logging.getLogger(__name__)
log_configured = False
template_path = os.path.realpath(os.path.join(__file__, '..', 'template.yml'))
tests_config = os.path.realpath(os.path.join(__file__, '..', '..', 'tests', 'config_files'))
class ProcessorNonexistent(Exception):
pass
def load_yml(file_path, prefix=None):
if prefix:
file_path = os.path.join(prefix, file_path)
if not os.path.exists(file_path):
raise Exception('config file [%s] does not exist.' % file_path)
with open(file_path, 'r', encoding='utf-8') as f:
conf = yaml.load(f)
return conf
def init_processor(processor_paths):
from rdb_backup.processor import table_processors, database_processors, TableProcessor, DatabaseProcessor
import_module('rdb_backup.processor.mysql')
import_module('rdb_backup.processor.postgres')
processor_paths = processor_paths + []
for processor_path in processor_paths:
import_module(processor_path)
for table_processor in TableProcessor.__subclasses__():
if table_processor.processor_name is None:
raise NotImplementedError('member [processor_name] is not defined in %s' % table_processor)
table_processors[table_processor.processor_name] = table_processor
for database_processor in DatabaseProcessor.__subclasses__():
if database_processor.processor_name is None:
raise NotImplementedError('member [processor_name] is not defined in %s' % database_processor)
database_processors[database_processor.processor_name] = database_processor
def get_config(file_path, prefix=None):
""" Create config's data structure from configure file.
:param file_path: configure file path
:param prefix: configure file path prefix
"""
from rdb_backup.processor import database_processors
global log_configured
config = load_yml(file_path, prefix)
# communal config
communal_config = config.pop('communal')
init_processor(communal_config.get('include', []))
# logging config
log_config = config.pop('logging', None)
if not log_configured:
if not log_config and 'py.test' in sys.argv[0]:
log_config = load_yml(template_path).get('logging')
if log_config:
log_configured = True
set_logging(log_config, communal_config['backup_root'])
# dbms config
databases = []
for dbms_name, dbs in config.items():
dbms_params = dbs.pop('__dbms__')
dbms_processor = dbms_params.pop('processor')
dbms_config = copy.deepcopy(dbms_params)
dbms_config.update(copy.deepcopy(communal_config))
processor_class = database_processors.get(dbms_processor)
if not processor_class:
raise ProcessorNonexistent('database processor [%s] nonexistent.' % dbms_processor)
for db_name, tbs in dbs.items():
db_params = tbs.pop('__db__', {}) if tbs else {}
db_config = copy.deepcopy(dbms_config)
db_config.update(copy.deepcopy(db_params or {}))
database = processor_class(dbms_name, db_name, db_config, tbs or {})
databases.append(database)
return communal_config, databases
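# Illustrative sketch (not part of the original module): the YAML layout that
# get_config() expects, inferred from the parsing code above.  The processor
# name, paths and section names below are hypothetical placeholders.
_EXAMPLE_CONFIG_YAML = """
communal:
  backup_root: /var/backup            # hypothetical backup target directory
  include: []                         # extra processor modules to import
# logging:                            # optional, same shape as set_logging() expects
mysql_main:                           # one section per DBMS instance
  __dbms__:
    processor: mysql                  # hypothetical processor_name
  some_database:
    __db__: {}                        # per-database overrides
    some_table: {}                    # table-level settings
"""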
def __run_shell_as_file(command, su_user):
if os.linesep in command:
return True
if su_user and '"' in command and "'" in command:
return True
return False
def run_shell(command, user=None, cwd=None, wait=True):
    cwd = cwd or os.getcwd()  # resolve the working directory at call time, not at import time
    quotation_marks = '"' if "'" in command else "'"
    su_prefix = 'su %s -c %s' % (user, quotation_marks) if user else ''
    su_postfix = quotation_marks if user else ''
if __run_shell_as_file(command, user):
file_path = TMP_SHELL_FILE_PREFIX + ''.join(random.sample(string.ascii_letters, 20))
content = '#! /bin/sh%s%s' % (os.linesep, command)
with open(file_path, 'w') as file:
file.write(content)
os.system('chmod og+rx ' + file_path)
log.debug('run shell: %s%s%s %s %s', su_prefix, file_path, su_postfix, os.linesep, command)
process = subprocess.Popen(su_prefix + file_path + su_postfix, shell=True, stdout=subprocess.PIPE, cwd=cwd)
if wait:
process.wait()
else:
time.sleep(0.1)
os.unlink(file_path)
else:
log.debug('run shell: %s%s%s', su_prefix, command, su_postfix)
process = subprocess.Popen(su_prefix + command + su_postfix, shell=True, stdout=subprocess.PIPE, cwd=cwd)
if wait:
process.wait()
return process
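# Illustrative sketch (not part of the original module): running a simple
# one-line command through run_shell().  Assumes a POSIX shell is available;
# multi-line commands, or quoting conflicts when a su user is given, are
# written to a temporary script under TMP_SHELL_FILE_PREFIX instead of being
# passed inline.
def _example_run_shell():
    process = run_shell('echo hello')
    return process.stdout.read()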
def multiply(expression):
"""multiplication calculation
:param expression: string e.g. "1024*1024*50"
:return: integer
"""
value = 1
for n in expression.split('*'):
value *= int(n)
return value
class CustomizeLog(logging.Formatter):
def __init__(self, fmt=None, date_fmt=None):
logging.Formatter.__init__(self, fmt, date_fmt)
def format(self, record):
if record.levelname == 'WARNING': record.levelname = 'WARN '
if record.levelname == 'CRITICAL': record.levelname = 'FATAL'
record.levelname = record.levelname.ljust(5)
return logging.Formatter.format(self, record)
def set_logging(config, root_path=''):
"""setting logging
:param config: config dict
:param root_path:
"""
default_format = CustomizeLog(config['format'])
handlers = {}
# console handler
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(default_format)
handlers['stream'] = handler
# customize handler
for handler_name, params in config['handler'].items():
handler_format = CustomizeLog(params['format']) if params.get('format') else default_format
handler_params = config['class'][params['class']].copy()
handler_params.update(params)
# create log dir
logfile = params['path'] if params['path'].startswith('/') else os.path.join(root_path, params['path'])
if not os.path.exists(os.path.dirname(logfile)):
os.makedirs(os.path.dirname(logfile))
# create handler
backup_count = handler_params['backup_count']
        # rotating_file: switch to a new file when the current file reaches a certain size.
if params['class'] == 'rotating_file':
max_size = multiply(handler_params['max_size'])
handler = RotatingFileHandler(logfile, 'a', max_size, backup_count, encoding='utf-8')
# rotating the log file at certain timed intervals.
elif params['class'] == 'time_rotating_file':
when = handler_params['when']
interval = handler_params['interval']
handler = TimedRotatingFileHandler(logfile, when, interval, backup_count, encoding='utf-8')
handler.setFormatter(handler_format)
handlers[handler_name] = handler
for module, params in config['logger'].items():
level = params['level'].upper()
handler_names = params['handler'].split()
propagate = params.get('propagate') or config['propagate']
if module == 'default': # define root log
logger = logging.getLogger()
else:
logger = logging.getLogger(module) # define module's logging
logger.propagate = propagate # judge whether repeatedly output to default logger
for handler_name in handler_names:
logger.addHandler(handlers[handler_name])
logger.setLevel(level)
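# Minimal, self-contained sketch of the config schema consumed by set_logging(),
# inferred from the function above.  Only the built-in console handler is used,
# so no log directories are created and nothing outside stderr is touched.
if __name__ == '__main__':
    _demo_logging_config = {
        'format': '%(asctime)s %(levelname)s %(name)s %(message)s',
        'class': {},
        'handler': {},
        'propagate': False,
        'logger': {
            'default': {'level': 'info', 'handler': 'stream', 'propagate': False},
        },
    }
    set_logging(_demo_logging_config)
    log.info('logging configured from the demo config')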
|
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.math
~~~~~~~~~~~~~~~~~~~~
Lexers for math languages.
:copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, include, \
combined, do_insertions
from pygments.token import Comment, String, Punctuation, Keyword, Name, \
Operator, Number, Text, Generic
from pygments.lexers.agile import PythonLexer
from pygments.lexers import _scilab_builtins
from pygments.util import shebang_matches
__all__ = ['JuliaLexer', 'JuliaConsoleLexer', 'MuPADLexer', 'MatlabLexer',
'MatlabSessionLexer', 'OctaveLexer', 'ScilabLexer', 'NumPyLexer',
'RConsoleLexer', 'SLexer', 'JagsLexer', 'BugsLexer', 'StanLexer']
class JuliaLexer(RegexLexer):
name = 'Julia'
aliases = ['julia','jl']
filenames = ['*.jl']
mimetypes = ['text/x-julia','application/x-julia']
builtins = [
'exit','whos','edit','load','is','isa','isequal','typeof','tuple',
'ntuple','uid','hash','finalizer','convert','promote','subtype',
'typemin','typemax','realmin','realmax','sizeof','eps','promote_type',
'method_exists','applicable','invoke','dlopen','dlsym','system',
        'error','throw','assert','new','Inf','NaN','pi','im',
]
tokens = {
'root': [
(r'\n', Text),
(r'[^\S\n]+', Text),
(r'#.*$', Comment),
(r'[]{}:(),;[@]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
# keywords
(r'(begin|while|for|in|return|break|continue|'
r'macro|quote|let|if|elseif|else|try|catch|end|'
r'bitstype|ccall|do)\b', Keyword),
(r'(local|global|const)\b', Keyword.Declaration),
(r'(module|import|export)\b', Keyword.Reserved),
(r'(Bool|Int|Int8|Int16|Int32|Int64|Uint|Uint8|Uint16|Uint32|Uint64'
r'|Float32|Float64|Complex64|Complex128|Any|Nothing|None)\b',
Keyword.Type),
# functions
(r'(function)((?:\s|\\\s)+)',
bygroups(Keyword,Name.Function), 'funcname'),
# types
(r'(type|typealias|abstract)((?:\s|\\\s)+)',
bygroups(Keyword,Name.Class), 'typename'),
# operators
(r'==|!=|<=|>=|->|&&|\|\||::|<:|[-~+/*%=<>&^|.?!$]', Operator),
(r'\.\*|\.\^|\.\\|\.\/|\\', Operator),
# builtins
('(' + '|'.join(builtins) + r')\b', Name.Builtin),
# backticks
(r'`(?s).*?`', String.Backtick),
# chars
(r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,3}|\\u[a-fA-F0-9]{1,4}|\\U[a-fA-F0-9]{1,6}|[^\\\'\n])'", String.Char),
# try to match trailing transpose
(r'(?<=[.\w\)\]])\'+', Operator),
# strings
(r'(?:[IL])"', String, 'string'),
(r'[E]?"', String, combined('stringescape', 'string')),
# names
(r'@[a-zA-Z0-9_.]+', Name.Decorator),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
# numbers
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0b[01]+', Number.Binary),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer)
],
'funcname': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop'),
('\([^\s\w{]{1,2}\)', Operator, '#pop'),
('[^\s\w{]{1,2}', Operator, '#pop'),
],
'typename': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'string': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
(r'\$(\([a-zA-Z0-9_]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?',
String.Interpol),
(r'[^\\"$]+', String),
# quotes, dollar signs, and backslashes must be parsed one at a time
(r'["\\]', String),
# unhandled string formatting sign
(r'\$', String)
],
}
def analyse_text(text):
return shebang_matches(text, r'julia')
line_re = re.compile('.*?\n')
class JuliaConsoleLexer(Lexer):
"""
For Julia console sessions. Modeled after MatlabSessionLexer.
"""
name = 'Julia console'
aliases = ['jlcon']
def get_tokens_unprocessed(self, text):
jllexer = JuliaLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith('julia>'):
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, line[:6])]))
                curcode += line[6:]
elif line.startswith(' '):
idx = len(curcode)
                # without this, the traceback would be rendered on the same line as the previous output
line = "\n" + line
token = (0, Generic.Traceback, line)
insertions.append((idx, [token]))
else:
if curcode:
for item in do_insertions(
insertions, jllexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode: # or item:
for item in do_insertions(
insertions, jllexer.get_tokens_unprocessed(curcode)):
yield item
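# Illustrative sketch (not part of the original module): lexing a short Julia
# REPL transcript.  Prompt lines are emitted as Generic.Prompt and their code
# is re-lexed with JuliaLexer; non-prompt lines become Generic.Output.  Exact
# token output may differ between Pygments versions.
def _example_julia_console_tokens():
    session = 'julia> 1 + 1\n2\n'
    return list(JuliaConsoleLexer().get_tokens_unprocessed(session))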
class MuPADLexer(RegexLexer):
"""
A `MuPAD <http://www.mupad.com>`_ lexer.
Contributed by Christopher Creutzig <[email protected]>.
*New in Pygments 0.8.*
"""
name = 'MuPAD'
aliases = ['mupad']
filenames = ['*.mu']
tokens = {
'root' : [
(r'//.*?$', Comment.Single),
(r'/\*', Comment.Multiline, 'comment'),
(r'"(?:[^"\\]|\\.)*"', String),
(r'\(|\)|\[|\]|\{|\}', Punctuation),
(r'''(?x)\b(?:
next|break|end|
axiom|end_axiom|category|end_category|domain|end_domain|inherits|
if|%if|then|elif|else|end_if|
case|of|do|otherwise|end_case|
while|end_while|
repeat|until|end_repeat|
for|from|to|downto|step|end_for|
proc|local|option|save|begin|end_proc|
delete|frame
)\b''', Keyword),
(r'''(?x)\b(?:
DOM_ARRAY|DOM_BOOL|DOM_COMPLEX|DOM_DOMAIN|DOM_EXEC|DOM_EXPR|
DOM_FAIL|DOM_FLOAT|DOM_FRAME|DOM_FUNC_ENV|DOM_HFARRAY|DOM_IDENT|
DOM_INT|DOM_INTERVAL|DOM_LIST|DOM_NIL|DOM_NULL|DOM_POLY|DOM_PROC|
DOM_PROC_ENV|DOM_RAT|DOM_SET|DOM_STRING|DOM_TABLE|DOM_VAR
)\b''', Name.Class),
(r'''(?x)\b(?:
PI|EULER|E|CATALAN|
NIL|FAIL|undefined|infinity|
TRUE|FALSE|UNKNOWN
)\b''',
Name.Constant),
(r'\b(?:dom|procname)\b', Name.Builtin.Pseudo),
(r'\.|,|:|;|=|\+|-|\*|/|\^|@|>|<|\$|\||!|\'|%|~=', Operator),
(r'''(?x)\b(?:
and|or|not|xor|
assuming|
div|mod|
union|minus|intersect|in|subset
)\b''',
Operator.Word),
(r'\b(?:I|RDN_INF|RD_NINF|RD_NAN)\b', Number),
#(r'\b(?:adt|linalg|newDomain|hold)\b', Name.Builtin),
(r'''(?x)
((?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
(?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*)(\s*)([(])''',
bygroups(Name.Function, Text, Punctuation)),
(r'''(?x)
(?:[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)
(?:::[a-zA-Z_#][a-zA-Z_#0-9]*|`[^`]*`)*''', Name.Variable),
(r'[0-9]+(?:\.[0-9]*)?(?:e[0-9]+)?', Number),
(r'\.[0-9]+(?:e[0-9]+)?', Number),
(r'.', Text)
],
'comment' : [
(r'[^*/]', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[*/]', Comment.Multiline)
]
}
class MatlabLexer(RegexLexer):
"""
For Matlab source code.
*New in Pygments 0.10.*
"""
name = 'Matlab'
aliases = ['matlab']
filenames = ['*.m']
mimetypes = ['text/matlab']
#
# These lists are generated automatically.
# Run the following in bash shell:
#
# for f in elfun specfun elmat; do
# echo -n "$f = "
# matlab -nojvm -r "help $f;exit;" | perl -ne \
# 'push(@c,$1) if /^ (\w+)\s+-/; END {print q{["}.join(q{","},@c).qq{"]\n};}'
# done
#
# elfun: Elementary math functions
# specfun: Special Math functions
# elmat: Elementary matrices and matrix manipulation
#
# taken from Matlab version 7.4.0.336 (R2007a)
#
elfun = ["sin","sind","sinh","asin","asind","asinh","cos","cosd","cosh",
"acos","acosd","acosh","tan","tand","tanh","atan","atand","atan2",
"atanh","sec","secd","sech","asec","asecd","asech","csc","cscd",
"csch","acsc","acscd","acsch","cot","cotd","coth","acot","acotd",
"acoth","hypot","exp","expm1","log","log1p","log10","log2","pow2",
"realpow","reallog","realsqrt","sqrt","nthroot","nextpow2","abs",
"angle","complex","conj","imag","real","unwrap","isreal","cplxpair",
"fix","floor","ceil","round","mod","rem","sign"]
specfun = ["airy","besselj","bessely","besselh","besseli","besselk","beta",
"betainc","betaln","ellipj","ellipke","erf","erfc","erfcx",
"erfinv","expint","gamma","gammainc","gammaln","psi","legendre",
"cross","dot","factor","isprime","primes","gcd","lcm","rat",
"rats","perms","nchoosek","factorial","cart2sph","cart2pol",
"pol2cart","sph2cart","hsv2rgb","rgb2hsv"]
elmat = ["zeros","ones","eye","repmat","rand","randn","linspace","logspace",
"freqspace","meshgrid","accumarray","size","length","ndims","numel",
"disp","isempty","isequal","isequalwithequalnans","cat","reshape",
"diag","blkdiag","tril","triu","fliplr","flipud","flipdim","rot90",
"find","end","sub2ind","ind2sub","bsxfun","ndgrid","permute",
"ipermute","shiftdim","circshift","squeeze","isscalar","isvector",
"ans","eps","realmax","realmin","pi","i","inf","nan","isnan",
"isinf","isfinite","j","why","compan","gallery","hadamard","hankel",
"hilb","invhilb","magic","pascal","rosser","toeplitz","vander",
"wilkinson"]
tokens = {
'root': [
# line starting with '!' is sent as a system command. not sure what
# label to use...
(r'^!.*', String.Other),
(r'%\{\s*\n', Comment.Multiline, 'blockcomment'),
(r'%.*$', Comment),
(r'^\s*function', Keyword, 'deffunc'),
# from 'iskeyword' on version 7.11 (R2010):
(r'(break|case|catch|classdef|continue|else|elseif|end|enumerated|'
r'events|for|function|global|if|methods|otherwise|parfor|'
r'persistent|properties|return|spmd|switch|try|while)\b', Keyword),
("(" + "|".join(elfun+specfun+elmat) + r')\b', Name.Builtin),
# line continuation with following comment:
(r'\.\.\..*$', Comment),
# operators:
(r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
# operators requiring escape for re:
(r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
# punctuation:
(r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
(r'=|:|;', Punctuation),
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
(r'(?<=[\w\)\]])\'', Operator),
(r'(?<![\w\)\]])\'', String, 'string'),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'.', Text),
],
'string': [
(r'[^\']*\'', String, '#pop')
],
'blockcomment': [
(r'^\s*%\}', Comment.Multiline, '#pop'),
(r'^.*\n', Comment.Multiline),
(r'.', Comment.Multiline),
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
Text.Whitespace, Name.Function, Punctuation, Text,
Punctuation, Text.Whitespace), '#pop'),
],
}
def analyse_text(text):
if re.match('^\s*%', text, re.M): # comment
return 0.9
elif re.match('^!\w+', text, re.M): # system cmd
return 0.9
return 0.1
line_re = re.compile('.*?\n')
class MatlabSessionLexer(Lexer):
"""
For Matlab sessions. Modeled after PythonConsoleLexer.
Contributed by Ken Schutte <[email protected]>.
*New in Pygments 0.10.*
"""
name = 'Matlab session'
aliases = ['matlabsession']
def get_tokens_unprocessed(self, text):
mlexer = MatlabLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>>'):
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:3])]))
curcode += line[3:]
elif line.startswith('???'):
idx = len(curcode)
                # without this, the traceback would be rendered on the same line as the previous output
line = "\n" + line
token = (0, Generic.Traceback, line)
insertions.append((idx, [token]))
else:
if curcode:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode: # or item:
for item in do_insertions(
insertions, mlexer.get_tokens_unprocessed(curcode)):
yield item
class OctaveLexer(RegexLexer):
"""
For GNU Octave source code.
*New in Pygments 1.5.*
"""
name = 'Octave'
aliases = ['octave']
filenames = ['*.m']
mimetypes = ['text/octave']
# These lists are generated automatically.
# Run the following in bash shell:
#
# First dump all of the Octave manual into a plain text file:
#
# $ info octave --subnodes -o octave-manual
#
# Now grep through it:
# for i in \
# "Built-in Function" "Command" "Function File" \
# "Loadable Function" "Mapping Function";
# do
# perl -e '@name = qw('"$i"');
# print lc($name[0]),"_kw = [\n"';
#
# perl -n -e 'print "\"$1\",\n" if /-- '"$i"': .* (\w*) \(/;' \
# octave-manual | sort | uniq ;
# echo "]" ;
# echo;
# done
# taken from Octave Mercurial changeset 8cc154f45e37 (30-jan-2011)
builtin_kw = [ "addlistener", "addpath", "addproperty", "all",
"and", "any", "argnames", "argv", "assignin",
"atexit", "autoload",
"available_graphics_toolkits", "beep_on_error",
"bitand", "bitmax", "bitor", "bitshift", "bitxor",
"cat", "cell", "cellstr", "char", "class", "clc",
"columns", "command_line_path",
"completion_append_char", "completion_matches",
"complex", "confirm_recursive_rmdir", "cputime",
"crash_dumps_octave_core", "ctranspose", "cumprod",
"cumsum", "debug_on_error", "debug_on_interrupt",
"debug_on_warning", "default_save_options",
"dellistener", "diag", "diff", "disp",
"doc_cache_file", "do_string_escapes", "double",
"drawnow", "e", "echo_executing_commands", "eps",
"eq", "errno", "errno_list", "error", "eval",
"evalin", "exec", "exist", "exit", "eye", "false",
"fclear", "fclose", "fcntl", "fdisp", "feof",
"ferror", "feval", "fflush", "fgetl", "fgets",
"fieldnames", "file_in_loadpath", "file_in_path",
"filemarker", "filesep", "find_dir_in_path",
"fixed_point_format", "fnmatch", "fopen", "fork",
"formula", "fprintf", "fputs", "fread", "freport",
"frewind", "fscanf", "fseek", "fskipl", "ftell",
"functions", "fwrite", "ge", "genpath", "get",
"getegid", "getenv", "geteuid", "getgid",
"getpgrp", "getpid", "getppid", "getuid", "glob",
"gt", "gui_mode", "history_control",
"history_file", "history_size",
"history_timestamp_format_string", "home",
"horzcat", "hypot", "ifelse",
"ignore_function_time_stamp", "inferiorto",
"info_file", "info_program", "inline", "input",
"intmax", "intmin", "ipermute",
"is_absolute_filename", "isargout", "isbool",
"iscell", "iscellstr", "ischar", "iscomplex",
"isempty", "isfield", "isfloat", "isglobal",
"ishandle", "isieee", "isindex", "isinteger",
"islogical", "ismatrix", "ismethod", "isnull",
"isnumeric", "isobject", "isreal",
"is_rooted_relative_filename", "issorted",
"isstruct", "isvarname", "kbhit", "keyboard",
"kill", "lasterr", "lasterror", "lastwarn",
"ldivide", "le", "length", "link", "linspace",
"logical", "lstat", "lt", "make_absolute_filename",
"makeinfo_program", "max_recursion_depth", "merge",
"methods", "mfilename", "minus", "mislocked",
"mkdir", "mkfifo", "mkstemp", "mldivide", "mlock",
"mouse_wheel_zoom", "mpower", "mrdivide", "mtimes",
"munlock", "nargin", "nargout",
"native_float_format", "ndims", "ne", "nfields",
"nnz", "norm", "not", "numel", "nzmax",
"octave_config_info", "octave_core_file_limit",
"octave_core_file_name",
"octave_core_file_options", "ones", "or",
"output_max_field_width", "output_precision",
"page_output_immediately", "page_screen_output",
"path", "pathsep", "pause", "pclose", "permute",
"pi", "pipe", "plus", "popen", "power",
"print_empty_dimensions", "printf",
"print_struct_array_contents", "prod",
"program_invocation_name", "program_name",
"putenv", "puts", "pwd", "quit", "rats", "rdivide",
"readdir", "readlink", "read_readline_init_file",
"realmax", "realmin", "rehash", "rename",
"repelems", "re_read_readline_init_file", "reset",
"reshape", "resize", "restoredefaultpath",
"rethrow", "rmdir", "rmfield", "rmpath", "rows",
"save_header_format_string", "save_precision",
"saving_history", "scanf", "set", "setenv",
"shell_cmd", "sighup_dumps_octave_core",
"sigterm_dumps_octave_core", "silent_functions",
"single", "size", "size_equal", "sizemax",
"sizeof", "sleep", "source", "sparse_auto_mutate",
"split_long_rows", "sprintf", "squeeze", "sscanf",
"stat", "stderr", "stdin", "stdout", "strcmp",
"strcmpi", "string_fill_char", "strncmp",
"strncmpi", "struct", "struct_levels_to_print",
"strvcat", "subsasgn", "subsref", "sum", "sumsq",
"superiorto", "suppress_verbose_help_message",
"symlink", "system", "tic", "tilde_expand",
"times", "tmpfile", "tmpnam", "toc", "toupper",
"transpose", "true", "typeinfo", "umask", "uminus",
"uname", "undo_string_escapes", "unlink", "uplus",
"upper", "usage", "usleep", "vec", "vectorize",
"vertcat", "waitpid", "warning", "warranty",
"whos_line_format", "yes_or_no", "zeros",
"inf", "Inf", "nan", "NaN"]
command_kw = [ "close", "load", "who", "whos", ]
function_kw = [ "accumarray", "accumdim", "acosd", "acotd",
"acscd", "addtodate", "allchild", "ancestor",
"anova", "arch_fit", "arch_rnd", "arch_test",
"area", "arma_rnd", "arrayfun", "ascii", "asctime",
"asecd", "asind", "assert", "atand",
"autoreg_matrix", "autumn", "axes", "axis", "bar",
"barh", "bartlett", "bartlett_test", "beep",
"betacdf", "betainv", "betapdf", "betarnd",
"bicgstab", "bicubic", "binary", "binocdf",
"binoinv", "binopdf", "binornd", "bitcmp",
"bitget", "bitset", "blackman", "blanks",
"blkdiag", "bone", "box", "brighten", "calendar",
"cast", "cauchy_cdf", "cauchy_inv", "cauchy_pdf",
"cauchy_rnd", "caxis", "celldisp", "center", "cgs",
"chisquare_test_homogeneity",
"chisquare_test_independence", "circshift", "cla",
"clabel", "clf", "clock", "cloglog", "closereq",
"colon", "colorbar", "colormap", "colperm",
"comet", "common_size", "commutation_matrix",
"compan", "compare_versions", "compass",
"computer", "cond", "condest", "contour",
"contourc", "contourf", "contrast", "conv",
"convhull", "cool", "copper", "copyfile", "cor",
"corrcoef", "cor_test", "cosd", "cotd", "cov",
"cplxpair", "cross", "cscd", "cstrcat", "csvread",
"csvwrite", "ctime", "cumtrapz", "curl", "cut",
"cylinder", "date", "datenum", "datestr",
"datetick", "datevec", "dblquad", "deal",
"deblank", "deconv", "delaunay", "delaunayn",
"delete", "demo", "detrend", "diffpara", "diffuse",
"dir", "discrete_cdf", "discrete_inv",
"discrete_pdf", "discrete_rnd", "display",
"divergence", "dlmwrite", "dos", "dsearch",
"dsearchn", "duplication_matrix", "durbinlevinson",
"ellipsoid", "empirical_cdf", "empirical_inv",
"empirical_pdf", "empirical_rnd", "eomday",
"errorbar", "etime", "etreeplot", "example",
"expcdf", "expinv", "expm", "exppdf", "exprnd",
"ezcontour", "ezcontourf", "ezmesh", "ezmeshc",
"ezplot", "ezpolar", "ezsurf", "ezsurfc", "factor",
"factorial", "fail", "fcdf", "feather", "fftconv",
"fftfilt", "fftshift", "figure", "fileattrib",
"fileparts", "fill", "findall", "findobj",
"findstr", "finv", "flag", "flipdim", "fliplr",
"flipud", "fpdf", "fplot", "fractdiff", "freqz",
"freqz_plot", "frnd", "fsolve",
"f_test_regression", "ftp", "fullfile", "fzero",
"gamcdf", "gaminv", "gampdf", "gamrnd", "gca",
"gcbf", "gcbo", "gcf", "genvarname", "geocdf",
"geoinv", "geopdf", "geornd", "getfield", "ginput",
"glpk", "gls", "gplot", "gradient",
"graphics_toolkit", "gray", "grid", "griddata",
"griddatan", "gtext", "gunzip", "gzip", "hadamard",
"hamming", "hankel", "hanning", "hggroup",
"hidden", "hilb", "hist", "histc", "hold", "hot",
"hotelling_test", "housh", "hsv", "hurst",
"hygecdf", "hygeinv", "hygepdf", "hygernd",
"idivide", "ifftshift", "image", "imagesc",
"imfinfo", "imread", "imshow", "imwrite", "index",
"info", "inpolygon", "inputname", "interpft",
"interpn", "intersect", "invhilb", "iqr", "isa",
"isdefinite", "isdir", "is_duplicate_entry",
"isequal", "isequalwithequalnans", "isfigure",
"ishermitian", "ishghandle", "is_leap_year",
"isletter", "ismac", "ismember", "ispc", "isprime",
"isprop", "isscalar", "issquare", "isstrprop",
"issymmetric", "isunix", "is_valid_file_id",
"isvector", "jet", "kendall",
"kolmogorov_smirnov_cdf",
"kolmogorov_smirnov_test", "kruskal_wallis_test",
"krylov", "kurtosis", "laplace_cdf", "laplace_inv",
"laplace_pdf", "laplace_rnd", "legend", "legendre",
"license", "line", "linkprop", "list_primes",
"loadaudio", "loadobj", "logistic_cdf",
"logistic_inv", "logistic_pdf", "logistic_rnd",
"logit", "loglog", "loglogerr", "logm", "logncdf",
"logninv", "lognpdf", "lognrnd", "logspace",
"lookfor", "ls_command", "lsqnonneg", "magic",
"mahalanobis", "manova", "matlabroot",
"mcnemar_test", "mean", "meansq", "median", "menu",
"mesh", "meshc", "meshgrid", "meshz", "mexext",
"mget", "mkpp", "mode", "moment", "movefile",
"mpoles", "mput", "namelengthmax", "nargchk",
"nargoutchk", "nbincdf", "nbininv", "nbinpdf",
"nbinrnd", "nchoosek", "ndgrid", "newplot", "news",
"nonzeros", "normcdf", "normest", "norminv",
"normpdf", "normrnd", "now", "nthroot", "null",
"ocean", "ols", "onenormest", "optimget",
"optimset", "orderfields", "orient", "orth",
"pack", "pareto", "parseparams", "pascal", "patch",
"pathdef", "pcg", "pchip", "pcolor", "pcr",
"peaks", "periodogram", "perl", "perms", "pie",
"pink", "planerot", "playaudio", "plot",
"plotmatrix", "plotyy", "poisscdf", "poissinv",
"poisspdf", "poissrnd", "polar", "poly",
"polyaffine", "polyarea", "polyderiv", "polyfit",
"polygcd", "polyint", "polyout", "polyreduce",
"polyval", "polyvalm", "postpad", "powerset",
"ppder", "ppint", "ppjumps", "ppplot", "ppval",
"pqpnonneg", "prepad", "primes", "print",
"print_usage", "prism", "probit", "qp", "qqplot",
"quadcc", "quadgk", "quadl", "quadv", "quiver",
"qzhess", "rainbow", "randi", "range", "rank",
"ranks", "rat", "reallog", "realpow", "realsqrt",
"record", "rectangle_lw", "rectangle_sw",
"rectint", "refresh", "refreshdata",
"regexptranslate", "repmat", "residue", "ribbon",
"rindex", "roots", "rose", "rosser", "rotdim",
"rref", "run", "run_count", "rundemos", "run_test",
"runtests", "saveas", "saveaudio", "saveobj",
"savepath", "scatter", "secd", "semilogx",
"semilogxerr", "semilogy", "semilogyerr",
"setaudio", "setdiff", "setfield", "setxor",
"shading", "shift", "shiftdim", "sign_test",
"sinc", "sind", "sinetone", "sinewave", "skewness",
"slice", "sombrero", "sortrows", "spaugment",
"spconvert", "spdiags", "spearman", "spectral_adf",
"spectral_xdf", "specular", "speed", "spencer",
"speye", "spfun", "sphere", "spinmap", "spline",
"spones", "sprand", "sprandn", "sprandsym",
"spring", "spstats", "spy", "sqp", "stairs",
"statistics", "std", "stdnormal_cdf",
"stdnormal_inv", "stdnormal_pdf", "stdnormal_rnd",
"stem", "stft", "strcat", "strchr", "strjust",
"strmatch", "strread", "strsplit", "strtok",
"strtrim", "strtrunc", "structfun", "studentize",
"subplot", "subsindex", "subspace", "substr",
"substruct", "summer", "surf", "surface", "surfc",
"surfl", "surfnorm", "svds", "swapbytes",
"sylvester_matrix", "symvar", "synthesis", "table",
"tand", "tar", "tcdf", "tempdir", "tempname",
"test", "text", "textread", "textscan", "tinv",
"title", "toeplitz", "tpdf", "trace", "trapz",
"treelayout", "treeplot", "triangle_lw",
"triangle_sw", "tril", "trimesh", "triplequad",
"triplot", "trisurf", "triu", "trnd", "tsearchn",
"t_test", "t_test_regression", "type", "unidcdf",
"unidinv", "unidpdf", "unidrnd", "unifcdf",
"unifinv", "unifpdf", "unifrnd", "union", "unique",
"unix", "unmkpp", "unpack", "untabify", "untar",
"unwrap", "unzip", "u_test", "validatestring",
"vander", "var", "var_test", "vech", "ver",
"version", "view", "voronoi", "voronoin",
"waitforbuttonpress", "wavread", "wavwrite",
"wblcdf", "wblinv", "wblpdf", "wblrnd", "weekday",
"welch_test", "what", "white", "whitebg",
"wienrnd", "wilcoxon_test", "wilkinson", "winter",
"xlabel", "xlim", "ylabel", "yulewalker", "zip",
"zlabel", "z_test", ]
loadable_kw = [ "airy", "amd", "balance", "besselh", "besseli",
"besselj", "besselk", "bessely", "bitpack",
"bsxfun", "builtin", "ccolamd", "cellfun",
"cellslices", "chol", "choldelete", "cholinsert",
"cholinv", "cholshift", "cholupdate", "colamd",
"colloc", "convhulln", "convn", "csymamd",
"cummax", "cummin", "daspk", "daspk_options",
"dasrt", "dasrt_options", "dassl", "dassl_options",
"dbclear", "dbdown", "dbstack", "dbstatus",
"dbstop", "dbtype", "dbup", "dbwhere", "det",
"dlmread", "dmperm", "dot", "eig", "eigs",
"endgrent", "endpwent", "etree", "fft", "fftn",
"fftw", "filter", "find", "full", "gcd",
"getgrent", "getgrgid", "getgrnam", "getpwent",
"getpwnam", "getpwuid", "getrusage", "givens",
"gmtime", "gnuplot_binary", "hess", "ifft",
"ifftn", "inv", "isdebugmode", "issparse", "kron",
"localtime", "lookup", "lsode", "lsode_options",
"lu", "luinc", "luupdate", "matrix_type", "max",
"min", "mktime", "pinv", "qr", "qrdelete",
"qrinsert", "qrshift", "qrupdate", "quad",
"quad_options", "qz", "rand", "rande", "randg",
"randn", "randp", "randperm", "rcond", "regexp",
"regexpi", "regexprep", "schur", "setgrent",
"setpwent", "sort", "spalloc", "sparse", "spparms",
"sprank", "sqrtm", "strfind", "strftime",
"strptime", "strrep", "svd", "svd_driver", "syl",
"symamd", "symbfact", "symrcm", "time", "tsearch",
"typecast", "urlread", "urlwrite", ]
mapping_kw = [ "abs", "acos", "acosh", "acot", "acoth", "acsc",
"acsch", "angle", "arg", "asec", "asech", "asin",
"asinh", "atan", "atanh", "beta", "betainc",
"betaln", "bincoeff", "cbrt", "ceil", "conj", "cos",
"cosh", "cot", "coth", "csc", "csch", "erf", "erfc",
"erfcx", "erfinv", "exp", "finite", "fix", "floor",
"fmod", "gamma", "gammainc", "gammaln", "imag",
"isalnum", "isalpha", "isascii", "iscntrl",
"isdigit", "isfinite", "isgraph", "isinf",
"islower", "isna", "isnan", "isprint", "ispunct",
"isspace", "isupper", "isxdigit", "lcm", "lgamma",
"log", "lower", "mod", "real", "rem", "round",
"roundb", "sec", "sech", "sign", "sin", "sinh",
"sqrt", "tan", "tanh", "toascii", "tolower", "xor",
]
builtin_consts = [ "EDITOR", "EXEC_PATH", "I", "IMAGE_PATH", "NA",
"OCTAVE_HOME", "OCTAVE_VERSION", "PAGER",
"PAGER_FLAGS", "SEEK_CUR", "SEEK_END", "SEEK_SET",
"SIG", "S_ISBLK", "S_ISCHR", "S_ISDIR", "S_ISFIFO",
"S_ISLNK", "S_ISREG", "S_ISSOCK", "WCONTINUE",
"WCOREDUMP", "WEXITSTATUS", "WIFCONTINUED",
"WIFEXITED", "WIFSIGNALED", "WIFSTOPPED", "WNOHANG",
"WSTOPSIG", "WTERMSIG", "WUNTRACED", ]
tokens = {
'root': [
#We should look into multiline comments
(r'[%#].*$', Comment),
(r'^\s*function', Keyword, 'deffunc'),
# from 'iskeyword' on hg changeset 8cc154f45e37
(r'(__FILE__|__LINE__|break|case|catch|classdef|continue|do|else|'
r'elseif|end|end_try_catch|end_unwind_protect|endclassdef|'
r'endevents|endfor|endfunction|endif|endmethods|endproperties|'
r'endswitch|endwhile|events|for|function|get|global|if|methods|'
r'otherwise|persistent|properties|return|set|static|switch|try|'
r'until|unwind_protect|unwind_protect_cleanup|while)\b', Keyword),
("(" + "|".join( builtin_kw + command_kw
+ function_kw + loadable_kw
+ mapping_kw) + r')\b', Name.Builtin),
("(" + "|".join(builtin_consts) + r')\b', Name.Constant),
# operators in Octave but not Matlab:
(r'-=|!=|!|/=|--', Operator),
# operators:
(r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
# operators in Octave but not Matlab requiring escape for re:
(r'\*=|\+=|\^=|\/=|\\=|\*\*|\+\+|\.\*\*',Operator),
# operators requiring escape for re:
(r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
# punctuation:
(r'\[|\]|\(|\)|\{|\}|:|@|\.|,', Punctuation),
(r'=|:|;', Punctuation),
(r'"[^"]*"', String),
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
(r'(?<=[\w\)\]])\'', Operator),
(r'(?<![\w\)\]])\'', String, 'string'),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'.', Text),
],
'string': [
(r"[^']*'", String, '#pop'),
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
Text.Whitespace, Name.Function, Punctuation, Text,
Punctuation, Text.Whitespace), '#pop'),
],
}
def analyse_text(text):
if re.match('^\s*[%#]', text, re.M): #Comment
return 0.9
return 0.1
class ScilabLexer(RegexLexer):
"""
For Scilab source code.
*New in Pygments 1.5.*
"""
name = 'Scilab'
aliases = ['scilab']
filenames = ['*.sci', '*.sce', '*.tst']
mimetypes = ['text/scilab']
tokens = {
'root': [
(r'//.*?$', Comment.Single),
(r'^\s*function', Keyword, 'deffunc'),
(r'(__FILE__|__LINE__|break|case|catch|classdef|continue|do|else|'
r'elseif|end|end_try_catch|end_unwind_protect|endclassdef|'
r'endevents|endfor|endfunction|endif|endmethods|endproperties|'
r'endswitch|endwhile|events|for|function|get|global|if|methods|'
r'otherwise|persistent|properties|return|set|static|switch|try|'
r'until|unwind_protect|unwind_protect_cleanup|while)\b', Keyword),
("(" + "|".join(_scilab_builtins.functions_kw +
_scilab_builtins.commands_kw +
_scilab_builtins.macros_kw
) + r')\b', Name.Builtin),
(r'(%s)\b' % "|".join(map(re.escape, _scilab_builtins.builtin_consts)),
Name.Constant),
# operators:
(r'-|==|~=|<|>|<=|>=|&&|&|~|\|\|?', Operator),
# operators requiring escape for re:
(r'\.\*|\*|\+|\.\^|\.\\|\.\/|\/|\\', Operator),
# punctuation:
(r'[\[\](){}@.,=:;]', Punctuation),
(r'"[^"]*"', String),
# quote can be transpose, instead of string:
# (not great, but handles common cases...)
(r'(?<=[\w\)\]])\'', Operator),
(r'(?<![\w\)\]])\'', String, 'string'),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
(r'.', Text),
],
'string': [
(r"[^']*'", String, '#pop'),
(r'.', String, '#pop'),
],
'deffunc': [
(r'(\s*)(?:(.+)(\s*)(=)(\s*))?(.+)(\()(.*)(\))(\s*)',
bygroups(Text.Whitespace, Text, Text.Whitespace, Punctuation,
Text.Whitespace, Name.Function, Punctuation, Text,
Punctuation, Text.Whitespace), '#pop'),
],
}
class NumPyLexer(PythonLexer):
"""
A Python lexer recognizing Numerical Python builtins.
*New in Pygments 0.10.*
"""
name = 'NumPy'
aliases = ['numpy']
# override the mimetypes to not inherit them from python
mimetypes = []
filenames = []
EXTRA_KEYWORDS = set([
'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
'set_numeric_ops', 'set_printoptions', 'set_string_function',
'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
])
def get_tokens_unprocessed(self, text):
for index, token, value in \
PythonLexer.get_tokens_unprocessed(self, text):
if token is Name and value in self.EXTRA_KEYWORDS:
yield index, Keyword.Pseudo, value
else:
yield index, token, value
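# Illustrative sketch (not part of the original module): names listed in
# EXTRA_KEYWORDS are re-tagged as Keyword.Pseudo, everything else falls
# through to the regular Python tokenization inherited from PythonLexer.
def _example_numpy_tokens():
    tokens = NumPyLexer().get_tokens('arange(10)\n')
    return [(tok, val) for tok, val in tokens if tok is Keyword.Pseudo]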
class RConsoleLexer(Lexer):
"""
For R console transcripts or R CMD BATCH output files.
"""
name = 'RConsole'
aliases = ['rconsole', 'rout']
filenames = ['*.Rout']
def get_tokens_unprocessed(self, text):
slexer = SLexer(**self.options)
current_code_block = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>') or line.startswith('+'):
# Colorize the prompt as such,
# then put rest of line into current_code_block
insertions.append((len(current_code_block),
[(0, Generic.Prompt, line[:2])]))
current_code_block += line[2:]
else:
# We have reached a non-prompt line!
# If we have stored prompt lines, need to process them first.
if current_code_block:
# Weave together the prompts and highlight code.
for item in do_insertions(insertions,
slexer.get_tokens_unprocessed(current_code_block)):
yield item
# Reset vars for next code block.
current_code_block = ''
insertions = []
# Now process the actual line itself, this is output from R.
yield match.start(), Generic.Output, line
# If we happen to end on a code block with nothing after it, need to
# process the last code block. This is neither elegant nor DRY so
# should be changed.
if current_code_block:
for item in do_insertions(insertions,
slexer.get_tokens_unprocessed(current_code_block)):
yield item
class SLexer(RegexLexer):
"""
For S, S-plus, and R source code.
*New in Pygments 0.10.*
"""
name = 'S'
aliases = ['splus', 's', 'r']
filenames = ['*.S', '*.R', '.Rhistory', '.Rprofile']
mimetypes = ['text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r',
'text/x-R', 'text/x-r-history', 'text/x-r-profile']
tokens = {
'comments': [
(r'#.*$', Comment.Single),
],
'valid_name': [
(r'[a-zA-Z][0-9a-zA-Z\._]*', Text),
# can begin with ., but not if that is followed by a digit
(r'\.[a-zA-Z_][0-9a-zA-Z\._]*', Text),
],
'punctuation': [
(r'\[{1,2}|\]{1,2}|\(|\)|;|,', Punctuation),
],
'keywords': [
(r'(if|else|for|while|repeat|in|next|break|return|switch|function)'
r'(?![0-9a-zA-Z\._])',
Keyword.Reserved)
],
'operators': [
(r'<<?-|->>?|-|==|<=|>=|<|>|&&?|!=|\|\|?|\?', Operator),
(r'\*|\+|\^|/|!|%[^%]*%|=|~|\$|@|:{1,3}', Operator)
],
'builtin_symbols': [
(r'(NULL|NA(_(integer|real|complex|character)_)?|'
r'Inf|TRUE|FALSE|NaN|\.\.(\.|[0-9]+))'
r'(?![0-9a-zA-Z\._])',
Keyword.Constant),
(r'(T|F)\b', Keyword.Variable),
],
'numbers': [
# hex number
(r'0[xX][a-fA-F0-9]+([pP][0-9]+)?[Li]?', Number.Hex),
# decimal number
(r'[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+)([eE][+-]?[0-9]+)?[Li]?',
Number),
],
'statements': [
include('comments'),
# whitespaces
(r'\s+', Text),
(r'`.*?`', String.Backtick),
(r'\'', String, 'string_squote'),
(r'\"', String, 'string_dquote'),
include('builtin_symbols'),
include('numbers'),
include('keywords'),
include('punctuation'),
include('operators'),
include('valid_name'),
],
'root': [
include('statements'),
# blocks:
(r'\{|\}', Punctuation),
#(r'\{', Punctuation, 'block'),
(r'.', Text),
],
#'block': [
# include('statements'),
# ('\{', Punctuation, '#push'),
# ('\}', Punctuation, '#pop')
#],
'string_squote': [
(r'([^\'\\]|\\.)*\'', String, '#pop'),
],
'string_dquote': [
(r'([^"\\]|\\.)*"', String, '#pop'),
],
}
def analyse_text(text):
return '<-' in text
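# Illustrative sketch (not part of the original module): analyse_text() feeds
# pygments.lexers.guess_lexer(), so the '<-' heuristic above makes R-style
# assignment snippets resolve to SLexer unless another lexer scores higher.
def _example_guess_r_source():
    from pygments.lexers import guess_lexer
    return guess_lexer('x <- c(1, 2, 3)\n')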
class BugsLexer(RegexLexer):
"""
Pygments Lexer for OpenBugs and WinBugs models.
*New in Pygments 1.6.*
"""
name = 'BUGS'
aliases = ['bugs', 'winbugs', 'openbugs']
filenames = ['*.bug']
_FUNCTIONS = [
# Scalar functions
'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'cloglog', 'cos', 'cosh', 'cumulative', 'cut', 'density', 'deviance',
'equals', 'expr', 'gammap', 'ilogit', 'icloglog', 'integral', 'log',
'logfact', 'loggam', 'logit', 'max', 'min', 'phi', 'post.p.value',
'pow', 'prior.p.value', 'probit', 'replicate.post', 'replicate.prior',
'round', 'sin', 'sinh', 'solution', 'sqrt', 'step', 'tan', 'tanh',
'trunc',
# Vector functions
'inprod', 'interp.lin', 'inverse', 'logdet', 'mean', 'eigen.vals',
'ode', 'prod', 'p.valueM', 'rank', 'ranked', 'replicate.postM',
'sd', 'sort', 'sum',
## Special
'D', 'I', 'F', 'T', 'C']
""" OpenBUGS built-in functions
From http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAII
This also includes
- T, C, I : Truncation and censoring.
``T`` and ``C`` are in OpenBUGS. ``I`` in WinBUGS.
- D : ODE
- F : Functional http://www.openbugs.info/Examples/Functionals.html
"""
_DISTRIBUTIONS = ['dbern', 'dbin', 'dcat', 'dnegbin', 'dpois',
'dhyper', 'dbeta', 'dchisqr', 'ddexp', 'dexp',
'dflat', 'dgamma', 'dgev', 'df', 'dggamma', 'dgpar',
'dloglik', 'dlnorm', 'dlogis', 'dnorm', 'dpar',
'dt', 'dunif', 'dweib', 'dmulti', 'ddirch', 'dmnorm',
'dmt', 'dwish']
""" OpenBUGS built-in distributions
Functions from
http://www.openbugs.info/Manuals/ModelSpecification.html#ContentsAI
"""
tokens = {
'whitespace' : [
(r"\s+", Text),
],
'comments' : [
# Comments
(r'#.*$', Comment.Single),
],
'root': [
# Comments
include('comments'),
include('whitespace'),
# Block start
(r'(?s)(model)(\s+)({)',
bygroups(Keyword.Namespace, Text, Punctuation)),
# Reserved Words
(r'(for|in)(?![0-9a-zA-Z\._])', Keyword.Reserved),
# Built-in Functions
(r'(%s)(?=\s*\()'
% r'|'.join(_FUNCTIONS + _DISTRIBUTIONS),
Name.Builtin),
# Regular variable names
(r'[A-Za-z][A-Za-z0-9_.]*', Name),
# Number Literals
(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
# Punctuation
(r'\[|\]|\(|\)|:|,|;', Punctuation),
# Assignment operators
# SLexer makes these tokens Operators.
(r'<-|~', Operator),
# Infix and prefix operators
(r'\+|-|\*|/', Operator),
# Block
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r"^\s*model\s*{", text, re.M):
return 0.7
else:
return 0.0
class JagsLexer(RegexLexer):
"""
Pygments Lexer for JAGS.
*New in Pygments 1.6.*
"""
name = 'JAGS'
aliases = ['jags']
filenames = ['*.jag', '*.bug']
## JAGS
_FUNCTIONS = [
'abs', 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctanh',
'cos', 'cosh', 'cloglog',
'equals', 'exp', 'icloglog', 'ifelse', 'ilogit', 'log', 'logfact',
'loggam', 'logit', 'phi', 'pow', 'probit', 'round', 'sin', 'sinh',
'sqrt', 'step', 'tan', 'tanh', 'trunc', 'inprod', 'interp.lin',
'logdet', 'max', 'mean', 'min', 'prod', 'sum', 'sd', 'inverse',
'rank', 'sort', 't', 'acos', 'acosh', 'asin', 'asinh', 'atan',
# Truncation/Censoring (should I include)
'T', 'I']
# Distributions with density, probability and quartile functions
_DISTRIBUTIONS = ['[dpq]%s' % x for x in
['bern', 'beta', 'dchiqsqr', 'ddexp', 'dexp',
'df', 'gamma', 'gen.gamma', 'logis', 'lnorm',
'negbin', 'nchisqr', 'norm', 'par', 'pois', 'weib']]
# Other distributions without density and probability
_OTHER_DISTRIBUTIONS = [
'dt', 'dunif', 'dbetabin', 'dbern', 'dbin', 'dcat', 'dhyper',
'ddirch', 'dmnorm', 'dwish', 'dmt', 'dmulti', 'dbinom', 'dchisq',
'dnbinom', 'dweibull', 'ddirich']
tokens = {
'whitespace' : [
(r"\s+", Text),
],
'names' : [
# Regular variable names
(r'[a-zA-Z][a-zA-Z0-9_.]*\b', Name),
],
'comments' : [
# do not use stateful comments
(r'(?s)/\*.*?\*/', Comment.Multiline),
# Comments
(r'#.*$', Comment.Single),
],
'root': [
# Comments
include('comments'),
include('whitespace'),
# Block start
(r'(?s)(model|data)(\s+)({)',
bygroups(Keyword.Namespace, Text, Punctuation)),
(r'var(?![0-9a-zA-Z\._])', Keyword.Declaration),
# Reserved Words
(r'(for|in)(?![0-9a-zA-Z\._])', Keyword.Reserved),
# Builtins
# Need to use lookahead because . is a valid char
(r'(%s)(?=\s*\()' % r'|'.join(_FUNCTIONS
+ _DISTRIBUTIONS
+ _OTHER_DISTRIBUTIONS),
Name.Builtin),
# Names
include('names'),
# Number Literals
(r'[-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?', Number),
(r'\[|\]|\(|\)|:|,|;', Punctuation),
# Assignment operators
(r'<-|~', Operator),
            # JAGS includes many more operators than OpenBUGS
            (r'\+|-|\*|\/|\|\||&&|[<>=]=?|\^|%.*?%', Operator),
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r'^\s*model\s*\{', text, re.M):
if re.search(r'^\s*data\s*\{', text, re.M):
return 0.9
elif re.search(r'^\s*var', text, re.M):
return 0.9
else:
return 0.3
else:
return 0
class StanLexer(RegexLexer):
"""
Pygments Lexer for Stan models.
*New in Pygments 1.6.*
"""
name = 'Stan'
aliases = ['stan']
filenames = ['*.stan']
_RESERVED = ('for', 'in', 'while', 'repeat', 'until', 'if',
'then', 'else', 'true', 'false', 'T')
_TYPES = ('int', 'real', 'vector', 'simplex', 'ordered', 'row_vector',
'matrix', 'corr_matrix', 'cov_matrix')
# STAN 1.0 Manual, Chapter 20
_CONSTANTS = ['pi', 'e', 'sqrt2', 'log2', 'log10', 'nan', 'infinity',
'epsilon', 'negative_epsilon']
_FUNCTIONS = ['abs', 'int_step', 'min', 'max',
'if_else', 'step',
'fabs', 'fdim',
'fmin', 'fmax',
'fmod',
'floor', 'ceil', 'round', 'trunc',
'sqrt', 'cbrt', 'square', 'exp', 'exp2', 'expm1',
'log', 'log2', 'log10', 'pow', 'logit', 'inv_logit',
'inv_cloglog', 'hypot', 'cos', 'sin', 'tan', 'acos',
'asin', 'atan', 'atan2', 'cosh', 'sinh', 'tanh',
'acosh', 'asinh', 'atanh', 'erf', 'erfc', 'Phi',
'log_loss', 'tgamma', 'lgamma', 'lmgamma', 'lbeta',
'binomial_coefficient_log',
'fma', 'multiply_log', 'log1p', 'log1m', 'log1p_exp',
'log_sum_exp',
'rows', 'cols',
'dot_product', 'prod', 'mean', 'variance', 'sd',
'diagonal', 'diag_matrix', 'col', 'row',
'softmax', 'trace', 'determinant', 'inverse', 'eigenvalue',
'eigenvalues_sym', 'cholesky', 'singular_values',
'(log)?normal_p', 'exponential_p', 'gamma_p', 'weibull_p']
_DISTRIBUTIONS = ['bernoulli', 'bernoulli_logit', 'binomial',
'beta_binomial', 'hypergeometric', 'categorical',
'ordered_logistic', 'neg_binomial', 'poisson',
'multinomial', 'normal', 'student_t',
'cauchy', 'double_exponential', 'logistic',
'lognormal', 'chi_square', 'inv_chi_square',
'scaled_inv_chi_square', 'exponential',
'gamma', 'inv_gamma', 'weibull', 'pareto',
'beta', 'uniform', 'dirichlet', 'multi_normal',
'multi_normal_cholesky', 'multi_student_t',
'wishart', 'inv_wishart', 'lkj_cov',
'lkj_corr_cholesky']
tokens = {
'whitespace' : [
(r"\s+", Text),
],
'comments' : [
(r'(?s)/\*.*?\*/', Comment.Multiline),
# Comments
(r'(//|#).*$', Comment.Single),
],
'root': [
# Comments
include('comments'),
# block start
include('whitespace'),
# Block start
(r'(?s)(%s)(\s*)({)' %
r'|'.join(('data', r'transformed\s+?data',
'parameters', r'transformed\s+parameters',
'model', r'generated\s+quantities')),
bygroups(Keyword.Namespace, Text, Punctuation)),
# Reserved Words
(r'(%s)\b' % r'|'.join(_RESERVED), Keyword.Reserved),
# Data types
(r'(%s)\b' % r'|'.join(_TYPES), Keyword.Type),
# Punctuation
(r"[;:,\[\]()<>]", Punctuation),
# Builtin
(r'(%s)(?=\s*\()'
% r'|'.join(_FUNCTIONS
+ _DISTRIBUTIONS
+ ['%s_log' % x for x in _DISTRIBUTIONS]),
Name.Builtin),
(r'(%s)(?=\s*\()'
% r'|'.join(_CONSTANTS),
Keyword.Constant),
# Special names ending in __, like lp__
(r'[A-Za-z][A-Za-z0-9_]*__\b', Name.Builtin.Pseudo),
# Regular variable names
(r'[A-Za-z][A-Za-z0-9_]*\b', Name),
# Real Literals
(r'-?[0-9]+(\.[0-9]+)?[eE]-?[0-9]+', Number.Float),
(r'-?[0-9]*\.[0-9]*', Number.Float),
# Integer Literals
(r'-?[0-9]+', Number.Integer),
# Assignment operators
# SLexer makes these tokens Operators.
(r'<-|~', Operator),
# Infix and prefix operators (and = )
(r"\+|-|\.?\*|\.?/|\\|'|=", Operator),
# Block delimiters
(r'[{}]', Punctuation),
]
}
def analyse_text(text):
if re.search(r'^\s*parameters\s*\{', text, re.M):
return 1.0
else:
return 0.0
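# Illustrative sketch (not part of the original module): tokenizing a minimal
# Stan program with StanLexer; block names, types and the sampling operator
# are matched by the rules defined above.
def _example_stan_tokens():
    model = 'parameters {\n  real mu;\n}\nmodel {\n  mu ~ normal(0, 1);\n}\n'
    return list(StanLexer().get_tokens(model))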
|
|
# Copyright 2016 EMC Corporation
import time
from vmax_smis_base import VmaxSmisBase, get_ecom_int
THINPROVISIONINGCOMPOSITE = 32768
THINPROVISIONING = 5
class VmaxSmisDevices(object):
def __init__(self, **kwargs):
self.devices_refresh = False
self.devices = None
self.interval = 180
self.refresh_time = 0
for attr in kwargs.keys():
setattr(self, attr, kwargs[attr])
if not hasattr(self, 'smis_base'):
self.smis_base = VmaxSmisBase(**kwargs)
def _reset(self):
current_time = time.time()
if (current_time > self.refresh_time) or ((current_time + self.interval) < self.refresh_time):
self.refresh_time = current_time + self.interval
self.devices_refresh = True
    def _load_all_devices(self):
        self._reset()  # expire the cached device list once self.interval seconds have passed
        if self.devices_refresh or self.devices is None:
self.devices = self.smis_base.list_storage_volumes(property_list=['ElementName', 'SystemName',
'DeviceID', 'SpaceConsumed',
'ConsumableBlocks', 'BlockSize',
'EMCIsMapped', 'IsComposite',
'Caption', 'Usage'])
self.devices_refresh = False
return self.devices
def get_volume_instance(self, system_name, device_id):
for volume in self._load_all_devices():
if volume['SystemName'] == system_name and volume['DeviceID'] == device_id:
break
else:
raise ReferenceError('%s - %s: volume not found' % (system_name, device_id))
return volume
def get_volume_instance_name(self, system_name, device_id):
volume_instance = self.get_volume_instance(system_name, device_id)
return volume_instance.path
def get_volume_instance_names(self, system_name, device_ids):
volumes = []
for dev in device_ids:
volumes.append(self.get_volume_instance_name(system_name, dev))
return volumes
def get_volume_by_name(self, system_name, volume_name):
for volume in self._load_all_devices():
if volume['SystemName'] == system_name and volume['ElementName'] == volume_name:
break
else:
raise ReferenceError('%s - %s: volume not found' % (system_name, volume_name))
return volume['DeviceID']
def list_all_devices(self, system_name):
devices = []
for volume in self._load_all_devices():
if volume['SystemName'] == system_name:
devices.append(volume['DeviceID'])
return devices
def list_all_devices_by_name(self, system_name):
device_names = []
for volume in self._load_all_devices():
if volume['SystemName'] == system_name:
device_names.append(volume['ElementName'])
return device_names
def get_space_consumed(self, system_name, device_id):
volume = self.get_volume_instance(system_name, device_id)
return volume['SpaceConsumed']
def get_volume_name(self, system_name, device_id):
volume = self.get_volume_instance(system_name, device_id)
return volume['ElementName']
def rename_volume(self, volume_instance, new_name):
volume_instance['ElementName'] = unicode(new_name)
self.smis_base.modify_instance(volume_instance, property_list=['ElementName'])
def get_extended_volume(self, system_name, device_id, property_list=None):
volume = self.get_volume_instance(system_name, device_id)
return self.smis_base.get_instance(volume.path, property_list=property_list)
def get_volume_size(self, system_name, device_id):
volume = self.get_volume_instance(system_name, device_id)
        num_blocks = volume['ConsumableBlocks']
        block_size = volume['BlockSize']
return num_blocks * block_size
def get_volume_properties(self, system_name, device_id, property_list=None):
extended_volume = self.get_extended_volume(system_name, device_id, property_list=property_list)
properties = {}
properties_list = extended_volume.properties.items()
for p in properties_list:
if property_list is None or len(property_list) == 0:
cim_properties = p[1]
properties[p[0]] = cim_properties.value
else:
for req in property_list:
if req == p[0]:
cim_properties = p[1]
properties[p[0]] = cim_properties.value
break
return properties
def _list_pool_instance_names(self, system_name):
system_instance_name = self.smis_base.find_storage_system(system_name)
if self.smis_base.is_array_v3(system_name):
pool_instance_names = self.smis_base.find_srp_storage_pool(system_instance_name)
else:
pool_instance_names = self.smis_base.find_virtual_provisioning_pool(system_instance_name)
return pool_instance_names
def get_pool_instance(self, system_name, pool_instance_id):
pool_instance_names = self._list_pool_instance_names(system_name)
for name in pool_instance_names:
if name['InstanceID'] == pool_instance_id:
break
else:
raise ReferenceError('%s: pool instance not found' % pool_instance_id)
return name
def get_pool_instance_name(self, system_name, pool_instance_id):
name = self.get_pool_instance(system_name, pool_instance_id)
return name.path
def list_storage_pools(self, system_name):
pool_instance_names = self._list_pool_instance_names(system_name)
pool_instance_ids = []
for name in pool_instance_names:
pool_instance_ids.append(name['InstanceID'])
return pool_instance_ids
def get_storage_group(self, system_name, device_id):
volume = self.get_volume_instance(system_name, device_id)
instances = self.smis_base.list_storage_groups_from_volume(volume.path)
groups = []
for i in instances:
groups.append(i['InstanceID'])
return groups
def get_pool_name(self, system_name, device_id):
volume = self.get_volume_instance(system_name, device_id)
if self.smis_base.is_array_v3(system_name):
storage_pools = self.smis_base.find_srp_storage_pool(volume.path)
else:
storage_pools = self.smis_base.find_virtual_provisioning_pool(volume.path)
pool = None
if storage_pools is not None and len(storage_pools) == 1:
pool = storage_pools[0]['PoolID']
return pool
def create_volumes(self, system_name, volume_name, pool_instance_id, volume_size, count=1):
pool_instance_name = self.get_pool_instance_name(system_name, pool_instance_id)
rc, job = self.smis_base.invoke_storage_method('CreateOrModifyElementFromStoragePool', system_name,
ElementName=volume_name, InPool=pool_instance_name,
ElementType=get_ecom_int(THINPROVISIONING, '16'),
Size=get_ecom_int(volume_size, '64'),
EMCNumberOfDevices=get_ecom_int(count, '32'))
if rc != 0:
rc, errordesc = self.smis_base.wait_for_job_complete(job['job'])
if rc != 0:
exception_message = "Error Create Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." \
% {'volumeName': volume_name,
'rc': rc,
'error': errordesc}
raise RuntimeError(exception_message)
inst_names = self.smis_base.associators(job['job'], result_class='EMC_StorageVolume')
self.devices_refresh = True
device_ids = []
for inst_name in inst_names:
device_ids.append(inst_name['DeviceID'])
return device_ids
def destroy_volumes(self, system_name, device_ids):
elements = []
for device_id in device_ids:
volume_instance = self.get_volume_instance(system_name, device_id)
elements.append(volume_instance.path)
rc, job = self.smis_base.invoke_storage_method('ReturnElementsToStoragePool', system_name,
TheElements=elements)
if rc != 0:
rc, errordesc = self.smis_base.wait_for_job_complete(job['job'])
if rc != 0:
exception_message = "Error Delete Volume: %(volumeName)s. Return code: %(rc)lu. Error: %(error)s." \
% {'volumeName': str(device_ids),
'rc': rc,
'error': errordesc}
raise RuntimeError(exception_message)
return rc
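# Illustrative sketch only: the connection keyword arguments below (host, port,
# user, passwd) are hypothetical placeholders, since whatever VmaxSmisBase
# accepts in your environment applies, and a live SMI-S provider is required.
# The demo is guarded behind __main__ so it never runs on import.
if __name__ == '__main__':
    devices = VmaxSmisDevices(host='smis-provider.example.com', port=5989,
                              user='admin', passwd='secret')
    system_name = 'SYMMETRIX-+-000000000001'  # hypothetical array name
    print(devices.list_all_devices(system_name))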
|
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import os
import sys
import app.log
import app.regex
_todo = r"TODO\([\S@.]+\)"
__common_keywords = ["break", "continue", "else", "for", "if", "return", "while"]
__c_keywords = __common_keywords + [
"case",
"const",
"default",
"do",
"enum",
"goto",
"sizeof",
"static",
"struct",
"switch",
"typedef",
]
__linux_commands = [
"ag",
"basename",
"bash",
"cd",
"chmod",
"cp",
"dircolors",
"dirname",
"echo",
"egrep",
"find",
"grep",
"inotify",
"inotifywait",
"ixoff",
"ixon",
"lesspipe",
"ln",
"ls",
"mkdir",
"read",
"rm",
"rmdir",
"rxvt",
"sed",
"sh",
"shell",
"sleep",
"ssh",
"tput",
"uname",
"wc",
"which",
"xargs",
]
if sys.version_info[0] == 2:
# The Python2 re limits the number of named groups. Reduce the keywords
# recognized.
__cpp_keywords = [
"auto",
"break",
"case",
"catch",
"class",
"const",
"constexpr",
"continue",
"default",
"delete",
"do",
"else",
"enum",
"export",
"false",
"for",
"friend",
"if",
"inline",
"mutable",
"namespace",
"new",
"noexcept",
"nullptr",
"override",
"private",
"protected",
"public",
"return",
"sizeof",
"static",
"struct",
"switch",
"template",
"this",
"throw",
"true",
"typedef",
"typename",
"virtual",
"while",
]
__c_primitive_types = [
"bool",
"char",
"double",
"float",
"int",
"int8_t",
"int16_t",
"int32_t",
"int64_t",
"int_max_t",
"int8_t",
"int16_t",
"int32_t",
"int64_t",
"intptr_t",
"ptrdiff_t",
"size_t",
"long",
"signed",
"short",
"uint8_t",
"uint16_t",
"uint32_t",
"uint_max_t",
"uintptr_t",
"unsigned",
"void",
"wchar_t",
]
else:
__cpp_keywords = [
"alignas",
"alignof",
"and",
"and_eq",
"asm",
"audit",
"auto",
"axiom",
"bitand",
"bitor",
"break",
"case",
"catch",
"class",
"compl",
"concept",
"const",
"const_cast",
"consteval",
"constexpr",
"continue",
"decltype",
"default",
"delete",
"do",
"dynamic_cast",
"else",
"enum",
"explicit",
"export",
"extern",
"false",
"final",
"for",
"friend",
"goto",
"if",
"inline",
"mutable",
"namespace",
"new",
"noexcept",
"not",
"not_eq",
"nullptr",
"operator",
"or",
"or_eq",
"override",
"private",
"protected",
"public",
"register",
"reinterpret_cast",
"return",
"sizeof",
"static",
"static_assert",
"static_cast",
"struct",
"switch",
"template",
"this",
"thread_local",
"throw",
"true",
"typedef",
"typename",
"virtual",
"volatile",
"while",
"xor",
"xor_eq",
]
__c_primitive_types = [
"bool",
"char",
"double",
"float",
"int",
"int8_t",
"int16_t",
"int32_t",
"int64_t",
"int_fast8_t",
"int_fast16_t",
"int_fast32_t",
"int_fast64_t",
"int_least8_t",
"int_least16_t",
"int_least32_t",
"int_least64_t",
"int_max_t",
"int8_t",
"int16_t",
"int32_t",
"int64_t",
"intptr_t",
"ptrdiff_t",
"size_t",
"long",
"signed",
"short",
"uint8_t",
"uint16_t",
"uint32_t",
"uint64_t",
"uint_fast8_t",
"uint_fast16_t",
"uint_fast32_t",
"uint_fast64_t",
"uint_least8_t",
"uint_least16_t",
"uint_least32_t",
"uint_least64_t",
"uint_max_t",
"uintptr_t",
"unsigned",
"void",
"wchar_t",
]
__chrome_extension = r"""\b[a-z]{32}\b"""
__sha_1 = r"""\b[a-z0-9]{40}\b"""
__special_string_escapes = [
r"\\\\",
r"\\b",
r"\\f",
r"\\n",
r"\\r",
r"\\t",
r"\\v",
r"\\0[0-7]{0,3}",
__chrome_extension,
__sha_1,
]
color8 = {
"_pre_selection": 1,
"bracket": 1,
"c": 0,
"c_path_bracketed_file": 3,
"c_path_quoted_file": 3,
"c_preprocessor": 1,
"c_preprocessor_include": 1,
"c_raw_string1": 3,
"c_raw_string2": 3,
"c_string1": 3,
"c_string2": 3,
"context_menu": 1,
"cpp_block_comment": 2,
"cpp_line_comment": 2,
"cpp_string_literal": 2,
"current_line": 1,
"dart_nesting_block_comment": 2,
"debug_window": 1,
"default": 0,
"doc_block_comment": 3,
"error": 7,
"found_find": 1,
"highlight": 3,
"html_block_comment": 2,
"html_element": 1,
"html_element_end": 1,
"js_string": 3,
"keyword": 1,
"line_number": 7,
"line_number_current": 6,
"line_overflow": 7,
"logo": 7,
"matching_bracket": 1,
"matching_find": 1,
"md_code": 2,
"md_heading": 3,
"md_link": 2,
"message_line": 3,
"misspelling": 3,
"number": 1,
"outside_document": 7,
"popup_window": 0,
"pound_comment": 3,
"py_import": 1,
"py_import_file": 2,
"py_raw_string1": 2,
"py_raw_string2": 2,
"py_string1": 2,
"py_string2": 2,
"quoted_string2": 2,
"regex_string": 2,
"right_column": 6,
"rs_block_comment": 2,
"rs_byte_string1": 3,
"rs_byte_string2": 3,
"rs_label_or_char": 3,
"rs_raw_string": 3,
"selected": 5,
"special": 1,
"status_line": 7,
"status_line_error": 7,
"text": 0,
"top_info": 7,
"trailing_space": 1,
"type": 1,
}
for i in color8.values():
assert 0 <= i < 8, i
commentColor16Index = 2
defaultColor16Index = 1
foundColor16Index = 3
keywordsColor16Index = 2
pathColor16Index = 6
selectedColor16Index = 4 # Active find is a selection.
specialsColor16Index = 5
stringColor16Index = 6
outsideOfBufferColor16Index = 7
borderColor16Index = 8
borderHighlightColor16Index = 9
color16 = {
"_pre_selection": stringColor16Index,
"bracket": 6,
"c": defaultColor16Index,
"c_path_bracketed_file": pathColor16Index,
"c_path_quoted_file": pathColor16Index,
"c_preprocessor": 1,
"c_preprocessor_include": specialsColor16Index,
"c_raw_string1": stringColor16Index,
"c_raw_string2": stringColor16Index,
"c_string1": stringColor16Index,
"c_string2": stringColor16Index,
"context_menu": 15,
"cpp_block_comment": commentColor16Index,
"cpp_line_comment": commentColor16Index,
"cpp_string_literal": stringColor16Index,
"current_line": 15,
"dart_nesting_block_comment": commentColor16Index,
"debug_window": defaultColor16Index,
"default": defaultColor16Index,
"doc_block_comment": commentColor16Index,
"error": 9,
"found_find": foundColor16Index,
"highlight": 15,
"html_block_comment": commentColor16Index,
"html_element": keywordsColor16Index,
"html_element_end": keywordsColor16Index,
"js_string": stringColor16Index,
"keyword": keywordsColor16Index,
"line_number": borderColor16Index,
"line_number_current": borderHighlightColor16Index,
"line_overflow": 15,
"logo": borderColor16Index,
"matching_bracket": 15,
"matching_find": 9,
"md_code": stringColor16Index,
"md_heading": specialsColor16Index,
"md_link": stringColor16Index,
"message_line": borderColor16Index,
"misspelling": 9,
"number": 2,
"outside_document": outsideOfBufferColor16Index,
"popup_window": borderColor16Index,
"pound_comment": commentColor16Index,
"py_import": keywordsColor16Index,
"py_import_file": stringColor16Index,
"py_raw_string1": stringColor16Index,
"py_raw_string2": stringColor16Index,
"py_string1": stringColor16Index,
"py_string2": stringColor16Index,
"quoted_string2": stringColor16Index,
"regex_string": stringColor16Index,
"right_column": outsideOfBufferColor16Index,
"rs_block_comment": commentColor16Index,
"rs_byte_string1": stringColor16Index,
"rs_byte_string2": stringColor16Index,
"rs_label_or_char": stringColor16Index,
"rs_raw_string": stringColor16Index,
"selected": selectedColor16Index,
"special": specialsColor16Index,
"status_line": borderColor16Index,
"status_line_error": borderHighlightColor16Index,
"text": defaultColor16Index,
"top_info": borderColor16Index,
"trailing_space": 15,
"type": keywordsColor16Index,
}
for i in color16.values():
assert 0 <= i < 16, i
commentColorIndex = 2
defaultColorIndex = 18
foundColorIndex = 32
keywordsColorIndex = 21
pathColorIndex = 30
selectedColor = 64 # Active find is a selection.
specialsColorIndex = 20
stringColorIndex = 5
outsideOfBufferColorIndex = 211
color256 = {
"_pre_selection": stringColorIndex,
"bracket": 6,
"c": defaultColorIndex,
"c_path_bracketed_file": pathColorIndex,
"c_path_quoted_file": pathColorIndex,
"c_preprocessor": 1,
"c_preprocessor_include": specialsColorIndex,
"c_raw_string1": stringColorIndex,
"c_raw_string2": stringColorIndex,
"c_string1": stringColorIndex,
"c_string2": stringColorIndex,
"context_menu": 201,
"cpp_block_comment": commentColorIndex,
"cpp_line_comment": commentColorIndex,
"cpp_string_literal": stringColorIndex,
"current_line": 180,
"dart_nesting_block_comment": commentColorIndex,
"debug_window": defaultColorIndex,
"default": defaultColorIndex,
"doc_block_comment": commentColorIndex,
"error": 9,
"found_find": foundColorIndex,
"highlight": 96,
"html_block_comment": commentColorIndex,
"html_element": keywordsColorIndex,
"html_element_end": keywordsColorIndex,
"js_string": stringColorIndex,
"keyword": keywordsColorIndex,
"line_number": 168,
"line_number_current": 146,
"line_overflow": 105,
"logo": 168,
"matching_bracket": 201,
"matching_find": 9,
"md_code": stringColorIndex,
"md_heading": specialsColorIndex,
"md_link": stringColorIndex,
"message_line": 3,
"misspelling": 9,
"number": 31,
"outside_document": outsideOfBufferColorIndex,
"popup_window": 117,
"pound_comment": commentColorIndex,
"py_import": keywordsColorIndex,
"py_import_file": stringColorIndex,
"py_raw_string1": stringColorIndex,
"py_raw_string2": stringColorIndex,
"py_string1": stringColorIndex,
"py_string2": stringColorIndex,
"quoted_string2": stringColorIndex,
"regex_string": stringColorIndex,
"right_column": outsideOfBufferColorIndex,
"rs_block_comment": commentColorIndex,
"rs_byte_string1": stringColorIndex,
"rs_byte_string2": stringColorIndex,
"rs_label_or_char": stringColorIndex,
"rs_raw_string": stringColorIndex,
"selected": selectedColor,
"special": specialsColorIndex,
"status_line": 168,
"status_line_error": 161,
"text": defaultColorIndex,
"top_info": 168,
"trailing_space": 180,
"type": keywordsColorIndex,
}
for i in color256.values():
assert 0 <= i < 256, i
# Please keep these color dictionaries in sync.
assert color8.keys() == color256.keys()
assert color16.keys() == color256.keys()
# These prefs are not fully working.
prefs = {
"color": {},
"devTest": {},
# TODO(dschuyler): provide a UI to enable selected dictionaries.
u"dictionaries": {
# The base dictionaries are loaded at startup. They are active for all
# documents.
"base": [
"acronyms",
"coding",
"contractions",
"cpp",
"css",
"en-abbreviations",
"en-gb",
"en-misc",
"en-us",
"html",
"name",
"user",
],
# If the expanded path to the current document contains |key|, the listed
# dictionaries are applied.
"path_match": {
u"/chromium/": [u"chromium"],
u"/fuchsia/": [u"fuchsia"],
},
},
"editor": {
# Whether to automatically indent after pressing carriage return.
"autoIndent": True,
# E.g. When key "(" is pressed, "()" is typed.
"autoInsertClosingCharacter": False,
# When opening a path that starts with "//", the value is used to
# replace the first slash in a double slash prefix.
"baseDirEnv": u"/", # u"${FUCHSIA_DIR}",
# Scroll the window to keep the cursor on screen.
"captiveCursor": False,
"colorScheme": "default",
# Show hidden files in file list.
"filesShowDotFiles": True,
# Show the size on disk for files in the file list.
"filesShowSizes": True,
"filesShowModifiedDates": True,
"filesSortAscendingByName": True,
"filesSortAscendingBySize": None,
"filesSortAscendingByModifiedDate": None,
"findDotAll": False,
"findIgnoreCase": True,
"findLocale": False,
"findMultiLine": False,
"findUnicode": True,
"findUseRegex": True,
"findVerbose": False,
"findWholeWord": False,
# An example indentation. If the grammar has its own indent, it can
# override this value.
"indentation": " ",
"lineLimitIndicator": 80,
# When the mouse wheel is moved, which way should the window scroll.
"naturalScrollDirection": True,
"onSaveStripTrailingSpaces": True,
# Ratio of columns: 0 left, 1.0 right.
"optimalCursorCol": 0.98,
# Ratio of rows: 0 top, 0.5 middle, 1.0 bottom.
"optimalCursorRow": 0.28,
"palette": "default",
"palette8": "default8",
"palette16": "default16",
"palette256": "default256",
"predictionShowOpenFiles": True,
"predictionShowAlternateFiles": True,
"predictionShowRecentFiles": True,
"predictionSortAscendingByPrediction": True,
"predictionSortAscendingByType": None,
"predictionSortAscendingByName": None,
"predictionSortAscendingByStatus": None,
"saveUndo": True,
"showLineNumbers": True,
"showStatusLine": True,
"showTopInfo": True,
# Convert/expand tabs to spaces (see tabSize).
"tabToSpaces": True,
# When expanding tabs to spaces, how many spaces to use. This is not
# used for indentation, see "indentation" or grammar "indent".
"tabSize": 8,
# Use a background thread to process changes and parse grammars.
"useBgThread": True,
},
"fileType": {
"bash": {
"ext": [".bash", ".sh"],
"grammar": "bash",
"tabToSpaces": True,
},
"bazel": {
"ext": [],
"grammar": "bazel",
"name": ["BUILD"],
"tabToSpaces": True,
},
"binary": {
"ext": [
".exe",
".gz",
".gzip",
".jar",
".jpg",
".jpeg",
".o",
".obj",
".png",
".pyc",
".pyo",
".tgz",
".tiff",
".zip",
],
"grammar": "binary",
"tabToSpaces": False,
},
"c": {
"ext": [".c"],
"grammar": "c",
"tabToSpaces": True,
},
"cpp": {
"ext": [
".cc",
".cpp",
".cxx",
".c++",
".hpp",
".hxx",
".h++",
".inc",
".h", # Hmm, some source uses .h for cpp headers.
],
"grammar": "cpp",
"tabToSpaces": True,
},
"css": {
"ext": [".css", "_css.html"],
"grammar": "css",
"tabToSpaces": True,
},
"dart": {
"ext": [
".dart",
],
"grammar": "dart",
"tabToSpaces": True,
},
"fidl": {
"ext": [".fidl"],
"grammar": "fidl",
"tabToSpaces": True,
},
"gn": {
"ext": [".gn", ".gni"],
"grammar": "gn",
"tabToSpaces": True,
},
"golang": {
"ext": [
".go",
],
"grammar": "golang",
"tabToSpaces": True,
},
"grd": {
"ext": [".grd", ".grdp"],
"grammar": "grd",
"tabToSpaces": True,
},
"html": {
"ext": [".htm", ".html"],
"grammar": "html",
"tabToSpaces": True,
},
"java": {
"ext": [
".java",
],
"grammar": "java",
"tabToSpaces": True,
},
"js": {
"ext": [".json", ".js"],
"grammar": "js",
"tabToSpaces": True,
},
"json5": {
"ext": [".json5"],
"grammar": "json5",
"tabToSpaces": True,
},
"make": {
"ext": [],
"grammar": "make",
"name": ["Makefile"],
"tabToSpaces": False,
},
"md": {
"ext": [".md"],
"grammar": "md",
"tabToSpaces": True,
},
"proto": {
"ext": [".proto"],
"grammar": "proto",
"tabToSpaces": True,
},
"python": {
"ext": [".py"],
"grammar": "py",
"tabToSpaces": True,
},
"rust": {
"ext": [".rs"],
"grammar": "rs",
"tabToSpaces": True,
},
"text": {
"ext": [".txt"],
"grammar": "text",
"tabToSpaces": False,
},
"words": {
"ext": [".words", ""],
"grammar": "words",
"tabToSpaces": True,
},
},
"grammar": {
# A grammar is
# "grammar_name": {
# "begin": None or regex,
# "continuation": None or string,
# Prefix used when continuing onto another line,
# "end": None or regex; a value of None means that the "begin" regex
# contains the entire pattern (a leaf grammar),
# "end_key": None or regex to determine dynamic end tag. For "here
# documents" and c++ string literals.
# "error": None or list of string.
# "escaped": None or regex,
# "indent": None or string,
# "next": other grammars that may follow this grammar without nesting
# within it. (Contrast with "contains").
# "numbers": None or list of string,
# "keywords": None or list of string. Matches whole words only (wraps
# values in \b).
# "single_line": Boolean, Whether entire grammar must be on a single
# line,
# "special": None or list of string.
# "tabToSpaces": Boolean, Convert/expand tabs to spaces (see tabSize).
# "type": text or binary. default: text.
# "contains": other grammars that may be contained within this
# grammar.
# }
# The entries for "error", "keywords", and "special" are very similar.
# Other than "keywords" being wrapped in \b markers, the difference
# between them is just how they are drawn (color and style). A hypothetical
# entry combining these fields is sketched below.
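# For illustration only (this entry is hypothetical and is not used by the
# editor): a grammar combining the fields described above might look like
#   "ini": {
#       "indent": "    ",
#       "keywords": ["true", "false"],
#       "special": [r"\[[^]]+\]"],
#       "contains": ["pound_comment", "quoted_string2"],
#   },
# where "contains" names other grammars defined in this same dictionary.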
"_pre": {
"contains": ["_pre_selection"],
"spelling": False,
},
"_pre_selection": {
"begin": r"-->",
"end": r"<--",
"spelling": False,
},
# Bash shell.
"bash": {
"indent": " ",
"keywords": [
"break",
"case",
"continue",
"declare",
"do",
"done",
"echo",
"elif",
"else",
"esac",
"exit",
"fi",
"if",
"for",
"return",
"switch",
"then",
"while",
],
# Not really types.
"types": __linux_commands,
"contains": ["c_string1", "c_string2", "pound_comment"],
},
# Bazel build script. See https://www.bazel.build/
"bazel": {
"indent": " ",
"keywords": ["testonly"],
"types": "",
"contains": ["py_string1", "py_string2", "pound_comment"],
},
"binary": {
"spelling": False,
"type": "binary",
},
# C language.
"c": {
"indent": " ",
"keywords": __c_keywords,
"types": __c_primitive_types,
"contains": [
"cpp_block_comment",
"cpp_line_comment",
"c_preprocessor",
"c_string1",
"c_string2",
],
},
# C++ language.
"cpp": {
"indent": " ",
"keywords": __cpp_keywords,
"namespaces": [
"::",
"std::",
],
"types": __c_primitive_types
+ [
"char8_t",
"char16_t",
"char32_t",
],
"contains": [
"cpp_block_comment",
"cpp_line_comment",
"c_preprocessor",
"cpp_string_literal",
"c_string1",
"c_string2",
],
},
"cpp_block_comment": {
"begin": r"/\*",
"continuation": " * ",
"end": r"\*/",
"indent": " ",
"keywords": [],
"special": [
r"\bNOTE:",
_todo,
__chrome_extension,
__sha_1,
],
},
"cpp_line_comment": {
"begin": "//",
"continuation": "// ",
"end": r"(?<!\\)\n",
"indent": " ",
"keywords": [],
"special": [
r"\bNOTE:",
_todo,
__chrome_extension,
__sha_1,
],
},
"c_preprocessor": {
"begin": r"^#",
"end": r"(?<!\\)\n",
"indent": " ",
"special": [
r"\bdefine\b",
r"\bdefined\b",
r"\belif\b",
r"\belif\b",
r"\belse\b",
r"\bendif\b",
r"\bif\b",
r"\bifdef\b",
r"\bifndef\b",
r"\binclude\b",
r"\bpragma\b",
r"\bundef\b",
],
"next": [
"c_preprocessor_include",
],
},
"c_preprocessor_include": {
"begin": r"\binclude",
"end": r"(?<!\\)\n",
"contains": ["c_path_quoted_file", "c_path_bracketed_file"],
},
"c_raw_string1": {
"begin": "[uU]?[rR]'",
"end": "'",
"escaped": r"\\'",
"indent": " ",
"single_line": True,
"special": __special_string_escapes + [r"\\'"],
},
"c_raw_string2": {
"begin": '[uU]?[rR]"',
"end": '"',
"escaped": '\\\\"',
"indent": " ",
"single_line": True,
"special": __special_string_escapes + ['\\\\"'],
},
"cpp_string_literal": {
"begin": 'R"',
# TODO(dschuyler): backslash and whitespace are invalid in the
# |end_key|.
"end_key": """R\"([^(]*)\\(""",
"end": '\\)\\0"',
"single_line": False,
},
"c_string1": {
"begin": "'(?!'')",
"end": "'",
"escaped": r"\\'",
"indent": " ",
"special": __special_string_escapes + [r"\\'"],
"single_line": True,
},
"c_string2": {
"begin": '"(?!"")',
"end": '"',
"escaped": '\\\\"',
"indent": " ",
"special": __special_string_escapes
+ [
'\\\\"',
r"%:#?-?[0-9]*\.?[0-9]*z?\w",
],
"single_line": True,
},
"c_path_bracketed_file": {
# Paths in includes don't allow escapes.
"begin": """<[^>\\n]*>""",
"end": None, # Leaf grammar.
"link_type": "c<", # C system include file.
},
"c_path_quoted_file": {
# Paths in includes don't allow escapes.
"begin": '''"[^"\\n]*"''',
"end": None, # Leaf grammar.
"link_type": 'c"', # C non-system include file.
},
"carriage_return": {
"begin": "\\n",
"end": None, # Leaf grammar.
},
# Cascading Style Sheet.
"css": {
"begin": "<style",
"end": "</style>",
"indent": " ",
"keywords": [
"host",
"slotted",
],
"special": [r"#[\w-]+"],
"contains": ["cpp_block_comment", "css_block"],
},
"css_block": {
"begin": r"\\{",
"end": r"\\}",
"indent": " ",
"keywords": [
"background-color",
"color",
"display",
"font-family",
"font-size",
"height",
"max-height",
"min-height",
"width",
"max-width",
"min-width",
],
"special": [
r"@apply\b",
],
"contains": ["cpp_block_comment", "css_value"],
},
"css_value": {
"begin": ":",
"end": ";",
"errors": [
r"#(?:[^;]{1,2}|[^;]{5}|[^;]{7}|[^;]{9,})\b",
],
"indent": " ",
"keywords": [
"absolute",
"attr",
"block",
"border-box",
"calc",
"center",
"default",
"ease",
"hidden",
"inherit",
"left",
"none",
"px",
"rgb",
"rgba",
"right",
"rotate[XYZ]?",
"scale[XYZ]?",
"solid",
"transform",
"translate[XYZ]?",
"transparent",
"var",
],
"special": [
r"@apply\b",
r"\d+deg\b",
r"\d+em\b",
r"\d+px\b",
r"\d+rem\b",
r"#(?:[0-9a-fA-F]{8}|[0-9a-fA-F]{6}|[0-9a-fA-F]{3,4})",
],
"contains": [
"cpp_block_comment",
],
},
# Dart language.
"dart": {
"indent": " ",
"keywords": [
"abstract",
"as",
"assert",
"async",
"async",
"await",
"break",
"case",
"catch",
"class",
"const",
"continue",
"covariant",
"default",
"deferred",
"do",
"dynamic",
"else",
"enum",
"export",
"extends",
"external",
"factory",
"false",
"final",
"finally",
"for",
"get",
"if",
"implements",
"import",
"in",
"interface",
"is",
"library",
"mixin",
"new",
"null",
"operator",
"part",
"rethrow",
"return",
"set",
"static",
"super",
"switch",
"sync",
"this",
"throw",
"true",
"try",
"typedef",
"var",
"void",
"while",
"with",
"yield",
],
"types": [
"bool",
"Iterable",
"String",
"Map",
"Int64",
"Future",
"int",
"Timestamp",
],
"special": [
"@immutable",
"@override",
"@protected",
"@required",
],
"contains": [
# This list is carefully ordered. Don't sort it.
"py_string1",
"py_string2",
"py_raw_string1",
"py_raw_string2",
"c_raw_string1",
"c_raw_string2",
"c_string1",
"c_string2",
"cpp_line_comment",
"dart_nesting_block_comment",
],
},
"dart_nesting_block_comment": {
"begin": r"/\*",
"continuation": " * ",
"end": r"\*/",
"indent": " ",
"keywords": [],
"special": [
r"\bNOTE:",
_todo,
__chrome_extension,
__sha_1,
],
"contains": [
"dart_nesting_block_comment",
],
},
"doc_block_comment": {
"begin": r"/\*\*",
"continuation": " * ",
"end": r"\*/",
"indent": " ",
"keywords": [],
"special": [
r"@param\b",
r"@private\b",
r"@protected\b",
r"@type\b",
r"@typedef\b",
r"@return\b",
r"\bNOTE:",
_todo,
],
"types": ["Array", "boolean", "string", "Object"],
},
"error": {
"indent": " ",
"spelling": False,
},
"fidl": {
"indent": " ",
"keywords": [
"bits",
"compose",
"enum",
"error",
"library",
"protocol",
"struct",
"table",
"union",
"using",
],
"special": ["\[Discoverable\]"],
"types": [
"array",
"handle",
"int8",
"int16",
"int32",
"int64",
"string",
"uint8",
"uint16",
"uint32",
"uint64",
"vector",
],
"contains": [
"cpp_line_comment",
"c_string1",
"c_string2",
],
},
# Generate Ninja language.
"gn": {
"indent": " ",
"keywords": ["else", "false", "foreach", "if", "import", "true"],
"special": [],
"types": [],
"contains": [
"pound_comment",
"c_string1",
"c_string2",
],
},
# Go Language.
"golang": {
"indent": " ",
"keywords": [
"break",
"case",
"chan",
"const",
"continue",
"default",
"defer",
"else",
"fallthrough",
"for",
"func",
"go",
"goto",
"if",
"import",
"interface",
"map",
"nil",
"package",
"range",
"return",
"select",
"struct",
"switch",
"type",
"var",
],
"special": [
# r"(?<!\w)__.*?__(?!\w)",
],
"contains": [
"cpp_block_comment",
"cpp_line_comment",
"cpp_string_literal",
"c_string1",
"c_string2",
],
},
"grd": {
"keywords": ["flattenhtml", "allowexternalscript"],
},
"html": {
"begin": "<html>",
"end": app.regex.kNonMatchingRegex,
"errors": [
"</br>",
"</hr>",
"</img>",
"</input>",
],
"indent": " ",
"keywords": [
# "body", "button", "div", "head", "html", "href", "img",
# "input", "script", "select", "span", "style",
],
"special": [
r"&.{1,5}?;",
r'<if\s+expr="[^"]*[^>]*>',
"</if>",
],
"contains": [
"quoted_string1",
"quoted_string2",
"css",
"html_block_comment",
"js",
"html_element",
"html_element_end",
],
},
"html_block_comment": {
"begin": "<!--",
"end": "-->",
"indent": " ",
},
"html_element": {
"begin": r"<[\w-]+", # The "-" is used by Polymer.
"contains": [
"html_element_attribute",
],
"end": ">",
"special": [
r"\\w+",
],
},
"html_element_attribute": {
"begin": '\\??="',
"end": '"',
},
"html_element_end": {
"begin": r"</\\w+",
"end": ">",
},
"java": {
"indent": " ",
"keywords": __common_keywords
+ [
"case",
"class",
"default",
"do",
"false",
"interface",
"switch",
"this",
"true",
],
"contains": [
"c_string1",
"c_string2",
"cpp_block_comment",
"cpp_line_comment",
],
},
# JavaScript language.
"js": {
"begin": "<script",
"end": "</script>",
"indent": " ",
"keywords": [
"arguments",
"break",
"case",
"class",
"const",
"continue",
"default",
"delete",
"document",
"else",
"false",
"for",
"function",
"if",
"instanceof",
"let",
"of",
"return",
"static",
"switch",
"super",
"this",
"true",
"undefined",
"var",
"while",
"yield",
],
"special": [
"\bsetTimeout\b",
"\brequestCallback\b",
"\bconsole\b",
"\bwindow\b",
],
"contains": [
"c_string1",
"c_string2",
"doc_block_comment",
"cpp_block_comment",
"cpp_line_comment",
"regex_string",
"js_string",
],
},
"js_string": {
"begin": r"`",
"end": r"`",
"escaped": r"\\`",
"indent": " ",
"special": __special_string_escapes + [r"\\`", r"(?<!\\)\$\{[^}]*\}"],
"single_line": False,
},
"keyword": {
"indent": " ",
"spelling": False,
},
# JSON5 configuration data.
"json5": {
"indent": " ",
"keywords": [
],
"special": [
r"\w+:",
],
"contains": [
"c_string1",
"c_string2",
"doc_block_comment",
"cpp_block_comment",
"cpp_line_comment",
"regex_string",
"js_string",
],
},
# Makefile
"make": {
"indent": "\t",
"keywords": [
"ifeq",
"endif",
"ifneq",
"break",
"case",
"continue",
"do",
"done",
"echo",
"else",
"esac",
"exit",
"fi",
"if",
"for",
"return",
"switch",
"then",
"while",
],
# Not really types.
"types": __linux_commands,
"contains": ["c_string1", "c_string2", "pound_comment"],
},
# Markdown language.
"md": {
"indent": " ",
"keywords": [],
# "special": [ r"#.*" ],
# "special": [r"\[[^]]+\]\([^)]+\)"],
"contains": [
"md_link",
"md_code",
"md_heading",
# "quoted_string1", "quoted_string2"
],
},
"md_code": {
"begin": "`",
"end": "`",
"indent": " ",
},
"md_heading": {
"begin": "^#",
"continuation": "# ",
"end": r"\n",
"indent": " ",
"keywords": [],
"special": [
r"\bNOTE:",
_todo,
],
},
"md_link": {
"begin": "\[",
"end": "\]",
"escaped": r"\\\]",
"indent": " ",
},
"none": {
"spelling": False,
},
# Proto buffer language.
"proto": {
"indent": " ",
"keywords": __common_keywords
+ ["message", "option", "package", "returns", "rpc", "syntax"],
"namespaces": [],
"special": [
# r"(?<!\w)__.*?__(?!\w)",
],
"types": [
"bool",
"bytes",
"double",
"enum",
"float",
"int8",
"int16",
"int32",
"int64",
"optional",
"repeated",
"required",
"string",
"uint8",
"uint16",
"uint32",
"uint64",
],
"contains": [
# This list is carefully ordered. Don't sort it.
"c_string1",
"c_string2",
"cpp_line_comment",
],
},
# Python language.
"py": {
"indent": " ",
"keywords": __common_keywords
+ [
"and",
"as",
"assert",
"class",
"def",
"del",
"dict",
"elif",
"except",
"False",
"finally",
"from",
"global",
"import",
"in",
"is",
"len",
"list",
"None",
"not",
"or",
"pass",
"raise",
"range",
"self",
"True",
"try",
"tuple",
"until",
"with",
"yield",
],
"namespaces": [
"os\.",
"os\.path\.",
"sys\.",
"traceback\.",
"re\.",
],
"special": [
# r"(?<!\w)__.*?__(?!\w)",
],
"types": [
"Exception",
],
"contains": [
# This list is carefully ordered. Don't sort it.
"py_string1",
"py_string2",
"py_raw_string1",
"py_raw_string2",
"c_raw_string1",
"c_raw_string2",
"c_string1",
"c_string2",
"pound_comment",
"py_from",
"py_import",
],
},
"pound_comment": {
"begin": "#",
"continuation": "# ",
"end": r"\n",
"indent": " ",
"keywords": [],
"special": [
r"\bNOTE:",
_todo,
],
},
"py_from": {
"begin": "from",
"end": r"\n",
"contains": ["py_import_file"],
"next": ["py_import_after_from", "pound_comment"],
"spelling": False,
},
"py_import_after_from": {
"begin": "import",
"end": None,
},
"py_import": {
"begin": "import",
"end": r"\n",
"keywords": [
"as",
],
"contains": ["py_import_file"],
"next": ["pound_comment"],
"spelling": False,
},
"py_import_file": {
"begin": "[\.\w]+",
"end": None, # Leaf grammar.
"link_type": r"pi", # Python import
"spelling": False,
},
"py_raw_string1": {
"begin": "[uU]?[rR]'''",
"end": "'''",
"escaped": r"\\'",
"indent": " ",
# "special": ["\"\"?\"?$"],
},
"py_raw_string2": {
"begin": '[uU]?[rR]"""',
"end": '"""',
"escaped": '\\\\"',
"indent": " ",
# "special": ["\\\\\""],
},
"py_string1": {
"begin": "[uU]?'''",
"end": "'''",
"escaped": r"\\'",
# "indent": " ",
"single_line": False,
"special": __special_string_escapes + [r"\\'"],
},
"py_string2": {
"begin": '[uU]?"""',
"end": '"""',
"escaped": r'\\"',
# "indent": " ",
"single_line": False,
"special": __special_string_escapes + [r'\\"'],
},
"quoted_string1": {
# This is not a programming string, there are no escape chars.
"begin": "'",
"end": "'",
},
"quoted_string2": {
# This is not a programming string, there are no escape chars.
"begin": '"',
"end": '"',
},
"regex_string": {
"begin": r"(?<=[\n=:;([{,])(?:\s*)/(?![/*])",
"end": "/",
"escaped": r"\\.",
"indent": " ",
"special": __special_string_escapes + [r"\\/"],
"single_line": True,
},
# Rust language.
"rs": {
"indent": " ",
"keywords": [
"abstract",
"alignof",
"as",
"async",
"await",
"become",
"box",
"break",
"const",
"continue",
"crate",
"do",
"dyn",
"else",
"enum",
"extern",
"false",
"final",
"fn",
"for",
"if",
"impl",
"in",
"let",
"loop",
"macro",
"match",
"mod",
"move",
"mut",
"offsetof",
"override",
"priv",
"pub",
"pure",
"ref",
"return",
"Self",
"self",
"sizeof",
"static",
"struct",
"super",
"trait",
"true",
"type",
"typeof",
"try",
"unsafe",
"unsized",
"use",
"virtual",
"where",
"while",
"yield",
],
"special": [
"\\b\\w+!",
"\\bNone\\b",
"\\bOption\\b",
"\\bResult\\b",
"\\bSome\\b",
"\\bunimplemented!",
],
"types": [
"bool",
"char",
"i8",
"i16",
"i32",
"i64",
"isize",
"u8",
"u16",
"u32",
"u64",
"usize",
"array",
"slice",
"tuple",
],
"contains": [
"rs_block_comment",
"cpp_line_comment",
# Using "rs_label_or_char" rather than "c_string1".
"c_string2",
"rs_byte_string1",
"rs_byte_string2",
"rs_label_or_char",
"rs_raw_string",
],
},
# Rust block comments may be nested (unlike cpp_block_comments).
"rs_block_comment": {
"begin": r"/\*",
"continuation": " * ",
"end": r"\*/",
"indent": " ",
"keywords": [],
"special": [
r"\bNOTE:",
_todo,
__chrome_extension,
__sha_1,
],
"contains": [
"rs_block_comment",
],
},
"rs_byte_string1": {
"begin": "b'",
"end": "'",
},
"rs_byte_string2": {
"begin": 'b"',
"end": '"',
},
# Rust uses ' for both the start of a label and the start of a character
# literal.
"rs_label_or_char": {
"begin": "'\\w*'?",
"end": None,
"single_line": True,
},
"rs_raw_string": {
"begin": 'b?r#*"',
# TODO(dschuyler): backslash and whitespace are invalid in the
# |end_key|.
"end_key": """b?r(#*)\"""",
"end": '"\\0',
"single_line": False,
},
"special": {
"indent": " ",
"spelling": False,
},
"tabs": {
"indent": "",
"spelling": False,
},
"text": {
"special": [
__sha_1,
],
"contains": ["quoted_string1", "quoted_string2"],
},
"type": {
"indent": " ",
"spelling": False,
},
# Dictionary file for ci_edit.
"words": {
"contains": [
"pound_comment",
],
},
},
"palette": {
# Note: index 0 of each palette is not set; it remains the system
# entry.
"test": {
# Same foreground color in all 256 slots.
"foregroundIndexes": [18] * 256,
# Separate background color in every slot.
"backgroundIndexes": [i for i in range(0, 256)],
},
"dark": {
# This series repeats 8 times (32 * 8 = 256).
"foregroundIndexes": [
14,
202,
39,
39,
39,
39,
39,
39,
39,
39,
39,
39,
12,
13,
14,
15,
202,
14,
14,
202,
202,
202,
22,
23,
24,
25,
26,
27,
28,
29,
30,
57,
]
* 8,
# Each background color covers one 32-slot repeat of the foreground series (8 * 32 = 256).
"backgroundIndexes": [232] * 32
+ [229] * 32
+ [6] * 32
+ [221] * 32
+ [245] * 32
+ [244] * 32
+ [243] * 32
+ [225] * 32,
},
"default8": {
# With only 8 colors, make a custom pair for each slot.
# 0: black, 1: red, 2: green, 3: yellow, 4: blue, 5: pink, 6: cyan,
# 7: gray.
"foregroundIndexes": [1, 4, 2, 3, 4, 5, 6, 0],
"backgroundIndexes": [0, 6, 7, 7, 7, 7, 7, 7],
},
"default16": {
# With only 16 colors, make a custom pair for each slot.
# 0: black, 1: red, 2: green, 3: yellow, 4: blue, 5: pink, 6: cyan,
# 7: gray.
"foregroundIndexes": [0, 1, 2, 3, 4, 5, 6, 7, 8, 4, 2, 3, 4, 5, 6, 0],
"backgroundIndexes": [
0,
15,
15,
15,
15,
15,
15,
15,
7,
7,
7,
7,
7,
7,
7,
15,
],
},
"default256": {
# This series repeats 8 times (32 * 8 = 256).
"foregroundIndexes": [
18,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
94,
134,
0,
240,
138,
21,
22,
23,
24,
25,
26,
27,
28,
29,
30,
57,
]
* 8,
# Each background color covers one 32-slot repeat of the foreground series (8 * 32 = 256).
"backgroundIndexes": [231] * 32
+ [229] * 32
+ [14] * 32
+ [221] * 32
+ [255] * 32
+ [254] * 32
+ [253] * 32
+ [225] * 32,
},
},
"status": {
"showTips": False,
},
"userData": {
"homePath": os.path.expanduser("~/.ci_edit"),
"historyPath": os.path.join(os.path.expanduser("~/.ci_edit"), "history.dat"),
},
}
# Alias for old palette name.
prefs[u"palette"][u"default"] = prefs[u"palette"][u"default256"]
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Flavien Chantelot (@Dorn-)
# Copyright: (c) 2018, Antoine Levy-Lambert (@antoinell)
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'supported_by': 'community',
'status': ['preview']
}
DOCUMENTATION = r'''
---
module: postgresql_tablespace
short_description: Add or remove PostgreSQL tablespaces from remote hosts
description:
- Adds or removes PostgreSQL tablespaces from remote hosts
U(https://www.postgresql.org/docs/current/sql-createtablespace.html),
U(https://www.postgresql.org/docs/current/manage-ag-tablespaces.html).
version_added: '2.8'
options:
tablespace:
description:
- Name of the tablespace to add or remove.
required: true
type: str
aliases:
- name
location:
description:
- Path to the tablespace directory in the file system.
- Ensure that the location exists and has right privileges.
type: path
aliases:
- path
state:
description:
- Tablespace state.
- I(state=present) implies the tablespace must be created if it doesn't exist.
- I(state=absent) implies the tablespace must be removed if present.
I(state=absent) is mutually exclusive with I(location), I(owner), I(set).
- See the Notes section for information about check mode restrictions.
type: str
default: present
choices: [ absent, present ]
owner:
description:
- Name of the role to set as an owner of the tablespace.
- If this option is not specified, the tablespace owner is a role that creates the tablespace.
type: str
set:
description:
- Dict of tablespace options to set. Supported from PostgreSQL 9.0.
- For more information see U(https://www.postgresql.org/docs/current/sql-createtablespace.html).
- When reset is passed as an option's value, if the option was set previously, it will be removed
U(https://www.postgresql.org/docs/current/sql-altertablespace.html).
type: dict
rename_to:
description:
- New name of the tablespace.
- The new name cannot begin with pg_, as such names are reserved for system tablespaces.
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
db:
description:
- Name of database to connect to and run queries against.
type: str
aliases:
- login_db
notes:
- I(state=absent) and I(state=present) (the second one if the tablespace doesn't exist) do not
support check mode because the corresponding PostgreSQL DROP and CREATE TABLESPACE commands
can not be run inside the transaction block.
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- To avoid "Peer authentication failed for user postgres" error,
use postgres user as a I(become_user).
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host.
- For Ubuntu-based systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
requirements: [ psycopg2 ]
author:
- Flavien Chantelot (@Dorn-)
- Antoine Levy-Lambert (@antoinell)
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Create a new tablespace called acme and set bob as its owner
postgresql_tablespace:
name: acme
owner: bob
location: /data/foo
- name: Create a new tablespace called bar with tablespace options
postgresql_tablespace:
name: bar
set:
random_page_cost: 1
seq_page_cost: 1
- name: Reset random_page_cost option
postgresql_tablespace:
name: bar
set:
random_page_cost: reset
- name: Rename the tablespace from bar to pcie_ssd
postgresql_tablespace:
name: bar
rename_to: pcie_ssd
- name: Drop tablespace called bloat
postgresql_tablespace:
name: bloat
state: absent
'''
RETURN = r'''
queries:
description: List of queries that the module tried to execute.
returned: always
type: list
sample: [ "CREATE TABLESPACE bar LOCATION '/incredible/ssd'" ]
tablespace:
description: Tablespace name.
returned: always
type: str
sample: 'ssd'
owner:
description: Tablespace owner.
returned: always
type: str
sample: 'Bob'
options:
description: Tablespace options.
returned: always
type: dict
sample: { 'random_page_cost': 1, 'seq_page_cost': 1 }
location:
description: Path to the tablespace in the file system.
returned: always
type: str
sample: '/incredible/fast/ssd'
newname:
description: New tablespace name
returned: if existent
type: str
sample: new_ssd
state:
description: Tablespace state at the end of execution.
returned: always
type: str
sample: 'present'
'''
try:
import psycopg2
HAS_PSYCOPG2 = True
except ImportError:
HAS_PSYCOPG2 = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.database import SQLParseError, pg_quote_identifier
from ansible.module_utils.postgres import postgres_common_argument_spec
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
def connect_to_db(module, kw, autocommit=False):
try:
db_connection = psycopg2.connect(**kw)
if autocommit:
if psycopg2.__version__ >= '2.4.2':
db_connection.set_session(autocommit=True)
else:
db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
except TypeError as e:
if 'sslrootcert' in e.args[0]:
module.fail_json(msg='Postgresql server must be at least '
'version 8.4 to support sslrootcert')
module.fail_json(msg="unable to connect to database: %s" % to_native(e))
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e))
return db_connection
class PgTablespace(object):
def __init__(self, module, cursor, name):
self.module = module
self.cursor = cursor
self.name = name
self.exists = False
self.owner = ''
self.settings = {}
self.location = ''
self.executed_queries = []
self.new_name = ''
self.opt_not_supported = False
# Collect info:
self.get_info()
def get_info(self):
# Check that spcoptions exists:
opt = self.__exec_sql("SELECT 1 FROM information_schema.columns "
"WHERE table_name = 'pg_tablespace' "
"AND column_name = 'spcoptions'", add_to_executed=False)
# For version 9.1 and earlier:
location = self.__exec_sql("SELECT 1 FROM information_schema.columns "
"WHERE table_name = 'pg_tablespace' "
"AND column_name = 'spclocation'", add_to_executed=False)
if location:
location = 'spclocation'
else:
location = 'pg_tablespace_location(t.oid)'
if not opt:
self.opt_not_supported = True
query = ("SELECT r.rolname, (SELECT Null), %s "
"FROM pg_catalog.pg_tablespace AS t "
"JOIN pg_catalog.pg_roles AS r "
"ON t.spcowner = r.oid "
"WHERE t.spcname = '%s'" % (location, self.name))
else:
query = ("SELECT r.rolname, t.spcoptions, %s "
"FROM pg_catalog.pg_tablespace AS t "
"JOIN pg_catalog.pg_roles AS r "
"ON t.spcowner = r.oid "
"WHERE t.spcname = '%s'" % (location, self.name))
res = self.__exec_sql(query, add_to_executed=False)
if not res:
self.exists = False
return False
if res[0][0]:
self.exists = True
self.owner = res[0][0]
if res[0][1]:
# Options exist:
for i in res[0][1]:
i = i.split('=')
self.settings[i[0]] = i[1]
if res[0][2]:
# Location exists:
self.location = res[0][2]
def create(self, location):
query = ("CREATE TABLESPACE %s LOCATION '%s'" % (pg_quote_identifier(self.name, 'database'), location))
return self.__exec_sql(query, ddl=True)
def drop(self):
return self.__exec_sql("DROP TABLESPACE %s" % pg_quote_identifier(self.name, 'database'), ddl=True)
def set_owner(self, new_owner):
if new_owner == self.owner:
return False
query = "ALTER TABLESPACE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'database'), new_owner)
return self.__exec_sql(query, ddl=True)
def rename(self, newname):
query = "ALTER TABLESPACE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'database'), newname)
self.new_name = newname
return self.__exec_sql(query, ddl=True)
def set_settings(self, new_settings):
# settings must be a dict {'key': 'value'}
if self.opt_not_supported:
return False
changed = False
# Apply new settings:
for i in new_settings:
if new_settings[i] == 'reset':
if i in self.settings:
changed = self.__reset_setting(i)
self.settings[i] = None
elif (i not in self.settings) or (str(new_settings[i]) != self.settings[i]):
changed = self.__set_setting("%s = '%s'" % (i, new_settings[i]))
return changed
def __reset_setting(self, setting):
query = "ALTER TABLESPACE %s RESET (%s)" % (pg_quote_identifier(self.name, 'database'), setting)
return self.__exec_sql(query, ddl=True)
def __set_setting(self, setting):
query = "ALTER TABLESPACE %s SET (%s)" % (pg_quote_identifier(self.name, 'database'), setting)
return self.__exec_sql(query, ddl=True)
def __exec_sql(self, query, ddl=False, add_to_executed=True):
try:
self.cursor.execute(query)
if add_to_executed:
self.executed_queries.append(query)
if not ddl:
res = self.cursor.fetchall()
return res
return True
except SQLParseError as e:
self.module.fail_json(msg=to_native(e))
except psycopg2.ProgrammingError as e:
self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
return False
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
tablespace=dict(type='str', required=True, aliases=['name']),
state=dict(type='str', default="present", choices=["absent", "present"]),
location=dict(type='path', aliases=['path']),
owner=dict(type='str'),
set=dict(type='dict'),
rename_to=dict(type='str'),
db=dict(type='str', aliases=['login_db']),
port=dict(type='int', default=5432, aliases=['login_port']),
ssl_mode=dict(type='str', default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
ca_cert=dict(type='str', aliases=['ssl_rootcert']),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=(('positional_args', 'named_args'),),
supports_check_mode=True,
)
if not HAS_PSYCOPG2:
module.fail_json(msg=missing_required_lib('psycopg2'))
tablespace = module.params["tablespace"]
state = module.params["state"]
location = module.params["location"]
owner = module.params["owner"]
rename_to = module.params["rename_to"]
settings = module.params["set"]
sslrootcert = module.params["ca_cert"]
session_role = module.params["session_role"]
if state == 'absent' and (location or owner or rename_to or settings):
module.fail_json(msg="state=absent is mutually exclusive location, "
"owner, rename_to, and set")
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host": "host",
"login_user": "user",
"login_password": "password",
"port": "port",
"db": "database",
"ssl_mode": "sslmode",
"ca_cert": "sslrootcert"
}
kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != '' and v is not None)
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
if psycopg2.__version__ < '2.4.3' and sslrootcert:
module.fail_json(msg='psycopg2 must be at least 2.4.3 '
'in order to use the ca_cert parameter')
db_connection = connect_to_db(module, kw, autocommit=True)
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
# Switch role, if specified:
if session_role:
try:
cursor.execute('SET ROLE %s' % session_role)
except Exception as e:
module.fail_json(msg="Could not switch role: %s" % to_native(e))
# Change autocommit to False if check_mode:
if module.check_mode:
if psycopg2.__version__ >= '2.4.2':
db_connection.set_session(autocommit=False)
else:
db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
# Set defaults:
autocommit = False
changed = False
##############
# Create PgTablespace object and do main job:
tblspace = PgTablespace(module, cursor, tablespace)
# If tablespace exists with different location, exit:
if tblspace.exists and location and location != tblspace.location:
module.fail_json(msg="Tablespace '%s' exists with different location '%s'" % (tblspace.name, tblspace.location))
# Create new tablespace:
if not tblspace.exists and state == 'present':
if rename_to:
module.fail_json(msg="Tablespace %s does not exist, nothing to rename" % tablespace)
if not location:
module.fail_json(msg="'location' parameter must be passed with "
"state=present if the tablespace doesn't exist")
# Because CREATE TABLESPACE can not be run inside the transaction block:
autocommit = True
if psycopg2.__version__ >= '2.4.2':
db_connection.set_session(autocommit=True)
else:
db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
changed = tblspace.create(location)
# Drop non-existing tablespace:
elif not tblspace.exists and state == 'absent':
# Nothing to do:
module.fail_json(msg="Tries to drop nonexistent tablespace '%s'" % tblspace.name)
# Drop existing tablespace:
elif tblspace.exists and state == 'absent':
# Because DROP TABLESPACE can not be run inside the transaction block:
autocommit = True
if psycopg2.__version__ >= '2.4.2':
db_connection.set_session(autocommit=True)
else:
db_connection.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
changed = tblspace.drop()
# Rename tablespace:
elif tblspace.exists and rename_to:
if tblspace.name != rename_to:
changed = tblspace.rename(rename_to)
if state == 'present':
# Refresh information:
tblspace.get_info()
# Change owner and settings:
if state == 'present' and tblspace.exists:
if owner:
changed = tblspace.set_owner(owner)
if settings:
changed = tblspace.set_settings(settings)
tblspace.get_info()
# Rollback if it's possible and check_mode:
if not autocommit:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
cursor.close()
db_connection.close()
# Make return values:
kw = dict(
changed=changed,
state='present',
tablespace=tblspace.name,
owner=tblspace.owner,
queries=tblspace.executed_queries,
options=tblspace.settings,
location=tblspace.location,
)
if state == 'present':
kw['state'] = 'present'
if tblspace.new_name:
kw['newname'] = tblspace.new_name
elif state == 'absent':
kw['state'] = 'absent'
module.exit_json(**kw)
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
"""Base for drivers
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from pykern import pkconfig, pkio, pkinspect, pkcollections, pkjson
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdp, pkdlog, pkdc, pkdexc, pkdformat
from sirepo import job
import asyncio
import importlib
import pykern.pkio
import re
import sirepo.auth
import sirepo.events
import sirepo.sim_db_file
import sirepo.simulation_db
import sirepo.srcontext
import sirepo.srdb
import sirepo.tornado
import time
import tornado.gen
import tornado.ioloop
import tornado.locks
import tornado.queues
KILL_TIMEOUT_SECS = 3
#: map of driver names to class
_CLASSES = None
#: default class when not determined by op
_DEFAULT_CLASS = None
_DEFAULT_MODULE = 'local'
cfg = None
OPS_THAT_NEED_SLOTS = frozenset((job.OP_ANALYSIS, job.OP_RUN))
_UNTIMED_OPS = frozenset((job.OP_ALIVE, job.OP_CANCEL, job.OP_ERROR, job.OP_KILL, job.OP_OK))
class AgentMsg(PKDict):
async def receive(self):
# Agent messages do not block the supervisor
DriverBase.receive(self)
def assign_instance_op(op):
m = op.msg
if m.jobRunMode == job.SBATCH:
res = _CLASSES[job.SBATCH].get_instance(op)
else:
res = _DEFAULT_CLASS.get_instance(op)
assert m.uid == res.uid, \
'op.msg.uid={} is not same as db.uid={} for jid={}'.format(
m.uid,
res.uid,
m.computeJid,
)
res.ops[op.opId] = op
return res
class DriverBase(PKDict):
__instances = PKDict()
_AGENT_STARTING_SECS_DEFAULT = 5
def __init__(self, op):
super().__init__(
driver_details=PKDict({'type': self.__class__.__name__}),
kind=op.kind,
ops=PKDict(),
#TODO(robnagler) sbatch could override OP_RUN, but not OP_ANALYSIS
# because OP_ANALYSIS touches the directory sometimes. Reasonably
# there should only be one OP_ANALYSIS running on an agent at one time.
op_slot_q=PKDict({k: job_supervisor.SlotQueue() for k in OPS_THAT_NEED_SLOTS}),
uid=op.msg.uid,
_agentId=job.unique_key(),
_agent_start_lock=tornado.locks.Lock(),
_agent_starting_timeout=None,
_idle_timer=None,
_websocket=None,
_websocket_ready=sirepo.tornado.Event(),
)
self._sim_db_file_token = sirepo.sim_db_file.token_for_user(self.uid)
# Drivers persist for the life of the program so they are never removed
self.__instances[self._agentId] = self
pkdlog('{}', self)
def destroy_op(self, op):
"""Clear our op and (possibly) free cpu slot"""
self.ops.pkdel(op.opId)
op.cpu_slot.free()
if op.op_slot:
op.op_slot.free()
if 'lib_dir_symlink' in op:
# lib_dir_symlink is unique_key so not dangerous to remove
pykern.pkio.unchecked_remove(op.pkdel('lib_dir_symlink'))
def free_resources(self, internal_error=None):
"""Remove holds on all resources and remove self from data structures"""
pkdlog('{} internal_error={}', self, internal_error)
try:
self._agent_starting_done()
self._websocket_ready.clear()
w = self._websocket
self._websocket = None
if w:
# Will not call websocket_on_close()
w.sr_close()
for o in list(self.ops.values()):
o.destroy(internal_error=internal_error)
self._websocket_free()
except Exception as e:
pkdlog('{} error={} stack={}', self, e, pkdexc())
async def kill(self):
raise NotImplementedError(
'DriverBase subclasses need to implement their own kill',
)
def make_lib_dir_symlink(self, op):
import sirepo.auth_db
m = op.msg
with sirepo.auth_db.session(), \
sirepo.auth.set_user_outside_of_http_request(m.uid):
d = sirepo.simulation_db.simulation_lib_dir(m.simulationType)
op.lib_dir_symlink = job.LIB_FILE_ROOT.join(
job.unique_key()
)
op.lib_dir_symlink.mksymlinkto(d, absolute=True)
m.pkupdate(
libFileUri=job.supervisor_file_uri(
self.cfg.supervisor_uri,
job.LIB_FILE_URI,
op.lib_dir_symlink.basename,
),
libFileList=[f.basename for f in d.listdir()],
)
def op_is_untimed(self, op):
return op.opName in _UNTIMED_OPS
def pkdebug_str(self):
return pkdformat(
'{}(a={:.4} k={} u={:.4} {})',
self.__class__.__name__,
self._agentId,
self.kind,
self.uid,
list(self.ops.values()),
)
async def prepare_send(self, op):
"""Sends the op
Returns:
bool: True if the op was actually sent
"""
await self._agent_ready(op)
await self._slots_ready(op)
@classmethod
def receive(cls, msg):
"""Receive message from agent"""
a = cls.__instances.get(msg.content.agentId)
if a:
a._receive(msg)
return
pkdlog('unknown agent, sending kill; msg={}', msg)
try:
msg.handler.write_message(PKDict(opName=job.OP_KILL))
except Exception as e:
pkdlog('error={} stack={}', e, pkdexc())
def send(self, op):
pkdlog(
'{} {} runDir={}',
self,
op,
op.msg.get('runDir')
)
self._websocket.write_message(pkjson.dump_bytes(op.msg))
@classmethod
async def terminate(cls):
for d in list(cls.__instances.values()):
try:
#TODO(robnagler) need timeout
await d.kill()
except job_supervisor.Awaited:
pass
except Exception as e:
# If one kill fails still try to kill the rest
pkdlog('error={} stack={}', e, pkdexc())
def websocket_on_close(self):
pkdlog('{}', self)
self.free_resources()
def _agent_cmd_stdin_env(self, **kwargs):
return job.agent_cmd_stdin_env(
('sirepo', 'job_agent', 'start'),
env=self._agent_env(),
**kwargs,
)
def _agent_env(self, env=None):
return job.agent_env(
env=(env or PKDict()).pksetdefault(
PYKERN_PKDEBUG_WANT_PID_TIME='1',
SIREPO_PKCLI_JOB_AGENT_SUPERVISOR_SIM_DB_FILE_URI=job.supervisor_file_uri(
self.cfg.supervisor_uri,
job.SIM_DB_FILE_URI,
sirepo.simulation_db.USER_ROOT_DIR,
self.uid,
),
SIREPO_PKCLI_JOB_AGENT_SUPERVISOR_SIM_DB_FILE_TOKEN=self._sim_db_file_token,
SIREPO_PKCLI_JOB_AGENT_AGENT_ID=self._agentId,
SIREPO_PKCLI_JOB_AGENT_START_DELAY=self.get('_agent_start_delay', 0),
SIREPO_PKCLI_JOB_AGENT_SUPERVISOR_URI=self.cfg.supervisor_uri.replace(
#TODO(robnagler) figure out why we need ws (wss, implicit)
'http',
'ws',
1,
) + job.AGENT_URI
),
uid=self.uid,
)
async def _agent_ready(self, op):
if self._websocket_ready.is_set():
return
await self._agent_start(op)
pkdlog('{} {} await _websocket_ready', self, op)
await self._websocket_ready.wait()
pkdc('{} websocket alive', op)
raise job_supervisor.Awaited()
async def _agent_start(self, op):
if self._agent_starting_timeout:
return
async with self._agent_start_lock:
# POSIT: we do not have to raise Awaited(), because
# this is the first thing an op waits on.
if self._agent_starting_timeout or self._websocket_ready.is_set():
return
try:
t = self.cfg.agent_starting_secs
if pkconfig.channel_in_internal_test():
x = op.msg.pkunchecked_nested_get('data.models.dog.favoriteTreat')
if x:
x = re.search(r'agent_start_delay=(\d+)', x)
if x:
self._agent_start_delay = int(x.group(1))
t += self._agent_start_delay
pkdlog('op={} agent_start_delay={}', op, self._agent_start_delay)
pkdlog('{} {} await _do_agent_start', self, op)
# All awaits must be after this. If a call hangs the timeout
# handler will cancel this task
self._agent_starting_timeout = tornado.ioloop.IOLoop.current().call_later(
t,
self._agent_starting_timeout_handler,
)
# POSIT: Canceled errors aren't smothered by any of the below calls
await self.kill()
await self._do_agent_start(op)
except Exception as e:
pkdlog('{} error={} stack={}', self, e, pkdexc())
self.free_resources(internal_error='failure starting agent')
raise
def _agent_starting_done(self):
self._start_idle_timeout()
if self._agent_starting_timeout:
tornado.ioloop.IOLoop.current().remove_timeout(
self._agent_starting_timeout
)
self._agent_starting_timeout = None
async def _agent_starting_timeout_handler(self):
pkdlog('{} timeout={}', self, self.cfg.agent_starting_secs)
await self.kill()
self.free_resources(internal_error='timeout waiting for agent to start')
def _receive(self, msg):
c = msg.content
i = c.get('opId')
if (
('opName' not in c or c.opName == job.OP_ERROR)
or ('reply' in c and c.reply.get('state') == job.ERROR)
):
pkdlog('{} error msg={}', self, c)
elif c.opName == job.OP_JOB_CMD_STDERR:
pkdlog('{} stderr from job_cmd msg={}', self, c)
return
else:
pkdlog('{} opName={} o={:.4}', self, c.opName, i)
if i:
if 'reply' not in c:
pkdlog('{} no reply={}', self, c)
c.reply = PKDict(state='error', error='no reply')
if i in self.ops:
#SECURITY: only ops known to this driver can be replied to
self.ops[i].reply_put(c.reply)
else:
pkdlog(
'{} not pending opName={} o={:.4}',
self,
c.opName,
i,
)
else:
getattr(self, '_receive_' + c.opName)(msg)
def _receive_alive(self, msg):
"""Receive an ALIVE message from our agent
Save the websocket and register self with the websocket
"""
self._agent_starting_done()
if self._websocket:
if self._websocket != msg.handler:
pkdlog('{} new websocket', self)
# New _websocket so bind
self.free_resources()
self._websocket = msg.handler
self._websocket_ready.set()
self._websocket.sr_driver_set(self)
def __str__(self):
return f'{type(self).__name__}({self._agentId:.4}, {self.uid:.4}, ops={list(self.ops.values())})'
def _receive_error(self, msg):
#TODO(robnagler) what does this mean? Just a way of logging? Document this.
pkdlog('{} msg={}', self, msg)
async def _slots_ready(self, op):
"""Only one op of each type allowed"""
n = op.opName
if n in (job.OP_CANCEL, job.OP_KILL):
return
if n == job.OP_SBATCH_LOGIN:
l = [o for o in self.ops.values() if o.opId != op.opId]
assert not l, \
'received {} but have other ops={}'.format(op, l)
return
await op.op_slot.alloc('Waiting for another simulation to complete')
await op.run_dir_slot.alloc('Waiting for access to simulation state')
# Once the job-op-relative resources are acquired, ask for global
# resources, so we only take global resources once we know we are ready to go.
await op.cpu_slot.alloc('Waiting for CPU resources')
def _start_idle_timeout(self):
async def _kill_if_idle():
self._idle_timer = None
if not self.ops:
pkdlog('{}', self)
await self.kill()
else:
self._start_idle_timeout()
if not self._idle_timer:
self._idle_timer = tornado.ioloop.IOLoop.current().call_later(
cfg.idle_check_secs,
_kill_if_idle,
)
def _websocket_free(self):
pass
def init(job_supervisor_module):
global cfg, _CLASSES, _DEFAULT_CLASS, job_supervisor
assert not cfg
job_supervisor = job_supervisor_module
cfg = pkconfig.init(
modules=((_DEFAULT_MODULE,), set, 'available job driver modules'),
idle_check_secs=(1800, pkconfig.parse_seconds, 'how many seconds to wait between checks'),
)
_CLASSES = PKDict()
p = pkinspect.this_module().__name__
for n in cfg.modules:
m = importlib.import_module(pkinspect.module_name_join((p, n)))
_CLASSES[n] = m.CLASS.init_class(job_supervisor)
_DEFAULT_CLASS = _CLASSES.get('docker') or _CLASSES.get(_DEFAULT_MODULE)
pkdlog('modules={}', sorted(_CLASSES.keys()))
async def terminate():
await DriverBase.terminate()
|
|
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
User interface module for rbuild.
"""
import getpass
import fcntl
import os
import struct
import sys
import termios
import time
from conary.lib import keystore
from rbuild import errors
from rbuild.internal import logger
class UserInterface(object):
_last_length = None
def __init__(self, cfg, outStream=None, errorStream=None,
logRoot=None):
if outStream is None:
outStream = sys.stdout
if errorStream is None:
errorStream = sys.stderr
self.outStream = outStream
self.errorStream = errorStream
self.cfg = cfg
self._log = None
self._logRoot = logRoot
self.resetLogFile(logRoot)
def getTerminalSize(self):
s = struct.pack('HHHH', 0, 0, 0, 0)
fd = self.outStream.fileno() if self.outStream.isatty() else 1
result = fcntl.ioctl(fd, termios.TIOCGWINSZ, s)
rows, cols = struct.unpack('HHHH', result)[0:2]
return rows, cols
def resetLogFile(self, logRoot):
'''
Closes the current log (if any), opening a new log under
the directory C{logRoot} unless logRoot evaluates to C{False}.
The C{logRoot} directory will be created (if possible) if
it does not already exist.
If the old log path is different from the current log path,
a message noting the continuation will be written to the
current log before closing it.
@param logRoot: directory into which to put log file, or
C{False} to disable logging.
'''
if self._log and logRoot and logRoot != self._logRoot:
self._log('Command log continued in %s/log', logRoot)
if logRoot:
self._logRoot = logRoot
if self._log and not logRoot:
del self._log
if not logRoot:
self._log = None
return
try:
if not os.path.exists(logRoot):
os.mkdir(logRoot, 0700)
elif not os.access(logRoot, os.W_OK):
self._log = None
return
except:
# fall back gracefully if we can't create the directory
self._log = None
return
self._log = logger.Logger(logRoot + '/log')
def write(self, msg='', *args):
self.outStream.write('%s\n' % (msg % args, ))
self.outStream.flush()
if self._log:
self._log(msg, *args)
def pushContext(self, msg='', *args):
if self._log:
self._log.pushContext(msg, *args)
def popContext(self, *args):
if self._log:
self._log.popContext(*args)
def writeError(self, errorMsg, *args):
self.errorStream.write('warning: %s\n' % (errorMsg % args, ))
def writeProgress(self, msg='', *args):
timeStamp = time.ctime(time.time())
self.outStream.write('[%s] %s\n' % (timeStamp, msg % args))
self.outStream.flush()
def info(self, msg, *args):
if not self.cfg.quiet:
self.write(msg, *args)
elif self._log:
# self.write() already logs, so do this only in quiet mode
self._log(msg, *args)
def warning(self, msg, *args):
self.writeError(msg, *args)
if self._log:
self._log.warn(msg, *args)
def debug(self, msg, *args):
if self._log:
self._log.debug(msg, *args)
def progress(self, msg, *args):
'''
Writes progress message; used to indicate that a potentially
long-running operation has been started, unless quiet mode
has been selected in the configuration. This is intended to
be overrideable in different UIs in ways that could include
not displaying the message at all or display it only transiently
while the operation is underway. This method should not
be used to display important information that should not be
missed by the user.
@param msg: printf-style string
@param args: printf-style variable arguments
'''
if not self.cfg.quiet:
self.writeProgress(msg, *args)
if self._log:
self._log(msg, *args)
def input(self, prompt):
try:
return raw_input(prompt)
except EOFError:
raise errors.RbuildError(
"Ran out of input while reading for '%s'" % prompt)
def multiLineInput(self, prompt):
'''Reads input until it catches an EOFError, and returns a newline-
concatenated string or the empty string if no input was received.
:param prompt: user prompt
:type prompt: str
:returns: user's input
:rtype: str
'''
response = None
try:
response = [raw_input(prompt)]
while True:
response.append(raw_input())
except EOFError:
return '\n'.join(response) if response else ''
def inputPassword(self, prompt):
return getpass.getpass(prompt)
def getPassword(self, prompt, default=None, validationFn=None, verify=False):
"""Ask the user for a password. If `verify` is true, then the user must
type the password in twice.
:param str prompt: user prompt
:param str default: default value of password
:param func validationFn: a function to call to validate the password
:param bool verify: make the user type in the password twice
"""
if default:
defaultvis = '<obscured>'
else:
defaultvis = None
ret = self.getResponse(prompt, default=defaultvis,
validationFn=validationFn,
inputFn=self.inputPassword)
while verify:
if ret == defaultvis:
# user accepted the default, no need to verify
break
verify = self.getResponse("Retype password",
validationFn=validationFn,
inputFn=self.inputPassword)
if (ret == verify):
break
# reprompt for password
self.write("Sorry, passwords do not match.")
ret = self.getResponse(prompt, default=defaultvis,
validationFn=validationFn,
inputFn=self.inputPassword)
if ret == defaultvis:
return default
else:
return ret
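    # A minimal usage sketch (hypothetical call site and variable names): ask
    # for a secret, make the user retype it, and fall back to a cached value
    # that is displayed only as '<obscured>':
    #
    #     password = ui.getPassword('API password', default=cached_password,
    #                               verify=True)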
def getYn(self, prompt, default=True):
'''
Get a yes/no response from the user. Return a bool representing
the user's choice. Note that any "yes" response is always True,
and any "no" response is always false, whether or not it is the
default. C{ui.getYn('really?', False)} will return False if the
user presses return or enters C{N}.
@param prompt: string to display
@type prompt: str
@param default: default value: True for yes, False for no
@type default: bool
@return: True for yes, False for no
@rtype: bool
'''
if default:
defaultChar = 'Y'
else:
defaultChar = 'N'
response = self.getResponse(
prompt,
default=defaultChar,
validationFn=lambda r: r.lower() in ('y', 'n', 'yes', 'no'),
)
return response.lower() in ('y', 'yes')
def getResponse(self, prompt, default=None, validationFn=None,
inputFn=None, required=False):
if inputFn is None:
inputFn = self.input
if default:
prompt += ' (Default: %s): ' % default
else:
prompt += ': '
while True:
response = inputFn(prompt)
if not response:
if not default:
if required:
self.warning('Empty response not allowed.')
continue
response = None
elif default:
return default
if validationFn is not None:
if not validationFn(response):
continue
return response
def _getChoiceResponse(self, prompt, choices, prePrompt='', default=None,
pageSize=None, promptDefault=None):
choices = list(choices)
assert choices
pad = len(str(len(choices)))
if pageSize:
pageNo = 0
pageCount, remainder = divmod(len(choices), pageSize)
if remainder:
pageCount += 1
while True:
hasPrev = hasNext = None
self.write(prePrompt)
if pageSize:
idxStart = pageNo * pageSize
idxEnd = (pageNo + 1) * pageSize
hasPrev = (pageNo > 0)
chunk = ((idxStart + x, y)
for x, y in enumerate(choices[idxStart:idxEnd]))
else:
chunk = enumerate(choices)
for n, choice in chunk:
self.write(' %*d. %s' % (pad, n + 1, choice))
if hasPrev:
self.write(' %*s %s' % (pad, '<', "(previous page)"))
if pageSize and pageNo < pageCount - 1:
self.write(' %*s %s' % (pad, '>', "(next page)"))
hasNext = True
response = self.getResponse(
prompt + ' [%d-%d]' % (1, len(choices)),
default=promptDefault,
)
if hasPrev and response == '<':
pageNo -= 1
continue
if hasNext and response == '>':
pageNo += 1
continue
try:
response = int(response)
except (ValueError, TypeError):
if isinstance(response, list):
return response
continue
return response
def getChoice(self, prompt, choices, prePrompt='Choose one:', default=None,
pageSize=None):
"""
Present a list of choices to the user and have them select one by
index. Returns a 0-indexed integer into the original list of choices.
@param prompt: string to display in the final prompt
@type prompt: str
@param choices: list of items to display, in desired order
@type choices: list
@param prePrompt: optional string to display before the list of choices
@type prePrompt: str
@param default: index of default response
@type default: int
@param pageSize: (optional) number of items per page. Defaults to no pagination.
@type pageSize: int
"""
if default is None:
promptDefault = default
else:
# prompts are 1-indexed
promptDefault = default + 1
while True:
response = self._getChoiceResponse(
prompt, choices, prePrompt, default, pageSize, promptDefault)
if not (0 < response <= len(choices)):
continue
return response - 1
def getChoices(self, prompt, choices, prePrompt='Choose:', default=None,
pageSize=None):
"""
        Present a list of choices to the user and have them select one or more
        by index. Returns a list of 0-indexed integers into the original list
        of choices.
@param prompt: string to display in the final prompt
@type prompt: str
@param choices: list of items to display, in desired order
@type choices: list
@param prePrompt: optional string to display before the list of choices
@type prePrompt: str
@param default: default choice(s)
@type default: list of ints
@param pageSize: (optional) number of items per page. Defaults to no
pagination.
@type pageSize: int
"""
promptDefault = set(d + 1 for d in default) if default else set()
while True:
response = self._getChoiceResponse(
prompt, choices, prePrompt, default, pageSize,
list(promptDefault))
if isinstance(response, list):
return [r - 1 for r in response]
if not (0 < response <= len(choices)):
continue
# toggle response
if response in promptDefault:
promptDefault.discard(response)
else:
promptDefault.add(response)
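    # A minimal usage sketch (hypothetical variable names): pick one entry
    # from a paginated list with getChoice(); the return value is a 0-based
    # index into `platformNames`:
    #
    #     idx = ui.getChoice('Select a platform', platformNames,
    #                        prePrompt='Available platforms:', pageSize=10)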
def promptPassword(self, keyDesc, prompt, promptDesc, validateCallback):
passwd = keystore.getPassword(keyDesc)
if passwd:
if validateCallback(passwd):
return passwd
else:
# Invalidate the cached password (if Conary is new enough to
# support that)
try:
keystore.invalidatePassword(keyDesc)
except AttributeError:
pass
for x in range(3):
self.write(promptDesc)
try:
passwd = self.inputPassword(prompt)
except EOFError:
break
if not passwd:
# User wants out but getpass eats Ctrl-C
break
if validateCallback(passwd):
keystore.setPassword(keyDesc, passwd)
return passwd
self.write("The specified credentials were not valid.\n")
return None
def lineOutProgress(self, msg, *args):
'''
Writes progress message; used to indicate that a potentially
long-running operation has been started, unless quiet mode
has been selected in the configuration, but only display it
transiently while the operation is underway. This method should
not be used to display important information that should not be
missed by the user.
@param msg: printf-style string
@param args: printf-style variable arguments
'''
if not self.cfg.quiet:
if self.outStream.isatty():
                timeStamp = time.ctime(time.time())
                text = msg % args
                length = len(text)
                self.outStream.write('\r[%s] %s' % (timeStamp, text))
if length < self._last_length:
i = (self._last_length - length) + 1
self.outStream.write(' ' * i + '\b' * i)
self.outStream.flush()
self._last_length = length
if self._log:
self._log(msg, *args)
else:
self.progress(msg, *args)
def writeTable(self, rows, headers=None, padded=True):
'''
Writes a table; used to display data that is best displayed in rows and
columns. If 'headers' is not provided, then we assume the first row is
the header. Regardless, only the columns listed in the header will be
displayed, any other elements in the rows will be ignored.
@param rows: the data to be displayed
@type rows: list of tuples
@param headers: table headers
@type headers: tuple of strings
@param padded: pad each row element so columns are aligned
@type padded: bool
'''
if headers is None:
headers = rows.pop(0)
columns = len(headers)
padding = [''] * (columns - 1)
if padded:
padding = [len(h) for h in headers[:-1]]
for row in rows:
for idx, elem in enumerate(row[:columns - 1]):
padding[idx] = max(padding[idx], len(elem))
# create a padded format string, but do not pad the last column
format_string = ' '.join(
['{%d:%s}' % x for x in zip(range(columns - 1), padding)]
+ ['{%d:%s}' % (columns - 1, '')])
output = format_string.format(*headers)
        self.outStream.write('%s\n' % output)
        if self._log:
            self._log(output)
for row in rows:
if len(row) < columns:
# extend row with empty strings
row = row + ('',) * (columns - len(row))
output = format_string.format(*row[:columns])
            self.outStream.write('%s\n' % output)
            if self._log:
                self._log(output)
self.outStream.flush()
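if __name__ == '__main__':
    # Minimal demonstration sketch (not part of the rbuild API): exercise the
    # table writer with a stand-in config object that only supplies the
    # `quiet` attribute this class reads; logging stays disabled because no
    # logRoot is given.
    class _DemoConfig(object):
        quiet = False
    _ui = UserInterface(_DemoConfig())
    _ui.write('rbuild UI demo')
    _ui.writeTable(
        [('Name', 'Version'), ('foo', '1.0'), ('bar', '2.3.1')],
        padded=True,
    )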
|
|
"""
Generic framework path manipulation
"""
import re
__all__ = ['framework_info']
STRICT_FRAMEWORK_RE = re.compile(r"""(?x)
(?P<location>^.*)(?:^|/)
(?P<name>
(?P<shortname>\w+).framework/
(?:Versions/(?P<version>[^/]+)/)?
(?P=shortname)
(?:_(?P<suffix>[^_]+))?
)$
""")
def framework_info(filename):
"""
A framework name can take one of the following four forms:
Location/Name.framework/Versions/SomeVersion/Name_Suffix
Location/Name.framework/Versions/SomeVersion/Name
Location/Name.framework/Name_Suffix
Location/Name.framework/Name
returns None if not found, or a mapping equivalent to:
dict(
location='Location',
name='Name.framework/Versions/SomeVersion/Name_Suffix',
shortname='Name',
version='SomeVersion',
suffix='Suffix',
)
Note that SomeVersion and Suffix are optional and may be None
if not present
"""
is_framework = STRICT_FRAMEWORK_RE.match(filename)
if not is_framework:
return None
return is_framework.groupdict()
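# A minimal sketch of the mapping returned for a versioned, suffixed path
# (the same cases are exercised by test_framework_info below):
#
#     framework_info('P/F.framework/Versions/A/F_debug')
#     # -> {'location': 'P', 'name': 'F.framework/Versions/A/F_debug',
#     #     'shortname': 'F', 'version': 'A', 'suffix': 'debug'}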
def test_framework_info():
def d(location=None, name=None, shortname=None, version=None, suffix=None):
return dict(
location=location,
name=name,
shortname=shortname,
version=version,
suffix=suffix
)
assert framework_info('completely/invalid') is None
assert framework_info('completely/invalid/_debug') is None
assert framework_info('P/F.framework') is None
assert framework_info('P/F.framework/_debug') is None
assert framework_info('P/F.framework/F') == d('P', 'F.framework/F', 'F')
assert framework_info('P/F.framework/F_debug') == d('P', 'F.framework/F_debug', 'F', suffix='debug')
assert framework_info('P/F.framework/Versions') is None
assert framework_info('P/F.framework/Versions/A') is None
assert framework_info('P/F.framework/Versions/A/F') == d('P', 'F.framework/Versions/A/F', 'F', 'A')
assert framework_info('P/F.framework/Versions/A/F_debug') == d('P', 'F.framework/Versions/A/F_debug', 'F', 'A', 'debug')
if __name__ == '__main__':
test_framework_info()
|
|
"""
mainpred.py
This file is the main code for running the prediction algorithms for OpenAPS.
The OpenAPS data must be in a subdirectory called "data" with a subdirectory of
the ID number that contains the devicestatus.json file. For example:
./data/01234567/devicestatus.json
where . represents the current directory with the code.
The code requires the following files:
bgdata.py
bgarray.py
datamatrix.py
mlalgorithm.py
ClarkeErrorGrid.py
oldpred.py
This code also requires the following libraries:
pandas
numpy
gatspy
sklearn
MedicineX OpenAPS
2017-7-24
"""
from collections import namedtuple
import math
import numpy as np
from bgdata import get_bg_dataframe
from bgdata import get_bg_index
from bgdata import get_new_df_entries_every_5_minutes
from bglomb import get_lomb_data
from datamatrix import make_data_matrix
from oldpred import analyze_old_pred_data
from mlalgorithm import *
from savedata import *
from sklearn import linear_model
from sklearn import kernel_ridge
from sklearn import svm
from sklearn import neural_network
from sklearn import metrics
from itertools import product
"""
MODIFY THE VARIABLES BELOW TO RUN DATA
"""
#Array of the ID to use. Put ID Number as a string (e.g. "00000001")
#["00000001", "00000003", "00000004", "00000007", "00000010", "00000011",
# "00000013", "00000015", "00000016", "00000017", "00000020", "00000021",
# "00000023", "00000024"]
ID_ARRAY = np.array(["00000001"])
#Array of the minutes in the future that the predictions will be made for, AKA the prediction horizon. (e.g. [1,15,30])
#1 refers to the prediction 1 minute after the current time (does not include the current time)
PRED_MINUTES_ARRAY = np.array([30, 45, 60, 90, 120])
#Array of the data minutes that will be tested, AKA data horizon. (e.g. [1,15,30,45,60,75,90,105,120])
#Includes the current minute, so 1 is only the current minute, and 5 includes the current minute and 4 minutes before
DATA_MINUTES_ARRAY = np.array([5])
#Choose whether to run 'eventualBG', 'iob', 'cob', 'acob'. (e.g. ['iob', 'acob']). Leave empty to run none
#['acob', 'cob', 'eventualBG', 'iob']
OLD_PRED_ALGORITHM_ARRAY = np.array(['acob', 'cob', 'eventualBG', 'iob'])
#Array of the algorithms that will be tested
#["Linear Regression", "Ridge Regression", "Lasso Regression", "SVR Linear Kernel", "MLP Regression"]
ALGORITHM_ARRAY = np.array(["Linear Regression", "Ridge Regression", "Lasso Regression", "SVR Linear Kernel", "MLP Regression"])
#Prints every parameter for the grid search.
PRINT_PARAMETERS = False
"""
END
"""
"""
Modify to save/load the data
"""
SAVE_LOMB_DATA = False
LOAD_LOMB_DATA = False
SAVE_PRED_DATA = False
LOAD_PRED_DATA = False
SAVE_ALGORITHM = False
"""
Modify the plotting variable below if needed
"""
#List of lomb-scargle plots to print
#Leave empty to print none. Otherwise, use something like ['bg','cob']
#['bg','iob','cob']
PLOT_LOMB_ARRAY = np.array([])
#Boolean to show the prediction plot versus the actual bg
SHOW_PRED_PLOT = False
#Boolean to save the prediction plot
SAVE_PRED_PLOT = False
#Boolean to show the Clarke Error Grid plot
SHOW_CLARKE_PLOT = False
#Boolean to save the Clarke Error Grid plot
SAVE_CLARKE_PLOT = False
"""
Constants below
"""
#ALGORITHM CONSTANTS
#Values to be tested for the parameters
RIDGE_PARAMETER_ARRAY = np.array([0.01, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 1.28, 2.56, 5.12])
LASSO_PARAMETER_ARRAY = np.array([0.0025, 0.005, 0.01, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 1.28])
SVR_LINEAR_PARAMETER_ARRAY = np.array([0.005, 0.01, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 1.28, 2.56])
SVR_LINEAR_EPSILON_ARRAY = np.array([0.0025, 0.005, 0.01, 0.02, 0.04, 0.08, 0.16, 0.32, 0.64, 1.28])
#Values for MLP parameters
MLP_LEARNING_ARRAY = np.array([1e-05, 1e-04, 1e-03, 1e-02, 1e-01, 1])
MLP_LAYER_ARRAY = np.array([2, 4, 8, 16, 32, 64, 128, 256])
# MLP_FUNCTION_ARRAY = np.array(['identity', 'logistic', 'tanh', 'relu'])
# MLP_OPTIMIZER_ARRAY = np.array(['lbfgs', 'sgd', 'adam'])
ALGORITHM_LIST = ["Linear Regression", "Ridge Regression", "Lasso Regression", "SVR Linear Kernel", "MLP Regression"]
#Returns the linear regression model
def linear_regression_model(parameter_array):
return linear_model.LinearRegression(fit_intercept=True, normalize=True, copy_X=True, n_jobs=1)
#Returns the ridge regression model
def ridge_regression_model(parameter_array):
alpha_value = parameter_array[0]
# ridge_solver = parameter_array[0]
return linear_model.Ridge(alpha=alpha_value, fit_intercept=True, normalize=True, copy_X=True, max_iter=None, tol=0.001, solver='auto', random_state=None)
#Returns the lasso regression model
def lasso_regression_model(parameter_array):
alpha_value = parameter_array[0] #alpha value index is first index
return linear_model.Lasso(alpha=alpha_value, fit_intercept=True, normalize=True, precompute=False, copy_X=True,
max_iter=1000, tol=0.0001, warm_start=False, positive=False, random_state=None, selection='cyclic')
#Returns the SVR Linear Kernel model
def svr_linear_regression(parameter_array):
c_value = parameter_array[0]
# epsilon_value = parameter_array[1]
return svm.SVR(kernel='linear', degree=3, gamma='auto', coef0=0.0, tol=0.001, C=c_value, epsilon=0.1, shrinking=True, cache_size=200, verbose=False, max_iter=-1)
#Returns the mlp regression model
def mlp_regression(parameter_array):
layer_value = parameter_array[0]
second_layer_value = parameter_array[1]
learning_rate = parameter_array[2]
return neural_network.MLPRegressor(hidden_layer_sizes=(layer_value,second_layer_value), activation='identity', solver='adam', alpha=1,
batch_size='auto', learning_rate='constant', learning_rate_init=learning_rate, power_t=0.5,
max_iter=200, shuffle=True, random_state=None, tol=0.0001, verbose=False, warm_start=False,
momentum=0.9, nesterovs_momentum=True, early_stopping=False, validation_fraction=0.1, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
#Dictionary with the name of the algorithm as the key and the function as the value
ALGORITHM_DICT = {
"Linear Regression":linear_regression_model,
"Ridge Regression":ridge_regression_model,
"Lasso Regression":lasso_regression_model,
"SVR Linear Kernel":svr_linear_regression,
"MLP Regression":mlp_regression}
ALGORITHM_PARAMETERS = {
"Linear Regression":np.array([np.array([1])]),
"Ridge Regression":np.array([RIDGE_PARAMETER_ARRAY]),
"Lasso Regression":np.array([LASSO_PARAMETER_ARRAY]),
"SVR Linear Kernel":np.array([SVR_LINEAR_PARAMETER_ARRAY]),
"MLP Regression":np.array([MLP_LAYER_ARRAY,MLP_LAYER_ARRAY,MLP_LEARNING_ARRAY])}
#Dictionary with the name of the algorithm as the key and boolean to apply the StandardScaler transformation as the value
ALGORITHM_TRANSFORM = {
"Linear Regression":False,
"Ridge Regression":False,
"Lasso Regression":False,
"SVR Linear Kernel":True,
"MLP Regression":True}
#Prediction CONSTANTS
MINIMUM_BG = 40
MAXIMUM_BG = 400
#ID CONSTANTS
ID_LIST = [
"00000001", "00000003", "00000004", "00000007", "00000010", "00000011",
"00000013", "00000015", "00000016", "00000017", "00000020", "00000021",
"00000023", "00000024"]
#They work, but they don't have enough days to make accurate data charts
WORK_BUT_SMALL_ID_LIST = ["00000000", "00000005", "00000008", "00000009", "00000012", "00000018", "00000019", "00000025"]
#THESE DON'T WORK!!!!! (e.g. they have loop algorithm only, no data at all)
BAD_ID_LIST = ["00000002", "00000006", "00000014", "00000022"]
#Start and end training set dates
START_TRAIN_DATE_DICT = {
"00000000": "2017-02-03", "00000001": "2016-10-09", "00000002": "2017-02-03",
"00000003": "2017-01-06", "00000004": "2017-01-06", "00000005": "2017-02-09",
"00000006": "2017-01-10", "00000007": "2017-01-07", "00000008": "2017-01-25",
"00000009": "2017-02-10", "00000010": "2017-01-06", "00000011": "2016-12-11",
"00000012": "2017-01-08", "00000013": "2017-05-16", "00000015": "2017-04-02",
"00000016": "2017-06-22", "00000017": "2017-05-19", "00000018": "2016-07-07",
"00000019": "2017-05-27", "00000020": "2017-04-01", "00000021": "2017-06-13",
"00000023": "2016-11-07", "00000024": "2017-04-20", "00000025": "2017-04-20"}
END_TRAIN_DATE_DICT = {
"00000000": "2017-02-07", "00000001": "2016-10-18", "00000002": "2017-02-07",
"00000003": "2017-01-15", "00000004": "2017-01-15", "00000005": "2017-02-13",
"00000006": "2017-01-21", "00000007": "2017-01-16", "00000008": "2017-01-25",
"00000009": "2017-02-15", "00000010": "2017-01-15", "00000011": "2016-12-20",
"00000012": "2017-01-24", "00000013": "2017-05-25", "00000015": "2017-04-11",
"00000016": "2017-07-01", "00000017": "2017-05-28", "00000018": "2016-07-16",
"00000019": "2017-06-02", "00000020": "2017-04-10", "00000021": "2017-06-22",
"00000023": "2016-11-16", "00000024": "2017-04-29", "00000025": "2017-04-27"}
#Start and end validation set dates
START_VALID_DATE_DICT = {
"00000000": "2017-02-08", "00000001": "2016-10-19", "00000002": "2017-02-03",
"00000003": "2017-01-16", "00000004": "2017-01-16", "00000005": "2017-02-09",
"00000006": "2017-01-10", "00000007": "2017-01-17", "00000008": "2017-01-25",
"00000009": "2017-02-10", "00000010": "2017-01-16", "00000011": "2016-12-21",
"00000012": "2017-01-08", "00000013": "2017-05-26", "00000015": "2017-04-12",
"00000016": "2017-07-02", "00000017": "2017-05-29", "00000018": "2016-07-17",
"00000019": "2017-06-03", "00000020": "2017-04-11", "00000021": "2017-06-23",
"00000023": "2016-11-17", "00000024": "2017-04-30", "00000025": "2017-04-28"}
END_VALID_DATE_DICT = {
"00000000": "2017-02-09", "00000001": "2016-10-22", "00000002": "2017-02-07",
"00000003": "2017-01-19", "00000004": "2017-01-19", "00000005": "2017-02-13",
"00000006": "2017-01-21", "00000007": "2017-01-20", "00000008": "2017-01-25",
"00000009": "2017-02-15", "00000010": "2017-01-19", "00000011": "2016-12-24",
"00000012": "2017-01-24", "00000013": "2017-05-29", "00000015": "2017-04-15",
"00000016": "2017-07-05", "00000017": "2017-06-01", "00000018": "2016-07-20",
"00000019": "2017-06-04", "00000020": "2017-04-14", "00000021": "2017-06-26",
"00000023": "2016-11-20", "00000024": "2017-05-03", "00000025": "2017-04-30"}
#Start and end test set dates
START_TEST_DATE_DICT = {
"00000000": "2017-02-10", "00000001": "2016-10-23", "00000002": "2017-02-08",
"00000003": "2017-01-20", "00000004": "2017-01-20", "00000005": "2017-02-14",
"00000006": "2017-01-22", "00000007": "2017-01-21", "00000008": "2017-01-26",
"00000009": "2017-02-16", "00000010": "2017-01-20", "00000011": "2016-12-25",
"00000012": "2017-01-25", "00000013": "2017-05-30", "00000015": "2017-04-16",
"00000016": "2017-07-06", "00000017": "2017-06-02", "00000018": "2016-07-21",
"00000019": "2017-06-05", "00000020": "2017-04-15", "00000021": "2017-06-27",
"00000023": "2016-11-21", "00000024": "2017-05-04", "00000025": "2017-04-31"}
END_TEST_DATE_DICT = {
"00000000": "2017-02-12", "00000001": "2016-10-29", "00000002": "2017-02-09",
"00000003": "2017-01-26", "00000004": "2017-01-26", "00000005": "2017-02-15",
"00000006": "2017-01-11", "00000007": "2017-01-27", "00000008": "2017-01-26",
"00000009": "2017-02-17", "00000010": "2017-01-26", "00000011": "2016-12-31",
"00000012": "2017-01-26", "00000013": "2017-06-05", "00000015": "2017-04-22",
"00000016": "2017-07-12", "00000017": "2017-06-08", "00000018": "2016-07-27",
"00000019": "2017-06-07", "00000020": "2017-04-21", "00000021": "2017-07-03",
"00000023": "2016-11-27", "00000024": "2017-05-10", "00000025": "2017-05-04"}
#Function to return the training, validation, and testing set dates given the string of the ID number
def _get_id_dates(id_str):
return START_TRAIN_DATE_DICT[id_str], END_TRAIN_DATE_DICT[id_str], START_VALID_DATE_DICT[id_str], END_VALID_DATE_DICT[id_str], START_TEST_DATE_DICT[id_str], END_TEST_DATE_DICT[id_str]
#Main function
def main():
for id_str in ID_ARRAY:
print "\nID Number: " + id_str
start_train_str, end_train_str, start_valid_str, end_valid_str, start_test_str, end_test_str = _get_id_dates(id_str)
bg_df = get_bg_dataframe(id_str) #imports json file as a pandas dataframe
#get start and stop indices
bg_df, start_train_index, end_train_index = get_bg_index(bg_df, start_train_str, end_train_str, "Training", True)
bg_df, start_valid_index, end_valid_index = get_bg_index(bg_df, start_valid_str, end_valid_str, "Validation", False)
bg_df, start_test_index, end_test_index = get_bg_index(bg_df, start_test_str, end_test_str, "Testing", False)
train_bg_df, start_train_index, end_train_index = get_new_df_entries_every_5_minutes(bg_df, start_train_index, end_train_index, "New Training")
valid_bg_df, start_valid_index, end_valid_index = get_new_df_entries_every_5_minutes(bg_df, start_valid_index, end_valid_index, "New Validation")
test_bg_df, start_test_index, end_test_index = get_new_df_entries_every_5_minutes(bg_df, start_test_index, end_test_index, "New Testing")
if LOAD_LOMB_DATA:
train_lomb_data = load_array(id_str + "_train_lomb_data") #If you have already saved data, then load it
valid_lomb_data = load_array(id_str + "_valid_lomb_data")
test_lomb_data = load_array(id_str + "_test_lomb_data")
else:
#get the lomb-scargle data
train_lomb_data = get_lomb_data(train_bg_df, start_train_index, end_train_index, PLOT_LOMB_ARRAY)
valid_lomb_data = get_lomb_data(valid_bg_df, start_valid_index, end_valid_index, PLOT_LOMB_ARRAY)
test_lomb_data = get_lomb_data(test_bg_df, start_test_index, end_test_index, PLOT_LOMB_ARRAY)
if SAVE_LOMB_DATA:
save_data(train_lomb_data, id_str + "_train_lomb_data") #Save data if you want to
save_data(valid_lomb_data, id_str + "_valid_lomb_data")
save_data(test_lomb_data, id_str + "_test_lomb_data")
for pred_minutes in PRED_MINUTES_ARRAY:
print " Prediction Minutes: " + str(pred_minutes)
#Analyze old pred methods
if len(OLD_PRED_ALGORITHM_ARRAY) != 0:
analyze_old_pred_data(test_bg_df, OLD_PRED_ALGORITHM_ARRAY, start_test_index, end_test_index, pred_minutes,
SHOW_PRED_PLOT, SAVE_PRED_PLOT, SHOW_CLARKE_PLOT, SAVE_CLARKE_PLOT, id_str)
for data_minutes in DATA_MINUTES_ARRAY:
print " Data Minutes: " + str(data_minutes)
#make data matrix inputs and the bg outputs
train_data_matrix, actual_bg_train_array, time_bg_train_array = make_data_matrix(train_bg_df, train_lomb_data, start_train_index, end_train_index, data_minutes, pred_minutes)
valid_data_matrix, actual_bg_valid_array, time_bg_valid_array = make_data_matrix(valid_bg_df, valid_lomb_data, start_valid_index, end_valid_index, data_minutes, pred_minutes)
test_data_matrix, actual_bg_test_array, time_bg_test_array = make_data_matrix(test_bg_df, test_lomb_data, start_test_index, end_test_index, data_minutes, pred_minutes)
#Analyze ml algorithms
for algorithm_str in ALGORITHM_ARRAY:
print " Algorithm: " + algorithm_str
if ALGORITHM_TRANSFORM[algorithm_str]:
input_train_data_matrix, input_valid_data_matrix, input_test_data_matrix = preprocess_data(train_data_matrix, valid_data_matrix, test_data_matrix) #Transform data
else:
input_train_data_matrix, input_valid_data_matrix, input_test_data_matrix = train_data_matrix, valid_data_matrix, test_data_matrix #don't transform data
new_parameter_bool = True
#Iterate over every possible combination of parameters
for parameter_array in product(*ALGORITHM_PARAMETERS[algorithm_str]):
reg_model = ALGORITHM_DICT[algorithm_str](parameter_array)
reg_model = reg_model.fit(input_train_data_matrix, actual_bg_train_array) #Fit model to training data
valid_prediction = reg_model.predict(input_valid_data_matrix) #Predict new data
valid_prediction[valid_prediction < MINIMUM_BG] = MINIMUM_BG #Set minimum bg level
valid_prediction[valid_prediction > MAXIMUM_BG] = MAXIMUM_BG #Set maximum bg level
#The cost function is the root mean squared error between the prediction and the real value.
#We want to use the model that has parameters that lead to the smallest cost function.
error_value = math.sqrt(metrics.mean_squared_error(actual_bg_valid_array, valid_prediction))
#Keep the best model
if new_parameter_bool or error_value < best_error_value:
new_parameter_bool = False
best_error_value = error_value
best_reg_model = reg_model
best_parameter_array = parameter_array
                        if PRINT_PARAMETERS: print parameter_array, error_value
print " Best Validation RMSE: " + str(best_error_value)
print " Best Validation Parameter Value: " + str(best_parameter_array)
print " Best Validation Parameters: " + str(best_reg_model.get_params())
test_prediction = best_reg_model.predict(input_test_data_matrix)
test_prediction[test_prediction < MINIMUM_BG] = MINIMUM_BG #Set minimum bg level
test_prediction[test_prediction > MAXIMUM_BG] = MAXIMUM_BG #Set maximum bg level
analyze_ml_data(actual_bg_test_array, test_prediction, time_bg_test_array, SHOW_PRED_PLOT, SAVE_PRED_PLOT, SHOW_CLARKE_PLOT, SAVE_CLARKE_PLOT, id_str, algorithm_str,
"Pred" + str(pred_minutes) + "Data" + str(data_minutes)) #Analyze data
if SAVE_PRED_DATA:
save_data(actual_bg_test_array, "{}pred{}data{}{}_actual_bg_array".format(id_str, str(pred_minutes), str(data_minutes), algorithm_str))
save_data(test_prediction, "{}pred{}data{}{}_pred_bg_array".format(id_str, str(pred_minutes), str(data_minutes), algorithm_str))
                    if SAVE_ALGORITHM:
save_data(best_reg_model, "{}pred{}data{}{}_object".format(id_str, str(pred_minutes), str(data_minutes), algorithm_str))
#Run the main function
if __name__ == "__main__":
main()
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import page_sets
import re
from measurements import timeline_controller
from metrics import speedindex
from telemetry import benchmark
from telemetry.core import util
from telemetry.page import page_test
from telemetry.timeline import async_slice as async_slice_module
from telemetry.timeline import slice as slice_module
from telemetry.value import scalar
class _ServiceWorkerTimelineMetric(object):
def AddResultsOfCounters(self, process, counter_regex_string, results):
counter_filter = re.compile(counter_regex_string)
for counter_name, counter in process.counters.iteritems():
if not counter_filter.search(counter_name):
continue
total = sum(counter.totals)
# Results objects cannot contain the '.' character, so remove that here.
sanitized_counter_name = counter_name.replace('.', '_')
results.AddValue(scalar.ScalarValue(
results.current_page, sanitized_counter_name, 'count', total))
results.AddValue(scalar.ScalarValue(
results.current_page, sanitized_counter_name + '_avg', 'count',
total / float(len(counter.totals))))
def AddResultsOfEvents(
self, process, thread_regex_string, event_regex_string, results):
thread_filter = re.compile(thread_regex_string)
event_filter = re.compile(event_regex_string)
for thread in process.threads.itervalues():
thread_name = thread.name.replace('/', '_')
if not thread_filter.search(thread_name):
continue
filtered_events = []
for event in thread.IterAllEvents():
event_name = event.name.replace('.', '_')
if event_filter.search(event_name):
filtered_events.append(event)
async_events_by_name = collections.defaultdict(list)
sync_events_by_name = collections.defaultdict(list)
for event in filtered_events:
if isinstance(event, async_slice_module.AsyncSlice):
async_events_by_name[event.name].append(event)
elif isinstance(event, slice_module.Slice):
sync_events_by_name[event.name].append(event)
for event_name, event_group in async_events_by_name.iteritems():
times = [e.duration for e in event_group]
self._AddResultOfEvent(thread_name, event_name, times, results)
for event_name, event_group in sync_events_by_name.iteritems():
times = [e.self_time for e in event_group]
self._AddResultOfEvent(thread_name, event_name, times, results)
def _AddResultOfEvent(self, thread_name, event_name, times, results):
total = sum(times)
biggest_jank = max(times)
# Results objects cannot contain the '.' character, so remove that here.
sanitized_event_name = event_name.replace('.', '_')
full_name = thread_name + '|' + sanitized_event_name
results.AddValue(scalar.ScalarValue(
results.current_page, full_name, 'ms', total))
results.AddValue(scalar.ScalarValue(
results.current_page, full_name + '_max', 'ms', biggest_jank))
results.AddValue(scalar.ScalarValue(
results.current_page, full_name + '_avg', 'ms', total / len(times)))
class _ServiceWorkerMeasurement(page_test.PageTest):
"""Measure Speed Index and TRACE_EVENTs"""
def __init__(self):
super(_ServiceWorkerMeasurement, self).__init__()
self._timeline_controller = timeline_controller.TimelineController()
self._speed_index = speedindex.SpeedIndexMetric()
self._page_open_times = collections.defaultdict(int)
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs([
'--enable-experimental-web-platform-features'
])
def WillNavigateToPage(self, page, tab):
self._timeline_controller.SetUp(page, tab)
self._timeline_controller.Start(tab)
self._speed_index.Start(page, tab)
def ValidateAndMeasurePage(self, page, tab, results):
tab.WaitForDocumentReadyStateToBeComplete(40)
self._timeline_controller.Stop(tab, results)
# Retrieve TRACE_EVENTs
timeline_metric = _ServiceWorkerTimelineMetric()
browser_process = self._timeline_controller.model.browser_process
filter_text = '(RegisterServiceWorker|'\
'UnregisterServiceWorker|'\
'ProcessAllocate|'\
'FindRegistrationForDocument|'\
'DispatchFetchEvent)'
timeline_metric.AddResultsOfEvents(
        browser_process, 'IOThread', filter_text, results)
# Record Speed Index
def SpeedIndexIsFinished():
return self._speed_index.IsFinished(tab)
util.WaitFor(SpeedIndexIsFinished, 60)
self._speed_index.Stop(page, tab)
# Distinguish the first and second load from the subsequent loads
url = str(page)
chart_prefix = 'page_load'
self._page_open_times[url] += 1
if self._page_open_times[url] == 1:
chart_prefix += '_1st'
elif self._page_open_times[url] == 2:
chart_prefix += '_2nd'
else:
chart_prefix += '_later'
self._speed_index.AddResults(tab, results, chart_prefix)
class _ServiceWorkerMicroBenchmarkMeasurement(page_test.PageTest):
"""Measure JS land values and TRACE_EVENTs"""
def __init__(self):
super(_ServiceWorkerMicroBenchmarkMeasurement, self).__init__()
self._timeline_controller = timeline_controller.TimelineController()
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs([
'--enable-experimental-web-platform-features'
])
def WillNavigateToPage(self, page, tab):
self._timeline_controller.SetUp(page, tab)
self._timeline_controller.Start(tab)
def ValidateAndMeasurePage(self, page, tab, results):
tab.WaitForJavaScriptExpression('window.done', 40)
self._timeline_controller.Stop(tab, results)
# Measure JavaScript-land
json = tab.EvaluateJavaScript('window.results || {}')
for key, value in json.iteritems():
results.AddValue(scalar.ScalarValue(
results.current_page, key, value['units'], value['value']))
# Retrieve TRACE_EVENTs
timeline_metric = _ServiceWorkerTimelineMetric()
browser_process = self._timeline_controller.model.browser_process
filter_text = '(RegisterServiceWorker|'\
'UnregisterServiceWorker|'\
'ProcessAllocate|'\
'FindRegistrationForDocument|'\
'DispatchFetchEvent)'
timeline_metric.AddResultsOfEvents(
        browser_process, 'IOThread', filter_text, results)
# TODO(simonhatch): Temporarily disabling (http://crbug.com/433943)
@benchmark.Disabled
class ServiceWorkerPerfTest(benchmark.Benchmark):
"""Performance test on public applications using ServiceWorker"""
test = _ServiceWorkerMeasurement
page_set = page_sets.ServiceWorkerPageSet
@classmethod
def Name(cls):
return 'service_worker.service_worker'
# Disabled due to redness on the tree. crbug.com/442752
# TODO(horo): Enable after the reference build newer than M39 will be rolled.
@benchmark.Disabled('reference')
class ServiceWorkerMicroBenchmarkPerfTest(benchmark.Benchmark):
"""This test measures the performance of pages using ServiceWorker.
  As a page set, two benchmark pages (many registrations, many concurrent
  fetches) and one application (Trained-to-thrill:
  https://jakearchibald.github.io/trained-to-thrill/) are included. Execution
  time of these pages is reported as Speed Index, and TRACE_EVENTs provide
  subsidiary detail for investigating performance regressions.
"""
test = _ServiceWorkerMicroBenchmarkMeasurement
page_set = page_sets.ServiceWorkerMicroBenchmarkPageSet
@classmethod
def Name(cls):
return 'service_worker.service_worker_micro_benchmark'
|
|
# Copyright 2015 Yahoo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from cinder import exception
from cinder import objects
from cinder.objects import fields
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import objects as test_objects
fake_consistencygroup = {
'id': fake.consistency_group_id,
'user_id': fake.user_id,
'project_id': fake.project_id,
'host': 'fake_host',
'availability_zone': 'fake_az',
'name': 'fake_name',
'description': 'fake_description',
'volume_type_id': fake.volume_type_id,
'status': fields.ConsistencyGroupStatus.CREATING,
'cgsnapshot_id': fake.cgsnapshot_id,
'source_cgid': None,
}
fake_cgsnapshot = {
'id': fake.cgsnapshot_id,
'user_id': fake.user_id,
'project_id': fake.project_id,
'name': 'fake_name',
'description': 'fake_description',
'status': 'creating',
'consistencygroup_id': fake.consistency_group_id,
}
class TestConsistencyGroup(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.sqlalchemy.api.consistencygroup_get',
return_value=fake_consistencygroup)
def test_get_by_id(self, consistencygroup_get):
consistencygroup = objects.ConsistencyGroup.get_by_id(
self.context, fake.consistency_group_id)
self._compare(self, fake_consistencygroup, consistencygroup)
consistencygroup_get.assert_called_once_with(
self.context, fake.consistency_group_id)
@mock.patch('cinder.db.sqlalchemy.api.model_query')
def test_get_by_id_no_existing_id(self, model_query):
model_query().filter_by().first.return_value = None
self.assertRaises(exception.ConsistencyGroupNotFound,
objects.ConsistencyGroup.get_by_id, self.context,
123)
@mock.patch('cinder.db.consistencygroup_create',
return_value=fake_consistencygroup)
def test_create(self, consistencygroup_create):
fake_cg = fake_consistencygroup.copy()
del fake_cg['id']
consistencygroup = objects.ConsistencyGroup(context=self.context,
**fake_cg)
consistencygroup.create()
self._compare(self, fake_consistencygroup, consistencygroup)
    def test_create_with_id_except_exception(self):
consistencygroup = objects.ConsistencyGroup(
context=self.context, **{'id': fake.consistency_group_id})
self.assertRaises(exception.ObjectActionError, consistencygroup.create)
@mock.patch('cinder.db.consistencygroup_update')
def test_save(self, consistencygroup_update):
consistencygroup = objects.ConsistencyGroup._from_db_object(
self.context, objects.ConsistencyGroup(), fake_consistencygroup)
consistencygroup.status = fields.ConsistencyGroupStatus.AVAILABLE
consistencygroup.save()
consistencygroup_update.assert_called_once_with(
self.context,
consistencygroup.id,
{'status': fields.ConsistencyGroupStatus.AVAILABLE})
def test_save_with_cgsnapshots(self):
consistencygroup = objects.ConsistencyGroup._from_db_object(
self.context, objects.ConsistencyGroup(), fake_consistencygroup)
cgsnapshots_objs = [objects.CGSnapshot(context=self.context, id=i)
for i in [fake.cgsnapshot_id, fake.cgsnapshot2_id,
fake.cgsnapshot3_id]]
cgsnapshots = objects.CGSnapshotList(objects=cgsnapshots_objs)
consistencygroup.name = 'foobar'
consistencygroup.cgsnapshots = cgsnapshots
self.assertEqual({'name': 'foobar',
'cgsnapshots': cgsnapshots},
consistencygroup.obj_get_changes())
self.assertRaises(exception.ObjectActionError, consistencygroup.save)
def test_save_with_volumes(self):
consistencygroup = objects.ConsistencyGroup._from_db_object(
self.context, objects.ConsistencyGroup(), fake_consistencygroup)
volumes_objs = [objects.Volume(context=self.context, id=i)
for i in [fake.volume_id, fake.volume2_id,
fake.volume3_id]]
volumes = objects.VolumeList(objects=volumes_objs)
consistencygroup.name = 'foobar'
consistencygroup.volumes = volumes
self.assertEqual({'name': 'foobar',
'volumes': volumes},
consistencygroup.obj_get_changes())
self.assertRaises(exception.ObjectActionError, consistencygroup.save)
@mock.patch('cinder.objects.cgsnapshot.CGSnapshotList.get_all_by_group')
@mock.patch('cinder.objects.volume.VolumeList.get_all_by_group')
def test_obj_load_attr(self, mock_vol_get_all_by_group,
mock_cgsnap_get_all_by_group):
consistencygroup = objects.ConsistencyGroup._from_db_object(
self.context, objects.ConsistencyGroup(), fake_consistencygroup)
# Test cgsnapshots lazy-loaded field
cgsnapshots_objs = [objects.CGSnapshot(context=self.context, id=i)
for i in [fake.cgsnapshot_id, fake.cgsnapshot2_id,
fake.cgsnapshot3_id]]
cgsnapshots = objects.CGSnapshotList(context=self.context,
objects=cgsnapshots_objs)
mock_cgsnap_get_all_by_group.return_value = cgsnapshots
self.assertEqual(cgsnapshots, consistencygroup.cgsnapshots)
mock_cgsnap_get_all_by_group.assert_called_once_with(
self.context, consistencygroup.id)
# Test volumes lazy-loaded field
volume_objs = [objects.Volume(context=self.context, id=i)
for i in [fake.volume_id, fake.volume2_id,
fake.volume3_id]]
volumes = objects.VolumeList(context=self.context, objects=volume_objs)
mock_vol_get_all_by_group.return_value = volumes
self.assertEqual(volumes, consistencygroup.volumes)
mock_vol_get_all_by_group.assert_called_once_with(self.context,
consistencygroup.id)
@mock.patch('cinder.db.consistencygroup_destroy')
def test_destroy(self, consistencygroup_destroy):
consistencygroup = objects.ConsistencyGroup(
context=self.context, id=fake.consistency_group_id)
consistencygroup.destroy()
self.assertTrue(consistencygroup_destroy.called)
admin_context = consistencygroup_destroy.call_args[0][0]
self.assertTrue(admin_context.is_admin)
@mock.patch('cinder.db.sqlalchemy.api.consistencygroup_get')
def test_refresh(self, consistencygroup_get):
db_cg1 = fake_consistencygroup.copy()
db_cg2 = db_cg1.copy()
db_cg2['description'] = 'foobar'
# On the second consistencygroup_get, return the ConsistencyGroup with
# an updated description
consistencygroup_get.side_effect = [db_cg1, db_cg2]
cg = objects.ConsistencyGroup.get_by_id(self.context,
fake.consistency_group_id)
self._compare(self, db_cg1, cg)
# description was updated, so a ConsistencyGroup refresh should have a
# new value for that field
cg.refresh()
self._compare(self, db_cg2, cg)
if six.PY3:
call_bool = mock.call.__bool__()
else:
call_bool = mock.call.__nonzero__()
consistencygroup_get.assert_has_calls([
mock.call(
self.context,
fake.consistency_group_id),
call_bool,
mock.call(
self.context,
fake.consistency_group_id)])
class TestConsistencyGroupList(test_objects.BaseObjectsTestCase):
@mock.patch('cinder.db.consistencygroup_get_all',
return_value=[fake_consistencygroup])
def test_get_all(self, consistencygroup_get_all):
consistencygroups = objects.ConsistencyGroupList.get_all(self.context)
self.assertEqual(1, len(consistencygroups))
TestConsistencyGroup._compare(self, fake_consistencygroup,
consistencygroups[0])
@mock.patch('cinder.db.consistencygroup_get_all_by_project',
return_value=[fake_consistencygroup])
def test_get_all_by_project(self, consistencygroup_get_all_by_project):
consistencygroups = objects.ConsistencyGroupList.get_all_by_project(
self.context, self.project_id)
self.assertEqual(1, len(consistencygroups))
TestConsistencyGroup._compare(self, fake_consistencygroup,
consistencygroups[0])
@mock.patch('cinder.db.consistencygroup_get_all',
return_value=[fake_consistencygroup])
def test_get_all_with_pagination(self, consistencygroup_get_all):
consistencygroups = objects.ConsistencyGroupList.get_all(
self.context, filters={'id': 'fake'}, marker=None, limit=1,
offset=None, sort_keys='id', sort_dirs='asc')
self.assertEqual(1, len(consistencygroups))
consistencygroup_get_all.assert_called_once_with(
self.context, filters={'id': 'fake'}, marker=None, limit=1,
offset=None, sort_keys='id', sort_dirs='asc')
TestConsistencyGroup._compare(self, fake_consistencygroup,
consistencygroups[0])
@mock.patch('cinder.db.consistencygroup_get_all_by_project',
return_value=[fake_consistencygroup])
def test_get_all_by_project_with_pagination(
self, consistencygroup_get_all_by_project):
consistencygroups = objects.ConsistencyGroupList.get_all_by_project(
self.context, self.project_id, filters={'id': 'fake'}, marker=None,
limit=1, offset=None, sort_keys='id', sort_dirs='asc')
self.assertEqual(1, len(consistencygroups))
consistencygroup_get_all_by_project.assert_called_once_with(
self.context, self.project_id, filters={'id': 'fake'}, marker=None,
limit=1, offset=None, sort_keys='id', sort_dirs='asc')
TestConsistencyGroup._compare(self, fake_consistencygroup,
consistencygroups[0])
|
|
"""
Functions
---------
.. autosummary::
:toctree: generated/
fmin_l_bfgs_b
"""
## License for the Python wrapper
## ==============================
## Copyright (c) 2004 David M. Cooke <[email protected]>
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
## DEALINGS IN THE SOFTWARE.
## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import array, asarray, float64, int32, zeros
from . import _lbfgsb
from .optimize import (MemoizeJac, OptimizeResult,
_check_unknown_options, wrap_function,
_approx_fprime_helper)
from scipy.sparse.linalg import LinearOperator
__all__ = ['fmin_l_bfgs_b', 'LbfgsInvHessProduct']
def fmin_l_bfgs_b(func, x0, fprime=None, args=(),
approx_grad=0,
bounds=None, m=10, factr=1e7, pgtol=1e-5,
epsilon=1e-8,
iprint=-1, maxfun=15000, maxiter=15000, disp=None,
callback=None, maxls=20):
"""
Minimize a function func using the L-BFGS-B algorithm.
Parameters
----------
func : callable f(x,*args)
Function to minimize.
x0 : ndarray
Initial guess.
fprime : callable fprime(x,*args), optional
The gradient of `func`. If None, then `func` returns the function
value and the gradient (``f, g = func(x, *args)``), unless
`approx_grad` is True in which case `func` returns only ``f``.
args : sequence, optional
Arguments to pass to `func` and `fprime`.
approx_grad : bool, optional
Whether to approximate the gradient numerically (in which case
`func` returns only the function value).
bounds : list, optional
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter. Use None or +-inf for one of ``min`` or
``max`` when there is no bound in that direction.
m : int, optional
The maximum number of variable metric corrections
used to define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many terms in an
approximation to it.)
factr : float, optional
The iteration stops when
``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
where ``eps`` is the machine precision, which is automatically
generated by the code. Typical values for `factr` are: 1e12 for
low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
high accuracy. See Notes for relationship to `ftol`, which is exposed
(instead of `factr`) by the `scipy.optimize.minimize` interface to
L-BFGS-B.
pgtol : float, optional
The iteration will stop when
``max{|proj g_i | i = 1, ..., n} <= pgtol``
where ``pg_i`` is the i-th component of the projected gradient.
epsilon : float, optional
Step size used when `approx_grad` is True, for numerically
calculating the gradient
iprint : int, optional
Controls the frequency of output. ``iprint < 0`` means no output;
``iprint = 0`` print only one line at the last iteration;
``0 < iprint < 99`` print also f and ``|proj g|`` every iprint iterations;
``iprint = 99`` print details of every iteration except n-vectors;
``iprint = 100`` print also the changes of active set and final x;
``iprint > 100`` print details of every iteration including x and g.
disp : int, optional
If zero, then no output. If a positive number, then this over-rides
`iprint` (i.e., `iprint` gets the value of `disp`).
maxfun : int, optional
Maximum number of function evaluations.
maxiter : int, optional
Maximum number of iterations.
callback : callable, optional
Called after each iteration, as ``callback(xk)``, where ``xk`` is the
current parameter vector.
maxls : int, optional
Maximum number of line search steps (per iteration). Default is 20.
Returns
-------
x : array_like
Estimated position of the minimum.
f : float
Value of `func` at the minimum.
d : dict
Information dictionary.
* d['warnflag'] is
- 0 if converged,
- 1 if too many function evaluations or too many iterations,
- 2 if stopped for another reason, given in d['task']
* d['grad'] is the gradient at the minimum (should be 0 ish)
* d['funcalls'] is the number of function calls made.
* d['nit'] is the number of iterations.
See also
--------
minimize: Interface to minimization algorithms for multivariate
functions. See the 'L-BFGS-B' `method` in particular. Note that the
`ftol` option is made available via that interface, while `factr` is
provided via this interface, where `factr` is the factor multiplying
the default machine floating-point precision to arrive at `ftol`:
``ftol = factr * numpy.finfo(float).eps``.
Notes
-----
License of L-BFGS-B (FORTRAN code):
The version included here (in fortran code) is 3.0
(released April 25, 2011). It was written by Ciyou Zhu, Richard Byrd,
and Jorge Nocedal <[email protected]>. It carries the following
condition for use:
This software is freely available, but we expect that all publications
describing work using this software, or all commercial products using it,
quote at least one of the references given below. This software is released
under the BSD License.
References
----------
* R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for Bound
Constrained Optimization, (1995), SIAM Journal on Scientific and
Statistical Computing, 16, 5, pp. 1190-1208.
* C. Zhu, R. H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
FORTRAN routines for large scale bound constrained optimization (1997),
ACM Transactions on Mathematical Software, 23, 4, pp. 550 - 560.
* J.L. Morales and J. Nocedal. L-BFGS-B: Remark on Algorithm 778: L-BFGS-B,
FORTRAN routines for large scale bound constrained optimization (2011),
ACM Transactions on Mathematical Software, 38, 1.
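Examples
--------
A minimal sketch with an illustrative quadratic objective and analytic
gradient (the objective, starting point, and tolerance below are chosen
only for demonstration):

>>> import numpy as np
>>> from scipy.optimize import fmin_l_bfgs_b
>>> def f(x):
...     return (x[0] - 1.0) ** 2 + (x[1] + 2.5) ** 2
>>> def grad(x):
...     return np.array([2.0 * (x[0] - 1.0), 2.0 * (x[1] + 2.5)])
>>> x_opt, f_opt, info = fmin_l_bfgs_b(f, x0=np.zeros(2), fprime=grad)
>>> info['warnflag']
0
>>> bool(np.allclose(x_opt, [1.0, -2.5], atol=1e-5))
True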
"""
# handle fprime/approx_grad
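# When `fprime` is omitted and `approx_grad` is False, `func` is expected to
# return ``(f, g)``; MemoizeJac caches the gradient from that combined call so
# that `fun` and `jac` below share a single evaluation of `func`.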
if approx_grad:
fun = func
jac = None
elif fprime is None:
fun = MemoizeJac(func)
jac = fun.derivative
else:
fun = func
jac = fprime
# build options
if disp is None:
disp = iprint
opts = {'disp': disp,
'iprint': iprint,
'maxcor': m,
'ftol': factr * np.finfo(float).eps,
'gtol': pgtol,
'eps': epsilon,
'maxfun': maxfun,
'maxiter': maxiter,
'callback': callback,
'maxls': maxls}
res = _minimize_lbfgsb(fun, x0, args=args, jac=jac, bounds=bounds,
**opts)
d = {'grad': res['jac'],
'task': res['message'],
'funcalls': res['nfev'],
'nit': res['nit'],
'warnflag': res['status']}
f = res['fun']
x = res['x']
return x, f, d
def _minimize_lbfgsb(fun, x0, args=(), jac=None, bounds=None,
disp=None, maxcor=10, ftol=2.2204460492503131e-09,
gtol=1e-5, eps=1e-8, maxfun=15000, maxiter=15000,
iprint=-1, callback=None, maxls=20, **unknown_options):
"""
Minimize a scalar function of one or more variables using the L-BFGS-B
algorithm.
Options
-------
disp : None or int
If `disp is None` (the default), then the supplied value of `iprint`
is used. If `disp is not None`, then it overrides the supplied `iprint`:
``disp == 0`` suppresses all output, and any other value is used as
`iprint`.
maxcor : int
The maximum number of variable metric corrections used to
define the limited memory matrix. (The limited memory BFGS
method does not store the full hessian but uses this many terms
in an approximation to it.)
ftol : float
The iteration stops when ``(f^k -
f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= ftol``.
gtol : float
The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
<= gtol``, where ``proj g_i`` is the i-th component of the
projected gradient.
eps : float
Step size used for numerical approximation of the Jacobian.
maxfun : int
Maximum number of function evaluations.
maxiter : int
Maximum number of iterations.
iprint : int, optional
Controls the frequency of output. ``iprint < 0`` means no output;
``iprint = 0`` prints only one line at the last iteration;
``0 < iprint < 99`` also prints f and ``|proj g|`` every `iprint` iterations;
``iprint = 99`` prints details of every iteration except n-vectors;
``iprint = 100`` also prints the changes of active set and final x;
``iprint > 100`` prints details of every iteration including x and g.
callback : callable, optional
Called after each iteration, as ``callback(xk)``, where ``xk`` is the
current parameter vector.
maxls : int, optional
Maximum number of line search steps (per iteration). Default is 20.
Notes
-----
The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
I.e., `factr` multiplies the default machine floating-point precision to
arrive at `ftol`.
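These options are normally supplied through `scipy.optimize.minimize`
rather than by calling this helper directly; a minimal sketch with an
illustrative objective and option values:

>>> from scipy.optimize import minimize
>>> res = minimize(lambda x: (x[0] - 3.0) ** 2, [0.0], method='L-BFGS-B',
...                options={'ftol': 1e-10, 'gtol': 1e-8, 'maxiter': 200})
>>> res.success
True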
"""
_check_unknown_options(unknown_options)
m = maxcor
epsilon = eps
pgtol = gtol
factr = ftol / np.finfo(float).eps
x0 = asarray(x0).ravel()
n, = x0.shape
if bounds is None:
bounds = [(None, None)] * n
if len(bounds) != n:
raise ValueError('length of x0 != length of bounds')
# unbounded variables must use None, not +-inf, for optimizer to work properly
bounds = [(None if l == -np.inf else l, None if u == np.inf else u) for l, u in bounds]
if disp is not None:
if disp == 0:
iprint = -1
else:
iprint = disp
n_function_evals, fun = wrap_function(fun, ())
if jac is None:
def func_and_grad(x):
f = fun(x, *args)
g = _approx_fprime_helper(x, fun, epsilon, args=args, f0=f)
return f, g
else:
def func_and_grad(x):
f = fun(x, *args)
g = jac(x, *args)
return f, g
nbd = zeros(n, int32)
low_bnd = zeros(n, float64)
upper_bnd = zeros(n, float64)
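# nbd encodes the bound type expected by the Fortran routine setulb:
# 0 -> unbounded, 1 -> lower bound only, 2 -> both lower and upper,
# 3 -> upper bound only (see the bounds_map construction below).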
bounds_map = {(None, None): 0,
(1, None): 1,
(1, 1): 2,
(None, 1): 3}
for i in range(0, n):
l, u = bounds[i]
if l is not None:
low_bnd[i] = l
l = 1
if u is not None:
upper_bnd[i] = u
u = 1
nbd[i] = bounds_map[l, u]
if not maxls > 0:
raise ValueError('maxls must be positive.')
x = array(x0, float64)
f = array(0.0, float64)
g = zeros((n,), float64)
wa = zeros(2*m*n + 5*n + 11*m*m + 8*m, float64)
iwa = zeros(3*n, int32)
task = zeros(1, 'S60')
csave = zeros(1, 'S60')
lsave = zeros(4, int32)
isave = zeros(44, int32)
dsave = zeros(29, float64)
task[:] = 'START'
n_iterations = 0
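# Reverse-communication loop: setulb reports what it needs via `task`.
# A task starting with 'FG' asks for the objective and gradient at the
# current `x`; 'NEW_X' marks the end of an iteration; anything else
# terminates the loop.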
while 1:
# x, f, g, wa, iwa, task, csave, lsave, isave, dsave = \
_lbfgsb.setulb(m, x, low_bnd, upper_bnd, nbd, f, g, factr,
pgtol, wa, iwa, task, iprint, csave, lsave,
isave, dsave, maxls)
task_str = task.tostring()
if task_str.startswith(b'FG'):
# The minimization routine wants f and g at the current x.
# Note that interruptions due to maxfun are postponed
# until the completion of the current minimization iteration.
# Overwrite f and g:
f, g = func_and_grad(x)
elif task_str.startswith(b'NEW_X'):
# new iteration
n_iterations += 1
if callback is not None:
callback(np.copy(x))
if n_iterations >= maxiter:
task[:] = 'STOP: TOTAL NO. of ITERATIONS REACHED LIMIT'
elif n_function_evals[0] > maxfun:
task[:] = ('STOP: TOTAL NO. of f AND g EVALUATIONS '
'EXCEEDS LIMIT')
else:
break
task_str = task.tostring().strip(b'\x00').strip()
if task_str.startswith(b'CONV'):
warnflag = 0
elif n_function_evals[0] > maxfun or n_iterations >= maxiter:
warnflag = 1
else:
warnflag = 2
# These two portions of the workspace are described in the mainlb
# subroutine in lbfgsb.f. See line 363.
s = wa[0: m*n].reshape(m, n)
y = wa[m*n: 2*m*n].reshape(m, n)
# See lbfgsb.f line 160 for this portion of the workspace.
# isave(31) = the total number of BFGS updates prior the current iteration;
n_bfgs_updates = isave[30]
n_corrs = min(n_bfgs_updates, maxcor)
hess_inv = LbfgsInvHessProduct(s[:n_corrs], y[:n_corrs])
return OptimizeResult(fun=f, jac=g, nfev=n_function_evals[0],
nit=n_iterations, status=warnflag, message=task_str,
x=x, success=(warnflag == 0), hess_inv=hess_inv)
class LbfgsInvHessProduct(LinearOperator):
"""Linear operator for the L-BFGS approximate inverse Hessian.
This operator computes the product of a vector with the approximate inverse
of the Hessian of the objective function, using the L-BFGS limited
memory approximation to the inverse Hessian, accumulated during the
optimization.
Objects of this class implement the ``scipy.sparse.linalg.LinearOperator``
interface.
Parameters
----------
sk : array_like, shape=(n_corr, n)
Array of `n_corr` most recent updates to the solution vector.
(See [1]).
yk : array_like, shape=(n_corr, n)
Array of `n_corr` most recent updates to the gradient. (See [1]).
References
----------
.. [1] Nocedal, Jorge. "Updating quasi-Newton matrices with limited
storage." Mathematics of computation 35.151 (1980): 773-782.
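Examples
--------
A small sketch with made-up correction pairs (the arrays below are
illustrative only):

>>> import numpy as np
>>> sk = np.array([[1.0, 0.0], [0.0, 1.0]])
>>> yk = np.array([[0.5, 0.0], [0.0, 2.0]])
>>> hess_inv = LbfgsInvHessProduct(sk, yk)
>>> hess_inv.matvec(np.array([1.0, 1.0])).tolist()
[2.0, 0.5]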
"""
def __init__(self, sk, yk):
"""Construct the operator."""
if sk.shape != yk.shape or sk.ndim != 2:
raise ValueError('sk and yk must have matching shape, (n_corrs, n)')
n_corrs, n = sk.shape
super(LbfgsInvHessProduct, self).__init__(
dtype=np.float64, shape=(n, n))
self.sk = sk
self.yk = yk
self.n_corrs = n_corrs
self.rho = 1 / np.einsum('ij,ij->i', sk, yk)
def _matvec(self, x):
"""Efficient matrix-vector multiply with the BFGS matrices.
This calculation is described in Section (4) of [1].
Parameters
----------
x : ndarray
An array with shape (n,) or (n,1).
Returns
-------
y : ndarray
The matrix-vector product
"""
s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
q = np.array(x, dtype=self.dtype, copy=True)
if q.ndim == 2 and q.shape[1] == 1:
q = q.reshape(-1)
alpha = np.zeros(n_corrs)
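# Two-loop recursion (Nocedal, 1980): the backward pass computes the
# alpha coefficients, the forward pass applies the stored corrections.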
for i in range(n_corrs-1, -1, -1):
alpha[i] = rho[i] * np.dot(s[i], q)
q = q - alpha[i]*y[i]
r = q
for i in range(n_corrs):
beta = rho[i] * np.dot(y[i], r)
r = r + s[i] * (alpha[i] - beta)
return r
def todense(self):
"""Return a dense array representation of this operator.
Returns
-------
arr : ndarray, shape=(n, n)
An array with the same shape and containing
the same data represented by this `LinearOperator`.
"""
s, y, n_corrs, rho = self.sk, self.yk, self.n_corrs, self.rho
I = np.eye(*self.shape, dtype=self.dtype)
Hk = I
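# Apply the inverse-BFGS update once per stored correction pair:
# H <- (I - rho s y^T) H (I - rho y s^T) + rho s s^T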
for i in range(n_corrs):
A1 = I - s[i][:, np.newaxis] * y[i][np.newaxis, :] * rho[i]
A2 = I - y[i][:, np.newaxis] * s[i][np.newaxis, :] * rho[i]
Hk = np.dot(A1, np.dot(Hk, A2)) + (rho[i] * s[i][:, np.newaxis] *
s[i][np.newaxis, :])
return Hk